[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-Yutong-Zhou-cv--Awesome-Text-to-Image":3,"tool-Yutong-Zhou-cv--Awesome-Text-to-Image":64},[4,17,27,35,43,56],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":16},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,3,"2026-04-05T11:01:52",[13,14,15],"开发框架","图像","Agent","ready",{"id":18,"name":19,"github_repo":20,"description_zh":21,"stars":22,"difficulty_score":23,"last_commit_at":24,"category_tags":25,"status":16},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",138956,2,"2026-04-05T11:33:21",[13,15,26],"语言模型",{"id":28,"name":29,"github_repo":30,"description_zh":31,"stars":32,"difficulty_score":23,"last_commit_at":33,"category_tags":34,"status":16},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",107662,"2026-04-03T11:11:01",[13,14,15],{"id":36,"name":37,"github_repo":38,"description_zh":39,"stars":40,"difficulty_score":23,"last_commit_at":41,"category_tags":42,"status":16},3704,"NextChat","ChatGPTNextWeb\u002FNextChat","NextChat 是一款轻量且极速的 AI 助手，旨在为用户提供流畅、跨平台的大模型交互体验。它完美解决了用户在多设备间切换时难以保持对话连续性，以及面对众多 AI 模型不知如何统一管理的痛点。无论是日常办公、学习辅助还是创意激发，NextChat 都能让用户随时随地通过网页、iOS、Android、Windows、MacOS 或 Linux 端无缝接入智能服务。\n\n这款工具非常适合普通用户、学生、职场人士以及需要私有化部署的企业团队使用。对于开发者而言，它也提供了便捷的自托管方案，支持一键部署到 Vercel 或 Zeabur 等平台。\n\nNextChat 的核心亮点在于其广泛的模型兼容性，原生支持 Claude、DeepSeek、GPT-4 及 Gemini Pro 等主流大模型，让用户在一个界面即可自由切换不同 AI 能力。此外，它还率先支持 MCP（Model Context Protocol）协议，增强了上下文处理能力。针对企业用户，NextChat 提供专业版解决方案，具备品牌定制、细粒度权限控制、内部知识库整合及安全审计等功能，满足公司对数据隐私和个性化管理的高标准要求。",87618,"2026-04-05T07:20:52",[13,26],{"id":44,"name":45,"github_repo":46,"description_zh":47,"stars":48,"difficulty_score":23,"last_commit_at":49,"category_tags":50,"status":16},2268,"ML-For-Beginners","microsoft\u002FML-For-Beginners","ML-For-Beginners 是由微软推出的一套系统化机器学习入门课程，旨在帮助零基础用户轻松掌握经典机器学习知识。这套课程将学习路径规划为 12 周，包含 26 节精炼课程和 52 
道配套测验，内容涵盖从基础概念到实际应用的完整流程，有效解决了初学者面对庞大知识体系时无从下手、缺乏结构化指导的痛点。\n\n无论是希望转型的开发者、需要补充算法背景的研究人员，还是对人工智能充满好奇的普通爱好者，都能从中受益。课程不仅提供了清晰的理论讲解，还强调动手实践，让用户在循序渐进中建立扎实的技能基础。其独特的亮点在于强大的多语言支持，通过自动化机制提供了包括简体中文在内的 50 多种语言版本，极大地降低了全球不同背景用户的学习门槛。此外，项目采用开源协作模式，社区活跃且内容持续更新，确保学习者能获取前沿且准确的技术资讯。如果你正寻找一条清晰、友好且专业的机器学习入门之路，ML-For-Beginners 将是理想的起点。",84991,"2026-04-05T10:45:23",[14,51,52,53,15,54,26,13,55],"数据工具","视频","插件","其他","音频",{"id":57,"name":58,"github_repo":59,"description_zh":60,"stars":61,"difficulty_score":10,"last_commit_at":62,"category_tags":63,"status":16},3128,"ragflow","infiniflow\u002Fragflow","RAGFlow 是一款领先的开源检索增强生成（RAG）引擎，旨在为大语言模型构建更精准、可靠的上下文层。它巧妙地将前沿的 RAG 技术与智能体（Agent）能力相结合，不仅支持从各类文档中高效提取知识，还能让模型基于这些知识进行逻辑推理和任务执行。\n\n在大模型应用中，幻觉问题和知识滞后是常见痛点。RAGFlow 通过深度解析复杂文档结构（如表格、图表及混合排版），显著提升了信息检索的准确度，从而有效减少模型“胡编乱造”的现象，确保回答既有据可依又具备时效性。其内置的智能体机制更进一步，使系统不仅能回答问题，还能自主规划步骤解决复杂问题。\n\n这款工具特别适合开发者、企业技术团队以及 AI 研究人员使用。无论是希望快速搭建私有知识库问答系统，还是致力于探索大模型在垂直领域落地的创新者，都能从中受益。RAGFlow 提供了可视化的工作流编排界面和灵活的 API 接口，既降低了非算法背景用户的上手门槛，也满足了专业开发者对系统深度定制的需求。作为基于 Apache 2.0 协议开源的项目，它正成为连接通用大模型与行业专有知识之间的重要桥梁。",77062,"2026-04-04T04:44:48",[15,14,13,26,54],{"id":65,"github_repo":66,"name":67,"description_en":68,"description_zh":69,"ai_summary_zh":69,"readme_en":70,"readme_zh":71,"quickstart_zh":72,"use_case_zh":73,"hero_image_url":74,"owner_login":75,"owner_name":76,"owner_avatar_url":77,"owner_bio":78,"owner_company":79,"owner_location":80,"owner_email":79,"owner_twitter":81,"owner_website":82,"owner_url":83,"languages":79,"stars":84,"forks":85,"last_commit_at":86,"license":87,"difficulty_score":88,"env_os":89,"env_gpu":89,"env_ram":89,"env_deps":90,"category_tags":93,"github_topics":94,"view_count":23,"oss_zip_url":79,"oss_zip_packed_at":79,"status":16,"created_at":105,"updated_at":106,"faqs":107,"releases":138},1012,"Yutong-Zhou-cv\u002FAwesome-Text-to-Image","Awesome-Text-to-Image","(ෆ`꒳´ෆ) A Survey on Text-to-Image Generation\u002FSynthesis.","Awesome-Text-to-Image 是一个整理文本生成图像领域资源的开源集合，汇集了论文、代码、数据集、评估指标等关键资料。它解决了该领域资源分散、查找困难的问题，为研究者和开发者提供一站式查询平台。通过结构化分类（如数据集、项目、论文与代码、评估指标等）和精选Best Collection列表，用户能快速定位最新成果。特别适合AI研究人员、算法工程师及创意设计师，帮助他们高效追踪技术动态。项目持续更新，最新版本2.0新增时间排序和专题分类，还包含CVPRW 2023收录的综述论文，确保内容紧跟前沿进展。无需复杂操作，直接访问即可获取权威资源，助力文本到图像生成技术的探索与应用。","# \u003Cp align=center>𝓐𝔀𝓮𝓼𝓸𝓶𝓮 𝓣𝓮𝔁𝓽📝-𝓽𝓸-𝓘𝓶𝓪𝓰𝓮🌇\u003C\u002Fp>\n\u003C!--# \u003Cp align=center>`Awesome Text📝-to-Image🌇`\u003C\u002Fp>-->\n\u003Cdiv align=center>\n\n\u003Cp>\n \n ![GitHub stars](https:\u002F\u002Fimg.shields.io\u002Fgithub\u002Fstars\u002FYutong-Zhou-cv\u002FAwesome-Text-to-Image.svg?color=red&style=for-the-badge) \n ![GitHub forks](https:\u002F\u002Fimg.shields.io\u002Fgithub\u002Fforks\u002FYutong-Zhou-cv\u002FAwesome-Text-to-Image.svg?style=for-the-badge) \n ![GitHub activity](https:\u002F\u002Fimg.shields.io\u002Fgithub\u002Flast-commit\u002FYutong-Zhou-cv\u002FAwesome-Text-to-Image?color=yellow&style=for-the-badge) \n ![GitHub issues](https:\u002F\u002Fimg.shields.io\u002Fgithub\u002Fissues\u002FYutong-Zhou-cv\u002FAwesome-Text-to-Image?style=for-the-badge)\n ![GitHub closed issues](https:\u002F\u002Fimg.shields.io\u002Fgithub\u002Fissues-closed\u002FYutong-Zhou-cv\u002FAwesome-Text-to-Image?color=inactive&style=for-the-badge)\n \n [![Awesome](https:\u002F\u002Fcdn.rawgit.com\u002Fsindresorhus\u002Fawesome\u002Fd7305f38d29fed78fa85652e3a63e154dd8e8829\u002Fmedia\u002Fbadge.svg)](https:\u002F\u002Fgithub.com\u002Fsindresorhus\u002Fawesome) \n 
[![Hits](https:\u002F\u002Fhits.seeyoufarm.com\u002Fapi\u002Fcount\u002Fincr\u002Fbadge.svg?url=https%3A%2F%2Fgithub.com%2FYutong-Zhou-cv%2Fawesome-Text-to-Image&count_bg=%23DD4B78&title_bg=%23555555&icon=jabber.svg&icon_color=%23E7E7E7&title=Hits(2023.05~)&edge_flat=false)](https:\u002F\u002Fhits.seeyoufarm.com)\n\u003C\u002Fp>\n\n𝓐 𝓬𝓸𝓵𝓵𝓮𝓬𝓽𝓲𝓸𝓷 𝓸𝓯 𝓻𝓮𝓼𝓸𝓾𝓻𝓬𝓮𝓼 𝓸𝓷 𝓽𝓮𝔁𝓽-𝓽𝓸-𝓲𝓶𝓪𝓰𝓮 𝓼𝔂𝓷𝓽𝓱𝓮𝓼𝓲𝓼\u002F𝓶𝓪𝓷𝓲𝓹𝓾𝓵𝓪𝓽𝓲𝓸𝓷 𝓽𝓪𝓼𝓴𝓼.\n \n\u003C\u002Fdiv>\n\n\u003C!--\n![Figure from paper](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FYutong-Zhou-cv_Awesome-Text-to-Image_readme_32fff393e6f2.png)\n> *From: [Hierarchical Text-Conditional Image Generation with CLIP Latents](https:\u002F\u002Fcdn.openai.com\u002Fpapers\u002Fdall-e-2.pdf)*\n-->\n\n## ⭐ Citation\n\nIf you find this paper and repo helpful for your research, please cite it as follows:\n\n```bibtex\n\n@inproceedings{zhou2023vision+,\n  title={Vision+ Language Applications: A Survey},\n  author={Zhou, Yutong and Shimada, Nobutaka},\n  booktitle={Proceedings of the IEEE\u002FCVF Conference on Computer Vision and Pattern Recognition},\n  pages={826--842},\n  year={2023}\n}\n\n```\n\n## 🎑 News\n> [!TIP]\n> **Version 1.0** (All-in-one version) can be found [here](https:\u002F\u002Fgithub.com\u002FYutong-Zhou-cv\u002FAwesome-Text-to-Image\u002Ftree\u002F2024-Version-1.0) and **stopped updating on 24\u002F02\u002F29**.\n* [24\u002F02\u002F29] Update **\"Awesome Text to Image\" Version 2.0**! *Paper With Code* and *Other Related Works* will also be gradually updated in March.\n* [23\u002F05\u002F26] 🔥 Add our survey paper \"[**Vision + Language Applications: A Survey**](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023W\u002FGCV\u002Fhtml\u002FZhou_Vision__Language_Applications_A_Survey_CVPRW_2023_paper.html)\" and a special [**Best Collection**](https:\u002F\u002Fgithub.com\u002FYutong-Zhou-cv\u002FAwesome-Text-to-Image\u002Fblob\u002Fmain\u002F%5BCVPRW%202023%F0%9F%8E%88%5D%20%20Best%20Collection.md) list!\n* [23\u002F04\u002F04] \"**Vision + Language Applications: A Survey**\" was accepted by CVPRW2023.\n* [20\u002F10\u002F13] **Awesome-Text-to-Image** repo was created.\n\n## *\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FYutong-Zhou-cv_Awesome-Text-to-Image_readme_eac0a2beaf02.png\" width=\"25\" \u002F> To Do*\n* - [ ] Add **Topic Order** list and **Chronological Order** list\n* - [x] Add [**Best Collection**](https:\u002F\u002Fgithub.com\u002FYutong-Zhou-cv\u002FAwesome-Text-to-Image\u002Fblob\u002Fmain\u002F%5BCVPRW%202023%F0%9F%8E%88%5D%20%20Best%20Collection.md) \n* - [x] Create [**⏳Recently Focused Papers**](https:\u002F\u002Fgithub.com\u002FYutong-Zhou-cv\u002FAwesome-Text-to-Image\u002Fblob\u002Fmain\u002F%E2%8F%B3Recently%20Focused%20Papers.md)\n\n## \u003Cspan id=\"head-content\"> *Content* \u003C\u002Fspan>\n* - [ ] [**1. Description**](#head1)\n\n* - [ ] [**2. Quantitative Evaluation Metrics**](https:\u002F\u002Fgithub.com\u002FYutong-Zhou-cv\u002FAwesome-Text-to-Image\u002Fblob\u002F2024-Version-2.0\u002FLists\u002F2-Quantitative%20Evaluation%20Metrics.md)\n \n* - [ ] [**3. Datasets**](https:\u002F\u002Fgithub.com\u002FYutong-Zhou-cv\u002FAwesome-Text-to-Image\u002Fblob\u002F2024-Version-2.0\u002FLists\u002F3-Datasets.md)  \n\n* - [ ] [**4. Project**](https:\u002F\u002Fgithub.com\u002FYutong-Zhou-cv\u002FAwesome-Text-to-Image\u002Fblob\u002F2024-Version-2.0\u002FLists\u002F4-Project.md)\n\n* - [ ] [5. 
Paper With Code](#head5)\n  * - [ ] [Text to Face👨🏻🧒👧🏼🧓🏽](#head-t2f)\n  * - [ ] [Specific Issues🤔](#head-si)\n  * - [ ] [**Survey**](https:\u002F\u002Fgithub.com\u002FYutong-Zhou-cv\u002FAwesome-Text-to-Image\u002Fblob\u002F2024-Version-2.0\u002FLists\u002F5.0-Survey.md)&emsp;&emsp;&nbsp;\u003Cimg src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FNumber%20of%20Papers-11-E83015?style=social\" alt=\"PaperNum\"\u002F>\n  * - [ ] [2025](#head-2025)&emsp;&emsp;&emsp;&nbsp;\u003Cimg src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FNumber%20of%20Papers-xx-B481BB?style=flat-square\" alt=\"PaperNum\"\u002F>\n  * - [ ] [2024](#head-2024)&emsp;&emsp;&emsp;&nbsp;\u003Cimg src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FNumber%20of%20Papers-xx-B481BB?style=flat-square\" alt=\"PaperNum\"\u002F>\n  * - [ ] [2023](#head-2023)&emsp;&emsp;&emsp;&nbsp;\u003Cimg src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FNumber%20of%20Papers-xx-90B44B?style=flat-square\" alt=\"PaperNum\"\u002F>\n  * - [x] [**2022**](https:\u002F\u002Fgithub.com\u002FYutong-Zhou-cv\u002FAwesome-Text-to-Image\u002Fblob\u002F2024-Version-2.0\u002FLists\u002F5.3-2022.md)&emsp;&emsp;&emsp;&nbsp;\u003Cimg src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FNumber%20of%20Papers-69-2EA9DF?style=flat-square\" alt=\"PaperNum\"\u002F>\n  * - [x] [**2021**](https:\u002F\u002Fgithub.com\u002FYutong-Zhou-cv\u002FAwesome-Text-to-Image\u002Fblob\u002F2024-Version-2.0\u002FLists\u002F5.2-2021.md)&emsp;&emsp;&emsp;&nbsp;\u003Cimg src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FNumber%20of%20Papers-31-F9BF45?style=flat-square\" alt=\"PaperNum\"\u002F>\n  * - [x] [**2016~2020**](https:\u002F\u002Fgithub.com\u002FYutong-Zhou-cv\u002FAwesome-Text-to-Image\u002Fblob\u002F2024-Version-2.0\u002FLists\u002F5.1-2016~2020.md)&ensp;\u003Cimg src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FNumber%20of%20Papers-46-E83015?style=flat-square\" alt=\"PaperNum\"\u002F>\n  \n* - [ ] [6. 
Other Related Works](#head6)\n  * - [ ] [📝Prompt Engineering📝](#head-pe)\n  * - [ ] [⭐Multimodality⭐](#head-mm)\n  * - [ ] [🛫Applications🛫](#head-app)\n  * - [ ] [Text+Image\u002FVideo → Image\u002FVideo](#head-ti2i)\n  * - [ ] [Text+Layout → Image](#head-tl2i)\n  * - [ ] [Others+Text+Image\u002FVideo → Image\u002FVideo](#head-oti2i)\n  * - [ ] [Layout\u002FMask → Image](#head-l2i)\n  * - [ ] [Label-set → Semantic maps](#head-l2s)\n  * - [ ] [Speech → Image](#head-s2i)\n  * - [ ] [Scene Graph → Image](#head-sg2i)\n  * - [ ] [Text → Visual Retrieval](#head-t2vr)\n  * - [ ] [Text → 3D\u002FMotion\u002FShape\u002FMesh\u002FObject...](#head-t2m)\n  * - [ ] [Text → Video](#head-t2v)\n  * - [ ] [Text → Music](#head-t2music)\n\n* [Contact Me](#head7)\n  \n* [Contributors](#head8)\n\n ## \u003Cspan id=\"head1\"> *Description* \u003C\u002Fspan>\n\n* In the last few decades, the fields of Computer Vision (CV) and Natural Language Processing (NLP) have seen several major technological breakthroughs in deep learning research. Recently, researchers have become interested in combining semantic information and visual information in these traditionally independent fields. \nA number of studies have been conducted on text-to-image synthesis techniques that transform input textual descriptions (keywords or sentences) into realistic images.\n\n* Papers, code, and datasets for the text-to-image task are available here.\n\n>🐌 Markdown Format:\n> * (Conference\u002FJournal Year) **Title**, First Author et al. [[Paper](URL)] [[Code](URL)] [[Project](URL)]\n\n## \u003Cspan id=\"head5\"> *Paper With Code* \u003C\u002Fspan>\n\n* \u003Cspan id=\"head-t2f\"> **Text to Face👨🏻🧒👧🏼🧓🏽**  \u003C\u002Fspan> \n    * (ECCV 2024) **PreciseControl: Enhancing Text-To-Image Diffusion Models with Fine-Grained Attribute Control**, Rishubh Parihar et al. [[Paper](https:\u002F\u002Fwww.arxiv.org\u002Fabs\u002F2408.05083)] [[Project](https:\u002F\u002Frishubhpar.github.io\u002FPreciseControl.home\u002F)] \n    * (arXiv preprint 2024) [💬 Dataset] **15M Multimodal Facial Image-Text Dataset**, Dawei Dai et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.08515)] \n    * (arXiv preprint 2024) [💬 3D] **Portrait3D: Text-Guided High-Quality 3D Portrait Generation Using Pyramid Representation and GANs Prior**, Yiqian Wu et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.10394v1)] \n    * (CVPR 2024) **CosmicMan: A Text-to-Image Foundation Model for Humans**, Shikai Li et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.01294)] [[Project](https:\u002F\u002Fcosmicman-cvpr2024.github.io\u002F)] \n    * (ICML 2024) **Fast Text-to-3D-Aware Face Generation and Manipulation via Direct Cross-modal Mapping and Geometric Regularization**, Jinlu Zhang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.06702)] [[Code](https:\u002F\u002Fgithub.com\u002FAria-Zhangjl\u002FE3-FaceNet)]    \n    * (NeurIPS 2023) **Inserting Anybody in Diffusion Models via Celeb Basis**, Ge Yuan et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.00926)] [[Project](https:\u002F\u002Fceleb-basis.github.io\u002F)] \n    * (IJACSA 2023) **Mukh-Oboyob: Stable Diffusion and BanglaBERT enhanced Bangla Text-to-Face Synthesis**, Aloke Kumar Saha et al. 
[[Paper](https:\u002F\u002Fthesai.org\u002FPublications\u002FViewPaper?Volume=14&Issue=11&Code=IJACSA&SerialNo=142)] [[Code](https:\u002F\u002Fgithub.com\u002FCodernob\u002FMukh-Oboyob)]\n    * (SIGGRAPH 2023) [💬 3D] **DreamFace: Progressive Generation of Animatable 3D Faces under Text Guidance**, Longwen Zhang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.03117)] [[Project](https:\u002F\u002Fsites.google.com\u002Fview\u002Fdreamface)] [[HuggingFace](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002FDEEMOSTECH\u002FChatAvatar)]\n    * (CVPR 2023) [💬 3D] **High-Fidelity 3D Face Generation from Natural Language Descriptions**, Menghua Wu et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.03302)] [[Code](https:\u002F\u002Fgithub.com\u002Fzhuhao-nju\u002Fdescribe3d)] [[Project](https:\u002F\u002Fmhwu2017.github.io\u002F)]\n    * (CVPR 2023) **Collaborative Diffusion for Multi-Modal Face Generation and Editing**, Ziqi Huang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.10530v1)] [[Code](https:\u002F\u002Fgithub.com\u002Fziqihuangg\u002FCollaborative-Diffusion)] [[Project](https:\u002F\u002Fziqihuangg.github.io\u002Fprojects\u002Fcollaborative-diffusion.html)]\n    * (Pattern Recognition 2023) **Where you edit is what you get: Text-guided image editing with region-based attention**, Changming Xiao et al. [[Paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0031320323001589)] [[Code](https:\u002F\u002Fgithub.com\u002FBig-Brother-Pikachu\u002FWhere2edit)]\n    * (arXiv preprint 2022) **Bridging CLIP and StyleGAN through Latent Alignment for Image Editing**, Wanfeng Zheng et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.04506)] \n    * (ACMMM 2022) **Learning Dynamic Prior Knowledge for Text-to-Face Pixel Synthesis**, Jun Peng et al. [[Paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3503161.3547818)]\n    * (ACMMM 2022) **Towards Open-Ended Text-to-Face Generation, Combination and Manipulation**, Jun Peng et al. [[Paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3503161.3547758)]\n    * (BMVC 2022) **clip2latent: Text driven sampling of a pre-trained StyleGAN using denoising diffusion and CLIP**, Justin N. M. Pinkney et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.02347v1)] [[Code](https:\u002F\u002Fgithub.com\u002Fjustinpinkney\u002Fclip2latent)]\n    * (arXiv preprint 2022) **ManiCLIP: Multi-Attribute Face Manipulation from Text**, Hao Wang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.00445)]\n    * (arXiv preprint 2022) **Generated Faces in the Wild: Quantitative Comparison of Stable Diffusion, Midjourney and DALL-E 2**, Ali Borji, [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.00586)] [[Code](https:\u002F\u002Fgithub.com\u002Faliborji\u002FGFW)] [[Data](https:\u002F\u002Fdrive.google.com\u002Ffile\u002Fd\u002F1EhbUK64J3d0_chmD2mpBuWB-Ic7LeFlP\u002Fview)]\n    * (arXiv preprint 2022) **Text-Free Learning of a Natural Language Interface for Pretrained Face Generators**, Xiaodan Du et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2209.03953)] [[Code](https:\u002F\u002Fgithub.com\u002Fduxiaodan\u002FFast_text2StyleGAN)]\n    * (Knowledge-Based Systems-2022) **CMAFGAN: A Cross-Modal Attention Fusion based Generative Adversarial Network for attribute word-to-face synthesis**, Xiaodong Luo et al. 
[[Paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0950705122008863)]\n    * (Neural Networks-2022) **DualG-GAN, a Dual-channel Generator based Generative Adversarial Network for text-to-face synthesis**, Xiaodong Luo et al. [[Paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0893608022003161)]\n    * (arXiv preprint 2022) **Text-to-Face Generation with StyleGAN2**, D. M. A. Ayanthi et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.12512)]\n    * (CVPR 2022) **StyleT2I: Toward Compositional and High-Fidelity Text-to-Image Synthesis**, Zhiheng Li et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.15799)] [[Code](https:\u002F\u002Fgithub.com\u002Fzhihengli-UR\u002FStyleT2I)]\n    * (arXiv preprint 2022) **StyleT2F: Generating Human Faces from Textual Description Using StyleGAN2**, Mohamed Shawky Sabae et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.07924)] [[Code](https:\u002F\u002Fgithub.com\u002FDarkGeekMS\u002FRetratista)]\n    * (CVPR 2022) **AnyFace: Free-style Text-to-Face Synthesis and Manipulation**, Jianxin Sun et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.15334)] \n    * (IEEE Transactions on Network Science and Engineering-2022) **TextFace: Text-to-Style Mapping based Face Generation and Manipulation**, Xianxu Hou et al. [[Paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F9737433)]\n    * (CVPR 2021) **TediGAN: Text-Guided Diverse Image Generation and Manipulation**, Weihao Xia et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2012.03308.pdf)] [[Extended Version](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2104.08910.pdf)][[Code](https:\u002F\u002Fgithub.com\u002FIIGROUP\u002FTediGAN)] [[Dataset](https:\u002F\u002Fgithub.com\u002FIIGROUP\u002FMulti-Modal-CelebA-HQ-Dataset)] [[Colab](https:\u002F\u002Fcolab.research.google.com\u002Fgithub\u002Fweihaox\u002FTediGAN\u002Fblob\u002Fmain\u002Fplayground.ipynb)] [[Video](https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=L8Na2f5viAM)] \n    * (FG 2021) **Generative Adversarial Network for Text-to-Face Synthesis and Manipulation with Pretrained BERT Model**, Yutong Zhou et al. [[Paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9666791)] \n    * (ACMMM 2021) **Multi-caption Text-to-Face Synthesis: Dataset and Algorithm**, Jianxin Sun et al. [[Paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3474085.3475391)] [[Code](https:\u002F\u002Fgithub.com\u002Fcripac-sjx\u002FSEA-T2F)]\n    * (ACMMM 2021) **Generative Adversarial Network for Text-to-Face Synthesis and Manipulation**, Yutong Zhou. [[Paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3474085.3481026)]\n    * (WACV 2021) **Faces a la Carte: Text-to-Face Generation via Attribute Disentanglement**, Tianren Wang et al. [[Paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FWACV2021\u002Fpapers\u002FWang_Faces_a_la_Carte_Text-to-Face_Generation_via_Attribute_Disentanglement_WACV_2021_paper.pdf)] \n    * (arXiv preprint 2019) **FTGAN: A Fully-trained Generative Adversarial Networks for Text to Face Generation**, Xiang Chen et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1904.05729)]\n\n[\u003Cu>\u003C🎯Back to Top>\u003C\u002Fu>](#head-content)\n\n* \u003Cspan id=\"head-si\"> **Specific Issues🤔**  \u003C\u002Fspan>\n    * (arXiv preprint 2026) [🖼️ Aesthetic Dataset] **Moonworks Lunara Aesthetic Dataset**, Yan Wang et al. 
[[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2601.07941)] [[Dataset](https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002Fmoonworks\u002Flunara-aesthetic)] \n    * (arXiv preprint 2026) [📸 Variation Dataset] **Moonworks Lunara Aesthetic II: An Image Variation Dataset** Yan Wang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2602.01666)] [[Dataset](https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002Fmoonworks\u002Flunara-aesthetic-image-variations)] \n    * (arXiv preprint 2025) [💬 Differentiable Object Counting] **YOLO-Count: Differentiable Object Counting for Text-to-Image Generation**, Guanning Zeng et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2508.00728)] \n    * (arXiv preprint 2024) [💬 Gender Bias Alignment] **PopAlign: Population-Level Alignment for Fair Text-to-Image Generation**, Shufan Li et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.19668)] [[Code](https:\u002F\u002Fgithub.com\u002Fjacklishufan\u002FPopAlignSDXL)]\n    * (arXiv preprint 2024) [💬 Fine-Grained Feedback] **Beyond Thumbs Up\u002FDown: Untangling Challenges of Fine-Grained Feedback for Text-to-Image Generation**, Katherine M. Collins et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.16807)] \n    * (CVPR 2024-Best Paper) [💬 Human Feedback] **Rich Human Feedback for Text-to-Image Generation**, Youwei Liang et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.10240)] \n    * (ICLR 2024) [💬 Unauthorized Data] **DIAGNOSIS: Detecting Unauthorized Data Usages in Text-to-image Diffusion Models**, Zhenting Wang et al.  [[Paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=f8S3aLm0Vp)] [[Code](https:\u002F\u002Fgithub.com\u002FZhentingWang\u002FDIAGNOSIS)]\n    * (CVPR 2024) [💬 Open-set Bias Detection] **OpenBias: Open-set Bias Detection in Text-to-Image Generative Models**, Moreno D'Incà et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.07990)] \n    * (arXiv preprint 2024) [💬 Spatial Consistency] **Getting it Right: Improving Spatial Consistency in Text-to-Image Models**, Agneet Chatterjee et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.01197)] [[Project](https:\u002F\u002Fspright-t2i.github.io\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002FSPRIGHT-T2I\u002FSPRIGHT)] [[Dataset](https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002FSPRIGHT-T2I\u002Fspright)]\n    * (arXiv preprint 2024) [💬 Safety] **SafeGen: Mitigating Unsafe Content Generation in Text-to-Image Models**, Xinfeng Li et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.06666)] [[Code](https:\u002F\u002Fgithub.com\u002FLetterLiGo\u002Ftext-agnostic-governance)]\n    * (arXiv preprint 2024) [💬 Aesthetic] **Playground v2.5: Three Insights towards Enhancing Aesthetic Quality in Text-to-Image Generation**, Daiqing Li et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.17245)] [[Project](https:\u002F\u002Fblog.playgroundai.com\u002Fplayground-v2-5\u002F)] [[HuggingFace](https:\u002F\u002Fhuggingface.co\u002Fplaygroundai\u002Fplayground-v2.5-1024px-aesthetic)]\n    * (EMNLP 2023) [💬 Text Visualness] **Learning the Visualness of Text Using Large Vision-Language Models**, Gaurav Verma et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.10434)] [[Project](https:\u002F\u002Fgaurav22verma.github.io\u002Ftext-visualness\u002F)]\n    * (arXiv preprint 2023) [💬 Against Malicious Adaptation] **IMMA: Immunizing text-to-image Models against Malicious Adaptation**, Yijia Zheng et al.  
[[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.18815)] [[Project](https:\u002F\u002Fzhengyjzoe.github.io\u002Fimma\u002F)]\n    * (arXiv preprint 2023) [💬 Principled Recaptioning] **A Picture is Worth a Thousand Words: Principled Recaptioning Improves Image Generation**, Eyal Segalis et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.16656)]\n    * ⭐⭐(NeurIPS 2023) [💬 Holistic Evaluation] **Holistic Evaluation of Text-To-Image Models**, Tony Lee et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.04287)] [[Code](https:\u002F\u002Fgithub.com\u002Fstanford-crfm\u002Fhelm)] [[Project](https:\u002F\u002Fcrfm.stanford.edu\u002Fheim\u002Fv1.1.0\u002F)]\n    * (ICCV 2023) [💬 Safety] **Rickrolling the Artist: Injecting Backdoors into Text Encoders for Text-to-Image Synthesis**, Lukas Struppek et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.02408)] [[Code](https:\u002F\u002Fgithub.com\u002FLukasStruppek\u002FRickrolling-the-Artist)]\n    * (arXiv preprint 2023) [💬 Natural Attack Capability] **Intriguing Properties of Diffusion Models: A Large-Scale Dataset for Evaluating Natural Attack Capability in Text-to-Image Generative Models**, Takami Sato et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.15692)]\n    * (ACL 2023) [💬 Bias] **A Multi-dimensional study on Bias in Vision-Language models**, Gabriele Ruggeri et al.  [[Paper](https:\u002F\u002Faclanthology.org\u002F2023.findings-acl.403\u002F)]\n    * (FAACT 2023) [💬 Demographic Stereotypes] **Easily Accessible Text-to-Image Generation Amplifies Demographic Stereotypes at Large Scale**, Federico Bianchi et al.  [[Paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3593013.3594095)]\n    * (arXiv preprint 2023) [💬 Robustness] **Evaluating the Robustness of Text-to-image Diffusion Models against Real-world Attacks**, Hongcheng Gao et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.13103)] \n    * (CVPR 2023) [💬 Adversarial Robustness Analysis] **RIATIG: Reliable and Imperceptible Adversarial Text-to-Image Generation With Natural Prompts**, Han Liu et al.  [[Paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FLiu_RIATIG_Reliable_and_Imperceptible_Adversarial_Text-to-Image_Generation_With_Natural_Prompts_CVPR_2023_paper.html)] \n    * (arXiv preprint 2023) [💬 Textual Inversion] **Is This Loss Informative? Speeding Up Textual Inversion with Deterministic Objective Evaluation**, Anton Voronov et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.04841)] [[Code](https:\u002F\u002Fgithub.com\u002Fyandex-research\u002FDVAR)]\n    * (arXiv preprint 2022) [💬 Interpretable Intervention] **Not Just Pretty Pictures: Text-to-Image Generators Enable Interpretable Interventions for Robust Representations**, Jianhao Yuan et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.11237)]\n    * (arXiv preprint 2022) [💬 Ethical Image Manipulation] **Judge, Localize, and Edit: Ensuring Visual Commonsense Morality for Text-to-Image Generation**, Seongbeom Park et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.03507)]\n    * (arXiv preprint 2022) [💬 Creativity Transfer] **Inversion-Based Creativity Transfer with Diffusion Models**, Yuxin Zhang et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.13203)]\n    * (arXiv preprint 2022) [💬 Ambiguity] **Is the Elephant Flying? Resolving Ambiguities in Text-to-Image Generative Models**, Ninareh Mehrabi et al.  
[[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.12503)]\n    * (arXiv preprint 2022) [💬 Racial Politics] **A Sign That Spells: DALL-E 2, Invisual Images and The Racial Politics of Feature Space**, Fabian Offert et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.06323)]\n    * (arXiv preprint 2022) [💬 Privacy Analysis] **Membership Inference Attacks Against Text-to-image Generation Models**, Yixin Wu et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.00968)]\n    * (arXiv preprint 2022) [💬 Authenticity Evaluation for Fake Images] **DE-FAKE: Detection and Attribution of Fake Images Generated by Text-to-Image Diffusion Models**, Zeyang Sha et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.06998v1)] \n    * (arXiv preprint 2022) [💬 Cultural Bias] **The Biased Artist: Exploiting Cultural Biases via Homoglyphs in Text-Guided Image Generation Models**, Lukas Struppek et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2209.08891)] \n\n[\u003Cu>\u003C🎯Back to Top>\u003C\u002Fu>](#head-content)\n\n* \u003Cspan id=\"head-2025\"> **2025**  \u003C\u002Fspan> \n     * (arXiv preprint 2025) **GenExam: A Multidisciplinary Text-to-Image Exam**, Zhaokai Wang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2509.14232)]\n     * (arXiv preprint 2025) **RefVNLI: Towards Scalable Evaluation of Subject-driven Text-to-image Generation**, Aviv Slobodkin et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.17502)]\n     * (arXiv preprint 2025) **An Empirical Study of GPT-4o Image Generation Capabilities**, Sixiang Chen et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.05979)]\n\n\n* \u003Cspan id=\"head-2024\"> **2024**  \u003C\u002Fspan> \n     * (arXiv preprint 2024) **Flow Generator Matching**, Zemin Huang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.19310)]\n     * (EMNLP 2024) **Kandinsky 3: Text-to-Image Synthesis for Multifunctional Generative Framework**, Vladimir Arkhipkin et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.21061)] [[Code](https:\u002F\u002Fgithub.com\u002Fai-forever\u002FKandinsky-3)] [[Project](https:\u002F\u002Fai-forever.github.io\u002FKandinsky-3\u002F)] \n     * (arXiv preprint 2024) **Data Extrapolation for Text-to-image Generation on Small Datasets**, Senmao Ye and Fei Liu [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.01638)]\n     * ⭐⭐(arXiv preprint 2024) **Imagen 3**, ImagenTeam-Google [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.07009)]\n     * (arXiv preprint 2024) **MARS: Mixture of Auto-Regressive Models for Fine-grained Text-to-image Synthesis**, Wanggui He et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.07614)]\n     * (Kuaishou) **Kolors: Effective Training of Diffusion Model for Photorealistic Text-to-Image Synthesis**, Sixian Zhang et al. [[Paper](https:\u002F\u002Fgithub.com\u002FKwai-Kolors\u002FKolors\u002Fblob\u002Fmaster\u002Fimgs\u002FKolors_paper.pdf)] [[Code](https:\u002F\u002Fgithub.com\u002FKwai-Kolors\u002FKolors)] [[Project](https:\u002F\u002Fkwai-kolors.github.io\u002Fpost\u002Fpost-2\u002F)] \n     * (CVPR 2024) [💬Human Preferences] **Learning Multi-dimensional Human Preference for Text-to-Image Generation**, Sixian Zhang et al. 
[[Paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fpapers\u002FZhang_Learning_Multi-Dimensional_Human_Preference_for_Text-to-Image_Generation_CVPR_2024_paper.pdf)] [[Code](https:\u002F\u002Fgithub.com\u002Fwangbohan97\u002FKolors-MPS)] [[Project](https:\u002F\u002Fkwai-kolors.github.io\u002Fpost\u002Fpost-1\u002F)] \n     * (CVPR 2024) [💬 Text-to-layout → Text+Layout-to-Image] **Grounded Text-to-Image Synthesis with Attention Refocusing**, Quynh Phung et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.05427)] [[Project](https:\u002F\u002Fattention-refocusing.github.io\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002FAttention-Refocusing\u002Fattention-refocusing)] \n     * (arXiv preprint 2024) **Dimba: Transformer-Mamba Diffusion Models**, Zhengcong Fei et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.01159)]\n     * (arXiv preprint 2024) [💬 Generation and Editing] **MultiEdits: Simultaneous Multi-Aspect Editing with Text-to-Image Diffusion Models**, Mingzhen Huang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.00985)] [[Project](https:\u002F\u002Fmingzhenhuang.com\u002Fprojects\u002FMultiEdits.html)]\n     * (arXiv preprint 2024) **AutoStudio: Crafting Consistent Subjects in Multi-turn Interactive Image Generation**, Junhao Cheng et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.01388)] [[Project](https:\u002F\u002Fhowe183.github.io\u002FAutoStudio.io\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002Fdonahowe\u002FAutoStudio)] \n     * (arXiv preprint 2024) **TheaterGen: Character Management with LLM for Consistent Multi-turn Image Generation**, Junhao Cheng et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.18919)] [[Project](https:\u002F\u002Fhowe140.github.io\u002Ftheatergen.io\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002Fdonahowe\u002FTheatergen)] \n     * (CVPR 2024) **Ranni: Taming Text-to-Image Diffusion for Accurate Instruction Following**, Yutong Feng et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.17002)] [[Project](https:\u002F\u002Franni-t2i.github.io\u002FRanni\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002Fali-vilab\u002FRanni)] \n     * (arXiv preprint 2024) **CoMat: Aligning Text-to-Image Diffusion Model with Image-to-Text Concept Matching**, Dongzhi Jiang et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.03653)] [[Project](https:\u002F\u002Fcaraj7.github.io\u002Fcomat\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002FCaraJ7\u002FCoMat)] \n     * (arXiv preprint 2024) **TextCraftor: Your Text Encoder Can be Image Quality Controller**, Yanyu Li et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.18978)] \n     * (CVPR 2024) **ECLIPSE: A Resource-Efficient Text-to-Image Prior for Image Generations**, Maitreya Patel et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.04655)] [[Project](https:\u002F\u002Feclipse-t2i.vercel.app\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002Feclipse-t2i\u002Feclipse-inference)] [[Hugging Face](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002FECLIPSE-Community\u002FECLIPSE-Kandinsky-v2.2)]\n     * (arXiv preprint 2024) **SELMA: Learning and Merging Skill-Specific Text-to-Image Experts with Auto-Generated Data**, Jialu Li et al.  
[[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.06952)] [[Project](https:\u002F\u002Fselma-t2i.github.io\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002Fjialuli-luka\u002FSELMA)]\n     * (ICLR 2024) **PixArt-α: Fast Training of Diffusion Transformer for Photorealistic Text-to-Image Synthesis**, Junsong Chen et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.00426)] [[Project](https:\u002F\u002Fpixart-alpha.github.io\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002FPixArt-alpha\u002FPixArt-alpha)] [[Hugging Face](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002FPixArt-alpha\u002FPixArt-LCM)]\n     * (arXiv preprint 2024) **PixArt-Σ: Weak-to-Strong Training of Diffusion Transformer for 4K Text-to-Image Generation**, Junsong Chen et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.04692)] \n     * (arXiv preprint 2024) **PIXART-δ: Fast and Controllable Image Generation with Latent Consistency Models**, Junsong Chen et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.05252)] \n     * (CVPR 2024) **Discriminative Probing and Tuning for Text-to-Image Generation**, Leigang Qu et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.04321)] [[Project](https:\u002F\u002Fdpt-t2i.github.io\u002F)] \n     * (CVPR 2024) **RealCustom: Narrowing Real Text Word for Real-Time Open-Domain Text-to-Image Customization**, Mengqi Huang et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.00483)] [[Project](https:\u002F\u002Fcorleone-huang.github.io\u002Frealcustom\u002F)] \n     * ⭐(arXiv preprint 2024) **SDXL-Lightning: Progressive Adversarial Diffusion Distillation**, Shanchuan Lin et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.13929)] [[HuggingFace](https:\u002F\u002Fhuggingface.co\u002FByteDance\u002FSDXL-Lightning)] [[Demo](https:\u002F\u002Ffastsdxl.ai\u002F)]\n     * ⭐(arXiv preprint 2024) **RealCompo: Dynamic Equilibrium between Realism and Compositionality Improves Text-to-Image Diffusion Models**, Xinchen Zhang et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.12908)] [[Code](https:\u002F\u002Fgithub.com\u002FYangLing0818\u002FRealCompo)] \n     * (arXiv preprint 2024) **Learning Continuous 3D Words for Text-to-Image Generation**, Ta-Ying Cheng et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.08654)] [[Project](https:\u002F\u002Fttchengab.github.io\u002Fcontinuous_3d_words\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002Fttchengab\u002Fcontinuous_3d_words_code\u002F)]\n     * (arXiv preprint 2024) **DiffusionGPT: LLM-Driven Text-to-Image Generation System**, Jie Qin et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.10061)] [[Project](https:\u002F\u002Fdiffusiongpt.github.io\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002FDiffusionGPT\u002FDiffusionGPT)]\n     * (arXiv preprint 2024) **DressCode: Autoregressively Sewing and Generating Garments from Text Guidance**, Kai He et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.16465)] [[Project](https:\u002F\u002Fsites.google.com\u002Fview\u002Fprojectpage-dresscode)]\n\n[\u003Cu>\u003C🎯Back to Top>\u003C\u002Fu>](#head-content)\n\n* \u003Cspan id=\"head-2023\"> **2023**  \u003C\u002Fspan>\n     * (arXiv preprint 2023) **CoDi-2: In-Context, Interleaved, and Interactive Any-to-Any Generation**, Zineng Tang et al. 
[[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.18775)] [[Project](https:\u002F\u002Fcodi-2.github.io\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002Fi-Code\u002Ftree\u002Fmain\u002FCoDi-2)] \n     * (arXiv preprint 2023) **DiffBlender: Scalable and Composable Multimodal Text-to-Image Diffusion Models**, Sungnyun Kim et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.15194)] [[Code](https:\u002F\u002Fgithub.com\u002Fsungnyun\u002Fdiffblender)] [[Project](https:\u002F\u002Fsungnyun.github.io\u002Fdiffblender\u002F)] \n     * (arXiv preprint 2023) **ElasticDiffusion: Training-free Arbitrary Size Image Generation**, Moayed Haji-Ali et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.18822)] [[Project](https:\u002F\u002Felasticdiffusion.github.io\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002Fmoayedhajiali\u002Felasticdiffusion-official)] [[Demo](https:\u002F\u002Freplicate.com\u002Fmoayedhajiali\u002Felasticdiffusion)]\n     * (ICCV 2023) **BoxDiff: Text-to-Image Synthesis with Training-Free Box-Constrained Diffusion**, Jinheng Xie et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.10816)] [[Code](https:\u002F\u002Fgithub.com\u002Fshowlab\u002FBoxDiff)] \n     * (arXiv preprint 2023) **Late-Constraint Diffusion Guidance for Controllable Image Synthesis**, Chang Liu et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.11520)] [[Code](https:\u002F\u002Fgithub.com\u002FAlonzoLeeeooo\u002FLCDG)]\n     * (arXiv preprint 2023) **An Image is Worth Multiple Words: Multi-attribute Inversion for Constrained Text-to-Image Synthesis**, Aishwarya Agarwal et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.11919)] \n     * ⭐(arXiv preprint 2023) **UFOGen: You Forward Once Large Scale Text-to-Image Generation via Diffusion GANs**, Yanwu Xu et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.09257)] \n     * (ICCV 2023) **ITI-GEN: Inclusive Text-to-Image Generation**, Cheng Zhang et al. [[Paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fhtml\u002FZhang_ITI-GEN_Inclusive_Text-to-Image_Generation_ICCV_2023_paper.html)] [[Code](https:\u002F\u002Fgithub.com\u002Fhumansensinglab\u002FITI-GEN)] [[Project](https:\u002F\u002Fczhang0528.github.io\u002Fiti-gen)] \n     * (arXiv preprint 2023) **Mini-DALLE3: Interactive Text to Image by Prompting Large Language Models**, Zeqiang Lai et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.07653)] [[Code](https:\u002F\u002Fgithub.com\u002FZeqiang-Lai\u002FMini-DALLE3)] [[Demo](http:\u002F\u002F139.224.23.16:10085\u002F)] [[Project](https:\u002F\u002Fminidalle3.github.io\u002F)] \n     * (arXiv preprint 2023) [💬Evaluation] **GenEval: An Object-Focused Framework for Evaluating Text-to-Image Alignment**, Dhruba Ghosh et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.11513v1)] [[Code](https:\u002F\u002Fgithub.com\u002Fdjghosh13\u002Fgeneval)] \n     * ⭐(arXiv preprint 2023) **Kandinsky: an Improved Text-to-Image Synthesis with Image Prior and Latent Diffusion**, Anton Razzhigaev et al.  
[[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.03502)] [[Code](https:\u002F\u002Fgithub.com\u002Fai-forever\u002FKandinsky-2)] [[Demo](https:\u002F\u002Ffusionbrain.ai\u002Fen\u002Feditor\u002F)] [[Demo Video](https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=c7zHPc59cWU)] [[Hugging Face](https:\u002F\u002Fhuggingface.co\u002Fkandinsky-community)]\n     * ⭐⭐(ICCV 2023) **Adding Conditional Control to Text-to-Image Diffusion Models**, Lvmin Zhang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.05543)] [[Code](https:\u002F\u002Fgithub.com\u002Flllyasviel\u002FControlNet)] \n     * (ICCV 2023) **DiffCloth: Diffusion Based Garment Synthesis and Manipulation via Structural Cross-modal Semantic Alignment**, Xujie Zhang et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.11206)] \n     * (ICCV 2023) **Unsupervised Compositional Concepts Discovery with Text-to-Image Generative Models**, Nan Liu et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.05357)] [[Code](https:\u002F\u002Fgithub.com\u002Fnanlliu\u002FUnsupervised-Compositional-Concepts-Discovery)] [[Project](https:\u002F\u002Fenergy-based-model.github.io\u002Funsupervised-concept-discovery\u002F)] \n     * (arXiv preprint 2023) **Text-to-Image Generation for Abstract Concepts**, Jiayi Liao et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.14623)]\n     * (arXiv preprint 2023) **T2I-CompBench: A Comprehensive Benchmark for Open-world Compositional Text-to-image Generation**, Kaiyi Huang et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.06350)] [[Code](https:\u002F\u002Fgithub.com\u002FKarine-Huang\u002FT2I-CompBench)] [[Project](https:\u002F\u002Fkarine-h.github.io\u002FT2I-CompBench\u002F)] \n     * (arXiv preprint 2023) [💬 Evaluation] **Human Preference Score v2: A Solid Benchmark for Evaluating Human Preferences of Text-to-Image Synthesis**, Xiaoshi Wu et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.09341)] [[Code](https:\u002F\u002Fgithub.com\u002Ftgxs002\u002FHPSv2)]\n     * (arXiv preprint 2023) **Towards Unified Text-based Person Retrieval: A Large-scale Multi-Attribute and Language Search Benchmark**, Shuyu Yang et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.02898)] [[Code](https:\u002F\u002Fgithub.com\u002FShuyu-XJTU\u002FAPTM)] [[Project](https:\u002F\u002Fwww.zdzheng.xyz\u002Fpublication\u002FTowards-2023)]\n     * (arXiv preprint 2023) **Synthesizing Artistic Cinemagraphs from Text**, Aniruddha Mahapatra et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.02236)] [[Code](https:\u002F\u002Fgithub.com\u002Ftext2cinemagraph\u002Fartistic-cinemagraph)] [[Project](https:\u002F\u002Ftext2cinemagraph.github.io\u002Fwebsite\u002F)]\n     * (arXiv preprint 2023) **Detector Guidance for Multi-Object Text-to-Image Generation**, Luping Liu et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.02236)]\n     * (arXiv preprint 2023) **A-STAR: Test-time Attention Segregation and Retention for Text-to-image Synthesis**, Aishwarya Agarwal et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.14544)]\n     * (arXiv preprint 2023) [💬Evaluation] **ConceptBed: Evaluating Concept Learning Abilities of Text-to-Image Diffusion Models**, Maitreya Patel et al.  
[[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.04695)] [[Code](https:\u002F\u002Fgithub.com\u002FConceptBed\u002Fevaluations)] [[Project](https:\u002F\u002Fconceptbed.github.io\u002F)]\n     * ⭐(arXiv preprint 2023) **StyleDrop: Text-to-Image Generation in Any Style**, Kihyuk Sohn et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.00983)] [[Project](https:\u002F\u002Fstyledrop.github.io\u002F)]\n     * ⭐⭐(arXiv preprint 2023) **Prompt-Free Diffusion: Taking \"Text\" out of Text-to-Image Diffusion Models**, Xingqian Xu et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.16223)] [[Code](https:\u002F\u002Fgithub.com\u002FSHI-Labs\u002FPrompt-Free-Diffusion)] [[Hugging Face](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002Fshi-labs\u002FPrompt-Free-Diffusion)]\n     * ⭐⭐ (SIGGRAPH 2023) **Blended Latent Diffusion**, Omri Avrahami et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.02779)] [[Code](https:\u002F\u002Fgithub.com\u002Fomriav\u002Fblended-latent-diffusion)] [[Project](https:\u002F\u002Fomriavrahami.com\u002Fblended-latent-diffusion-page\u002F)]\n     * (CVPR 2023) [💬Controllable] **SpaText: Spatio-Textual Representation for Controllable Image Generation**, Omri Avrahami et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.14305)] [[Project](https:\u002F\u002Fomriavrahami.com\u002Fspatext\u002F)]\n     * ⭐⭐ (arXiv 2023) **The Chosen One: Consistent Characters in Text-to-Image Diffusion Models**, Omri Avrahami et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.10093)] [[Code](https:\u002F\u002Fgithub.com\u002FZichengDuan\u002FTheChosenOne)] [[Project](https:\u002F\u002Fomriavrahami.com\u002Fthe-chosen-one\u002F)]\n     * (CVPR 2023) [💬Stable Diffusion with Brain] **High-resolution image reconstruction with latent diffusion models from human brain activity**, Yu Takagi et al. [[Paper](https:\u002F\u002Fwww.biorxiv.org\u002Fcontent\u002F10.1101\u002F2022.11.18.517004v1)] \n     * (arXiv preprint 2023) **BLIP-Diffusion: Pre-trained Subject Representation for Controllable Text-to-Image Generation and Editing**, Dongxu Li et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.14720)] \n     * (arXiv preprint 2023) [💬Evaluation] **LLMScore: Unveiling the Power of Large Language Models in Text-to-Image Synthesis Evaluation**, Yujie Lu et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.11116)] [[Code](https:\u002F\u002Fgithub.com\u002FYujieLu10\u002FLLMScore)] \n     * (arXiv preprint 2023) **P+ : Extended Textual Conditioning in Text-to-Image Generation**, Andrey Voynov et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.09522)] [[Project](https:\u002F\u002Fprompt-plus.github.io\u002F)] \n     * (arXiv preprint 2023) **Taming Encoder for Zero Fine-tuning Image Customization with Text-to-Image Diffusion Models**, Xuhui Jia et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.02642)] \n     * (ICML 2023) **TR0N: Translator Networks for 0-Shot Plug-and-Play Conditional Generation**, Zhaoyan Liu et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.13742)] [[Code](https:\u002F\u002Fgithub.com\u002Flayer6ai-labs\u002Ftr0n)] [[Hugging Face](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002FLayer6\u002FTR0N)]\n     * (ICLR 2023) [💬3D]**DreamFusion: Text-to-3D using 2D Diffusion**, Ben Poole et al.  
[[Paper (arXiv)](https:\u002F\u002Farxiv.org\u002Fabs\u002F2209.14988)] [[Paper (OpenReview)](https:\u002F\u002Fopenreview.net\u002Fforum?id=FjNys5c7VyY)] [[Project](https:\u002F\u002Fdreamfusion3d.github.io\u002F)] [[Short Read](https:\u002F\u002Fwww.louisbouchard.ai\u002Fdreamfusion\u002F)]\n     * (ICLR 2023) **Training-Free Structured Diffusion Guidance for Compositional Text-to-Image Synthesis**, Weixi Feng et al.  [[Paper (arXiv)](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.05032)] [[Paper (OpenReview)](https:\u002F\u002Fopenreview.net\u002Fforum?id=PUIqjT4rzq7)] [[Code](https:\u002F\u002Fgithub.com\u002Fshunk031\u002Ftraining-free-structured-diffusion-guidance)]\n     * ⭐⭐(arXiv preprint 2023) **Pick-a-Pic: An Open Dataset of User Preferences for Text-to-Image Generation**, Yuval Kirstain et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.01569)] [[Code](https:\u002F\u002Fgithub.com\u002Fyuvalkirstain\u002FPickScore)] [[Dataset](https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002Fyuvalkirstain\u002Fpickapic_v1)] [[Online Application](https:\u002F\u002Fpickapic.io\u002F)] [[PickScore](https:\u002F\u002Fhuggingface.co\u002Fyuvalkirstain\u002FPickScore_v1)] \n     * (arXiv preprint 2023) **TTIDA: Controllable Generative Data Augmentation via Text-to-Text and Text-to-Image Models**, Yuwei Yin et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.08821)] \n     * (arXiv preprint 2023) [💬 Textual Inversion] **Controllable Textual Inversion for Personalized Text-to-Image Generation**, Jianan Yang et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.05265)] \n     * (arXiv preprint 2023) **Diffusion Explainer: Visual Explanation for Text-to-image Stable Diffusion**, Seongmin Lee et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.03509)] [[Project](https:\u002F\u002Fpoloclub.github.io\u002Fdiffusion-explainer\u002F)] \n     * ⭐⭐(Findings of ACL 2023) [💬 Multi-language-to-Image] **AltCLIP: Altering the Language Encoder in CLIP for Extended Language Capabilities**, Zhongzhi Chen et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.06679)] [[Code-AltDiffusion](https:\u002F\u002Fgithub.com\u002FFlagAI-Open\u002FFlagAI\u002Ftree\u002Fmaster\u002Fexamples\u002FAltDiffusion-m18)] [[Code-AltCLIP](https:\u002F\u002Fgithub.com\u002FFlagAI-Open\u002FFlagAI\u002Ftree\u002Fmaster\u002Fexamples\u002FAltCLIP-m18)] [[Hugging Face](https:\u002F\u002Fhuggingface.co\u002FBAAI\u002FAltDiffusion-m18)] \n     * (arXiv preprint 2023) [💬 Seed selection] **It is all about where you start: Text-to-image generation with seed selection**, Dvir Samuel et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.14530)] \n     * (arXiv preprint 2023) [💬 Audio\u002FSound\u002FMulti-language-to-Image] **GlueGen: Plug and Play Multi-modal Encoders for X-to-image Generation**, Can Qin et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.10056)] \n     * (arXiv preprint 2023) [💬Faithfulness Evaluation] **TIFA: Accurate and Interpretable Text-to-Image Faithfulness Evaluation with Question Answering**, Yushi Hu et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.11897)] [[Project](https:\u002F\u002Ftifa-benchmark.github.io\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002FYushi-Hu\u002Ftifa)] \n     * (arXiv preprint 2023) **InstantBooth: Personalized Text-to-Image Generation without Test-Time Finetuning**, Jing Shi et al. 
[[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.03411)] [[Project](https:\u002F\u002Fjshi31.github.io\u002FInstantBooth\u002F)]\n     * (TOMM 2023) **LFR-GAN: Local Feature Refinement based Generative Adversarial Network for Text-to-Image Generation**, Zijun Deng et al. [[Paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3589002)] [[Code](https:\u002F\u002Fgithub.com\u002FPKU-ICST-MIPL\u002FLFR-GAN_TOMM2023)] \n     * (ICCV 2023) **Expressive Text-to-Image Generation with Rich Text**, Songwei Ge et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.06720)] [[Code](https:\u002F\u002Fgithub.com\u002FSongweiGe\u002Frich-text-to-image)] [[Project](https:\u002F\u002Frich-text-to-image.github.io\u002F)] [[Demo](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002Fsongweig\u002Frich-text-to-image\u002Fdiscussions)]\n     * (arXiv preprint 2023) [💬Human Preferences] **ImageReward: Learning and Evaluating Human Preferences for Text-to-Image Generation**, Jiazheng Xu et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.05977)] [[Code](https:\u002F\u002Fgithub.com\u002FTHUDM\u002FImageReward)] \n     * (arXiv preprint 2023) **eDiff-I: Text-to-Image Diffusion Models with an Ensemble of Expert Denoisers**, Yogesh Balaji et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.01324)] [[Project](https:\u002F\u002Fresearch.nvidia.com\u002Flabs\u002Fdir\u002FeDiff-I\u002F)] \n     * (CVPR 2023) **GALIP: Generative Adversarial CLIPs for Text-to-Image Synthesis**, Ming Tao et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.12959)] [[Code](https:\u002F\u002Fgithub.com\u002Ftobran\u002FGALIP)]\n     * (CVPR 2023) [💬Human Evaluation] **Toward Verifiable and Reproducible Human Evaluation for Text-to-Image Generation**, Mayu Otani et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.01816)] \n     * (arXiv preprint 2023) **Text2Room: Extracting Textured 3D Meshes from 2D Text-to-Image Models**, Lukas Höllein et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.11989)] [[Project](https:\u002F\u002Flukashoel.github.io\u002Ftext-to-room\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002FlukasHoel\u002Ftext2room)] [[Video](https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=fjRnFL91EZc)]\n     * (arXiv preprint 2023) **Editing Implicit Assumptions in Text-to-Image Diffusion Models**, Hadas Orgad et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.08084)] [[Project](https:\u002F\u002Ftime-diffusion.github.io\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002Fbahjat-kawar\u002Ftime-diffusion)] \n     * ⭐⭐(arXiv preprint 2023) **Visual ChatGPT: Talking, Drawing and Editing with Visual Foundation Models**, Chenfei Wu et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.04671)] [[Code](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002Fvisual-chatgpt)]\n     * (arXiv preprint 2023) **X&Fuse: Fusing Visual Information in Text-to-Image Generation**, Yuval Kirstain et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.01000v1)]\n     * (CVPR 2023) [💬Stable Diffusion with Brain] **High-resolution image reconstruction with latent diffusion models from human brain activity**, Yu Takagi et al. 
[[Paper](https:\u002F\u002Fwww.biorxiv.org\u002Fcontent\u002F10.1101\u002F2022.11.18.517004v1)] [[Project](https:\u002F\u002Fsites.google.com\u002Fview\u002Fstablediffusion-with-brain\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002Fyu-takagi\u002FStableDiffusionReconstruction)]\n     * ⭐⭐(arXiv preprint 2023) **Universal Guidance for Diffusion Models**, Arpit Bansal et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.07121)] [[Code](https:\u002F\u002Fgithub.com\u002Farpitbansal297\u002FUniversal-Guided-Diffusion)] \n     * ⭐(arXiv preprint 2023) **Attend-and-Excite: Attention-Based Semantic Guidance for Text-to-Image Diffusion Models**, Hila Chefer et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.13826)] [[Project](https:\u002F\u002Fattendandexcite.github.io\u002FAttend-and-Excite\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002FAttendAndExcite\u002FAttend-and-Excite)]\n     * (BMVC 2023) **Divide & Bind Your Attention for Improved Generative Semantic Nursing**, Yumeng Li et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.10864)] [[Project](https:\u002F\u002Fsites.google.com\u002Fview\u002Fdivide-and-bind)] [[Code](https:\u002F\u002Fgithub.com\u002Fboschresearch\u002FDivide-and-Bind)]   \n     * (IEEE Transactions on Multimedia) **ALR-GAN: Adaptive Layout Refinement for Text-to-Image Synthesis**, Hongchen Tan et al. [[Paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10023990)] \n     * ⭐(CVPR 2023) **Multi-Concept Customization of Text-to-Image Diffusion**, Nupur Kumari et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.04488)] [[Project](https:\u002F\u002Fwww.cs.cmu.edu\u002F~custom-diffusion\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002Fadobe-research\u002Fcustom-diffusion)] [[Hugging Face](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002Fnupurkmr9\u002Fcustom-diffusion)]\n     * (CVPR 2023) **GLIGEN: Open-Set Grounded Text-to-Image Generation**, Yuheng Li et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.07093)] [[Code](https:\u002F\u002Fgithub.com\u002Fgligen\u002FGLIGEN)] [[Project](https:\u002F\u002Fgligen.github.io\u002F)] [[Hugging Face Demo](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002Fgligen\u002Fdemo)] \n     * (arXiv preprint 2023) **Attribute-Centric Compositional Text-to-Image Generation**, Yuren Cong et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.01413)] [[Project](https:\u002F\u002Fgithub.com\u002Fyrcong\u002FACTIG)] \n     * (arXiv preprint 2023) **Muse: Text-To-Image Generation via Masked Generative Transformers**, Huiwen Chang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.00704v1)] [[Project](https:\u002F\u002Fmuse-model.github.io\u002F)] \n\n[\u003Cu>\u003C🎯Back to Top>\u003C\u002Fu>](#head-content)\n\n## \u003Cspan id=\"head6\"> *6. Other Related Works* \u003C\u002Fspan>\n   * \u003Cspan id=\"head-pe\"> **📝Prompt Engineering📝** \u003C\u002Fspan> \n       * (CHI 2024) **PromptCharm: Text-to-Image Generation through Multi-modal Prompting and Refinement**, Zhijie Wang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.04014)] \n       * (arXiv preprint 2024) **Automated Black-box Prompt Engineering for Personalized Text-to-Image Generation**, Yutong He et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.19103)] \n       * (EMNLP 2023) **BeautifulPrompt: Towards Automatic Prompt Engineering for Text-to-Image Synthesis**, Tingfeng Cao et al. 
[[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.06752)] \n       * (arXiv preprint 2023) [💬Optimizing Prompts] **NeuroPrompts: An Adaptive Framework to Optimize Prompts for Text-to-Image Generation**, Shachar Rosenman et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.12229)] [[Video Demo](https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=Cmca_RWYn2g)] \n       * (arXiv preprint 2022) [💬Optimizing Prompts] **Optimizing Prompts for Text-to-Image Generation**, Yaru Hao et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.09611)] [[Code](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002FLMOps)] [[Hugging Face](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002Fmicrosoft\u002FPromptist)] \n       * (arXiv preprint 2022) [💬Aesthetic Image Generation] **Best Prompts for Text-to-Image Models and How to Find Them**, Nikita Pavlichenko et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2209.11711)] \n       * (arXiv preprint 2022) **A Taxonomy of Prompt Modifiers for Text-To-Image Generation**, Jonas Oppenlaender [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.13988)] \n       * (CHI 2022) **Design Guidelines for Prompt Engineering Text-to-Image Generative Models**, Vivian Liu et al. [[Paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3491102.3501825)] \n\n[\u003Cu>\u003C🎯Back to Top>\u003C\u002Fu>](#head-content)\n\n   * \u003Cspan id=\"head-mm\"> **⭐Multimodality⭐** \u003C\u002Fspan> \n       * (arXiv preprint 2024) **4M-21: An Any-to-Any Vision Model for Tens of Tasks and Modalities**, Roman Bachmann et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.09406)] [[4M-Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.06647)] [[Project](https:\u002F\u002F4m.epfl.ch\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002Fapple\u002Fml-4m\u002F)]\n         * 📚 Any-to-any, RGB-to-all(Caption, BBox, Semantic segmentation, depth, ...), Fine-grained generation & editing, Multimodal guidance, Any-to-RGB Retrieval, RGB-to-any retrieval, \n       * (arXiv preprint 2024) **Ctrl-X: Controlling Structure and Appearance for Text-To-Image Generation Without Guidance**, Kuan Heng Lin et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.07540)] [[Project](https:\u002F\u002Fgenforce.github.io\u002Fctrl-x\u002F)] \n         * 📚 Structure (natural images, canny maps, normal maps, wireframes, 3D meshes, etc.) + Image → Image, Structure (mask, 3D mesh, canny maps, depth maps, etc.) + Text → Image\n       * (arXiv preprint 2024) **Lumina-T2X: Transforming Text into Any Modality, Resolution, and Duration via Flow-based Large Diffusion Transformers**, Peng Gao et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.05945)] [[Code](https:\u002F\u002Fgithub.com\u002FAlpha-VLLM\u002FLumina-T2X)] \n         * 📚 Text → Image\u002FVideo\u002FAudio\u002F3D\u002FMusic\n       * (ICLR 2024) **Cross-Modal Contextualized Diffusion Models for Text-Guided Visual Generation and Editing**, Ling Yang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.16627v1)] [[Code](https:\u002F\u002Fgithub.com\u002FYangLing0818\u002FContextDiff?tab=readme-ov-file)]\n         * 📚 Text → Image, Text → Video\n       * (arXiv preprint 2024) **TMT: Tri-Modal Translation between Speech, Image, and Text by Processing Different Modalities as Different Languages**, Minsu Kim et al. 
[[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.16021v1)] \n         * 📚 Image → Text, Image → Speech, Text → Image, Speech → Image, Speech → Text, Text → Speech\n       * ⭐⭐(NeurIPS 2023) **CoDi: Any-to-Any Generation via Composable Diffusion**, Zineng Tang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.11846)] [[Project](https:\u002F\u002Fcodi-gen.github.io\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002Fi-Code\u002Ftree\u002Fmain\u002Fi-Code-V3)] \n         * 📚[Single-to-Single Generation] Text → Image, Audio → Image, Image → Video, Image → Audio, Audio → Text, Image → Text\n         * 📚[Multi-Outputs Joint Generation] Text → Video + Audio, Text → Text + Audio + Image, Text + Image → Text + Image\n         * 📚[Multiple Conditioning] Text + Audio → Image, Text + Image → Image, Text + Audio + Image → Image, Text + Audio → Video, Text + Image → Video, Video + Audio → Text, Image + Audio → Audio, Text + Image → Audio\n       * ⭐⭐(CVPR 2023) **ImageBind: One Embedding Space To Bind Them All**, Rohit Girdhar et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.05665)] [[Project](https:\u002F\u002Fai.facebook.com\u002Fblog\u002Fimagebind-six-modalities-binding-ai\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FImageBind)] \n         * 📚Image-to-Audio retrieval, Audio-to-Image retrieval, Text-to-Image+Audio, Audio+Image-to-Image, Audio-to-Image generation, Zero-shot text to audio retrieval and classification... \n       * ⭐(CVPR 2023) **Scaling up GANs for Text-to-Image Synthesis**, Minguk Kang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.05511)] [[Project](https:\u002F\u002Fmingukkang.github.io\u002FGigaGAN\u002F)] \n         * 📚Text-to-Image, Controllable image synthesis (Style Mixing, Prompt Interpolation, Prompt Mixing), Super Resolution (Text-conditioned, Unconditional)\n       * (arXiv preprint 2023) **DiffBlender: Scalable and Composable Multimodal Text-to-Image Diffusion Models**, Sungnyun Kim et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.15194)] [[Code](https:\u002F\u002Fgithub.com\u002Fsungnyun\u002Fdiffblender)] [[Project](https:\u002F\u002Fsungnyun.github.io\u002Fdiffblender\u002F)]\n         * 📚Text-to-Image, Multimodal controllable image synthesis, Text + Image + Spatial\u002FNon-spatial Tokens → Image\n       * (arXiv preprint 2023) **TextIR: A Simple Framework for Text-based Editable Image Restoration**, Yunpeng Bai et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.14736)] [[Code](https:\u002F\u002Fgithub.com\u002Fhaha-lisa\u002FRDM-Region-Aware-Diffusion-Model)] \n         * 📚Image Inpainting, Image Colorization, Image Super-resolution, Image Editing via Degradation\n       * (arXiv preprint 2023) **Modulating Pretrained Diffusion Models for Multimodal Image Synthesis**, Cusuh Ham et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.12764)]\n         * 📚Sketch-to-Image, Segmentation-to-Image, Text+Sketch-to-Image, Text+Segmentation-to-Image, Text+Sketch+Segmentation-to-Image\n       * (arXiv preprint 2023) **Muse: Text-To-Image Generation via Masked Generative Transformers**, Huiwen Chang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.00704v1)] [[Project](https:\u002F\u002Fmuse-model.github.io\u002F)] \n         * 📚Text-to-Image, Zero-shot+Mask-free editing, Zero-shot Inpainting\u002FOutpainting\n       * (arXiv preprint 2022) **Versatile Diffusion: Text, Images and Variations All in One Diffusion Model**, Xingqian Xu et al. 
[[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.08332)] [[Code](https:\u002F\u002Fgithub.com\u002FSHI-Labs\u002FVersatile-Diffusion)] [[Hugging Face](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002Fshi-labs\u002FVersatile-Diffusion)]\n         * 📚Text-to-Image, Image-Variation, Image-to-Text, Disentanglement, Text+Image-Guided Generation, Editable I2T2I\n       * (arXiv preprint 2022) **Frido: Feature Pyramid Diffusion for Complex Scene Image Synthesis**, Wan-Cyuan Fan et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.13753)] [[Code](https:\u002F\u002Fgithub.com\u002Fdavidhalladay\u002FFrido)]\n         * 📚Text-to-Image, Scene Graph to Image, Layout-to-Image, Unconditional Image Generation\n       * (arXiv preprint 2022) **NUWA-Infinity: Autoregressive over Autoregressive Generation for Infinite Visual Synthesis**, Chenfei Wu et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.09814)] [[Code](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002FNUWA)] [[Project](https:\u002F\u002Fnuwa-infinity.microsoft.com\u002F#\u002F)]\n         * 📚Unconditional Image Generation(HD), Text-to-Image(HD), Image Animation(HD), Image Outpainting(HD), Text-to-Video(HD)\n       * (ECCV 2022) **NÜWA: Visual Synthesis Pre-training for Neural visUal World creAtion**, Chenfei Wu et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.12417)] [[Code](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002FNUWA)]\n         * **Multimodal Pretrained Model for Multi-tasks🎄**: Text-To-Image, Sketch-to-Image, Image Completion, Text-Guided Image Manipulation, Text-to-Video, Video Prediction, Sketch-to-Video, Text-Guided Video Manipulation\n       * (ACMMM 2022) **Rethinking Super-Resolution as Text-Guided Details Generation**, Chenxi Ma et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.06604)] \n         * 📚Text-to-Image, High-resolution, Text-guided High-resolution\n       * (arXiv preprint 2022) **Discrete Contrastive Diffusion for Cross-Modal and Conditional Generation**, Ye Zhu et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.07771)] [[Code](https:\u002F\u002Fgithub.com\u002FL-YeZhu\u002FCDCD)] \n         * 📚Text-to-Image, Dance-to-Music, Class-to-Image\n       * (arXiv preprint 2022) **M6-Fashion: High-Fidelity Multi-modal Image Generation and Editing**, Zhikang Li et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.11705)] \n         * 📚Text-to-Image, Unconditional Image Generation, Local-editing, Text-guided Local-editing, In\u002FOut-painting, Style-mixing\n       * (CVPR 2022) **Show Me What and Tell Me How: Video Synthesis via Multimodal Conditioning**, Yogesh Balaji et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.02573)] [[Code](https:\u002F\u002Fgithub.com\u002Fsnap-research\u002FMMVID)] [[Project](https:\u002F\u002Fsnap-research.github.io\u002FMMVID\u002F)]\n         * 📚Text-to-Video, Independent Multimodal Controls, Dependent Multimodal Controls\n       * ⭐⭐(CVPR 2022) **High-Resolution Image Synthesis with Latent Diffusion Models**, Robin Rombach et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.10752)] [[Code](https:\u002F\u002Fgithub.com\u002FCompVis\u002Flatent-diffusion)] [[Stable Diffusion Code](https:\u002F\u002Fgithub.com\u002FCompVis\u002Fstable-diffusion)]\n         * 📚Text-to-Image, Conditional Latent Diffusion, Super-Resolution, Inpainting\n       * ⭐⭐(arXiv preprint 2022) **Unifying Architectures, Tasks, and Modalities Through a Simple Sequence-to-Sequence Learning Framework**, Peng Wang et al. 
[[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.03052v1)]  [[Code](https:\u002F\u002Fgithub.com\u002Fofa-sys\u002Fofa)] [[Hugging Face](https:\u002F\u002Fhuggingface.co\u002FOFA-Sys)]\n         * 📚Text-to-Image Generation, Image Captioning, Text Summarization, Self-Supervised Image Classification, **[SOTA]** Referring Expression Comprehension, Visual Entailment, Visual Question Answering\n       * (arXiv preprint 2021) **Multimodal Conditional Image Synthesis with Product-of-Experts GANs**, Xun Huang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.05130)]  [[Project](https:\u002F\u002Fdeepimagination.cc\u002FPoE-GAN\u002F)]\n         * 📚Text-to-Image, Segmentation-to-Image, Text+Segmentation\u002FSketch\u002FImage→Image, Sketch+Segmentation\u002FImage→Image, Segmentation+Image→Image\n       * (NeurIPS 2021) **M6-UFC: Unifying Multi-Modal Controls for Conditional Image Synthesis via Non-Autoregressive Generative Transformers**, Zhu Zhang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2105.14211)] \n         * 📚Text-to-Image, Sketch-to-Image, Style Transfer, Image Inpainting, Multi-Modal Control to Image\n       * (arXiv preprint 2021) **ERNIE-ViLG: Unified Generative Pre-training for Bidirectional Vision-Language Generation**, Han Zhang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.15283)] \n         * A pre-trained **10-billion** parameter model: ERNIE-ViLG.\n         * A large-scale dataset of **145 million** high-quality Chinese image-text pairs.\n         * 📚Text-to-Image, Image Captioning,  Generative Visual Question Answering\n       * (arXiv preprint 2021) **L-Verse: Bidirectional Generation Between Image and Text**, Taehoon Kim et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.11133)] [[Code](https:\u002F\u002Fgithub.com\u002Ftgisaturday\u002FL-Verse)] \n         * 📚Text-To-Image, Image-To-Text, Image Reconstruction \n       * (arXiv preprint 2021) [💬Semantic Diffusion Guidance] **More Control for Free! Image Synthesis with Semantic Diffusion Guidance**, Xihui Liu et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.05744)] [[Project](https:\u002F\u002Fxh-liu.github.io\u002Fsdg\u002F)] \n         * 📚Text-To-Image, Image-To-Image, Text+Image → Image \n\n[\u003Cu>\u003C🎯Back to Top>\u003C\u002Fu>](#head-content)\n\n   * \u003Cspan id=\"head-app\"> **🛫Applications🛫** \u003C\u002Fspan> \n       * (arXiv preprint 2024) [💬Photo Retouching] **JarvisArt: Liberating Human Artistic Creativity via an Intelligent Photo Retouching Agent**, Yunlong Lin et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2506.17612)] [[Project](https:\u002F\u002Fjarvisart.vercel.app\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002FLYL1015\u002FJarvisArt)]\n       * (CVPR 2025) [💬Image Restoration] **Acquire and then Adapt: Squeezing out Text-to-Image Model for Image Restoration**, Junyuan Deng et al. 
[[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.15159)]\n       * (arXiv preprint 2024) [💬Multi-Concept Composition] **Gen4Gen: Generative Data Pipeline for Generative Multi-Concept Composition**, Chun-Hsiao Yeh et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.15504)] [[Project](https:\u002F\u002Fdanielchyeh.github.io\u002FGen4Gen\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002FlouisYen\u002FGen4Gen)]\n       * (arXiv preprint 2023) [💬3D Hairstyle Generation] **HAAR: Text-Conditioned Generative Model of 3D Strand-based Human Hairstyles**, Vanessa Sklyarova et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.11666)] [[Project](https:\u002F\u002Fhaar.is.tue.mpg.de\u002F)] \n       * (arXiv preprint 2023) [💬Image Super-Resolution] **Image Super-Resolution with Text Prompt Diffusion**, Zheng Chen et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.14282)] [[Code](https:\u002F\u002Fgithub.com\u002Fzhengchen1999\u002FPromptSR)] \n       * (2023) [💬Image Editing] **Generative Fill**. [[Project](https:\u002F\u002Fwww.adobe.com\u002Fproducts\u002Fphotoshop\u002Fgenerative-fill.html)] \n       * (arXiv preprint 2023) [💬LLMs] **LLM as an Art Director (LaDi): Using LLMs to improve Text-to-Media Generators**, Allen Roush et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.03716v1)]\n       * (arXiv preprint 2023) [💬Segmentation] **SegGen: Supercharging Segmentation Models with Text2Mask and Mask2Img Synthesis**, Hanrong Ye et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.03355)] [[Project](https:\u002F\u002Fseggenerator.github.io\u002F)]\n       * (arXiv preprint 2023) [💬Text Editing] **DiffUTE: Universal Text Editing Diffusion Model**, Haoxing Chen et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.10825)] \n       * (arXiv preprint 2023) [💬Text Character Generation] **TextDiffuser: Diffusion Models as Text Painters**, Jingye Chen et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.10855)] \n       * (CVPR 2023) [💬Open-Vocabulary Panoptic Segmentation] **Open-Vocabulary Panoptic Segmentation with Text-to-Image Diffusion Models**, Jiarui Xu et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.04803)] [[Code](https:\u002F\u002Fgithub.com\u002FNVlabs\u002FODISE)] [Project](https:\u002F\u002Fjerryxu.net\u002FODISE\u002F)] [HuggingFace](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002Fxvjiarui\u002FODISE)]\n       * (arXiv preprint 2023) [💬Chinese Text Character Generation] **GlyphDraw: Learning to Draw Chinese Characters in Image Synthesis Models Coherently**, Jian Ma et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.17870)] [[Project](https:\u002F\u002F1073521013.github.io\u002Fglyph-draw.github.io\u002F)] \n       * (arXiv preprint 2023) [💬Grounded Generation] **Guiding Text-to-Image Diffusion Model Towards Grounded Generation**, Ziyi Li et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.05221)] [[Code](https:\u002F\u002Fgithub.com\u002FLipurple\u002FGrounded-Diffusion)] [Project](https:\u002F\u002Flipurple.github.io\u002FGrounded_Diffusion\u002F)]\n       * (arXiv preprint 2022) [💬Semantic segmentation] **CLIP is Also an Efficient Segmenter: A Text-Driven Approach for Weakly Supervised Semantic Segmentation**, Yuqi Lin et al. 
[[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.09506)] [[Code](https:\u002F\u002Fgithub.com\u002Flinyq2117\u002FCLIP-ES)]\n       * (arXiv preprint 2022) [💬Unsupervised semantic segmentation] **Peekaboo: Text to Image Diffusion Models are Zero-Shot Segmentors**, Ryan Burgert et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.13224)]\n       * (SIGGRAPH Asia 2022) [💬Text+Speech → Gesture] **Rhythmic Gesticulator: Rhythm-Aware Co-Speech Gesture Synthesis with Hierarchical Neural Embeddings**, Tenglong Ao et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.01448)] [[Code](https:\u002F\u002Fgithub.com\u002FAubrey-ao\u002FHumanBehaviorAnimation)]\n       * (arXiv preprint 2022) [💬Text+Image+Shape → Image] **Shape-Guided Diffusion with Inside-Outside Attention**, Dong Huk Park et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.00210v1)] [[Project](https:\u002F\u002Fshape-guided-diffusion.github.io\u002F)]\n\n[\u003Cu>\u003C🎯Back to Top>\u003C\u002Fu>](#head-content)\n\n   * \u003Cspan id=\"head-ti2i\"> **Text+Image\u002FVideo → Image\u002FVideo** \u003C\u002Fspan> \n       * (arXiv preprint 2025) **In-Context Edit: Enabling Instructional Image Editing with In-Context Generation in Large Scale Diffusion Transformer**, Zechuan Zhang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.20690)] [[Project](https:\u002F\u002Friver-zhang.github.io\u002FICEdit-gh-pages\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002FRiver-Zhang\u002FICEdit)]\n       * (arXiv preprint 2025) **MAGREF: Masked Guidance for Any-Reference Video Generation**, Yufan Deng et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.23742v1)] [[Project](https:\u002F\u002Fmagref-video.github.io\u002Fmagref.github.io\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002FMAGREF-Video\u002FMAGREF)]\n       * (arXiv preprint 2025) **Generating Multi-Image Synthetic Data for Text-to-Image Customization**, Nupur Kumari et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.01720)] [[Project](https:\u002F\u002Fwww.cs.cmu.edu\u002F~syncd-project\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002Fnupurkmr9\u002Fsyncd)]\n       * (arXiv preprint 2024) [💬Style Transfer] **StyleShot: A Snapshot on Any Style**, Junyao Gao et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.01414)] [[Project](https:\u002F\u002Fgithub.com\u002Fopen-mmlab\u002FStyleShot)]\n       * (CVPR 2024) **SmartEdit: Exploring Complex Instruction-based Image Editing with Multimodal Large Language Models**, Yuzhou Huang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.06739)] [[Project](https:\u002F\u002Fyuzhou914.github.io\u002FSmartEdit\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002FTencentARC\u002FSmartEdit)]\n       * (arXiv preprint 2024) **MM-Diff: High-Fidelity Image Personalization via Multi-Modal Condition Integration**, Zhichao Wei et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.15059)] \n       * (CVPR 2024) **Instruct-Imagen: Image Generation with Multi-modal Instruction**, Hexiang Hu et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.01952)] [[Project](https:\u002F\u002Finstruct-imagen.github.io\u002F)]\n       * (arXiv preprint 2024) [💬NERF] **InseRF: Text-Driven Generative Object Insertion in Neural 3D Scenes**, Mohamad Shahbazi et al. 
[[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.05335)] [[Project](https:\u002F\u002Fmohamad-shahbazi.github.io\u002Finserf\u002F)]\n       * (arXiv preprint 2023) **ViCo: Plug-and-play Visual Condition for Personalized Text-to-image Generation**, Shaozhe Hao et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.00971)] [[Code](https:\u002F\u002Fgithub.com\u002Fhaoosz\u002FViCo)]\n       * (arXiv preprint 2023) [💬Video Editing] **MagicStick: Controllable Video Editing via Control Handle Transformations**, Yue Ma et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.03047v1)] [[Project](https:\u002F\u002Fmagic-stick-edit.github.io\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002Fmayuelala\u002FMagicStick)]\n       * (arXiv preprint 2023) **Lego: Learning to Disentangle and Invert Concepts Beyond Object Appearance in Text-to-Image Diffusion Models**, Chen Henry Wu et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.13833)] \n       * (ACMMM 2023) [💬Style Transfer] **ControlStyle: Text-Driven Stylized Image Generation Using Diffusion Priors**, Jingwen Chen et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.05463)] \n       * (ICCV 2023) **A Latent Space of Stochastic Diffusion Models for Zero-Shot Image Editing and Guidance**, Chen Henry Wu et al. [[Paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FWu_A_Latent_Space_of_Stochastic_Diffusion_Models_for_Zero-Shot_Image_ICCV_2023_paper.pdf)] [[Arxiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.05559)] [[Code](https:\u002F\u002Fgithub.com\u002Fchenwu98\u002Fcycle-diffusion)]\n       * (arXiv preprint 2023) [💬Multi-Subject Generation] **VideoDreamer: Customized Multi-Subject Text-to-Video Generation with Disen-Mix Finetuning**, Hong Chen et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.00990v1)] [[Project](https:\u002F\u002Fvideodreamer23.github.io\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002Fvideodreamer23\u002Fvideodreamer23.github.io)]\n       * (arXiv preprint 2023) [💬Video Editing] **CCEdit: Creative and Controllable Video Editing via Diffusion Models**, Ruoyu Feng et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.16496)] [[Demo video](https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=UQw4jq-igN4)] \n       * ⭐⭐ (SIGGRAPH Asia 2023) **Break-A-Scene: Extracting Multiple Concepts from a Single Image**, Omri Avrahami et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.16311)] [[Project](https:\u002F\u002Fomriavrahami.com\u002Fbreak-a-scene\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002Fgoogle\u002Fbreak-a-scene)]\n       * (arXiv preprint 2023) **Visual Instruction Inversion: Image Editing via Visual Prompting**, Thao Nguyen et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.14331)] [[Project](https:\u002F\u002Fthaoshibe.github.io\u002Fvisii\u002F)]\n       * (CVPR 2023) [💬3D Shape Editing] **ShapeTalk: A Language Dataset and Framework for 3D Shape Edits and Deformations**, Panos Achlioptas et al. [[Paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fpapers\u002FAchlioptas_ShapeTalk_A_Language_Dataset_and_Framework_for_3D_Shape_Edits_CVPR_2023_paper.pdf)] [[Code](https:\u002F\u002Fgithub.com\u002Foptas\u002Fchangeit3d)] [[Project](https:\u002F\u002Fchangeit3d.github.io\u002F)] \n       * (arXiv preprint 2023) [💬Colorization] **DiffColor: Toward High Fidelity Text-Guided Image Colorization with Diffusion Models**, Jianxin Lin et al. 
[[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.01655)] \n       * (ICCV 2023) [💬Video Editing] **FateZero: Fusing Attentions for Zero-shot Text-based Video Editing**, Chenyang Qi et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.09535)] [[Code](https:\u002F\u002Fgithub.com\u002FChenyangQiQi\u002FFateZero)] [[Project](https:\u002F\u002Ffate-zero-edit.github.io\u002F)] [Hugging Face](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002Fchenyangqi\u002FFateZero)] \n       * (arXiv preprint 2023) [💬3D] **AvatarVerse: High-quality & Stable 3D Avatar Creation from Text and Pose**, Huichao Zhang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.03610)] [[Project](https:\u002F\u002Favatarverse3d.github.io\u002F)] \n       * (ACM Transactions on Graphics 2023) **CLIP-Guided StyleGAN Inversion for Text-Driven Real Image Editing**, Ahmet Canberk Baykal et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.08397)] \n       * (arXiv preprint 2023) ⭐⭐**AnimateDiff: Animate Your Personalized Text-to-Image Diffusion Models without Specific Tuning**, Yuwei Guo et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.04725)] [[Project](https:\u002F\u002Fanimatediff.github.io\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002Fguoyww\u002Fanimatediff\u002F)]\n       * (ICLR 2023) **DiffEdit: Diffusion-based semantic image editing with mask guidance**, Guillaume Couairon et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.11427v1)] \n       * (arXiv preprint 2023) **Controlling Text-to-Image Diffusion by Orthogonal Finetuning**, Zeju Qiu et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.07280)] [[Project](https:\u002F\u002Foft.wyliu.com\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002FZeju1997\u002Foft)]\n       * (arXiv preprint 2023) [💬Reject Human Instructions] **Accountable Textual-Visual Chat Learns to Reject Human Instructions in Image Re-creation**, Zhiwei Zhang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.05983)] [[Project](https:\u002F\u002Fmatrix-alpha.github.io\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002Fmatrix-alpha\u002FAccountable-Textual-Visual-Chat)]\n       * (arXiv preprint 2023) **MultiFusion: Fusing Pre-Trained Models for Multi-Lingual, Multi-Modal Image Generation**, Marco Bellagente et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.15296)]\n       * (CVPR 2023) **Text-Guided Unsupervised Latent Transformation for Multi-Attribute Image Manipulation**, Xiwen Wei et al. [[Paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FWei_Text-Guided_Unsupervised_Latent_Transformation_for_Multi-Attribute_Image_Manipulation_CVPR_2023_paper.html)] \n       * (arXiv preprint 2023) **Uni-ControlNet: All-in-One Control to Text-to-Image Diffusion Models**, Shihao Zhao et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.16322v1)] [[Project](https:\u002F\u002Fshihaozhaozsh.github.io\u002Funicontrolnet\u002F)]\n       * (arXiv preprint 2023) **Unified Multi-Modal Latent Diffusion for Joint Subject and Text Conditional Image Generation**, Yiyang Ma et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.09319)] \n       * (arXiv preprint 2023) **DisenBooth: Disentangled Parameter-Efficient Tuning for Subject-Driven Text-to-Image Generation**, Hong Chen et al. 
[[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.03374)] \n       * (arXiv preprint 2023) [💬Image Editing] **Guided Image Synthesis via Initial Image Editing in Diffusion Model**, Jiafeng Mao et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.03382)] \n       * (arXiv preprint 2023) [💬Image Editing] **Prompt Tuning Inversion for Text-Driven Image Editing Using Diffusion Models**, Wenkai Dong et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.04441)] \n       * (CVPR 2023) **DreamBooth: Fine Tuning Text-to-Image Diffusion Models for Subject-Driven Generation**, Nataniel Ruiz et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.12242)] [[Project](https:\u002F\u002Fdreambooth.github.io\u002F)]\n       * (arXiv preprint 2023) **Shape-Guided Diffusion with Inside-Outside Attention**, Dong Huk Park et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.00210)] [[Code](https:\u002F\u002Fgithub.com\u002Fshape-guided-diffusion\u002Fshape-guided-diffusion)] [[Project](https:\u002F\u002Fshape-guided-diffusion.github.io\u002F)] [Hugging Face](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002Fshape-guided-diffusion\u002Fshape-guided-diffusion)] \n       * (arXiv preprint 2023) [💬Image Editing] **iEdit: Localised Text-guided Image Editing with Weak Supervision**, Rumeysa Bodur et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.05947)] \n       * (PR 2023) [💬Person Re-identification] **BDNet: A BERT-based Dual-path Network for Text-to-Image Cross-modal Person Re-identification**, Qiang Liu et al. [[Paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0031320323003370)] \n       * (arXiv preprint 2023) **MagicFusion: Boosting Text-to-Image Generation Performance by Fusing Diffusion Models**, Jing Zhao et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.13126)] [[Code](https:\u002F\u002Fgithub.com\u002FMagicFusion\u002FMagicFusion.github.io)] [Project](https:\u002F\u002Fmagicfusion.github.io\u002F)] \n       * (CVPR 2023) [💬3D] **TAPS3D: Text-Guided 3D Textured Shape Generation from Pseudo Supervision**, Jiacheng Wei et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.13273)] \n       * ⭐⭐(arXiv preprint 2023) [💬Image Editing] **MasaCtrl: Tuning-free Mutual Self-Attention Control for Consistent Image Synthesis and Editing**, Mingdeng Cao et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.08465)] [[Code](https:\u002F\u002Fgithub.com\u002FTencentARC\u002FMasaCtrl)] [[Project](https:\u002F\u002Fljzycmd.github.io\u002Fprojects\u002FMasaCtrl\u002F)]\n       * (arXiv preprint 2023) **Follow Your Pose: Pose-Guided Text-to-Video Generation using Pose-Free Videos**, Yue Ma et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.01186)] [[Code](https:\u002F\u002Fgithub.com\u002Fmayuelala\u002FFollowYourPose)] [[Hugging Face](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002FYueMafighting\u002FFollowYourPose)]\n       * ⭐⭐(arXiv preprint 2023) [💬Image Editing] **Delta Denoising Score**, Amir Hertz et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.07090)] \n       * (arXiv preprint 2023) **Subject-driven Text-to-Image Generation via Apprenticeship Learning**, Wenhu Chen et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.00186)] [[Project](https:\u002F\u002Fdelta-denoising-score.github.io\u002F)]\n       * (arXiv preprint 2023) [💬Image Editing] **Region-Aware Diffusion for Zero-shot Text-driven Image Editing**, Nisha Huang et al. 
[[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.11797)] [[Code](https:\u002F\u002Fgithub.com\u002Fhaha-lisa\u002FRDM-Region-Aware-Diffusion-Model)] \n       * ⭐⭐(arXiv preprint 2023) [💬Text+Video → Video]**Structure and Content-Guided Video Synthesis with Diffusion Models**, Patrick Esser et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.03011)] [[Project](https:\u002F\u002Fresearch.runwayml.com\u002Fgen1)]\n       * (arXiv preprint 2023) **ELITE: Encoding Visual Concepts into Textual Embeddings for Customized Text-to-Image Generation**, Yuxiang Wei et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.13848)]\n       * (arXiv preprint 2023) [💬Fashion Image Editing] **FICE: Text-Conditioned Fashion Image Editing With Guided GAN Inversion**, Martin Pernuš et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.02110)] [[Code](https:\u002F\u002Fgithub.com\u002FMartinPernus\u002FFICE)] \n       * (AAAI 2023) **CLIPVG: Text-Guided Image Manipulation Using Differentiable Vector Graphics**, Yiren Song et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.02122v1)] \n       * (AAAI 2023) **DE-Net: Dynamic Text-guided Image Editing Adversarial Networks**, Ming Tao et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.01160)] [[Code](https:\u002F\u002Fgithub.com\u002Ftobran\u002FDE-Net)]\n       * (arXiv preprint 2022) **Plug-and-Play Diffusion Features for Text-Driven Image-to-Image Translation**, Narek Tumanyan et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.12572)] [[Project](https:\u002F\u002Fpnp-diffusion.github.io\u002F)]\n       * (arXiv preprint 2022) [💬Text+Image → Video] **Tell Me What Happened: Unifying Text-guided Video Completion via Multimodal Masked Video Generation**, Tsu-Jui Fu et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.12824)]\n       * (arXiv preprint 2022) [💬Image Stylization] **DiffStyler: Controllable Dual Diffusion for Text-Driven Image Stylization**, Nisha Huang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.10682)] [[Code](https:\u002F\u002Fgithub.com\u002Fhaha-lisa\u002FDiffstyler)] \n       * (arXiv preprint 2022) **Null-text Inversion for Editing Real Images using Guided Diffusion Models**, Ron Mokady et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.09794)] [[Project](https:\u002F\u002Fnull-text-inversion.github.io\u002F)] \n       * (arXiv preprint 2022) **InstructPix2Pix: Learning to Follow Image Editing Instructions**, Tim Brooks et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.09800)] [[Project](https:\u002F\u002Fwww.timothybrooks.com\u002Finstruct-pix2pix)] \n       * (ECCV 2022) [💬Style Transfer] **Language-Driven Artistic Style Transfer**, Tsu-Jui Fu et al. [[Paper](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-031-20059-5_41)] [[Code](https:\u002F\u002Fgithub.com\u002Ftsujuifu\u002Fpytorch_ldast)]\n       * (arXiv preprint 2022) **Bridging CLIP and StyleGAN through Latent Alignment for Image Editing**, Wanfeng Zheng et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.04506)] \n       * (NeurIPS 2022) **One Model to Edit Them All: Free-Form Text-Driven Image Manipulation with Semantic Modulations**, Yiming Zhu et al. 
[[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.07883)] [[Code](https:\u002F\u002Fgithub.com\u002FKumapowerLIU\u002FFFCLIP)]\n       * (BMVC 2022) **LDEdit: Towards Generalized Text Guided Image Manipulation via Latent Diffusion Models**, Paramanand Chandramouli et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.02249v1)]\n       * (ACMMM 2022) [💬Iterative Language-based Image Manipulation] **LS-GAN: Iterative Language-based Image Manipulation via Long and Short Term Consistency Reasoning**, Gaoxiang Cong et al. [[Paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3503161.3548206)] \n       * (ACMMM 2022) [💬Digital Art Synthesis] **Draw Your Art Dream: Diverse Digital Art Synthesis with Multimodal Guided Diffusion**, Huang Nisha et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2209.13360)] [[Code](https:\u002F\u002Fgithub.com\u002Fhaha-lisa\u002FMGAD-multimodal-guided-artwork-diffusion)]\n       * (SIGGRAPH Asia 2022) [💬HDR Panorama Generation] **Text2Light: Zero-Shot Text-Driven HDR Panorama Generation**, Zhaoxi Chen et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2209.09898)] [[Project](https:\u002F\u002Ffrozenburning.github.io\u002Fprojects\u002Ftext2light\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002FFrozenBurning\u002FText2Light)]\n       * (arXiv preprint 2022) **LANIT: Language-Driven Image-to-Image Translation for Unlabeled Data**, Jihye Park et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.14889)] [[Project](https:\u002F\u002Fku-cvlab.github.io\u002FLANIT\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002FKU-CVLAB\u002FLANIT)]\n       * (ACMMM PIES-ME 2022) [💬3D Semantic Style Transfer] **Language-guided Semantic Style Transfer of 3D Indoor Scenes**, Bu Jin et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.07870)] [[Code](https:\u002F\u002Fgithub.com\u002FAIR-DISCOVER\u002FLASST)]\n       * (arXiv preprint 2022) [💬Face Animation] **Language-Guided Face Animation by Recurrent StyleGAN-based Generator**, Tiankai Hang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.05617)] [[Code](https:\u002F\u002Fgithub.com\u002FTiankaiHang\u002Flanguage-guided-animation)]\n       * (arXiv preprint 2022) [💬Fashion Design] **ARMANI: Part-level Garment-Text Alignment for Unified Cross-Modal Fashion Design**, Xujie Zhang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.05621)] [[Code](https:\u002F\u002Fgithub.com\u002FHarvey594\u002FARMANI)]\n       * (arXiv preprint 2022) [💬Image Colorization] **TIC: Text-Guided Image Colorization**, Subhankar Ghosh et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.02843)] \n       * (ECCV 2022) [💬Animating Human Meshes] **CLIP-Actor: Text-Driven Recommendation and Stylization for Animating Human Meshes**, Kim Youwang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.04382)] [[Code](https:\u002F\u002Fgithub.com\u002FYouwang-Kim\u002FCLIP-Actor)]\n       * (ECCV 2022) [💬Pose Synthesis] **TIPS: Text-Induced Pose Synthesis**, Prasun Roy et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.11718)] [[Code](https:\u002F\u002Fgithub.com\u002Fprasunroy\u002Ftips)] [[Project](https:\u002F\u002Fprasunroy.github.io\u002Ftips\u002F)]\n       * (ACMMM 2022) [💬Person Re-identification] **Learning Granularity-Unified Representations for Text-to-Image Person Re-identification**, Zhiyin Shao et al. 
[[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.07802)] [[Code](https:\u002F\u002Fgithub.com\u002FZhiyinShao-H\u002FLGUR)]\n       * (ACMMM 2022) **Towards Counterfactual Image Manipulation via CLIP**, Yingchen Yu et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.02812)] [[Code](https:\u002F\u002Fgithub.com\u002Fyingchen001\u002FCF-CLIP)]\n       * (ACMMM 2022) [💬Monocular Depth Estimation] **Can Language Understand Depth?**, Wangbo Zhao et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.01077)] [[Code](https:\u002F\u002Fgithub.com\u002FAdonis-galaxy\u002FDepthCLIP)]\n       * (arXiv preprint 2022) [💬Image Style Transfer] **Language-Driven Image Style Transfer**, Tsu-Jui Fu et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.00178)]\n       * (CVPR 2022) [💬Image Segmentation] **Image Segmentation Using Text and Image Prompts**, Timo Lüddecke et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.10003)] [[Code](https:\u002F\u002Fgithub.com\u002Ftimojl\u002Fclipseg)] \n       * (CVPR 2022) [💬Video Segmentation] **Modeling Motion with Multi-Modal Features for Text-Based Video Segmentation**, Wangbo Zhao et al. [[Paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FZhao_Modeling_Motion_With_Multi-Modal_Features_for_Text-Based_Video_Segmentation_CVPR_2022_paper.pdf)] [[Code](https:\u002F\u002Fgithub.com\u002Fwangbo-zhao\u002F2022cvpr-mmmmtbvs)]\n       * (arXiv preprint 2022) [💬Image Matting] **Referring Image Matting**, Jizhizi Li et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.05149)] [[Dataset](https:\u002F\u002Fgithub.com\u002FJizhiziLi\u002FRIM)]\n       * (arXiv preprint 2022) [💬Stylizing Video Objects] **Text-Driven Stylization of Video Objects**, Sebastian Loeschcke et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.12396)] [[Project](https:\u002F\u002Fsloeschcke.github.io\u002FText-Driven-Stylization-of-Video-Objects\u002F)]\n       * (arXiv preprint 2022) **DALL-E for Detection: Language-driven Context Image Synthesis for Object Detection**, Yunhao Ge et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.09592)] \n       * (IEEE Transactions on Neural Networks and Learning Systems 2022) [💬Pose-Guided Person Generation] **Verbal-Person Nets: Pose-Guided Multi-Granularity Language-to-Person Generation**, Deyin Liu et al. [[Paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9732175)]\n       * (SIGGRAPH 2022) [💬3D Avatar Generation] **AvatarCLIP: Zero-Shot Text-Driven Generation and Animation of 3D Avatars**, Fangzhou Hong et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.08535)] [[Code](https:\u002F\u002Fgithub.com\u002Fhongfz16\u002FAvatarCLIP)] [[Project](https:\u002F\u002Fhongfz16.github.io\u002Fprojects\u002FAvatarCLIP.html)] \n       * ⭐⭐(arXiv preprint 2022) [💬Image & Video Editing] **Text2LIVE: Text-Driven Layered Image and Video Editing**, Omer Bar-Tal et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.02491)] [[Project](https:\u002F\u002Ftext2live.github.io\u002F)] \n       * (Machine Vision and Applications 2022) **Paired-D++ GAN for image manipulation with text**, Duc Minh Vo et al. [[Paper](https:\u002F\u002Flink.springer.com\u002Farticle\u002F10.1007\u002Fs00138-022-01298-7)]\n       * (CVPR 2022) [💬Hairstyle Transfer] **HairCLIP: Design Your Hair by Text and Reference Image**, Tianyi Wei et al. 
[[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.05142)] [[Code](https:\u002F\u002Fgithub.com\u002Fwty-ustc\u002FHairCLIP)] \n       * (CVPR 2022) [💬NeRF] **CLIP-NeRF: Text-and-Image Driven Manipulation of Neural Radiance Fields**, Can Wang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.05139)] [[Code](https:\u002F\u002Fgithub.com\u002FcassiePython\u002FCLIPNeRF)] [[Project](https:\u002F\u002Fcassiepython.github.io\u002Fclipnerf\u002F)]\n       * (CVPR 2022) **DiffusionCLIP: Text-Guided Diffusion Models for Robust Image Manipulation**, Gwanghyun Kim et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2110.02711)]\n       * (CVPR 2022) **ManiTrans: Entity-Level Text-Guided Image Manipulation via Token-wise Semantic Alignment and Generation**, Jianan Wang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.04428)] [[Project](https:\u002F\u002Fjawang19.github.io\u002Fmanitrans\u002F)] \n       * ⭐⭐ (CVPR 2022) **Blended Diffusion for Text-driven Editing of Natural Images**, Omri Avrahami et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.14818)] [[Code](https:\u002F\u002Fgithub.com\u002Fomriav\u002Fblended-diffusion)] [[Project](https:\u002F\u002Fomriavrahami.com\u002Fblended-diffusion-page\u002F)] \n       * (CVPR 2022) **Predict, Prevent, and Evaluate: Disentangled Text-Driven Image Manipulation Empowered by Pre-Trained Vision-Language Model**, Zipeng Xu et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.13333)] [[Code](https:\u002F\u002Fgithub.com\u002Fzipengxuc\u002FPPE-Pytorch)] \n       * (CVPR 2022) [💬Style Transfer] **CLIPstyler: Image Style Transfer with a Single Text Condition**, Gihyun Kwon et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.00374)] [[Code](https:\u002F\u002Fgithub.com\u002Fpaper11667\u002FCLIPstyler)] \n       * (arXiv preprint 2022) [💬Multi-person Image Generation] **Pose Guided Multi-person Image Generation From Text**, Soon Yau Cheong et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.04907)]\n       * (arXiv preprint 2022) [💬Image Style Transfer] **StyleCLIPDraw: Coupling Content and Style in Text-to-Drawing Translation**, Peter Schaldenbrand et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.12362)] [[Dataset](https:\u002F\u002Fwww.kaggle.com\u002Fpittsburghskeet\u002Fdrawings-with-style-evaluation-styleclipdraw)] [[Code](https:\u002F\u002Fgithub.com\u002Fpschaldenbrand\u002FStyleCLIPDraw)] [[Demo](https:\u002F\u002Freplicate.com\u002Fpschaldenbrand\u002Fstyle-clip-draw)]\n       * (arXiv preprint 2022) [💬Image Style Transfer] **Name Your Style: An Arbitrary Artist-aware Image Style Transfer**, Zhi-Song Liu et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.13562)]\n       * (arXiv preprint 2022) [💬3D Avatar Generation] **Text and Image Guided 3D Avatar Generation and Manipulation**, Zehranaz Canfes et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.06079)] [[Project](https:\u002F\u002Fcatlab-team.github.io\u002Flatent3D\u002F)]\n       * (arXiv preprint 2022) [💬Image Inpainting] **NÜWA-LIP: Language Guided Image Inpainting with Defect-free VQGAN**, Minheng Ni et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.05009)]\n       * ⭐(arXiv preprint 2021) [💬Text+Image → Video] **Make It Move: Controllable Image-to-Video Generation with Text Descriptions**, Yaosi Hu et al. 
[[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.02815)]\n       * (arXiv preprint 2021) [💬NeRF] **Zero-Shot Text-Guided Object Generation with Dream Fields**, Ajay Jain et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.01455)]  [[Project](https:\u002F\u002Fajayj.com\u002Fdreamfields)]\n       * (NeurIPS 2021) **Instance-Conditioned GAN**, Arantxa Casanova et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2109.05070)] [[Code](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fic_gan)]\n       * (ICCV 2021) **Language-Guided Global Image Editing via Cross-Modal Cyclic Mechanism**, Wentao Jiang et al. [[Paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FJiang_Language-Guided_Global_Image_Editing_via_Cross-Modal_Cyclic_Mechanism_ICCV_2021_paper.pdf)]\n       * (ICCV 2021) **Talk-to-Edit: Fine-Grained Facial Editing via Dialog**, Yuming Jiang et al. [[Paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FJiang_Talk-To-Edit_Fine-Grained_Facial_Editing_via_Dialog_ICCV_2021_paper.pdf)] [[Project](https:\u002F\u002Fwww.mmlab-ntu.com\u002Fproject\u002Ftalkedit\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002Fyumingj\u002FTalk-to-Edit)]\n       * (ICCVW 2021) **CIGLI: Conditional Image Generation from Language & Image**, Xiaopeng Lu et al. [[Paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021W\u002FCLVL\u002Fpapers\u002FLu_CIGLI_Conditional_Image_Generation_From_Language__Image_ICCVW_2021_paper.pdf)] [[Code](https:\u002F\u002Fgithub.com\u002Fvincentlux\u002FCIGLI?utm_source=catalyzex.com)]\n       * (ICCV 2021) **StyleCLIP: Text-Driven Manipulation of StyleGAN Imagery**, Or Patashnik et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.17249)] [[Code](https:\u002F\u002Fgithub.com\u002Forpatashnik\u002FStyleCLIP)]\n       * (arXiv preprint 2021) **Paint by Word**, David Bau et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2103.10951.pdf)] \n       * ⭐(arXiv preprint 2021) **Zero-Shot Text-to-Image Generation**, Aditya Ramesh et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2102.12092.pdf)] [[Code](https:\u002F\u002Fgithub.com\u002Fopenai\u002FDALL-E)] [[Blog](https:\u002F\u002Fopenai.com\u002Fblog\u002Fdall-e\u002F)] [[Model Card](https:\u002F\u002Fgithub.com\u002Fopenai\u002FDALL-E\u002Fblob\u002Fmaster\u002Fmodel_card.md)] [[Colab](https:\u002F\u002Fcolab.research.google.com\u002Fdrive\u002F1KA2w8bA9Q1HDiZf5Ow_VNOrTaWW4lXXG?usp=sharing)] \n       * (NeurIPS 2020) **Lightweight Generative Adversarial Networks for Text-Guided Image Manipulation**, Bowen Li et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2010.12136.pdf)]\n       * (CVPR 2020) **ManiGAN: Text-Guided Image Manipulation**, Bowen Li et al. [[Paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2020\u002Fpapers\u002FLi_ManiGAN_Text-Guided_Image_Manipulation_CVPR_2020_paper.pdf)] [[Code](https:\u002F\u002Fgithub.com\u002Fmrlibw\u002FManiGAN)]\n       * (ACMMM 2020) **Text-Guided Neural Image Inpainting**, Lisai Zhang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2004.03212.pdf)] [[Code](https:\u002F\u002Fgithub.com\u002Fidealwhite\u002FTDANet)]\n       * (ACMMM 2020) **Describe What to Change: A Text-guided Unsupervised Image-to-Image Translation Approach**, Yahui Liu et al. 
[[Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2008.04200.pdf)]\n       * (NeurIPS 2018) **Text-adaptive generative adversarial networks: Manipulating images with natural language**, Seonghyeon Nam et al. [[Paper](http:\u002F\u002Fpapers.nips.cc\u002Fpaper\u002F7290-text-adaptive-generative-adversarial-networks-manipulating-images-with-natural-language.pdf)] [[Code](https:\u002F\u002Fgithub.com\u002Fwoozzu\u002Ftagan)]\n\n[\u003Cu>\u003C🎯Back to Top>\u003C\u002Fu>](#head-content)\n\n   * \u003Cspan id=\"head-tl2i\"> **Text+Layout → Image** \u003C\u002Fspan> \n       * (ECCV 2024) **Training-free Composite Scene Generation for Layout-to-Image Synthesis**, Jiaqi Liu et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.13609)] \n       * (CVPR 2024) **Zero-Painter: Training-Free Layout Control for Text-to-Image Synthesis**, Marianna Ohanyan et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.04032)] [[Code](https:\u002F\u002Fgithub.com\u002FPicsart-AI-Research\u002FZero-Painter)]\n       * (CVPR 2024) **MIGC: Multi-Instance Generation Controller for Text-to-Image Synthesis**, Dewei Zhou et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.05408)] [[Project](https:\u002F\u002Fmigcproject.github.io\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002Flimuloo\u002FMIGC)]\n       * (ICLR 2024) **Adversarial Supervision Makes Layout-to-Image Diffusion Models Thrive**, Yumeng Li et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.08815)] [[Project](https:\u002F\u002Fyumengli007.github.io\u002FALDM\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002Fboschresearch\u002FALDM)]\n       * (ICCV 2023) **Dense Text-to-Image Generation with Attention Modulation**, Yunji Kim et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.12964)] [[Code](https:\u002F\u002Fgithub.com\u002Fnaver-ai\u002FDenseDiffusion)]\n       * (arXiv preprint 2023) **Training-Free Layout Control with Cross-Attention Guidance**, Minghao Chen et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.03373)] [[Code](https:\u002F\u002Fgithub.com\u002Fsilent-chen\u002Flayout-guidance)] [[Project](https:\u002F\u002Fsilent-chen.github.io\u002Flayout-guidance\u002F)]\n\n[\u003Cu>\u003C🎯Back to Top>\u003C\u002Fu>](#head-content)\n\n   * \u003Cspan id=\"head-oti2i\"> **Others+Text+Image\u002FVideo → Image\u002FVideo** \u003C\u002Fspan> \n       * (arXiv preprint 2024) [💬Skeleton\u002FSketch] **ECNet: Effective Controllable Text-to-Image Diffusion Models**, Sicheng Li et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.18417)]\n       * (ICCV 2023) [💬Skeleton] **HumanSD: A Native Skeleton-Guided Diffusion Model for Human Image Generation**, Xuan Ju et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.04269)] [[Project](https:\u002F\u002Fidea-research.github.io\u002FHumanSD\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002FIDEA-Research\u002FHumanSD)] [[Video](https:\u002F\u002Fdrive.google.com\u002Ffile\u002Fd\u002F1Djc2uJS5fmKnKeBnL34FnAAm3YSH20Bb\u002Fview)]\n       * (arXiv preprint 2023) [💬Sound+Speech→Robotic Painting] **Robot Synesthesia: A Sound and Emotion Guided AI Painter**, Vihaan Misra et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.04850)]\n       * (arXiv preprint 2022) [💬Sound] **Robust Sound-Guided Image Manipulation**, Seung Hyun Lee et al. 
[[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.14114)]\n\n[\u003Cu>\u003C🎯Back to Top>\u003C\u002Fu>](#head-content)\n       \n   * \u003Cspan id=\"head-l2i\"> **Layout\u002FMask → Image** \u003C\u002Fspan> \n       * (arXiv preprint 2024) **CreatiLayout: Siamese Multimodal Diffusion Transformer for Creative Layout-to-Image Generation**, Hui Zhang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.03859)] [[Project](https:\u002F\u002Fcreatilayout.github.io\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002FHuiZhang0812\u002FCreatiLayout)]\n       * (CVPR 2024) [💬Instance information +Text→Image] **InstanceDiffusion: Instance-level Control for Image Generation**, XuDong Wang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.03290)] [[Project](https:\u002F\u002Fpeople.eecs.berkeley.edu\u002F~xdwang\u002Fprojects\u002FInstDiff\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002Ffrank-xwang\u002FInstanceDiffusion)]\n       * (arXiv preprint 2023) [💬Text→Layout→Image] **LayoutLLM-T2I: Eliciting Layout Guidance from LLM for Text-to-Image Generation**, Leigang Qu et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.05095)]\n       * (CVPR 2023) [💬Mask+Text→Image] **SceneComposer: Any-Level Semantic Image Synthesis**, Yu Zeng et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.11742)] [[Demo](https:\u002F\u002Fforms.microsoft.com\u002Fpages\u002Fresponsepage.aspx?id=Wht7-jR7h0OUrtLBeN7O4fEq8XkaWWJBhiLWWMELo2NUMjJYS0FDS0RISUVBUllMV0FRSzNCOTFTQy4u)]\n       * (CVPR 2023) **Freestyle Layout-to-Image Synthesis**, Han Xue et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.14412)] [[Code](https:\u002F\u002Fgithub.com\u002Fessunny310\u002FFreestyleNet)]\n       * (CVPR 2023) **LayoutDiffusion: Controllable Diffusion Model for Layout-to-image Generation**, Guangcong Zheng et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.17189)] [[Code](https:\u002F\u002Fgithub.com\u002FZGCTroy\u002FLayoutDiffusion)]\n       * (Journal of King Saud University - Computer and Information Sciences) [Survey] **Image Generation Models from Scene Graphs and Layouts: A Comparative Analysis**, Muhammad Umair Hassan et al. [[Paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS1319157823000897)] \n       * (CVPR 2022) **Modeling Image Composition for Complex Scene Generation**, Zuopeng Yang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.00923)] [[Code](https:\u002F\u002Fgithub.com\u002FJohnDreamer\u002FTwFA)]\n       * (CVPR 2022) **Interactive Image Synthesis with Panoptic Layout Generation**, Bo Wang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.02104)] \n       * (CVPR 2021 [AI for Content Creation Workshop](http:\u002F\u002Fvisual.cs.brown.edu\u002Fworkshops\u002Faicc2021\u002F)) **High-Resolution Complex Scene Synthesis with Transformers**, Manuel Jahn et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2105.06458.pdf)] \n       * (CVPR 2021) **Context-Aware Layout to Image Generation with Enhanced Object Appearance**, Sen He et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2103.11897.pdf)] [[Code](https:\u002F\u002Fgithub.com\u002Fwtliao\u002Flayout2img)] \n\n[\u003Cu>\u003C🎯Back to Top>\u003C\u002Fu>](#head-content)\n\n   * \u003Cspan id=\"head-l2s\"> **Label-set → Semantic maps** \u003C\u002Fspan> \n       * (ECCV 2020) **Controllable image synthesis via SegVAE**, Yen-Chi Cheng et al. 
[[Paper](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2020\u002Fpapers_ECCV\u002Fpapers\u002F123520154.pdf)] [[Code](https:\u002F\u002Fgithub.com\u002Fyccyenchicheng\u002FSegVAE)]\n\n[\u003Cu>\u003C🎯Back to Top>\u003C\u002Fu>](#head-content)\n       \n   * \u003Cspan id=\"head-s2i\"> **Speech → Image** \u003C\u002Fspan> \n       *  (IEEE\u002FACM Transactions on Audio, Speech and Language Processing-2021) **Generating Images From Spoken Descriptions**, Xinsheng Wang et al. [[Paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1109\u002FTASLP.2021.3053391)] [[Code](https:\u002F\u002Fgithub.com\u002Fxinshengwang\u002FS2IGAN)]  [[Project](https:\u002F\u002Fxinshengwang.github.io\u002Fproject\u002Fs2igan\u002F)]\n       *  (INTERSPEECH 2020) **[Extended Version👆] S2IGAN: Speech-to-Image Generation via Adversarial Learning**, Xinsheng Wang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2005.06968)]\n       * (IEEE Journal of Selected Topics in Signal Processing-2020) **Direct Speech-to-Image Translation**, Jiguo Li et al. [[Paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9067083)] [[Code](https:\u002F\u002Fgithub.com\u002Fsmallflyingpig\u002Fspeech-to-image-translation-without-text)] [[Project](https:\u002F\u002Fsmallflyingpig.github.io\u002Fspeech-to-image\u002Fmain)]\n\n[\u003Cu>\u003C🎯Back to Top>\u003C\u002Fu>](#head-content)\n       \n   * \u003Cspan id=\"head-sg2i\"> **Scene Graph → Image** \u003C\u002Fspan>  \n       * (arXiv preprint 2023) **Diffusion-Based Scene Graph to Image Generation with Masked Contrastive Pre-Training**, Ling Yang et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.11138)]\n       * (CVPR 2018) **Image Generation from Scene Graphs**, Justin Johnson et al. [[Paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002FCameraReady\u002F0764.pdf)] [[Code](https:\u002F\u002Fgithub.com\u002Fgoogle\u002Fsg2im)]\n\n[\u003Cu>\u003C🎯Back to Top>\u003C\u002Fu>](#head-content)\n   \n   * \u003Cspan id=\"head-t2vr\"> **Text → Visual Retrieval** \u003C\u002Fspan> \n       * (ECIR 2023) **Scene-Centric vs. Object-Centric Image-Text Cross-Modal Retrieval: A Reproducibility Study**, Mariya Hendriksen et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.05174)] [[Code](https:\u002F\u002Fgithub.com\u002Fmariyahendriksen\u002Fecir23-object-centric-vs-scene-centric-CMR)]\n       * (ECIR 2022) **Extending CLIP for Category-to-image Retrieval in E-commerce**, Mariya Hendriksen et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.11294)] [[Code](https:\u002F\u002Fgithub.com\u002Fmariyahendriksen\u002Fecir2022_category_to_image_retrieval)]\n       * (ACMMM 2022) **CAIBC: Capturing All-round Information Beyond Color for Text-based Person Retrieval**, Zijie Wang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2209.05773)] \n       * (AAAI 2022) **Cross-Modal Coherence for Text-to-Image Retrieval**, Malihe Alikhani et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2109.11047)] \n       * (ECCV [RWS 2022](https:\u002F\u002Fvap.aau.dk\u002Frws-eccv2022\u002F)) [💬Person Retrieval] **See Finer, See More: Implicit Modality Alignment for Text-based Person Retrieval**, Xiujun Shu et al. 
[[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.08608)] [[Code](https:\u002F\u002Fgithub.com\u002FTencentYoutuResearch\u002FPersonRetrieval-IVT)] \n       * (ECCV 2022) [💬Text+Sketch→Visual Retrieval] **A Sketch Is Worth a Thousand Words: Image Retrieval with Text and Sketch**, Patsorn Sangkloy et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.03354)] [[Project](https:\u002F\u002Fpatsorn.me\u002Fprojects\u002Ftsbir\u002F)] \n       * (Neurocomputing-2022) **TIPCB: A simple but effective part-based convolutional baseline for text-based person search**, Yuhao Chen et al. [[Paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0925231222004726)] [[Code](https:\u002F\u002Fgithub.com\u002FOrangeYHChen\u002FTIPCB?utm_source=catalyzex.com)] \n       * (arXiv preprint 2021) [💬Dataset] **FooDI-ML: a large multi-language dataset of food, drinks and groceries images and descriptions**, David Amat Olóndriz et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2110.02035)] [[Code](https:\u002F\u002Fgithub.com\u002Fglovo\u002Ffoodi-ml-dataset)] \n       * (CVPRW 2021) **TIED: A Cycle Consistent Encoder-Decoder Model for Text-to-Image Retrieval**, Clint Sebastian et al. [[Paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021W\u002FAICity\u002Fpapers\u002FSebastian_TIED_A_Cycle_Consistent_Encoder-Decoder_Model_for_Text-to-Image_Retrieval_CVPRW_2021_paper.pdf)] \n       * (CVPR 2021) **T2VLAD: Global-Local Sequence Alignment for Text-Video Retrieval**, Xiaohan Wang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2104.10054.pdf)] \n       * (CVPR 2021) **Thinking Fast and Slow: Efficient Text-to-Visual Retrieval with Transformers**, Antoine Miech et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2103.16553.pdf)] \n       * (IEEE Access 2019) **Query is GAN: Scene Retrieval With Attentional Text-to-Image Generative Adversarial Network**, RINTARO YANAGI et al. [[Paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8868179)]\n\n[\u003Cu>\u003C🎯Back to Top>\u003C\u002Fu>](#head-content)\n \n   * \u003Cspan id=\"head-t2m\"> **Text → 3D\u002FMotion\u002FShape\u002FMesh\u002FObject...** \u003C\u002Fspan>\n      * (WACV 2026) [💬Text → Texture] **CasTex: Cascaded Text-to-Texture Synthesis via Explicit Texture Maps and Physically-Based Shading**, Mishan Aliev et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.06856)] [[Project](https:\u002F\u002Fthecrazymage.github.io\u002FCasTex\u002F)]\n       * (arXiv preprint 2024) [💬Text → Motion] **CrowdMoGen: Zero-Shot Text-Driven Collective Motion Generation**, Xinying Guo et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.06188)] [[Project](https:\u002F\u002Fgxyes.github.io\u002Fprojects\u002FCrowdMoGen.html)]\n       * (ACMMM 2024) [💬Text → 3D] **PlacidDreamer: Advancing Harmony in Text-to-3D Generation**, Shuo Huang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.13976)] [[Code](https:\u002F\u002Fgithub.com\u002FHansenHuang0823\u002FPlacidDreamer)]\n       * (Meta) [💬Text → 3D] **Meta 3D Gen**, Raphael Bensadoun et al. 
[[Paper](https:\u002F\u002Fscontent-dus1-1.xx.fbcdn.net\u002Fv\u002Ft39.2365-6\u002F449707112_509645168082163_2193712134508658234_n.pdf?_nc_cat=111&ccb=1-7&_nc_sid=3c67a6&_nc_ohc=TdfUsn5eGzgQ7kNvgEir1_g&_nc_ht=scontent-dus1-1.xx&oh=00_AYCH-Fbi8CL2l3Yc3ehAr-Itl5B6Wbo7KtXeONb8KCJ_mg&oe=668C1291)]\n       * (arXiv preprint 2024) [💬Text → 3D] **Meta 3D TextureGen: Fast and Consistent Texture Generation for 3D Objects**, Raphael Bensadoun et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.02430v1)]\n       * (arXiv preprint 2024) [💬Text → 3D] **Meta 3D AssetGen: Text-to-Mesh Generation with High-Quality Geometry, Texture, and PBR Materials**, Yawar Siddiqui et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.02445v1)] [[Project](https:\u002F\u002Fassetgen.github.io\u002F)]\n       * (arXiv preprint 2024) [💬Text → 3D] **3DStyleGLIP: Part-Tailored Text-Guided 3D Neural Stylization**, SeungJeh Chung et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.02634v1)]\n       * (arXiv preprint 2024) [💬Text → 3D] **LATTE3D: Large-scale Amortized Text-To-Enhanced3D Synthesis**, Kevin Xie et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.15385)] [[Project](https:\u002F\u002Fresearch.nvidia.com\u002Flabs\u002Ftoronto-ai\u002FLATTE3D\u002F)]\n       * (IEEE Transactions on Visualization and Computer Graphics) [💬Text → Motion] **GUESS:GradUally Enriching SyntheSis for Text-Driven Human Motion Generation**, Xuehao Gao et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.02142v1)]\n       * (arXiv preprint 2023) [💬Text → 4D] **4D-fy: Text-to-4D Generation Using Hybrid Score Distillation Sampling**, Sherwin Bahmani et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.17984)] [[Project](https:\u002F\u002Fsherwinbahmani.github.io\u002F4dfy\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002Fsherwinbahmani\u002F4dfy)]\n       * (arXiv preprint 2023) [💬Text → 3D] **MetaDreamer: Efficient Text-to-3D Creation With Disentangling Geometry and Texture**, Lincong Feng et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.10123)] [[Project](https:\u002F\u002Fmetadreamer3d.github.io\u002F)]\n       * (arXiv preprint 2023) [💬Text → 3D] **One-2-3-45++: Fast Single Image to 3D Objects with Consistent Multi-View Generation and 3D Diffusion**, Minghua Liu et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.07885)] [[Project](https:\u002F\u002Fsudo-ai-3d.github.io\u002FOne2345plus_page\u002F)]\n       * (NeurIPS 2023) [💬Text → 3D] **One-2-3-45: Any Single Image to 3D Mesh in 45 Seconds without Per-Shape Optimization**, Minghua Liu et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.16928)] [[Project](https:\u002F\u002Fone-2-3-45.github.io\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002FOne-2-3-45\u002FOne-2-3-45)] \n       * (ACMMM 2023) [💬Text+Sketch → 3D] **Control3D: Towards Controllable Text-to-3D Generation**, Yang Chen et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.05461)] \n       * (SIGGRAPH Asia 2023 & TOG) [💬Text → 3D] **EXIM: A Hybrid Explicit-Implicit Representation for Text-Guided 3D Shape Generation**, Zhengzhe Liu et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.01714v1)] [[Code](https:\u002F\u002Fgithub.com\u002Fliuzhengzhe\u002FEXIM)] \n       * (arXiv preprint 2023) [💬Text → 3D] **PaintHuman: Towards High-fidelity Text-to-3D Human Texturing via Denoised Score Distillation**, Jianhui Yu et al. 
[[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.09458v1)] \n       * (arXiv preprint 2023) [💬Text → Motion] **Fg-T2M: Fine-Grained Text-Driven Human Motion Generation via Diffusion Model**, Yin Wang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.06284)] \n       * (arXiv preprint 2023) [💬Text → 3D] **IT3D: Improved Text-to-3D Generation with Explicit View Synthesis**, Yiwen Chen et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.11473)] [[Code](https:\u002F\u002Fgithub.com\u002Fbuaacyw\u002FIT3D-text-to-3D)] \n       * (arXiv preprint 2023) [💬Text → 3D] **HD-Fusion: Detailed Text-to-3D Generation Leveraging Multiple Noise Estimation**, Jinbo Wu et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.16183)] \n       * (arXiv preprint 2023) [💬Text → 3D] **T2TD: Text-3D Generation Model based on Prior Knowledge Guidance**, Weizhi Nie et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.15753)] \n       * (arXiv preprint 2023) [💬Text → 3D] **ProlificDreamer: High-Fidelity and Diverse Text-to-3D Generation with Variational Score Distillation**, Zhengyi Wang et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.16213)] [[Project](https:\u002F\u002Fml.cs.tsinghua.edu.cn\u002Fprolificdreamer\u002F)] \n       * (arXiv preprint 2023) [💬Text+Mesh → Mesh] **X-Mesh: Towards Fast and Accurate Text-driven 3D Stylization via Dynamic Textual Guidance**, Yiwei Ma et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.15764)] [[Project](https:\u002F\u002Fxmu-xiaoma666.github.io\u002FProjects\u002FX-Mesh\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002Fxmu-xiaoma666\u002FX-Mesh)] \n       * (arXiv preprint 2023) [💬Text → Motion] **T2M-GPT: Generating Human Motion from Textual Descriptions with Discrete Representations**, Jianrong Zhang et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.06052)] [[Project](https:\u002F\u002Fmael-zys.github.io\u002FT2M-GPT\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002FMael-zys\u002FT2M-GPT)] [[Hugging Face](https:\u002F\u002Fhuggingface.co\u002Fvumichien\u002FT2M-GPT)]\n       * (arXiv preprint 2023) [💬Text → 3D] **DreamHuman: Animatable 3D Avatars from Text**, Nikos Kolotouros et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.09329)] [[Project](https:\u002F\u002Fdream-human.github.io\u002F)]\n       * (arXiv preprint 2023) [💬Text → 3D] **ATT3D: Amortized Text-to-3D Object Synthesis**, Jonathan Lorraine et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.07349)] [[Project](https:\u002F\u002Fresearch.nvidia.com\u002Flabs\u002Ftoronto-ai\u002FATT3D\u002F)]\n       * (arXiv preprint 2022) [💬Text → 3D] **Dream3D: Zero-Shot Text-to-3D Synthesis Using 3D Shape Prior and Text-to-Image Diffusion Models**, Jiale Xu et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.14704)] [[Project](https:\u002F\u002Fbluestyle97.github.io\u002Fdream3d\u002F)]\n       * (arXiv preprint 2022) [💬3D Generative Model] **DATID-3D: Diversity-Preserved Domain Adaptation Using Text-to-Image Diffusion for 3D Generative Model**, Gwanghyun Kim et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.16374)] [[Code](https:\u002F\u002Fgithub.com\u002Fgwang-kim\u002FDATID-3D)] [[Project](https:\u002F\u002Fdatid-3d.github.io\u002F)]\n       * (arXiv preprint 2022) [💬Point Clouds] **Point-E: A System for Generating 3D Point Clouds from Complex Prompts**, Alex Nichol et al.  
[[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.08751)] [[Code](https:\u002F\u002Fgithub.com\u002Fopenai\u002Fpoint-e)]\n       * (arXiv preprint 2022) [💬Text → 3D] **Magic3D: High-Resolution Text-to-3D Content Creation**, Chen-Hsuan Lin et al.  [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.10440)] [[Project](https:\u002F\u002Fdeepimagination.cc\u002FMagic3D\u002F)]\n       * (arXiv preprint 2022) [💬Text → Shape] **Diffusion-SDF: Text-to-Shape via Voxelized Diffusion**, Muheng Li et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.03293)] [[Code](https:\u002F\u002Fgithub.com\u002Fttlmh\u002FDiffusion-SDF)]\n       * (NeurIPS 2022) [💬Mesh] **TANGO: Text-driven Photorealistic and Robust 3D Stylization via Lighting Decomposition**, Yongwei Chen et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.11277)] [[Project](https:\u002F\u002Fcyw-3d.github.io\u002Ftango\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002FGorilla-Lab-SCUT\u002Ftango)] \n       * (arXiv preprint 2022) [💬Human Motion Generation] **Human Motion Diffusion Model**, Guy Tevet et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2209.14916)] [[Project](https:\u002F\u002Fguytevet.github.io\u002Fmdm-page\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002FGuyTevet\u002Fmotion-diffusion-model)]\n       * (arXiv preprint 2022) [💬Human Motion Generation] **MotionDiffuse: Text-Driven Human Motion Generation with Diffusion Model**, Mingyuan Zhang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.15001)] [[Project](https:\u002F\u002Fmingyuan-zhang.github.io\u002Fprojects\u002FMotionDiffuse.html#)]\n       * (arXiv preprint 2022) [💬3D Shape] **ISS: Image as Stepping Stone for Text-Guided 3D Shape Generation**, Zhengzhe Liu et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2209.04145)]\n       * (ECCV 2022) [💬Virtual Humans] **Compositional Human-Scene Interaction Synthesis with Semantic Control**, Kaifeng Zhao et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.12824)] [[Project](https:\u002F\u002Fzkf1997.github.io\u002FCOINS\u002Findex.html)] [[Code](https:\u002F\u002Fgithub.com\u002Fzkf1997\u002FCOINS)] \n       * (CVPR 2022) [💬3D Shape] **Towards Implicit Text-Guided 3D Shape Generation**, Zhengzhe Liu et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.14622)] [[Code](https:\u002F\u002Fgithub.com\u002Fliuzhengzhe\u002FTowards-Implicit-Text-Guided-Shape-Generation)]\n       * (CVPR 2022) [💬Object] **Zero-Shot Text-Guided Object Generation with Dream Fields**, Ajay Jain et al. [[Paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FJain_Zero-Shot_Text-Guided_Object_Generation_With_Dream_Fields_CVPR_2022_paper.pdf)] [[Project](https:\u002F\u002Fajayj.com\u002Fdreamfields)] [[Code](https:\u002F\u002Fgithub.com\u002Fgoogle-research\u002Fgoogle-research\u002Ftree\u002Fmaster\u002Fdreamfields)] \n       * (CVPR 2022) [💬Mesh] **Text2Mesh: Text-Driven Neural Stylization for Meshes**, Oscar Michel et al. [[Paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FMichel_Text2Mesh_Text-Driven_Neural_Stylization_for_Meshes_CVPR_2022_paper.pdf)] [[Project](https:\u002F\u002Fthreedle.github.io\u002Ftext2mesh\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002Fthreedle\u002Ftext2mesh)] \n       * (CVPR 2022) [💬Motion] **Generating Diverse and Natural 3D Human Motions from Text**, Chuan Guo et al. 
[[Paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FGuo_Generating_Diverse_and_Natural_3D_Human_Motions_From_Text_CVPR_2022_paper.pdf)] [[Project](https:\u002F\u002Fericguo5513.github.io\u002Ftext-to-motion\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002FEricGuo5513\u002Ftext-to-motion)] \n       * (CVPR 2022) [💬Shape] **CLIP-Forge: Towards Zero-Shot Text-to-Shape Generation**, Aditya Sanghi et al. [[Paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FSanghi_CLIP-Forge_Towards_Zero-Shot_Text-To-Shape_Generation_CVPR_2022_paper.pdf)] [[Code](https:\u002F\u002Fgithub.com\u002FAutodeskAILab\u002FClip-Forge)] \n       * (arXiv preprint 2022) [💬Motion] **TEMOS: Generating diverse human motions from textual descriptions**, Mathis Petrovich et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.14109)] [[Project](https:\u002F\u002Fmathis.petrovich.fr\u002Ftemos\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002FMathux\u002FTEMOS)] \n\n[\u003Cu>\u003C🎯Back to Top>\u003C\u002Fu>](#head-content)\n   \n   * \u003Cspan id=\"head-t2v\"> **Text → Video** \u003C\u002Fspan> \n       * (arXiv preprint 2025) **MotionAgent: Fine-grained Controllable Video Generation via Motion Field Agent**, Xinyao Liao et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.03207)] \n       * (arXiv preprint 2024) **VideoTetris: Towards Compositional Text-to-Video Generation**, Ye Tian et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.04277)] [[Project](https:\u002F\u002Fvideotetris.github.io\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002FYangLing0818\u002FVideoTetris)] \n       * (arXiv preprint 2024) **MovieDreamer: Hierarchical Generation for Coherent Long Visual Sequence**, Canyu Zhao et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.16655)] [[Project](https:\u002F\u002Faim-uofa.github.io\u002FMovieDreamer\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002Faim-uofa\u002FMovieDreamer)] [[Demo Video](https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=aubRVOGrKLU)]\n       * 💥💥(OpenAI 2024) **Sora** [[Homepage](https:\u002F\u002Fopenai.com\u002Fsora)] [[Technical Report](https:\u002F\u002Fopenai.com\u002Fresearch\u002Fvideo-generation-models-as-world-simulators)] [[Sora with Audio](https:\u002F\u002Fx.com\u002Felevenlabsio\u002Fstatus\u002F1759240084342059260?s=20)]\n       * (ICLR 2024) **ControlVideo: Training-free Controllable Text-to-Video Generation**, Yabo Zhang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.13077)] [[Code](https:\u002F\u002Fgithub.com\u002FYBYBZhang\u002FControlVideo)]\n       * (arXiv preprint 2024) **MagicVideo-V2: Multi-Stage High-Aesthetic Video Generation**, Weimin Wang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.04468)] [[Project](https:\u002F\u002Fmagicvideov2.github.io\u002F)]\n       * (arXiv preprint 2023) **LAVIE: High-Quality Video Generation with Cascaded Latent Diffusion Models**, Yaohui Wang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.15103)] [[Project](https:\u002F\u002Fvchitect.github.io\u002FLaVie-project\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002FVchitect\u002FLaVie)]\n       * (arXiv preprint 2023) **Emu Video: Factorizing Text-to-Video Generation by Explicit Image Conditioning**, Rohit Girdhar et al. 
[[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.10709)] [[Project](https:\u002F\u002Femu-video.metademolab.com\u002F)] \n       * (ICCV 2023) **Text2Video-Zero: Text-to-Image Diffusion Models are Zero-Shot Video Generators**, Levon Khachatryan et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.13439)] [[Project](https:\u002F\u002Ftext2video-zero.github.io\u002F)] [[Video](https:\u002F\u002Fwww.dropbox.com\u002Fs\u002Fuv90mi2z598olsq\u002FText2Video-Zero.MP4?dl=0)] [[Code](https:\u002F\u002Fgithub.com\u002FPicsart-AI-Research\u002FText2Video-Zero)] [[Hugging Face](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002FPAIR\u002FText2Video-Zero)]\n       * (NeurIPS 2023 Datasets and Benchmarks) **FETV: A Benchmark for Fine-Grained Evaluation of Open-Domain Text-to-Video Generation**, Yuanxin Liu et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.01813v1)] [[Project](https:\u002F\u002Fgithub.com\u002Fllyx97\u002FFETV)]\n       * (arXiv preprint 2023) **Optimal Noise pursuit for Augmenting Text-to-Video Generation**, Shijie Ma et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.00949v1)] \n       * (arXiv preprint 2023) **Reuse and Diffuse: Iterative Denoising for Text-to-Video Generation**, Jiaxi Gu et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.03549)] [[Project](https:\u002F\u002Fanonymous0x233.github.io\u002FReuseAndDiffuse\u002F)] \n       * (arXiv preprint 2023) **Make-A-Protagonist: Generic Video Editing with An Ensemble of Experts**, Yuyang Zhao et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.08850)] [[Code](https:\u002F\u002Fgithub.com\u002FMake-A-Protagonist\u002FMake-A-Protagonist)] [[Project](https:\u002F\u002Fmake-a-protagonist.github.io\u002F)] \n         * 📚Image Editing, Background Editing, Text-to-Video Editing with Protagonist\n       * ⭐⭐(CVPR 2023) **Align your Latents: High-Resolution Video Synthesis with Latent Diffusion Models**, Andreas Blattmann et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.08818)] [[Project](https:\u002F\u002Fresearch.nvidia.com\u002Flabs\u002Ftoronto-ai\u002FVideoLDM\u002F)]\n       * (arXiv preprint 2023) [💬Music Visualization] **Generative Disco: Text-to-Video Generation for Music Visualization**, Vivian Liu et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.08551)] \n       * (arXiv preprint 2023) **Text-To-4D Dynamic Scene Generation**, Uriel Singer et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.11280)] [[Project](https:\u002F\u002Fmake-a-video3d.github.io\u002F)]\n       * (arXiv preprint 2022) **Tune-A-Video: One-Shot Tuning of Image Diffusion Models for Text-to-Video Generation**, Jay Zhangjie Wu et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.11565)] [[Project](https:\u002F\u002Ftuneavideo.github.io\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002Fshowlab\u002FTune-A-Video)]\n       * (arXiv preprint 2022) **MagicVideo: Efficient Video Generation With Latent Diffusion Models**, Daquan Zhou et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.11018)] [[Project](https:\u002F\u002Fmagicvideo.github.io\u002F#)] \n       * (arXiv preprint 2022) **Phenaki: Variable Length Video Generation From Open Domain Textual Description**, Ruben Villegas et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.02399)] \n       * (arXiv preprint 2022) **Imagen Video: High Definition Video Generation with Diffusion Models**, Jonathan Ho et al. 
[[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.02303v1)] [[Project](https:\u002F\u002Fimagen.research.google\u002Fvideo\u002F)] \n       * (arXiv preprint 2022) **Text-driven Video Prediction**, Xue Song et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.02872)] \n       * (arXiv preprint 2022) **Make-A-Video: Text-to-Video Generation without Text-Video Data**, Uriel Singer et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2209.14792)] [[Project](https:\u002F\u002Fmakeavideo.studio\u002F)] [[Short read](https:\u002F\u002Fwww.louisbouchard.ai\u002Fmake-a-video\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002Flucidrains\u002Fmake-a-video-pytorch)]\n       * (ECCV 2022) [💬Story Continuation] **StoryDALL-E: Adapting Pretrained Text-to-Image Transformers for Story Continuation**, Adyasha Maharana et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2209.06192)] [[Code](https:\u002F\u002Fgithub.com\u002Fadymaharana\u002Fstorydalle)]\n       * (arXiv preprint 2022) [💬Story → Video] **Word-Level Fine-Grained Story Visualization**, Bowen Li et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.02341)] [[Code](https:\u002F\u002Fgithub.com\u002Fmrlibw\u002FWord-Level-Story-Visualization)]\n       * (arXiv preprint 2022) **CogVideo: Large-scale Pretraining for Text-to-Video Generation via Transformers**, Wenyi Hong et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.15868)] [[Code](https:\u002F\u002Fgithub.com\u002FTHUDM\u002FCogVideo)]\n       * (CVPR 2022) **Show Me What and Tell Me How: Video Synthesis via Multimodal Conditioning**, Yogesh Balaji et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.02573)] [[Code](https:\u002F\u002Fgithub.com\u002Fsnap-research\u002FMMVID)] [[Project](https:\u002F\u002Fsnap-research.github.io\u002FMMVID\u002F)]\n       * (arXiv preprint 2022) **Video Diffusion Models**, Jonathan Ho et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.03458)] [[Project](https:\u002F\u002Fvideo-diffusion.github.io\u002F)]\n       * (arXiv preprint 2021) [❌Generation Task] **Transcript to Video: Efficient Clip Sequencing from Texts**, Ligong Han et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2107.11851.pdf)] [[Project](http:\u002F\u002Fwww.xiongyu.me\u002Fprojects\u002Ftranscript2video\u002F)]\n       * (arXiv preprint 2021) **GODIVA: Generating Open-DomaIn Videos from nAtural Descriptions**, Chenfei Wu et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2104.14806.pdf)] \n       * (arXiv preprint 2021) **Text2Video: Text-driven Talking-head Video Synthesis with Phonetic Dictionary**, Sibo Zhang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2104.14631.pdf)] \n       * (IEEE Access 2020) **TiVGAN: Text to Image to Video Generation With Step-by-Step Evolutionary Generator**, DOYEON KIM et al. [[Paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9171240)] \n       * (IJCAI 2019) **Conditional GAN with Discriminative Filter Generation for Text-to-Video Synthesis**, Yogesh Balaji et al. [[Paper](https:\u002F\u002Fwww.ijcai.org\u002FProceedings\u002F2019\u002F0276.pdf)] [[Code](https:\u002F\u002Fgithub.com\u002Fminrq\u002FCGAN_Text2Video)] \n       * (IJCAI 2019) **IRC-GAN: Introspective Recurrent Convolutional GAN for Text-to-video Generation**, Kangle Deng et al. 
[[Paper](https:\u002F\u002Fwww.ijcai.org\u002FProceedings\u002F2019\u002F0307.pdf)] \n       * (CVPR 2019) [💬Story → Video] **StoryGAN: A Sequential Conditional GAN for Story Visualization**, Yitong Li et al. [[Paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fhtml\u002FLi_StoryGAN_A_Sequential_Conditional_GAN_for_Story_Visualization_CVPR_2019_paper.html)] [[Code](https:\u002F\u002Fgithub.com\u002Fyitong91\u002FStoryGAN?utm_source=catalyzex.com)]\n       * (AAAI 2018) **Video Generation From Text**, Yitong Li et al. [[Paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F12233)] \n       * (ACMMM 2017) **To create what you tell: Generating videos from captions**, Yingwei Pan et al. [[Paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3123266.3127905)] \n\n[\u003Cu>\u003C🎯Back to Top>\u003C\u002Fu>](#head-content)\n\n   * \u003Cspan id=\"head-t2music\"> **Text → Music** \u003C\u002Fspan> \n       * ⭐(arXiv preprint 2023) **MusicLM: Generating Music From Text**, Andrea Agostinelli et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.11325)] [[Project](https:\u002F\u002Fgoogle-research.github.io\u002Fseanet\u002Fmusiclm\u002Fexamples\u002F)] [[MusicCaps](https:\u002F\u002Fwww.kaggle.com\u002Fdatasets\u002Fgoogleai\u002Fmusiccaps)]\n\n[\u003Cu>\u003C🎯Back to Top>\u003C\u002Fu>](#head-content)\n\n## \u003Cspan id=\"head7\"> Contact Me \u003C\u002Fspan>\n\n [![Star History Chart](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FYutong-Zhou-cv_Awesome-Text-to-Image_readme_5b587d094885.png)](https:\u002F\u002Fstar-history.com\u002F#Yutong-Zhou-cv\u002FAwesome-Text-to-Image&Date)\n\nIf you have any questions or comments, please feel free to contact [**Yutong**](https:\u002F\u002Felizazhou96.github.io\u002F) ლ(╹◡╹ლ)\n\n## \u003Cspan id=\"head8\"> Contributors \u003C\u002Fspan>\n\n![Alt](https:\u002F\u002Frepobeats.axiom.co\u002Fapi\u002Fembed\u002F2a1ae2aebaa287bfbf50a9aafdfde0406c1b0cfe.svg \"Repobeats analytics image\")\n\n\u003Ca href=\"https:\u002F\u002Fgithub.com\u002FYutong-Zhou-cv\u002Fawesome-Text-to-Image\u002Fgraphs\u002Fcontributors\">\n  \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FYutong-Zhou-cv_Awesome-Text-to-Image_readme_332ca1af655f.png\" \u002F>\n\u003C\u002Fa>\n\n> Made with [contrib.rocks](https:\u002F\u002Fcontrib.rocks).\n","# \u003Cp align=center> Awesome 文本📝转图像🌇\u003C\u002Fp>\n\u003C!--# \u003Cp align=center>`Awesome Text📝-to-Image🌇`\u003C\u002Fp>-->\n\u003Cdiv align=center>\n\n\u003Cp>\n \n ![GitHub stars](https:\u002F\u002Fimg.shields.io\u002Fgithub\u002Fstars\u002FYutong-Zhou-cv\u002FAwesome-Text-to-Image.svg?color=red&style=for-the-badge) \n ![GitHub forks](https:\u002F\u002Fimg.shields.io\u002Fgithub\u002Fforks\u002FYutong-Zhou-cv\u002FAwesome-Text-to-Image.svg?style=for-the-badge) \n ![GitHub activity](https:\u002F\u002Fimg.shields.io\u002Fgithub\u002Flast-commit\u002FYutong-Zhou-cv\u002FAwesome-Text-to-Image?color=yellow&style=for-the-badge) \n ![GitHub issues](https:\u002F\u002Fimg.shields.io\u002Fgithub\u002Fissues\u002FYutong-Zhou-cv\u002FAwesome-Text-to-Image?style=for-the-badge)\n ![GitHub closed issues](https:\u002F\u002Fimg.shields.io\u002Fgithub\u002Fissues-closed\u002FYutong-Zhou-cv\u002FAwesome-Text-to-Image?color=inactive&style=for-the-badge)\n \n 
[![Awesome](https:\u002F\u002Fcdn.rawgit.com\u002Fsindresorhus\u002Fawesome\u002Fd7305f38d29fed78fa85652e3a63e154dd8e8829\u002Fmedia\u002Fbadge.svg)](https:\u002F\u002Fgithub.com\u002Fsindresorhus\u002Fawesome) \n [![Hits](https:\u002F\u002Fhits.seeyoufarm.com\u002Fapi\u002Fcount\u002Fincr\u002Fbadge.svg?url=https%3A%2F%2Fgithub.com%2FYutong-Zhou-cv%2Fawesome-Text-to-Image&count_bg=%23DD4B78&title_bg=%23555555&icon=jabber.svg&icon_color=%23E7E7E7&title=Hits(2023.05~)&edge_flat=false)](https:\u002F\u002Fhits.seeyoufarm.com)\n\u003C\u002Fp>\n\n一个关于文本到图像合成\u002F操控任务的资源集合。\n \n\u003C\u002Fdiv>\n\n\u003C!--\n![Figure from paper](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FYutong-Zhou-cv_Awesome-Text-to-Image_readme_32fff393e6f2.png)\n> *From: [Hierarchical Text-Conditional Image Generation with CLIP Latents](https:\u002F\u002Fcdn.openai.com\u002Fpapers\u002Fdall-e-2.pdf)*\n-->\n\n## ⭐ 引用\n\n如果您觉得本文和这个仓库对您的研究有所帮助，请在下方引用：\n\n```bibtex\n\n@inproceedings{zhou2023vision+,\n  title={Vision + Language Applications: A Survey},\n  author={Zhou, Yutong and Shimada, Nobutaka},\n  booktitle={Proceedings of the IEEE\u002FCVF Conference on Computer Vision and Pattern Recognition},\n  pages={826--842},\n  year={2023}\n}\n\n```\n\n## 🎑 新闻\n> [!TIP]\n> **版本1.0**（一体化版本）可在此处找到[链接](https:\u002F\u002Fgithub.com\u002FYutong-Zhou-cv\u002FAwesome-Text-to-Image\u002Ftree\u002F2024-Version-1.0)，并将于**2024年2月29日停止更新**。\n* [2024年2月29日] 更新**“Awesome Text to Image”版本2.0**！论文与代码以及相关工作也将于3月逐步更新。\n* [2023年5月26日] 🔥新增我们的综述论文[**视觉+语言应用：综述**](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023W\u002FGCV\u002Fhtml\u002FZhou_Vision__Language_Applications_A_Survey_CVPRW_2023_paper.html)及特别[**最佳合集**](https:\u002F\u002Fgithub.com\u002FYutong-Zhou-cv\u002FAwesome-Text-to-Image\u002Fblob\u002Fmain\u002F%5BCVPRW%202023%F0%9F%8E%88%5D%20%20Best%20Collection.md)列表！\n* [2023年4月4日] 【**视觉+语言应用：综述**】被CVPRW2023接受。\n* [2020年10月13日] **Awesome-Text-to-Image**仓库创建。\n\n## *\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FYutong-Zhou-cv_Awesome-Text-to-Image_readme_eac0a2beaf02.png\" width=\"25\" \u002F> 待办*\n* - [ ] 添加**主题顺序**列表和**时间顺序**列表\n* - [x] 添加[**最佳合集**](https:\u002F\u002Fgithub.com\u002FYutong-Zhou-cv\u002FAwesome-Text-to-Image\u002Fblob\u002Fmain\u002F%5BCVPRW%202023%F0%9F%8E%88%5D%20%20Best%20Collection.md)\n* - [x] 创建[**⏳近期关注论文**](https:\u002F\u002Fgithub.com\u002FYutong-Zhou-cv\u002FAwesome-Text-to-Image\u002Fblob\u002Fmain\u002F%E2%8F%B3Recently%20Focused%20Papers.md)\n\n## \u003Cspan id=\"head-content\"> *内容* \u003C\u002Fspan>\n* - [ ] [**1. 描述**](#head1)\n\n* - [ ] [**2. 定量评价指标**](https:\u002F\u002Fgithub.com\u002FYutong-Zhou-cv\u002FAwesome-Text-to-Image\u002Fblob\u002F2024-Version-2.0\u002FLists\u002F2-Quantitative%20Evaluation%20Metrics.md)\n \n* - [ ] [**3. 数据集**](https:\u002F\u002Fgithub.com\u002FYutong-Zhou-cv\u002FAwesome-Text-to-Image\u002Fblob\u002F2024-Version-2.0\u002FLists\u002F3-Datasets.md)  \n\n* - [ ] [**4. 项目**](https:\u002F\u002Fgithub.com\u002FYutong-Zhou-cv\u002FAwesome-Text-to-Image\u002Fblob\u002F2024-Version-2.0\u002FLists\u002F4-Project.md)\n\n* - [ ] [5. 
Paper With Code](#head5)\n  * - [ ] [Text to Face👨🏻🧒👧🏼🧓🏽](#head-t2f)\n  * - [ ] [Specific Issues🤔](#head-si)\n  * - [ ] [**Survey**](https:\u002F\u002Fgithub.com\u002FYutong-Zhou-cv\u002FAwesome-Text-to-Image\u002Fblob\u002F2024-Version-2.0\u002FLists\u002F5.0-Survey.md)&emsp;&emsp;&nbsp;\u003Cimg src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FNumber%20of%20Papers-11-E83015?style=social\" alt=\"PaperNum\"\u002F>\n  * - [ ] [2025](#head-2025)&emsp;&emsp;&emsp;&nbsp;\u003Cimg src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FNumber%20of%20Papers-xx-B481BB?style=flat-square\" alt=\"PaperNum\"\u002F>\n  * - [ ] [2024](#head-2024)&emsp;&emsp;&emsp;&nbsp;\u003Cimg src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FNumber%20of%20Papers-xx-B481BB?style=flat-square\" alt=\"PaperNum\"\u002F>\n  * - [ ] [2023](#head-2023)&emsp;&emsp;&emsp;&nbsp;\u003Cimg src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FNumber%20of%20Papers-xx-90B44B?style=flat-square\" alt=\"PaperNum\"\u002F>\n  * - [x] [**2022**](https:\u002F\u002Fgithub.com\u002FYutong-Zhou-cv\u002FAwesome-Text-to-Image\u002Fblob\u002F2024-Version-2.0\u002FLists\u002F5.3-2022.md)&emsp;&emsp;&emsp;&nbsp;\u003Cimg src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FNumber%20of%20Papers-69-2EA9DF?style=flat-square\" alt=\"PaperNum\"\u002F>\n  * - [x] [**2021**](https:\u002F\u002Fgithub.com\u002FYutong-Zhou-cv\u002FAwesome-Text-to-Image\u002Fblob\u002F2024-Version-2.0\u002FLists\u002F5.2-2021.md)&emsp;&emsp;&emsp;&nbsp;\u003Cimg src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FNumber%20of%20Papers-31-F9BF45?style=flat-square\" alt=\"PaperNum\"\u002F>\n  * - [x] [**2016~2020**](https:\u002F\u002Fgithub.com\u002FYutong-Zhou-cv\u002FAwesome-Text-to-Image\u002Fblob\u002F2024-Version-2.0\u002FLists\u002F5.1-2016~2020.md)&ensp;\u003Cimg src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FNumber%20of%20Papers-46-E83015?style=flat-square\" alt=\"PaperNum\"\u002F>\n  \n* - [ ] [6. 
Other Related Works](#head6)\n  * - [ ] [📝Prompt Engineering📝](#head-pe)\n  * - [ ] [⭐Multimodality⭐](#head-mm)\n  * - [ ] [🛫Applications🛫](#head-app)\n  * - [ ] [Text+Image\u002FVideo → Image\u002FVideo](#head-ti2i)\n  * - [ ] [Text+Layout → Image](#head-tl2i)\n  * - [ ] [Others+Text+Image\u002FVideo → Image\u002FVideo](#head-oti2i)\n  * - [ ] [Layout\u002FMask → Image](#head-l2i)\n  * - [ ] [Label-set → Semantic maps](#head-l2s)\n  * - [ ] [Speech → Image](#head-s2i)\n  * - [ ] [Scene Graph → Image](#head-sg2i)\n  * - [ ] [Text → Visual Retrieval](#head-t2vr)\n  * - [ ] [Text → 3D\u002FMotion\u002FShape\u002FMesh\u002FObject...](#head-t2m)\n  * - [ ] [Text → Video](#head-t2v)\n  * - [ ] [Text → Music](#head-t2music)\n\n* [联系我](#head7)\n  \n* [贡献者](#head8)\n\n## \u003Cspan id=\"head1\"> *描述* \u003C\u002Fspan>\n\n* 在过去的几十年里，计算机视觉（CV）和自然语言处理（NLP）领域在深度学习研究中取得了多项重大技术突破。最近，研究人员开始关注如何将语义信息与视觉信息结合起来，以解决这些传统上独立的领域之间的跨学科问题。 \n目前已有不少研究致力于文本到图像的合成技术，这类技术能够将输入的文本描述（关键词或句子）转换为逼真的图像。\n\n* 文本到图像任务的相关论文、代码和数据集均可在此处获取。\n\n>🐌 Markdown 格式：\n> * （会议\u002F期刊 年份）**标题**, 第一作者等. [[论文](URL)] [[代码](URL)] [[项目](URL)]\n\n\n\n## \u003Cspan id=\"head5\"> *论文与代码* \u003C\u002Fspan>\n\n* \u003Cspan id=\"head-t2f\"> **文本转人脸👨🏻🧒👧🏼🧓🏽**  \u003C\u002Fspan> \n    * (ECCV 2024) **PreciseControl：通过细粒度属性控制增强文本到图像扩散模型**, Rishubh Parihar 等人 [[论文](https:\u002F\u002Fwww.arxiv.org\u002Fabs\u002F2408.05083)] [[项目](https:\u002F\u002Frishubhpar.github.io\u002FPreciseControl.home\u002F)] \n    * (arXiv 预印本 2024) [💬 数据集] **1500 万个多模态人脸图像-文本数据集**, Dawei Dai 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.08515)] \n    * (arXiv 预印本 2024) [💬 3D] **Portrait3D：利用金字塔表示和 GAN 先验进行文本引导的高质量 3D 人像生成**, Yiqian Wu 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.10394v1)] \n    * (CVPR 2024) **CosmicMan：面向人类的文本到图像基础模型**, Shikai Li 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.01294)] [[项目](https:\u002F\u002Fcosmicman-cvpr2024.github.io\u002F)] \n    * (ICML 2024) **通过直接跨模态映射与几何正则化实现快速文本到 3D 人脸生成与操控**, Jinlu Zhang 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.06702)] [[代码](https:\u002F\u002Fgithub.com\u002FAria-Zhangjl\u002FE3-FaceNet)]    \n    * (NeurIPS 2023) **通过 Celeb 基础将任何人插入扩散模型**, Ge Yuan 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.00926)] [[项目](https:\u002F\u002Fceleb-basis.github.io\u002F)] \n    * (IJACSA 2023) **Mukh-Oboyob：基于 Stable Diffusion 和 BanglaBERT 的孟加拉语文本到人脸合成**, Aloke Kumar Saha 等人 [[论文](https:\u002F\u002Fthesai.org\u002FPublications\u002FViewPaper?Volume=14&Issue=11&Code=IJACSA&SerialNo=142)] [[代码](https:\u002F\u002Fgithub.com\u002FCodernob\u002FMukh-Oboyob)]\n    * (SIGGRAPH 2023) [💬 3D] **DreamFace：在文本指导下渐进式生成可动画化的 3D 人脸**, Longwen Zhang 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.03117)] [[项目](https:\u002F\u002Fsites.google.com\u002Fview\u002Fdreamface)] [[HuggingFace](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002FDEEMOSTECH\u002FChatAvatar)]\n    * (CVPR 2023) [💬 3D] **从自然语言描述生成高保真 3D 人脸**, Menghua Wu 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.03302)] [[代码](https:\u002F\u002Fgithub.com\u002Fzhuhao-nju\u002Fdescribe3d)] [[项目](https:\u002F\u002Fmhwu2017.github.io\u002F)]\n    * (CVPR 2023) **用于多模态人脸生成与编辑的协同扩散**, Ziqi Huang 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.10530v1)] [[代码](https:\u002F\u002Fgithub.com\u002Fziqihuangg\u002FCollaborative-Diffusion)] [[项目](https:\u002F\u002Fziqihuangg.github.io\u002Fprojects\u002Fcollaborative-diffusion.html)]\n    * (模式识别 2023) **你在哪里编辑，就得到什么：基于区域注意力的文本引导图像编辑**, Changming Xiao 等人 
[[论文](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0031320323001589)] [[代码](https:\u002F\u002Fgithub.com\u002FBig-Brother-Pikachu\u002FWhere2edit)]\n    * (arXiv 预印本 2022) **通过潜在对齐桥接 CLIP 和 StyleGAN 以进行图像编辑**, Wanfeng Zheng 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.04506)] \n    * (ACMMM 2022) **学习动态先验知识用于文本到人脸像素合成**, Jun Peng 等人 [[论文](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3503161.3547818)]\n    * (ACMMM 2022) **迈向开放式的文本到人脸生成、组合与操控**, Jun Peng 等人 [[论文](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3503161.3547758)]\n    * (BMVC 2022) **clip2latent：使用去噪扩散和 CLIP 对预训练 StyleGAN 进行文本驱动采样**, Justin N. M. Pinkney 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.02347v1)] [[代码](https:\u002F\u002Fgithub.com\u002Fjustinpinkney\u002Fclip2latent)]\n    * (arXiv 预印本 2022) **ManiCLIP：从文本进行多属性人脸操控**, Hao Wang 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.00445)]\n    * (arXiv 预印本 2022) **野外生成的人脸：Stable Diffusion、Midjourney 和 DALL-E 2 的定量比较**, Ali Borji, [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.00586)] [[代码](https:\u002F\u002Fgithub.com\u002Faliborji\u002FGFW)] [[数据](https:\u002F\u002Fdrive.google.com\u002Ffile\u002Fd\u002F1EhbUK64J3d0_chmD2mpBuWB-Ic7LeFlP\u002Fview)]\n    * (arXiv 预印本 2022) **为预训练人脸生成器学习无文本的自然语言接口**, Xiaodan Du 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2209.03953)] [[代码](https:\u002F\u002Fgithub.com\u002Fduxiaodan\u002FFast_text2StyleGAN)]\n    * (基于知识的系统-2022) **CMAFGAN：一种基于跨模态注意力融合的生成对抗网络用于属性词到人脸合成**, Xiaodong Luo 等人 [[论文](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0950705122008863)]\n    * (神经网络-2022) **DualG-GAN，一种基于双通道生成器的生成对抗网络用于文本到人脸合成**, Xiaodong Luo 等人 [[论文](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0893608022003161)]\n    * (arXiv 预印本 2022) **使用 StyleGAN2 进行文本到人脸生成**, D. M. A. 
Ayanthi 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.12512)]\n    * (CVPR 2022) **StyleT2I：迈向组合式且高保真的文本到图像合成**, Zhiheng Li 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.15799)] [[代码](https:\u002F\u002Fgithub.com\u002Fzhihengli-UR\u002FStyleT2I)]\n    * (arXiv 预印本 2022) **StyleT2F：使用 StyleGAN2 从文本描述生成人脸**, Mohamed Shawky Sabae 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.07924)] [[代码](https:\u002F\u002Fgithub.com\u002FDarkGeekMS\u002FRetratista)]\n    * (CVPR 2022) **AnyFace：自由风格文本到人脸合成与操控**, Jianxin Sun 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.15334)] \n    * (IEEE 网络科学与工程学报-2022) **TextFace：基于文本到风格映射的人脸生成与操控**, Xianxu Hou 等人 [[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F9737433)]\n    * (CVPR 2021) **TediGAN：文本引导的多样化图像生成与操控**, Weihao Xia 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2012.03308.pdf)] [[扩展版](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2104.08910.pdf)][[代码](https:\u002F\u002Fgithub.com\u002FIIGROUP\u002FTediGAN)] [[数据集](https:\u002F\u002Fgithub.com\u002FIIGROUP\u002FMulti-Modal-CelebA-HQ-Dataset)] [[Colab](https:\u002F\u002Fcolab.research.google.com\u002Fgithub\u002Fweihaox\u002FTediGAN\u002Fblob\u002Fmain\u002Fplayground.ipynb)] [[视频](https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=L8Na2f5viAM)] \n    * (FG 2021) **用于文本到人脸合成与操控的生成对抗网络，结合预训练 BERT 模型**, Yutong Zhou 等人 [[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9666791)] \n    * (ACMMM 2021) **多标题文本到人脸合成：数据集与算法**, Jianxin Sun 等人 [[论文](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3474085.3475391)] [[代码](https:\u002F\u002Fgithub.com\u002Fcripac-sjx\u002FSEA-T2F)]\n    * (ACMMM 2021) **用于文本到人脸合成与操控的生成对抗网络**, Yutong Zhou. [[论文](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3474085.3481026)]\n    * (WACV 2021) **按需定制人脸：通过属性解耦进行文本到人脸生成**, Tianren Wang 等人 [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FWACV2021\u002Fpapers\u002FWang_Faces_a_la_Carte_Text-to-Face_Generation_via_Attribute_Disentanglement_WACV_2021_paper.pdf)] \n    * (arXiv 预印本 2019) **FTGAN：用于文本到人脸生成的全训练生成对抗网络**, Xiang Chen 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1904.05729)]\n\n[\u003Cu>\u003C🎯返回顶部>\u003C\u002Fu>](#head-content)\n\n* \u003Cspan id=\"head-si\"> **具体问题🤔**  \u003C\u002Fspan>\n    * (arXiv预印本2026) [🖼️ 美学数据集] **Moonworks Lunara美学数据集**, 王燕等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2601.07941)] [[数据集](https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002Fmoonworks\u002Flunara-aesthetic)]\n    * (arXiv预印本2026) [📸 变化数据集] **Moonworks Lunara美学II：图像变化数据集** 王燕等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2602.01666)] [[数据集](https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002Fmoonworks\u002Flunara-aesthetic-image-variations)]\n    * (arXiv预印本2025) [💬 可微物体计数] **YOLO-Count：面向文本到图像生成的可微物体计数**, 曾冠宁等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2508.00728)]\n    * (arXiv预印本2024) [💬 性别偏见对齐] **PopAlign：面向公平文本到图像生成的人口级对齐**, 李舒凡等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.19668)] [[代码](https:\u002F\u002Fgithub.com\u002Fjacklishufan\u002FPopAlignSDXL)]\n    * (arXiv预印本2024) [💬 细粒度反馈] **超越点赞\u002F点踩：解析文本到图像生成细粒度反馈的挑战**, 凯瑟琳·M·柯林斯等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.16807)]\n    * (CVPR 2024-最佳论文) [💬 人类反馈] **面向文本到图像生成的丰富人类反馈**, 梁友伟等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.10240)]\n    * (ICLR 2024) [💬 未经授权的数据] **DIAGNOSIS：检测文本到图像扩散模型中的未经授权数据使用**, 王振廷等 [[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=f8S3aLm0Vp)] 
[[代码](https:\u002F\u002Fgithub.com\u002FZhentingWang\u002FDIAGNOSIS)]\n    * (CVPR 2024) [💬 开放集偏见检测] **OpenBias：文本到图像生成模型中的开放集偏见检测**, 莫雷诺·丁卡等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.07990)]\n    * (arXiv预印本2024) [💬 空间一致性] **做对了：提升文本到图像模型的空间一致性**, 阿格尼特·查特吉等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.01197)] [[项目](https:\u002F\u002Fspright-t2i.github.io\u002F)] [[代码](https:\u002F\u002Fgithub.com\u002FSPRIGHT-T2I\u002FSPRIGHT)] [[数据集](https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002FSPRIGHT-T2I\u002Fspright)]\n    * (arXiv预印本2024) [💬 安全性] **SafeGen：缓解文本到图像模型中的不安全内容生成**, 李新峰等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.06666)] [[代码](https:\u002F\u002Fgithub.com\u002FLetterLiGo\u002Ftext-agnostic-governance)]\n    * (arXiv预印本2024) [💬 美学] **Playground v2.5：提升文本到图像生成美学质量的三大洞见**, 李大清等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.17245)] [[项目](https:\u002F\u002Fblog.playgroundai.com\u002Fplayground-v2-5\u002F)] [[HuggingFace](https:\u002F\u002Fhuggingface.co\u002Fplaygroundai\u002Fplayground-v2.5-1024px-aesthetic)]\n    * (EMNLP 2023) [💬 文本视觉性] **利用大型视觉语言模型学习文本的视觉性**, 戈拉夫·维尔玛等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.10434)] [[项目](https:\u002F\u002Fgaurav22verma.github.io\u002Ftext-visualness\u002F)]\n    * (arXiv预印本2023) [💬 抵抗恶意适应] **IMMA：免疫文本到图像模型抵抗恶意适应**, 郑一嘉等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.18815)] [[项目](https:\u002F\u002Fzhengyjzoe.github.io\u002Fimma\u002F)]\n    * (arXiv预印本2023) [💬 原则性重提示] **一张图胜过千言万语：原则性重提示提升图像生成效果**, 埃亚尔·塞加利斯等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.16656)]\n    * ⭐⭐(NeurIPS 2023) [💬 全面评估] **文本到图像模型的全面评估**, 托尼·李等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.04287)] [[代码](https:\u002F\u002Fgithub.com\u002Fstanford-crfm\u002Fhelm)] [[项目](https:\u002F\u002Fcrfm.stanford.edu\u002Fheim\u002Fv1.1.0\u002F)]\n    * (ICCV 2023) [💬 安全性] **让艺术家“Rickroll”：向文本编码器注入后门以实现文本到图像合成**, 卢卡斯·斯特鲁佩克等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.02408)] [[代码](https:\u002F\u002Fgithub.com\u002FLukasStruppek\u002FRickrolling-the-Artist)]\n    * (arXiv预印本2023) [💬 自然攻击能力] **扩散模型的有趣特性：大规模数据集用于评估文本到图像生成模型的自然攻击能力**, 佐藤隆美等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.15692)]\n    * (ACL 2023) [💬 偏见] **视觉语言模型中偏见的多维度研究**, 加布里埃莱·鲁杰里等 [[论文](https:\u002F\u002Faclanthology.org\u002F2023.findings-acl.403\u002F)]\n    * (FAACT 2023) [💬 人口统计刻板印象] **易于获取的文本到图像生成放大了大规模人口统计刻板印象**, 费德里科·比安奇等 [[论文](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3593013.3594095)]\n    * (arXiv预印本2023) [💬 鲁棒性] **评估文本到图像扩散模型在现实世界攻击下的鲁棒性**, 高洪成等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.13103)]\n    * (CVPR 2023) [💬 对抗鲁棒性分析] **RIATIG：可靠且难以察觉的对抗性文本到图像生成与自然提示**, 刘涵等 [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FLiu_RIATIG_Reliable_and_Imperceptible_Adversarial_Text-to-Image_Generation_With_Natural_Prompts_CVPR_2023_paper.html)]\n    * (arXiv预印本2023) [💬 文本反转] **这个损失有意义吗？用确定性目标评估加速文本反转**, 安东·沃罗诺夫等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.04841)] [[代码](https:\u002F\u002Fgithub.com\u002Fyandex-research\u002FDVAR)]\n    * (arXiv预印本2022) [💬 可解释干预] **不只是漂亮图片：文本到图像生成器支持可解释干预以获得稳健表示**, 袁建浩等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.11237)]\n    * (arXiv预印本2022) [💬 道德图像操作] **判断、定位和编辑：确保文本到图像生成的视觉常识道德性**, 朴圣范等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.03507)]\n    * (arXiv预印本2022) [💬 创意迁移] **基于反转的创意迁移与扩散模型**, 张宇欣等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.13203)]\n    * (arXiv预印本2022) [💬 模糊性] **大象在飞吗？解析文本到图像生成模型中的模糊性**, 
尼纳雷·梅赫拉比等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.12503)]\n    * (arXiv预印本2022) [💬 种族政治] **一个拼写符号：DALL-E 2、无视觉图像与特征空间的种族政治**, 法比安·奥弗特等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.06323)]\n    * (arXiv预印本2022) [💬 隐私分析] **针对文本到图像生成模型的成员推断攻击**, 吴奕欣等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.00968)]\n    * (arXiv预印本2022) [💬 假图像真实性评估] **DE-FAKE：检测与归因由文本到图像扩散模型生成的假图像**, 沙泽阳等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.06998v1)]\n    * (arXiv预印本2022) [💬 文化偏见] **有偏见的艺术家：通过同形异义词在文本引导图像生成模型中利用文化偏见**, 卢卡斯·斯特鲁佩克等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2209.08891)]\n\n[\u003Cu>\u003C🎯返回顶部>\u003C\u002Fu>](#head-content)\n\n* \u003Cspan id=\"head-2025\"> **2025**  \u003C\u002Fspan> \n     * （arXiv预印本 2025）**GenExam：一种多学科文本到图像考试**，王兆凯等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2509.14232)]\n     * （arXiv预印本 2025）**RefVNLI：迈向面向主题的文本到图像生成的可扩展评估**，阿维夫·斯洛博德金等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.17502)]\n     * （arXiv预印本 2025）**GPT-4o图像生成能力的实证研究**，陈思翔等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.05979)]\n\n* \u003Cspan id=\"head-2024\"> **2024**  \u003C\u002Fspan> \n     * (arXiv预印本2024) **流程生成器匹配**, 黄泽民等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.19310)]\n     * (EMNLP 2024) **Kandinsky 3：面向多功能生成框架的文本到图像合成**, 弗拉基米尔·阿基普金等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.21061)] [[代码](https:\u002F\u002Fgithub.com\u002Fai-forever\u002FKandinsky-3)] [[项目](https:\u002F\u002Fai-forever.github.io\u002FKandinsky-3\u002F)] \n     * (arXiv预印本2024) **小数据集上文本到图像生成的数据外推**, 叶森茂和刘飞 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.01638)]\n     * ⭐⭐(arXiv预印本2024) **Imagen 3**, ImagenTeam-谷歌 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.07009)]\n     * (arXiv预印本2024) **MARS：用于细粒度文本到图像合成的自回归模型混合体**, 何旺贵等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.07614)]\n     * (快手) **Kolors：用于照片级真实感文本到图像合成的扩散模型高效训练**, 张思贤等 [[论文](https:\u002F\u002Fgithub.com\u002FKwai-Kolors\u002FKolors\u002Fblob\u002Fmaster\u002Fimgs\u002FKolors_paper.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FKwai-Kolors\u002FKolors)] [[项目](https:\u002F\u002Fkwai-kolors.github.io\u002Fpost\u002Fpost-2\u002F)] \n     * (CVPR 2024) [💬人类偏好] **学习多维度人类偏好用于文本到图像生成**, 张思贤等 [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fpapers\u002FZhang_Learning_Multi-Dimensional_Human_Preference_for_Text-to-Image_Generation_CVPR_2024_paper.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fwangbohan97\u002FKolors-MPS)] [[项目](https:\u002F\u002Fkwai-kolors.github.io\u002Fpost\u002Fpost-1\u002F)] \n     * (CVPR 2024) [💬文本到布局→文本+布局到图像] **基于注意力重聚焦的有据文本到图像合成**, 冯琼等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.05427)] [[项目](https:\u002F\u002Fattention-refocusing.github.io\u002F)] [[代码](https:\u002F\u002Fgithub.com\u002FAttention-Refocusing\u002Fattention-refocusing)] \n     * (arXiv预印本2024) **Dimba：Transformer-Mamba扩散模型**, 费正聪等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.01159)]\n     * (arXiv预印本2024) [💬生成与编辑] **MultiEdits：使用文本到图像扩散模型同时进行多方面编辑**, 黄明珍等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.00985)] [[项目](https:\u002F\u002Fmingzhenhuang.com\u002Fprojects\u002FMultiEdits.html)]\n     * (arXiv预印本2024) **AutoStudio：在多轮交互式图像生成中构建一致的主题**, 程俊豪等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.01388)] [[项目](https:\u002F\u002Fhowe183.github.io\u002FAutoStudio.io\u002F)] [[代码](https:\u002F\u002Fgithub.com\u002Fdonahowe\u002FAutoStudio)] \n     * (arXiv预印本2024) **TheaterGen：利用大语言模型进行角色管理以实现多轮图像生成的一致性**, 程俊豪等 
[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.18919)] [[项目](https:\u002F\u002Fhowe140.github.io\u002Ftheatergen.io\u002F)] [[代码](https:\u002F\u002Fgithub.com\u002Fdonahowe\u002FTheatergen)] \n     * (CVPR 2024) **Ranni：驯服文本到图像扩散以准确遵循指令**, 冯宇彤等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.17002)] [[项目](https:\u002F\u002Franni-t2i.github.io\u002FRanni\u002F)] [[代码](https:\u002F\u002Fgithub.com\u002Fali-vilab\u002FRanni)] \n     * (arXiv预印本2024) **CoMat：将文本到图像扩散模型与图像到文本概念匹配对齐**, 江东志等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.03653)] [[项目](https:\u002F\u002Fcaraj7.github.io\u002Fcomat\u002F)] [[代码](https:\u002F\u002Fgithub.com\u002FCaraJ7\u002FCoMat)] \n     * (arXiv预印本2024) **TextCraftor：你的文本编码器可以成为图像质量控制器**, 李燕玉等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.18978)] \n     * (CVPR 2024) **ECLIPSE：用于图像生成的资源高效文本到图像先验**, 帕特尔·迈特雷亚等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.04655)] [[项目](https:\u002F\u002Feclipse-t2i.vercel.app\u002F)] [[代码](https:\u002F\u002Fgithub.com\u002Feclipse-t2i\u002Feclipse-inference)] [[Hugging Face](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002FECLIPSE-Community\u002FECLIPSE-Kandinsky-v2.2)]\n     * (arXiv预印本2024) **SELMA：利用自动生成数据学习并融合技能专用文本到图像专家**, 李家鲁等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.06952)] [[项目](https:\u002F\u002Fselma-t2i.github.io\u002F)] [[代码](https:\u002F\u002Fgithub.com\u002Fjialuli-luka\u002FSELMA)]\n     * (ICLR 2024) **PixArt-α：用于照片级真实感文本到图像合成的快速扩散Transformer训练**, 陈俊松等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.00426)] [[项目](https:\u002F\u002Fpixart-alpha.github.io\u002F)] [[代码](https:\u002F\u002Fgithub.com\u002FPixArt-alpha\u002FPixArt-alpha)] [[Hugging Face](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002FPixArt-alpha\u002FPixArt-LCM)]\n     * (arXiv预印本2024) **PixArt-Σ：用于4K文本到图像生成的弱到强扩散Transformer训练**, 陈俊松等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.04692)] \n     * (arXiv预印本2024) **PIXART-δ：基于潜在一致性模型的快速可控图像生成**, 陈俊松等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.05252)] \n     * (CVPR 2024) **用于文本到图像生成的判别探测与调优**, 曲雷刚等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.04321)] [[项目](https:\u002F\u002Fdpt-t2i.github.io\u002F)] \n     * (CVPR 2024) **RealCustom：为实时开放域文本到图像定制缩小真实文本词**, 黄梦琪等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.00483)] [[项目](https:\u002F\u002Fcorleone-huang.github.io\u002Frealcustom\u002F)] \n     * ⭐(arXiv预印本2024) **SDXL-Lightning：渐进式对抗扩散蒸馏**, 林善川等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.13929)] [[HuggingFace](https:\u002F\u002Fhuggingface.co\u002FByteDance\u002FSDXL-Lightning)] [[演示](https:\u002F\u002Ffastsdxl.ai\u002F)]\n     * ⭐(arXiv预印本2024) **RealCompo：现实主义与构图性之间的动态平衡提升文本到图像扩散模型**, 张欣晨等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.12908)] [[代码](https:\u002F\u002Fgithub.com\u002FYangLing0818\u002FRealCompo)] \n     * (arXiv预印本2024) **为文本到图像生成学习连续3D词**, 郑泰英等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.08654)] [[项目](https:\u002F\u002Fttchengab.github.io\u002Fcontinuous_3d_words\u002F)] [[代码](https:\u002F\u002Fgithub.com\u002Fttchengab\u002Fcontinuous_3d_words_code\u002F)]\n     * (arXiv预印本2024) **DiffusionGPT：LLM驱动的文本到图像生成系统**, 秦杰等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.10061)] [[项目](https:\u002F\u002Fdiffusiongpt.github.io\u002F)] [[代码](https:\u002F\u002Fgithub.com\u002FDiffusionGPT\u002FDiffusionGPT)]\n     * (arXiv预印本2024) **DressCode：根据文本指导自动递归缝制并生成服装**, 何凯等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.16465)] 
[[项目](https:\u002F\u002Fsites.google.com\u002Fview\u002Fprojectpage-dresscode)]\n\n[\u003Cu>\u003C🎯返回顶部>\u003C\u002Fu>](#head-content)\n\n* \u003Cspan id=\"head-2023\"> **2023**  \u003C\u002Fspan>\n     * (arXiv预印本2023) **CoDi-2：上下文内、交错式与交互式的任意文本到任意图像生成**, 汤子宁等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.18775)] [[项目](https:\u002F\u002Fcodi-2.github.io\u002F)] [[代码](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002Fi-Code\u002Ftree\u002Fmain\u002FCoDi-2)] \n     * (arXiv预印本2023) **DiffBlender：可扩展且可组合的多模态文本到图像扩散模型**, 金圣允等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.15194)] [[代码](https:\u002F\u002Fgithub.com\u002Fsungnyun\u002Fdiffblender)] [[项目](https:\u002F\u002Fsungnyun.github.io\u002Fdiffblender\u002F)] \n     * (arXiv预印本2023) **ElasticDiffusion：无需训练的任意尺寸图像生成**, 哈吉-阿里·莫亚德等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.18822)] [[项目](https:\u002F\u002Felasticdiffusion.github.io\u002F)] [[代码](https:\u002F\u002Fgithub.com\u002Fmoayedhajiali\u002Felasticdiffusion-official)] [[演示](https:\u002F\u002Freplicate.com\u002Fmoayedhajiali\u002Felasticdiffusion)]\n     * (ICCV 2023) **BoxDiff：无需训练的框约束扩散模型进行文本到图像合成**, 谢金恒等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.10816)] [[代码](https:\u002F\u002Fgithub.com\u002Fshowlab\u002FBoxDiff)] \n     * (arXiv预印本2023) **晚期约束扩散引导用于可控图像合成**, 刘畅等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.11520)] [[代码](https:\u002F\u002Fgithub.com\u002FAlonzoLeeeooo\u002FLCDG)]\n     * (arXiv预印本2023) **一张图胜过千言万语：多属性反演用于约束文本到图像合成**, 阿什瓦里亚·阿加瓦尔等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.11919)] \n     * ⭐(arXiv预印本2023) **UFOGen：通过扩散GAN实现大规模文本到图像生成的一次性前向传播**, 徐彦武等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.09257)] \n     * (ICCV 2023) **ITI-GEN：包容性文本到图像生成**, 张成等 [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fhtml\u002FZhang_ITI-GEN_Inclusive_Text-to-Image_Generation_ICCV_2023_paper.html)] [[代码](https:\u002F\u002Fgithub.com\u002Fhumansensinglab\u002FITI-GEN)] [[项目](https:\u002F\u002Fczhang0528.github.io\u002Fiti-gen)] \n     * (arXiv预印本2023) **Mini-DALLE3：通过提示大型语言模型进行交互式文本到图像生成**, 赖泽强等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.07653)] [[代码](https:\u002F\u002Fgithub.com\u002FZeqiang-Lai\u002FMini-DALLE3)] [[演示](http:\u002F\u002F139.224.23.16:10085\u002F)] [[项目](https:\u002F\u002Fminidalle3.github.io\u002F)] \n     * (arXiv预印本2023) [💬评估] **GenEval：面向对象的文本到图像对齐评估框架**, 德鲁巴·戈什等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.11513v1)] [[代码](https:\u002F\u002Fgithub.com\u002Fdjghosh13\u002Fgeneval)] \n     * ⭐(arXiv预印本2023) **Kandinsky：改进的文本到图像合成，结合图像先验与潜在扩散**, 安东·拉日加耶夫等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.03502)] [[代码](https:\u002F\u002Fgithub.com\u002Fai-forever\u002FKandinsky-2)] [[演示](https:\u002F\u002Ffusionbrain.ai\u002Fen\u002Feditor\u002F)] [[演示视频](https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=c7zHPc59cWU)] [[Hugging Face](https:\u002F\u002Fhuggingface.co\u002Fkandinsky-community)]\n     * ⭐⭐(ICCV 2023) **为文本到图像扩散模型添加条件控制**, 张吕敏等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.05543)] [[代码](https:\u002F\u002Fgithub.com\u002Flllyasviel\u002FControlNet)] \n     * (ICCV 2023) **DiffCloth：基于扩散的服装合成与操控，通过结构化跨模态语义对齐**, 张旭杰等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.11206)] \n     * (ICCV 2023) **无监督组合概念发现与文本到图像生成模型**, 刘楠等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.05357)] [[代码](https:\u002F\u002Fgithub.com\u002Fnanlliu\u002FUnsupervised-Compositional-Concepts-Discovery)] 
[[项目](https:\u002F\u002Fenergy-based-model.github.io\u002Funsupervised-concept-discovery\u002F)] \n     * (arXiv预印本2023) **抽象概念的文本到图像生成**, 廖嘉艺等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.14623)]\n     * (arXiv预印本2023) **T2I-CompBench：开放世界组合文本到图像生成的综合基准测试**, 黄凯毅等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.06350)] [[代码](https:\u002F\u002Fgithub.com\u002FKarine-Huang\u002FT2I-CompBench)] [[项目](https:\u002F\u002Fkarine-h.github.io\u002FT2I-CompBench\u002F)] \n     * (arXiv预印本2023) [💬评估] **人类偏好评分v2：评估文本到图像合成人类偏好的坚实基准**, 吴晓石等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.09341)] [[代码](https:\u002F\u002Fgithub.com\u002Ftgxs002\u002FHPSv2)]\n     * (arXiv预印本2023) **迈向统一的基于文本的人像检索：大规模多属性与语言搜索基准测试**, 杨舒宇等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.02898)] [[代码](https:\u002F\u002Fgithub.com\u002FShuyu-XJTU\u002FAPTM)] [[项目](https:\u002F\u002Fwww.zdzheng.xyz\u002Fpublication\u002FTowards-2023)]\n     * (arXiv预印本2023) **从文本合成艺术电影照片**, 马哈帕特拉·阿尼鲁达等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.02236)] [[代码](https:\u002F\u002Fgithub.com\u002Ftext2cinemagraph\u002Fartistic-cinemagraph)] [[项目](https:\u002F\u002Ftext2cinemagraph.github.io\u002Fwebsite\u002F)]\n     * (arXiv预印本2023) **多对象文本到图像生成的检测器引导**, 刘陆平等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.02236)]\n     * (arXiv预印本2023) **A-STAR：测试时注意力分离与保留用于文本到图像合成**, 阿什瓦里亚·阿加瓦尔等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.14544)]\n     * (arXiv预印本2023) [💬评估] **ConceptBed：评估文本到图像扩散模型的概念学习能力**, 帕特尔·迈特雷亚等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.04695)] [[代码](https:\u002F\u002Fgithub.com\u002FConceptBed\u002Fevaluations)] [[项目](https:\u002F\u002Fconceptbed.github.io\u002F)]\n     * ⭐(arXiv预印本2023) **StyleDrop：任意风格的文本到图像生成**, 孙基赫等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.00983)] [[项目](https:\u002F\u002Fstyledrop.github.io\u002F)]\n     * ⭐⭐(arXiv预印本2023) **无需提示的扩散：将“文本”从文本到图像扩散模型中移除**, 徐兴谦等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.16223)] [[代码](https:\u002F\u002Fgithub.com\u002FSHI-Labs\u002FPrompt-Free-Diffusion)] [[Hugging Face](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002Fshi-labs\u002FPrompt-Free-Diffusion)]\n     * ⭐⭐ (SIGGRAPH 2023) **混合潜在扩散**, 阿夫拉米·奥姆里等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.02779)] [[代码](https:\u002F\u002Fgithub.com\u002Fomriav\u002Fblended-latent-diffusion)] [[项目](https:\u002F\u002Fomriavrahami.com\u002Fblended-latent-diffusion-page\u002F)]\n     * (CVPR 2023) [💬可控] **SpaText：用于可控图像生成的空间-文本表示**, 阿夫拉米·奥姆里等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.14305)] [[项目](https:\u002F\u002Fomriavrahami.com\u002Fspatext\u002F)]\n     * ⭐⭐ (arXiv 2023) **被选中的那一个：文本到图像扩散模型中的一致性角色**, 阿夫拉米·奥姆里等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.10093)] [[代码](https:\u002F\u002Fgithub.com\u002FZichengDuan\u002FTheChosenOne)] [[项目](https:\u002F\u002Fomriavrahami.com\u002Fthe-chosen-one\u002F)]\n     * (CVPR 2023) [💬稳定扩散与大脑] **利用人类脑活动的潜在扩散模型进行高分辨率图像重建**, 高木悠等 [[论文](https:\u002F\u002Fwww.biorxiv.org\u002Fcontent\u002F10.1101\u002F2022.11.18.517004v1)] \n     * (arXiv预印本2023) **BLIP-Diffusion：用于可控文本到图像生成与编辑的预训练主体表示**, 李东旭等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.14720)] \n     * (arXiv预印本2023) [💬评估] **LLMScore：揭示大型语言模型在文本到图像合成评估中的强大能力**, 卢宇杰等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.11116)] [[代码](https:\u002F\u002Fgithub.com\u002FYujieLu10\u002FLLMScore)] \n     * (arXiv预印本2023) **P+：文本到图像生成中的扩展文本条件**, 沃伊诺夫·安德烈等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.09522)] 
[[项目](https:\u002F\u002Fprompt-plus.github.io\u002F)] \n     * (arXiv预印本2023) **微调零样本图像定制的编码器**, 贾旭辉等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.02642)] \n     * (ICML 2023) **TR0N：用于零样本即插即用条件生成的翻译网络**, 刘兆延等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.13742)] [[代码](https:\u002F\u002Fgithub.com\u002Flayer6ai-labs\u002Ftr0n)] [[Hugging Face](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002FLayer6\u002FTR0N)]\n     * (ICLR 2023) [💬3D]**DreamFusion：使用2D扩散进行文本到3D**, 波尔·本等 [[论文（arXiv）](https:\u002F\u002Farxiv.org\u002Fabs\u002F2209.14988)] [[论文（OpenReview）](https:\u002F\u002Fopenreview.net\u002Fforum?id=FjNys5c7VyY)] [[项目](https:\u002F\u002Fdreamfusion3d.github.io\u002F)] [[简短阅读](https:\u002F\u002Fwww.louisbouchard.ai\u002Fdreamfusion\u002F)]\n     * (ICLR 2023) **无需训练的结构化扩散引导用于组合文本到图像合成**, 冯伟西等 [[论文（arXiv）](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.05032)] [[论文（OpenReview）](https:\u002F\u002Fopenreview.net\u002Fforum?id=PUIqjT4rzq7)] [[代码](https:\u002F\u002Fgithub.com\u002Fshunk031\u002Ftraining-free-structured-diffusion-guidance)]\n     * ⭐⭐(arXiv预印本2023) **Pick-a-Pic：文本到图像生成用户偏好开放数据集**, 克里斯汀·尤瓦尔等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.01569)] [[代码](https:\u002F\u002Fgithub.com\u002Fyuvalkirstain\u002FPickScore)] [[数据集](https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002Fyuvalkirstain\u002Fpickapic_v1)] [[在线应用](https:\u002F\u002Fpickapic.io\u002F)] [[PickScore](https:\u002F\u002Fhuggingface.co\u002Fyuvalkirstain\u002FPickScore_v1)] \n     * (arXiv预印本2023) **TTIDA：通过文本到文本和文本到图像模型进行可控生成数据增强**, 尹宇威等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.08821)] \n     * (arXiv预印本2023) [💬文本反转] **用于个性化文本到图像生成的可控文本反转**, 杨建安等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.05265)] \n     * (arXiv预印本2023) **扩散解释器：用于文本到图像稳定扩散的可视化解释**, 李晟敏等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.03509)] [[项目](https:\u002F\u002Fpoloclub.github.io\u002Fdiffusion-explainer\u002F)] \n     * ⭐⭐(ACL 2023发现) [💬多语言到图像] **AltCLIP：改变CLIP的语言编码器以扩展语言能力**, 陈忠志等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.06679)] [[代码-AltDiffusion](https:\u002F\u002Fgithub.com\u002FFlagAI-Open\u002FFlagAI\u002Ftree\u002Fmaster\u002Fexamples\u002FAltDiffusion-m18)] [[代码-AltCLIP](https:\u002F\u002Fgithub.com\u002FFlagAI-Open\u002FFlagAI\u002Ftree\u002Fmaster\u002Fexamples\u002FAltCLIP-m18)] [[Hugging Face](https:\u002F\u002Fhuggingface.co\u002FBAAI\u002FAltDiffusion-m18)] \n     * (arXiv预印本2023) [💬种子选择] **一切取决于你的起点：带种子选择的文本到图像生成**, 萨缪尔·德维尔等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.14530)] \n     * (arXiv预印本2023) [💬音频\u002F声音\u002F多语言到图像] **GlueGen：用于X到图像生成的即插即用多模态编码器**, 秦灿等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.10056)] \n     * (arXiv预印本2023) [💬忠实度评估] **TIFA：准确且可解释的文本到图像忠实度评估与问答**, 胡宇石等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.11897)] [[项目](https:\u002F\u002Ftifa-benchmark.github.io\u002F)] [[代码](https:\u002F\u002Fgithub.com\u002FYushi-Hu\u002Ftifa)] \n     * (arXiv预印本2023) **InstantBooth：无需测试时微调的个性化文本到图像生成**, 石静等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.03411)] [[项目](https:\u002F\u002Fjshi31.github.io\u002FInstantBooth\u002F)]\n     * (TOMM 2023) **LFR-GAN：基于局部特征精化的生成对抗网络用于文本到图像生成**, 邓子军等 [[论文](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3589002)] [[代码](https:\u002F\u002Fgithub.com\u002FPKU-ICST-MIPL\u002FLFR-GAN_TOMM2023)] \n     * (ICCV 2023) **富有表现力的文本到图像生成与丰富文本**, 葛松伟等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.06720)] [[代码](https:\u002F\u002Fgithub.com\u002FSongweiGe\u002Frich-text-to-image)] 
[[项目](https:\u002F\u002Frich-text-to-image.github.io\u002F)] [[演示](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002Fsongweig\u002Frich-text-to-image\u002Fdiscussions)]\n     * (arXiv预印本2023) [💬人类偏好] **ImageReward：学习与评估文本到图像生成的人类偏好**, 徐家政等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.05977)] [[代码](https:\u002F\u002Fgithub.com\u002FTHUDM\u002FImageReward)] \n     * (arXiv预印本2023) **eDiff-I：带有专家去噪器集合的文本到图像扩散模型**, 巴拉吉·约格什等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.01324)] [[项目](https:\u002F\u002Fresearch.nvidia.com\u002Flabs\u002Fdir\u002FeDiff-I\u002F)] \n     * (CVPR 2023) **GALIP：用于文本到图像合成的生成对抗CLIP**, 陶明等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.12959)] [[代码](https:\u002F\u002Fgithub.com\u002Ftobran\u002FGALIP)]\n     * (CVPR 2023) [💬人类评估] **迈向可验证与可重复的文本到图像生成人类评估**, 大谷真由等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.01816)] \n     * (arXiv预印本2023) **Text2Room：从2D文本到图像模型中提取纹理3D网格**, 霍林·卢卡斯等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.11989)] [[项目](https:\u002F\u002Flukashoel.github.io\u002Ftext-to-room\u002F)] [[代码](https:\u002F\u002Fgithub.com\u002FlukasHoel\u002Ftext2room)] [[视频](https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=fjRnFL91EZc)]\n     * (arXiv预印本2023) **编辑文本到图像扩散模型中的隐含假设**, 奥尔加德·哈达斯等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.08084)] [[项目](https:\u002F\u002Ftime-diffusion.github.io\u002F)] [[代码](https:\u002F\u002Fgithub.com\u002Fbahjat-kawar\u002Ftime-diffusion)] \n     * ⭐⭐(arXiv预印本2023) **视觉ChatGPT：与视觉基础模型对话、绘图与编辑**, 吴晨飞等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.04671)] [[代码](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002Fvisual-chatgpt)]\n     * (arXiv预印本2023) **X&Fuse：融合文本到图像生成中的视觉信息**, 克里斯汀·尤瓦尔等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.01000v1)]\n     * (CVPR 2023) [💬稳定扩散与大脑] **利用人类脑活动的潜在扩散模型进行高分辨率图像重建**, 高木悠等 [[论文](https:\u002F\u002Fwww.biorxiv.org\u002Fcontent\u002F10.1101\u002F2022.11.18.517004v1)] [[项目](https:\u002F\u002Fsites.google.com\u002Fview\u002Fstablediffusion-with-brain\u002F)] [[代码](https:\u002F\u002Fgithub.com\u002Fyu-takagi\u002FStableDiffusionReconstruction)]\n     * ⭐⭐(arXiv预印本2023) **扩散模型的通用引导**, 班萨尔·阿尔皮特等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.07121)] [[代码](https:\u002F\u002Fgithub.com\u002Farpitbansal297\u002FUniversal-Guided-Diffusion)] \n     * ⭐(arXiv预印本2023) **关注并激发：基于注意力的语义引导用于文本到图像扩散模型**, 谢弗·希拉等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.13826)] [[项目](https:\u002F\u002Fattendandexcite.github.io\u002FAttend-and-Excite\u002F)] [[代码](https:\u002F\u002Fgithub.com\u002FAttendAndExcite\u002FAttend-and-Excite)]\n     * (BMVC 2023) **分割与绑定你的注意力以改善生成语义护理**, 李雨萌等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.10864)] [[项目](https:\u002F\u002Fsites.google.com\u002Fview\u002Fdivide-and-bind)] [[代码](https:\u002F\u002Fgithub.com\u002Fboschresearch\u002FDivide-and-Bind)]   \n     * (IEEE多媒体事务) **ALR-GAN：用于文本到图像合成的自适应布局精炼**, 谭洪辰等 [[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10023990)] \n     * ⭐(CVPR 2023) **文本到图像扩散的多概念定制**, 库马里·努普尔等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.04488)] [[项目](https:\u002F\u002Fwww.cs.cmu.edu\u002F~custom-diffusion\u002F)] [[代码](https:\u002F\u002Fgithub.com\u002Fadobe-research\u002Fcustom-diffusion)] [[Hugging Face](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002Fnupurkmr9\u002Fcustom-diffusion)]\n     * (CVPR 2023) **GLIGEN：开放集接地文本到图像生成**, 李宇恒等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.07093)] [[代码](https:\u002F\u002Fgithub.com\u002Fgligen\u002FGLIGEN)] 
[[项目](https:\u002F\u002Fgligen.github.io\u002F)] [[Hugging Face演示](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002Fgligen\u002Fdemo)] \n     * (arXiv预印本2023) **以属性为中心的组合文本到图像生成**, 丛玉仁等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.01413)] [[项目](https:\u002F\u002Fgithub.com\u002Fyrcong\u002FACTIG)] \n     * (arXiv预印本2023) **Muse：通过掩码生成变压器进行文本到图像生成**, 昌慧文等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.00704v1)] [[项目](https:\u002F\u002Fmuse-model.github.io\u002F)]\n\n[\u003Cu>\u003C🎯返回顶部>\u003C\u002Fu>](#head-content)\n\n\n\n## \u003Cspan id=\"head6\"> *6. 其他相关工作* \u003C\u002Fspan>\n   * \u003Cspan id=\"head-pe\"> **📝提示工程📝** \u003C\u002Fspan> \n       * (CHI 2024) **PromptCharm：通过多模态提示与优化实现文本到图像生成**, 王志杰等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.04014)] \n       * (arXiv预印本 2024) **面向个性化文本到图像生成的自动化黑盒提示工程**, 何宇彤等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.191039)] \n       * (EMNLP 2023) **BeautifulPrompt：迈向文本到图像合成的自动提示工程**, 曹廷峰等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.06752)] \n       * (arXiv预印本 2023) [💬优化提示] **NeuroPrompts：用于文本到图像生成的自适应提示优化框架**, 沙哈尔·罗森曼等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.12229)] [[视频演示](https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=Cmca_RWYn2g)] \n       * (arXiv预印本 2022) [💬优化提示] **用于文本到图像生成的提示优化**, 郝亚茹等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.09611)] [[代码](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002FLMOps)] [[Hugging Face](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002Fmicrosoft\u002FPromptist)] \n       * (arXiv预印本 2022) [💬美学图像生成] **文本到图像模型的最佳提示及其寻找方法**, 尼基塔·帕夫利琴科等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2209.11711)] \n       * (arXiv预印本 2022) **文本到图像生成的提示修饰符分类法**, 约纳斯·奥本莱德 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.13988)] \n       * (CHI 2022) **文本到图像生成模型提示工程的设计指南**, 刘维安等 [[论文](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3491102.3501825)] \n\n[\u003Cu>\u003C🎯返回顶部>\u003C\u002Fu>](#head-content)\n\n* \u003Cspan id=\"head-mm\"> **⭐多模态⭐** \u003C\u002Fspan> \n       * （arXiv预印本2024）**4M-21：一种适用于数十种任务与模态的任意到任意视觉模型**，Roman Bachmann等[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.09406)] [[4M论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.06647)] [[项目](https:\u002F\u002F4m.epfl.ch\u002F)] [[代码](https:\u002F\u002Fgithub.com\u002Fapple\u002Fml-4m\u002F)]\n         * 📚任意到任意，RGB到所有（标题、边界框、语义分割、深度等），细粒度生成与编辑，多模态引导，任意到RGB检索，RGB到任意检索，\n       * （arXiv预印本2024）**Ctrl-X：无需指导的文本到图像生成中的结构与外观控制**，Kuan Heng Lin等[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.07540)] [[项目](https:\u002F\u002Fgenforce.github.io\u002Fctrl-x\u002F)] \n         * 📚结构（自然图像、Canny图、法线图、线框图、3D网格等）+图像→图像，结构（掩码、3D网格、Canny图、深度图等）+文本→图像\n       * （arXiv预印本2024）**Lumina-T2X：通过基于流的大扩散Transformer将文本转换为任意模态、分辨率与时长**，Peng Gao等[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.05945)] [[代码](https:\u002F\u002Fgithub.com\u002FAlpha-VLLM\u002FLumina-T2X)] \n         * 📚文本→图像\u002F视频\u002F音频\u002F3D\u002F音乐\n       * （ICLR 2024）**用于文本引导的视觉生成与编辑的跨模态上下文扩散模型**，Ling Yang等[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.16627v1)] [[代码](https:\u002F\u002Fgithub.com\u002FYangLing0818\u002FContextDiff?tab=readme-ov-file)]\n         * 📚文本→图像，文本→视频\n       * （arXiv预印本2024）**TMT：通过将不同模态视为不同语言进行处理的语音、图像与文本之间的三模态翻译**，Minsu Kim等[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.16021v1)] \n         * 📚图像→文本，图像→语音，文本→图像，语音→图像，语音→文本，文本→语音\n       * ⭐⭐（NeurIPS 2023）**CoDi：通过可组合扩散实现任意到任意生成**，Zineng Tang等[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.11846)] 
[[项目](https:\u002F\u002Fcodi-gen.github.io\u002F)] [[代码](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002Fi-Code\u002Ftree\u002Fmain\u002Fi-Code-V3)] \n         * 📚[单对单生成] 文本→图像，音频→图像，图像→视频，图像→音频，音频→文本，图像→文本\n         * 📚[多输出联合生成] 文本→视频+音频，文本→文本+音频+图像，文本+图像→文本+图像\n         * 📚[多种条件] 文本+音频→图像，文本+图像→图像，文本+音频+图像→图像，文本+音频→视频，文本+图像→视频，视频+音频→文本，图像+音频→音频，文本+图像→音频\n       * ⭐⭐（CVPR 2023）**ImageBind：一个嵌入空间绑定一切**，Rohit Girdhar等[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.05665)] [[项目](https:\u002F\u002Fai.facebook.com\u002Fblog\u002Fimagebind-six-modalities-binding-ai\u002F)] [[代码](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FImageBind)] \n         * 📚图像到音频检索，音频到图像检索，文本到图像+音频，音频+图像到图像，音频到图像生成，零样本文本到音频检索与分类... \n       * ⭐（CVPR 2023）**为文本到图像合成扩展GANs**，Minguk Kang等[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.05511)] [[项目](https:\u002F\u002Fmingukkang.github.io\u002FGigaGAN\u002F)] \n         * 📚文本到图像，可控图像合成（风格混合、提示插值、提示混合），超分辨率（文本条件、无条件）\n       * （arXiv预印本2023）**DiffBlender：可扩展且可组合的多模态文本到图像扩散模型**，Sungnyun Kim等[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.15194)] [[代码](https:\u002F\u002Fgithub.com\u002Fsungnyun\u002Fdiffblender)] [[项目](https:\u002F\u002Fsungnyun.github.io\u002Fdiffblender\u002F)]\n         * 📚文本到图像，多模态可控图像合成，文本+图像+空间\u002F非空间标记→图像\n       * （arXiv预印本2023）**TextIR：用于文本驱动可编辑图像修复的简单框架**，Yunpeng Bai等[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.14736)] [[代码](https:\u002F\u002Fgithub.com\u002Fhaha-lisa\u002FRDM-Region-Aware-Diffusion-Model)] \n         * 📚图像修复，图像上色，图像超分辨率，通过退化进行图像编辑\n       * （arXiv预印本2023）**为多模态图像合成调制预训练扩散模型**，Cusuh Ham等[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.12764)]\n         * 📚素描到图像，分割到图像，文本+素描到图像，文本+分割到图像，文本+素描+分割到图像\n       * （arXiv预印本2023）**Muse：通过掩码生成Transformer进行文本到图像生成**，Huiwen Chang等[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.00704v1)] [[项目](https:\u002F\u002Fmuse-model.github.io\u002F)] \n         * 📚文本到图像，零样本+无掩码编辑，零样本修复\u002F外扩\n       * （arXiv预印本2022）**多功能扩散：文本、图像与变体全部集成于一个扩散模型中**，Xingqian Xu等[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.08332)] [[代码](https:\u002F\u002Fgithub.com\u002FSHI-Labs\u002FVersatile-Diffusion)] [[Hugging Face](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002Fshi-labs\u002FVersatile-Diffusion)]\n         * 📚文本到图像，图像变体，图像到文本，解纠缠，文本+图像引导生成，可编辑I2T2I\n       * （arXiv预印本2022）**Frido：用于复杂场景图像合成的特征金字塔扩散**，Wan-Cyuan Fan等[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.13753)] [[代码](https:\u002F\u002Fgithub.com\u002Fdavidhalladay\u002FFrido)]\n         * 📚文本到图像，场景图到图像，布局到图像，无条件图像生成\n       * （arXiv预印本2022）**NUWA-Infinity：用于无限视觉合成的自回归到自回归生成**，Chenfei Wu等[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.09814)] [[代码](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002FNUWA)] [[项目](https:\u002F\u002Fnuwa-infinity.microsoft.com\u002F#\u002F)]\n         * 📚无条件图像生成（高清），文本到图像（高清），图像动画（高清），图像外扩（高清），文本到视频（高清）\n       * （ECCV 2022）**NÜWA：用于神经视觉世界创造的视觉合成预训练**，Chenfei Wu等[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.12417)] [[代码](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002FNUWA)]\n         * **多模态预训练模型用于多任务🎄**：文本到图像，素描到图像，图像补全，文本引导图像操作，文本到视频，视频预测，素描到视频，文本引导视频操作\n       * （ACMMM 2022）**重新思考超分辨率作为文本引导细节生成**，Chenxi Ma等[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.06604)] \n         * 📚文本到图像，高分辨率，文本引导高分辨率\n       * （arXiv预印本2022）**用于跨模态与条件生成的离散对比扩散**，Ye Zhu等[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.07771)] [[代码](https:\u002F\u002Fgithub.com\u002FL-YeZhu\u002FCDCD)] \n         * 📚文本到图像，舞蹈到音乐，类别到图像\n       * 
（arXiv预印本2022）**M6-Fashion：高保真多模态图像生成与编辑**，Zhikang Li等[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.11705)] \n         * 📚文本到图像，无条件图像生成，局部编辑，文本引导局部编辑，内\u002F外绘画，风格混合\n       * （CVPR 2022）**向我展示什么并告诉我如何：通过多模态条件进行视频合成**，Yogesh Balaji等[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.02573)] [[代码](https:\u002F\u002Fgithub.com\u002Fsnap-research\u002FMMVID)] [[项目](https:\u002F\u002Fsnap-research.github.io\u002FMMVID\u002F)]\n         * 📚文本到视频，独立多模态控制，依赖多模态控制\n       * ⭐⭐（CVPR 2022）**用潜在扩散模型进行高分辨率图像合成**，Robin Rombach等[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.10752)] [[代码](https:\u002F\u002Fgithub.com\u002FCompVis\u002Flatent-diffusion)] [[Stable Diffusion代码](https:\u002F\u002Fgithub.com\u002FCompVis\u002Fstable-diffusion)]\n         * 📚文本到图像，条件潜在扩散，超分辨率，修复\n       * ⭐⭐（arXiv预印本2022）**通过简单的序列到序列学习框架统一架构、任务与模态**，Peng Wang等[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.03052v1)] [[代码](https:\u002F\u002Fgithub.com\u002Fofa-sys\u002Fofa)] [[Hugging Face](https:\u002F\u002Fhuggingface.co\u002FOFA-Sys)]\n         * 📚文本到图像生成，图像标题生成，文本摘要，自监督图像分类，**[SOTA]** 指称表达理解，视觉蕴含，视觉问答\n       * （arXiv预印本2021）**使用专家乘积GAN进行多模态条件图像合成**，Xun Huang等[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.05130)] [[项目](https:\u002F\u002Fdeepimagination.cc\u002FPoE-GAN\u002F)]\n         * 📚文本到图像，分割到图像，文本+分割\u002F素描\u002F图像→图像，素描+分割\u002F图像→图像，分割+图像→图像\n       * （NeurIPS 2021）**M6-UFC：通过非自回归生成Transformer统一多模态控制用于条件图像合成**，Zhu Zhang等[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2105.14211)] \n         * 📚文本到图像，素描到图像，风格迁移，图像修复，多模态控制到图像\n       * （arXiv预印本2021）**ERNIE-ViLG：用于双向视觉-语言生成的统一生成预训练**，Han Zhang等[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.15283)] \n         * 一个预训练的**100亿**参数模型：ERNIE-ViLG。\n         * 一个包含**1.45亿**高质量中文图像-文本对的大规模数据集。\n         * 📚文本到图像，图像标题生成，生成式视觉问答\n       * （arXiv预印本2021）**L-Verse：图像与文本之间的双向生成**，Taehoon Kim等[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.11133)] [[代码](https:\u002F\u002Fgithub.com\u002Ftgisaturday\u002FL-Verse)] \n         * 📚文本到图像，图像到文本，图像重建 \n       * （arXiv预印本2021）[💬语义扩散引导] **更多控制，免费！使用语义扩散引导进行图像合成**，Xihui Liu等[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.05744)] [[项目](https:\u002F\u002Fxh-liu.github.io\u002Fsdg\u002F)] \n         * 📚文本到图像，图像到图像，文本+图像→图像\n\n[\u003Cu>\u003C🎯返回顶部>\u003C\u002Fu>](#head-content)\n\n   * \u003Cspan id=\"head-app\"> **🛫应用🚀** \u003C\u002Fspan> \n       * (arXiv预印本 2025) [💬照片修饰] **JarvisArt：通过智能照片修饰代理解放人类艺术创造力**, 林云龙等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2506.17612)] [[项目](https:\u002F\u002Fjarvisart.vercel.app\u002F)] [[代码](https:\u002F\u002Fgithub.com\u002FLYL1015\u002FJarvisArt)]\n       * (CVPR 2025) [💬图像修复] **获取再适应：为图像修复榨取文本到图像模型**, 邓俊源等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.15159)]\n       * (arXiv预印本 2024) [💬多概念合成] **Gen4Gen：用于生成式多概念合成的生成式数据管道**, 叶春晓等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.15504)] [[项目](https:\u002F\u002Fdanielchyeh.github.io\u002FGen4Gen\u002F)] [[代码](https:\u002F\u002Fgithub.com\u002FlouisYen\u002FGen4Gen)]\n       * (arXiv预印本 2023) [💬3D发型生成] **HAAR：基于文本条件的3D发束式人类发型生成模型**, 瓦妮莎·斯克利亚罗娃等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.11666)] [[项目](https:\u002F\u002Fhaar.is.tue.mpg.de\u002F)] \n       * (arXiv预印本 2023) [💬图像超分辨率] **带文本提示扩散的图像超分辨率**, 陈峥等 
[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.14282)] [[代码](https:\u002F\u002Fgithub.com\u002Fzhengchen1999\u002FPromptSR)] \n       * (2023) [💬图像编辑] **生成式填充**。[[项目](https:\u002F\u002Fwww.adobe.com\u002Fproducts\u002Fphotoshop\u002Fgenerative-fill.html)] \n       * (arXiv预印本 2023) [💬大语言模型] **大语言模型作为艺术指导（LaDi）：利用大语言模型改进文本到媒体生成器**, 艾伦·鲁什等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.03716v1)]\n       * (arXiv预印本 2023) [💬分割] **SegGen：用文本到掩码和掩码到图像合成为分割模型注入强大动力**, 叶汉荣等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.03355)] [[项目](https:\u002F\u002Fseggenerator.github.io\u002F)]\n       * (arXiv预印本 2023) [💬文本编辑] **DiffUTE：通用文本编辑扩散模型**, 陈浩兴等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.10825)] \n       * (arXiv预印本 2023) [💬文本字符生成] **TextDiffuser：将扩散模型用作文本画家**, 陈静叶等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.10855)] \n       * (CVPR 2023) [💬开放词汇全景分割] **基于文本到图像扩散模型的开放词汇全景分割**, 徐嘉瑞等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.04803)] [[代码](https:\u002F\u002Fgithub.com\u002FNVlabs\u002FODISE)] [项目](https:\u002F\u002Fjerryxu.net\u002FODISE\u002F)] [HuggingFace](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002Fxvjiarui\u002FODISE)]\n       * (arXiv预印本 2023) [💬中文文本字符生成] **GlyphDraw：在图像合成模型中学习连贯地绘制汉字**, 马健等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.17870)] [[项目](https:\u002F\u002F1073521013.github.io\u002Fglyph-draw.github.io\u002F)] \n       * (arXiv预印本 2023) [💬有据生成] **引导文本到图像扩散模型实现有据生成**, 李子毅等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.05221)] [[代码](https:\u002F\u002Fgithub.com\u002FLipurple\u002FGrounded-Diffusion)] [项目](https:\u002F\u002Flipurple.github.io\u002FGrounded_Diffusion\u002F)]\n       * (arXiv预印本 2022) [💬语义分割] **CLIP同样是一种高效的分割器：一种基于文本驱动的弱监督语义分割方法**, 林宇琪等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.09506)] [[代码](https:\u002F\u002Fgithub.com\u002Flinyq2117\u002FCLIP-ES)]\n       * (arXiv预印本 2022) [💬无监督语义分割] **Peekaboo：文本到图像扩散模型是零样本分割器**, 瑞安·伯格特等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.13224)]\n       * (SIGGRAPH Asia 2022) [💬文本+语音→手势] **节奏性手势生成器：基于层次神经嵌入的节奏感知共语音手势合成**, 敖腾龙等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.01448)] [[代码](https:\u002F\u002Fgithub.com\u002FAubrey-ao\u002FHumanBehaviorAnimation)]\n       * (arXiv预印本 2022) [💬文本+图像+形状→图像] **带内外注意力的形状引导扩散**, 朴东赫等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.00210v1)] [[项目](https:\u002F\u002Fshape-guided-diffusion.github.io\u002F)]\n\n[\u003Cu>\u003C🎯返回顶部>\u003C\u002Fu>](#head-content)\n\n* \u003Cspan id=\"head-ti2i\"> **Text+Image\u002FVideo → Image\u002FVideo** \u003C\u002Fspan> \n       * (arXiv preprint 2025) **In-Context Edit: Enabling Instructional Image Editing with In-Context Generation in Large Scale Diffusion Transformer**, Zechuan Zhang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.20690)] [[Project](https:\u002F\u002Friver-zhang.github.io\u002FICEdit-gh-pages\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002FRiver-Zhang\u002FICEdit)]\n       * (arXiv preprint 2025) **MAGREF: Masked Guidance for Any-Reference Video Generation**, Yufan Deng et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.23742v1)] [[Project](https:\u002F\u002Fmagref-video.github.io\u002Fmagref.github.io\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002FMAGREF-Video\u002FMAGREF)]\n       * (arXiv preprint 2025) **Generating Multi-Image Synthetic Data for Text-to-Image Customization**, Nupur Kumari et al. 
[[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.01720)] [[Project](https:\u002F\u002Fwww.cs.cmu.edu\u002F~syncd-project\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002Fnupurkmr9\u002Fsyncd)]\n       * (arXiv preprint 2024) [💬Style Transfer] **StyleShot: A Snapshot on Any Style**, Junyao Gao et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.01414)] [[Project](https:\u002F\u002Fgithub.com\u002Fopen-mmlab\u002FStyleShot)]\n       * (CVPR 2024) **SmartEdit: Exploring Complex Instruction-based Image Editing with Multimodal Large Language Models**, Yuzhou Huang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.06739)] [[Project](https:\u002F\u002Fyuzhou914.github.io\u002FSmartEdit\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002FTencentARC\u002FSmartEdit)]\n       * (arXiv preprint 2024) **MM-Diff: High-Fidelity Image Personalization via Multi-Modal Condition Integration**, Zhichao Wei et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.15059)] \n       * (CVPR 2024) **Instruct-Imagen: Image Generation with Multi-modal Instruction**, Hexiang Hu et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.01952)] [[Project](https:\u002F\u002Finstruct-imagen.github.io\u002F)]\n       * (arXiv preprint 2024) [💬NERF] **InseRF: Text-Driven Generative Object Insertion in Neural 3D Scenes**, Mohamad Shahbazi et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.05335)] [[Project](https:\u002F\u002Fmohamad-shahbazi.github.io\u002Finserf\u002F)]\n       * (arXiv preprint 2023) **ViCo: Plug-and-play Visual Condition for Personalized Text-to-image Generation**, Shaozhe Hao et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.00971)] [[Code](https:\u002F\u002Fgithub.com\u002Fhaoosz\u002FViCo)]\n       * (arXiv preprint 2023) [💬Video Editing] **MagicStick: Controllable Video Editing via Control Handle Transformations**, Yue Ma et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.03047v1)] [[Project](https:\u002F\u002Fmagic-stick-edit.github.io\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002Fmayuelala\u002FMagicStick)]\n       * (arXiv preprint 2023) **Lego: Learning to Disentangle and Invert Concepts Beyond Object Appearance in Text-to-Image Diffusion Models**, Chen Henry Wu et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.13833)] \n       * (ACMMM 2023) [💬Style Transfer] **ControlStyle: Text-Driven Stylized Image Generation Using Diffusion Priors**, Jingwen Chen et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.05463)] \n       * (ICCV 2023) **A Latent Space of Stochastic Diffusion Models for Zero-Shot Image Editing and Guidance**, Chen Henry Wu et al. [[Paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FWu_A_Latent_Space_of_Stochastic_Diffusion_Models_for_Zero-Shot_Image_ICCV_2023_paper.pdf)] [[Arxiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.05559)] [[Code](https:\u002F\u002Fgithub.com\u002Fchenwu98\u002Fcycle-diffusion)]\n       * (arXiv preprint 2023) [💬Multi-Subject Generation] **VideoDreamer: Customized Multi-Subject Text-to-Video Generation with Disen-Mix Finetuning**, Hong Chen et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.00990v1)] [[Project](https:\u002F\u002Fvideodreamer23.github.io\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002Fvideodreamer23\u002Fvideodreamer23.github.io)]\n       * (arXiv preprint 2023) [💬Video Editing] **CCEdit: Creative and Controllable Video Editing via Diffusion Models**, Ruoyu Feng et al. 
[[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.16496)] [[Demo video](https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=UQw4jq-igN4)] \n       * ⭐⭐ (SIGGRAPH Asia 2023) **Break-A-Scene: Extracting Multiple Concepts from a Single Image**, Omri Avrahami et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.16311)] [[Project](https:\u002F\u002Fomriavrahami.com\u002Fbreak-a-scene\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002Fgoogle\u002Fbreak-a-scene)]\n       * (arXiv preprint 2023) **Visual Instruction Inversion: Image Editing via Visual Prompting**, Thao Nguyen et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.14331)] [[Project](https:\u002F\u002Fthaoshibe.github.io\u002Fvisii\u002F)]\n       * (CVPR 2023) [💬3D Shape Editing] **ShapeTalk: A Language Dataset and Framework for 3D Shape Edits and Deformations**, Panos Achlioptas et al. [[Paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fpapers\u002FAchlioptas_ShapeTalk_A_Language_Dataset_and_Framework_for_3D_Shape_Edits_CVPR_2023_paper.pdf)] [[Code](https:\u002F\u002Fgithub.com\u002Foptas\u002Fchangeit3d)] [[Project](https:\u002F\u002Fchangeit3d.github.io\u002F)] \n       * (arXiv preprint 2023) [💬Colorization] **DiffColor: Toward High Fidelity Text-Guided Image Colorization with Diffusion Models**, Jianxin Lin et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.01655)] \n       * (ICCV 2023) [💬Video Editing] **FateZero: Fusing Attentions for Zero-shot Text-based Video Editing**, Chenyang Qi et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.09535)] [[Code](https:\u002F\u002Fgithub.com\u002FChenyangQiQi\u002FFateZero)] [[Project](https:\u002F\u002Ffate-zero-edit.github.io\u002F)] [Hugging Face](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002Fchenyangqi\u002FFateZero)] \n       * (arXiv preprint 2023) [💬3D] **AvatarVerse: High-quality & Stable 3D Avatar Creation from Text and Pose**, Huichao Zhang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.03610)] [[Project](https:\u002F\u002Favatarverse3d.github.io\u002F)] \n       * (ACM Transactions on Graphics 2023) **CLIP-Guided StyleGAN Inversion for Text-Driven Real Image Editing**, Ahmet Canberk Baykal et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.08397)] \n       * (arXiv preprint 2023) ⭐⭐**AnimateDiff: Animate Your Personalized Text-to-Image Diffusion Models without Specific Tuning**, Yuwei Guo et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.04725)] [[Project](https:\u002F\u002Fanimatediff.github.io\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002Fguoyww\u002Fanimatediff\u002F)]\n       * (ICLR 2023) **DiffEdit: Diffusion-based semantic image editing with mask guidance**, Guillaume Couairon et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.11427v1)] \n       * (arXiv preprint 2023) **Controlling Text-to-Image Diffusion by Orthogonal Finetuning**, Zeju Qiu et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.07280)] [[Project](https:\u002F\u002Foft.wyliu.com\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002FZeju1997\u002Foft)]\n       * (arXiv preprint 2023) [💬Reject Human Instructions] **Accountable Textual-Visual Chat Learns to Reject Human Instructions in Image Re-creation**, Zhiwei Zhang et al. 
[[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.05983)] [[Project](https:\u002F\u002Fmatrix-alpha.github.io\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002Fmatrix-alpha\u002FAccountable-Textual-Visual-Chat)]\n       * (arXiv preprint 2023) **MultiFusion: Fusing Pre-Trained Models for Multi-Lingual, Multi-Modal Image Generation**, Marco Bellagente et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.15296)]\n       * (CVPR 2023) **Text-Guided Unsupervised Latent Transformation for Multi-Attribute Image Manipulation**, Xiwen Wei et al. [[Paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FWei_Text-Guided_Unsupervised_Latent_Transformation_for_Multi-Attribute_Image_Manipulation_CVPR_2023_paper.html)] \n       * (arXiv preprint 2023) **Uni-ControlNet: All-in-One Control to Text-to-Image Diffusion Models**, Shihao Zhao et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.16322v1)] [[Project](https:\u002F\u002Fshihaozhaozsh.github.io\u002Funicontrolnet\u002F)]\n       * (arXiv preprint 2023) **Unified Multi-Modal Latent Diffusion for Joint Subject and Text Conditional Image Generation**, Yiyang Ma et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.09319)] \n       * (arXiv preprint 2023) **DisenBooth: Disentangled Parameter-Efficient Tuning for Subject-Driven Text-to-Image Generation**, Hong Chen et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.03374)] \n       * (arXiv preprint 2023) [💬Image Editing] **Guided Image Synthesis via Initial Image Editing in Diffusion Model**, Jiafeng Mao et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.03382)] \n       * (arXiv preprint 2023) [💬Image Editing] **Prompt Tuning Inversion for Text-Driven Image Editing Using Diffusion Models**, Wenkai Dong et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.04441)] \n       * (CVPR 2023) **DreamBooth: Fine Tuning Text-to-Image Diffusion Models for Subject-Driven Generation**, Nataniel Ruiz et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.12242)] [[Project](https:\u002F\u002Fdreambooth.github.io\u002F)]\n       * (arXiv preprint 2023) **Shape-Guided Diffusion with Inside-Outside Attention**, Dong Huk Park et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.00210)] [[Code](https:\u002F\u002Fgithub.com\u002Fshape-guided-diffusion\u002Fshape-guided-diffusion)] [[Project](https:\u002F\u002Fshape-guided-diffusion.github.io\u002F)] [Hugging Face](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002Fshape-guided-diffusion\u002Fshape-guided-diffusion)] \n       * (arXiv preprint 2023) [💬Image Editing] **iEdit: Localised Text-guided Image Editing with Weak Supervision**, Rumeysa Bodur et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.05947)] \n       * (PR 2023) [💬Person Re-identification] **BDNet: A BERT-based Dual-path Network for Text-to-Image Cross-modal Person Re-identification**, Qiang Liu et al. [[Paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0031320323003370)] \n       * (arXiv preprint 2023) **MagicFusion: Boosting Text-to-Image Generation Performance by Fusing Diffusion Models**, Jing Zhao et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.13126)] [[Code](https:\u002F\u002Fgithub.com\u002FMagicFusion\u002FMagicFusion.github.io)] [Project](https:\u002F\u002Fmagicfusion.github.io\u002F)] \n       * (CVPR 2023) [💬3D] **TAPS3D: Text-Guided 3D Textured Shape Generation from Pseudo Supervision**, Jiacheng Wei et al. 
[[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.13273)] \n       * ⭐⭐(arXiv preprint 2023) [💬Image Editing] **MasaCtrl: Tuning-free Mutual Self-Attention Control for Consistent Image Synthesis and Editing**, Mingdeng Cao et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.08465)] [[Code](https:\u002F\u002Fgithub.com\u002FTencentARC\u002FMasaCtrl)] [[Project](https:\u002F\u002Fljzycmd.github.io\u002Fprojects\u002FMasaCtrl\u002F)]\n       * (arXiv preprint 2023) **Follow Your Pose: Pose-Guided Text-to-Video Generation using Pose-Free Videos**, Yue Ma et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.01186)] [[Code](https:\u002F\u002Fgithub.com\u002Fmayuelala\u002FFollowYourPose)] [[Hugging Face](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002FYueMafighting\u002FFollowYourPose)]\n       * ⭐⭐(arXiv preprint 2023) [💬Image Editing] **Delta Denoising Score**, Amir Hertz et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.07090)] [[Project](https:\u002F\u002Fdelta-denoising-score.github.io\u002F)] \n       * (arXiv preprint 2023) **Subject-driven Text-to-Image Generation via Apprenticeship Learning**, Wenhu Chen et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.00186)]\n       * (arXiv preprint 2023) [💬Image Editing] **Region-Aware Diffusion for Zero-shot Text-driven Image Editing**, Nisha Huang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.11797)] [[Code](https:\u002F\u002Fgithub.com\u002Fhaha-lisa\u002FRDM-Region-Aware-Diffusion-Model)] \n       * ⭐⭐(arXiv preprint 2023) [💬Text+Video → Video] **Structure and Content-Guided Video Synthesis with Diffusion Models**, Patrick Esser et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.03011)] [[Project](https:\u002F\u002Fresearch.runwayml.com\u002Fgen1)]\n       * (arXiv preprint 2023) **ELITE: Encoding Visual Concepts into Textual Embeddings for Customized Text-to-Image Generation**, Yuxiang Wei et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.13848)]\n       * (arXiv preprint 2023) [💬Fashion Image Editing] **FICE: Text-Conditioned Fashion Image Editing With Guided GAN Inversion**, Martin Pernuš et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.02110)] [[Code](https:\u002F\u002Fgithub.com\u002FMartinPernus\u002FFICE)] \n       * (AAAI 2023) **CLIPVG: Text-Guided Image Manipulation Using Differentiable Vector Graphics**, Yiren Song et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.02122v1)] \n       * (AAAI 2023) **DE-Net: Dynamic Text-guided Image Editing Adversarial Networks**, Ming Tao et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.01160)] [[Code](https:\u002F\u002Fgithub.com\u002Ftobran\u002FDE-Net)]\n       * (arXiv preprint 2022) **Plug-and-Play Diffusion Features for Text-Driven Image-to-Image Translation**, Narek Tumanyan et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.12572)] [[Project](https:\u002F\u002Fpnp-diffusion.github.io\u002F)]\n       * (arXiv preprint 2022) [💬Text+Image → Video] **Tell Me What Happened: Unifying Text-guided Video Completion via Multimodal Masked Video Generation**, Tsu-Jui Fu et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.12824)]\n       * (arXiv preprint 2022) [💬Image Stylization] **DiffStyler: Controllable Dual Diffusion for Text-Driven Image Stylization**, Nisha Huang et al. 
[[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.10682)] [[Code](https:\u002F\u002Fgithub.com\u002Fhaha-lisa\u002FDiffstyler)] \n       * (arXiv preprint 2022) **Null-text Inversion for Editing Real Images using Guided Diffusion Models**, Ron Mokady et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.09794)] [[Project](https:\u002F\u002Fnull-text-inversion.github.io\u002F)] \n       * (arXiv preprint 2022) **InstructPix2Pix: Learning to Follow Image Editing Instructions**, Tim Brooks et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.09800)] [[Project](https:\u002F\u002Fwww.timothybrooks.com\u002Finstruct-pix2pix)] \n       * (ECCV 2022) [💬Style Transfer] **Language-Driven Artistic Style Transfer**, Tsu-Jui Fu et al. [[Paper](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-031-20059-5_41)] [[Code](https:\u002F\u002Fgithub.com\u002Ftsujuifu\u002Fpytorch_ldast)]\n       * (arXiv preprint 2022) **Bridging CLIP and StyleGAN through Latent Alignment for Image Editing**, Wanfeng Zheng et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.04506)] \n       * (NeurIPS 2022) **One Model to Edit Them All: Free-Form Text-Driven Image Manipulation with Semantic Modulations**, Yiming Zhu et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.07883)] [[Code](https:\u002F\u002Fgithub.com\u002FKumapowerLIU\u002FFFCLIP)]\n       * (BMVC 2022) **LDEdit: Towards Generalized Text Guided Image Manipulation via Latent Diffusion Models**, Paramanand Chandramouli et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.02249v1)]\n       * (ACMMM 2022) [💬Iterative Language-based Image Manipulation] **LS-GAN: Iterative Language-based Image Manipulation via Long and Short Term Consistency Reasoning**, Gaoxiang Cong et al. [[Paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3503161.3548206)] \n       * (ACMMM 2022) [💬Digital Art Synthesis] **Draw Your Art Dream: Diverse Digital Art Synthesis with Multimodal Guided Diffusion**, Nisha Huang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2209.13360)] [[Code](https:\u002F\u002Fgithub.com\u002Fhaha-lisa\u002FMGAD-multimodal-guided-artwork-diffusion)]\n       * (SIGGRAPH Asia 2022) [💬HDR Panorama Generation] **Text2Light: Zero-Shot Text-Driven HDR Panorama Generation**, Zhaoxi Chen et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2209.09898)] [[Project](https:\u002F\u002Ffrozenburning.github.io\u002Fprojects\u002Ftext2light\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002FFrozenBurning\u002FText2Light)]\n       * (arXiv preprint 2022) **LANIT: Language-Driven Image-to-Image Translation for Unlabeled Data**, Jihye Park et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.14889)] [[Project](https:\u002F\u002Fku-cvlab.github.io\u002FLANIT\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002FKU-CVLAB\u002FLANIT)]\n       * (ACMMM PIES-ME 2022) [💬3D Semantic Style Transfer] **Language-guided Semantic Style Transfer of 3D Indoor Scenes**, Bu Jin et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.07870)] [[Code](https:\u002F\u002Fgithub.com\u002FAIR-DISCOVER\u002FLASST)]\n       * (arXiv preprint 2022) [💬Face Animation] **Language-Guided Face Animation by Recurrent StyleGAN-based Generator**, Tiankai Hang et al. 
[[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.05617)] [[Code](https:\u002F\u002Fgithub.com\u002FTiankaiHang\u002Flanguage-guided-animation)]\n       * (arXiv preprint 2022) [💬Fashion Design] **ARMANI: Part-level Garment-Text Alignment for Unified Cross-Modal Fashion Design**, Xujie Zhang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.05621)] [[Code](https:\u002F\u002Fgithub.com\u002FHarvey594\u002FARMANI)]\n       * (arXiv preprint 2022) [💬Image Colorization] **TIC: Text-Guided Image Colorization**, Subhankar Ghosh et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.02843)] \n       * (ECCV 2022) [💬Animating Human Meshes] **CLIP-Actor: Text-Driven Recommendation and Stylization for Animating Human Meshes**, Kim Youwang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.04382)] [[Code](https:\u002F\u002Fgithub.com\u002FYouwang-Kim\u002FCLIP-Actor)]\n       * (ECCV 2022) [💬Pose Synthesis] **TIPS: Text-Induced Pose Synthesis**, Prasun Roy et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.11718)] [[Code](https:\u002F\u002Fgithub.com\u002Fprasunroy\u002Ftips)] [[Project](https:\u002F\u002Fprasunroy.github.io\u002Ftips\u002F)]\n       * (ACMMM 2022) [💬Person Re-identification] **Learning Granularity-Unified Representations for Text-to-Image Person Re-identification**, Zhiyin Shao et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.07802)] [[Code](https:\u002F\u002Fgithub.com\u002FZhiyinShao-H\u002FLGUR)]\n       * (ACMMM 2022) **Towards Counterfactual Image Manipulation via CLIP**, Yingchen Yu et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.02812)] [[Code](https:\u002F\u002Fgithub.com\u002Fyingchen001\u002FCF-CLIP)]\n       * (ACMMM 2022) [💬Monocular Depth Estimation] **Can Language Understand Depth?**, Wangbo Zhao et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.01077)] [[Code](https:\u002F\u002Fgithub.com\u002FAdonis-galaxy\u002FDepthCLIP)]\n       * (arXiv preprint 2022) [💬Image Style Transfer] **Language-Driven Image Style Transfer**, Tsu-Jui Fu et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.00178)]\n       * (CVPR 2022) [💬Image Segmentation] **Image Segmentation Using Text and Image Prompts**, Timo Lüddecke et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.10003)] [[Code](https:\u002F\u002Fgithub.com\u002Ftimojl\u002Fclipseg)] \n       * (CVPR 2022) [💬Video Segmentation] **Modeling Motion with Multi-Modal Features for Text-Based Video Segmentation**, Wangbo Zhao et al. [[Paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FZhao_Modeling_Motion_With_Multi-Modal_Features_for_Text-Based_Video_Segmentation_CVPR_2022_paper.pdf)] [[Code](https:\u002F\u002Fgithub.com\u002Fwangbo-zhao\u002F2022cvpr-mmmmtbvs)]\n       * (arXiv preprint 2022) [💬Image Matting] **Referring Image Matting**, Jizhizi Li et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.05149)] [[Dataset](https:\u002F\u002Fgithub.com\u002FJizhiziLi\u002FRIM)]\n       * (arXiv preprint 2022) [💬Stylizing Video Objects] **Text-Driven Stylization of Video Objects**, Sebastian Loeschcke et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.12396)] [[Project](https:\u002F\u002Fsloeschcke.github.io\u002FText-Driven-Stylization-of-Video-Objects\u002F)]\n       * (arXiv preprint 2022) **DALL-E for Detection: Language-driven Context Image Synthesis for Object Detection**, Yunhao Ge et al. 
[[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.09592)] \n       * (IEEE Transactions on Neural Networks and Learning Systems 2022) [💬Pose-Guided Person Generation] **Verbal-Person Nets: Pose-Guided Multi-Granularity Language-to-Person Generation**, Deyin Liu et al. [[Paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9732175)]\n       * (SIGGRAPH 2022) [💬3D Avatar Generation] **AvatarCLIP: Zero-Shot Text-Driven Generation and Animation of 3D Avatars**, Fangzhou Hong et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.08535)] [[Code](https:\u002F\u002Fgithub.com\u002Fhongfz16\u002FAvatarCLIP)] [[Project](https:\u002F\u002Fhongfz16.github.io\u002Fprojects\u002FAvatarCLIP.html)] \n       * ⭐⭐(arXiv preprint 2022) [💬Image & Video Editing] **Text2LIVE: Text-Driven Layered Image and Video Editing**, Omer Bar-Tal et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.02491)] [[Project](https:\u002F\u002Ftext2live.github.io\u002F)] \n       * (Machine Vision and Applications 2022) **Paired-D++ GAN for image manipulation with text**, Duc Minh Vo et al. [[Paper](https:\u002F\u002Flink.springer.com\u002Farticle\u002F10.1007\u002Fs00138-022-01298-7)]\n       * (CVPR 2022) [💬Hairstyle Transfer] **HairCLIP: Design Your Hair by Text and Reference Image**, Tianyi Wei et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.05142)] [[Code](https:\u002F\u002Fgithub.com\u002Fwty-ustc\u002FHairCLIP)] \n       * (CVPR 2022) [💬NeRF] **CLIP-NeRF: Text-and-Image Driven Manipulation of Neural Radiance Fields**, Can Wang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.05139)] [[Code](https:\u002F\u002Fgithub.com\u002FcassiePython\u002FCLIPNeRF)] [[Project](https:\u002F\u002Fcassiepython.github.io\u002Fclipnerf\u002F)]\n       * (CVPR 2022) **DiffusionCLIP: Text-Guided Diffusion Models for Robust Image Manipulation**, Gwanghyun Kim et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2110.02711)]\n       * (CVPR 2022) **ManiTrans: Entity-Level Text-Guided Image Manipulation via Token-wise Semantic Alignment and Generation**, Jianan Wang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.04428)] [[Project](https:\u002F\u002Fjawang19.github.io\u002Fmanitrans\u002F)] \n       * ⭐⭐ (CVPR 2022) **Blended Diffusion for Text-driven Editing of Natural Images**, Omri Avrahami et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.14818)] [[Code](https:\u002F\u002Fgithub.com\u002Fomriav\u002Fblended-diffusion)] [[Project](https:\u002F\u002Fomriavrahami.com\u002Fblended-diffusion-page\u002F)] \n       * (CVPR 2022) **Predict, Prevent, and Evaluate: Disentangled Text-Driven Image Manipulation Empowered by Pre-Trained Vision-Language Model**, Zipeng Xu et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.13333)] [[Code](https:\u002F\u002Fgithub.com\u002Fzipengxuc\u002FPPE-Pytorch)] \n       * (CVPR 2022) [💬Style Transfer] **CLIPstyler: Image Style Transfer with a Single Text Condition**, Gihyun Kwon et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.00374)] [[Code](https:\u002F\u002Fgithub.com\u002Fpaper11667\u002FCLIPstyler)] \n       * (arXiv preprint 2022) [💬Multi-person Image Generation] **Pose Guided Multi-person Image Generation From Text**, Soon Yau Cheong et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.04907)]\n       * (arXiv preprint 2022) [💬Image Style Transfer] **StyleCLIPDraw: Coupling Content and Style in Text-to-Drawing Translation**, Peter Schaldenbrand et al. 
[[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.12362)] [[Dataset](https:\u002F\u002Fwww.kaggle.com\u002Fpittsburghskeet\u002Fdrawings-with-style-evaluation-styleclipdraw)] [[Code](https:\u002F\u002Fgithub.com\u002Fpschaldenbrand\u002FStyleCLIPDraw)] [[Demo](https:\u002F\u002Freplicate.com\u002Fpschaldenbrand\u002Fstyle-clip-draw)]\n       * (arXiv preprint 2022) [💬Image Style Transfer] **Name Your Style: An Arbitrary Artist-aware Image Style Transfer**, Zhi-Song Liu et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.13562)]\n       * (arXiv preprint 2022) [💬3D Avatar Generation] **Text and Image Guided 3D Avatar Generation and Manipulation**, Zehranaz Canfes et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.06079)] [[Project](https:\u002F\u002Fcatlab-team.github.io\u002Flatent3D\u002F)]\n       * (arXiv preprint 2022) [💬Image Inpainting] **NÜWA-LIP: Language Guided Image Inpainting with Defect-free VQGAN**, Minheng Ni et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.05009)]\n       * ⭐(arXiv preprint 2021) [💬Text+Image → Video] **Make It Move: Controllable Image-to-Video Generation with Text Descriptions**, Yaosi Hu et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.02815)]\n       * (arXiv preprint 2021) [💬NeRF] **Zero-Shot Text-Guided Object Generation with Dream Fields**, Ajay Jain et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.01455)]  [[Project](https:\u002F\u002Fajayj.com\u002Fdreamfields)]\n       * (NeurIPS 2021) **Instance-Conditioned GAN**, Arantxa Casanova et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2109.05070)] [[Code](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fic_gan)]\n       * (ICCV 2021) **Language-Guided Global Image Editing via Cross-Modal Cyclic Mechanism**, Wentao Jiang et al. [[Paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FJiang_Language-Guided_Global_Image_Editing_via_Cross-Modal_Cyclic_Mechanism_ICCV_2021_paper.pdf)]\n       * (ICCV 2021) **Talk-to-Edit: Fine-Grained Facial Editing via Dialog**, Yuming Jiang et al. [[Paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FJiang_Talk-To-Edit_Fine-Grained_Facial_Editing_via_Dialog_ICCV_2021_paper.pdf)] [[Project](https:\u002F\u002Fwww.mmlab-ntu.com\u002Fproject\u002Ftalkedit\u002F)] [[Code](https:\u002F\u002Fgithub.com\u002Fyumingj\u002FTalk-to-Edit)]\n       * (ICCVW 2021) **CIGLI: Conditional Image Generation from Language & Image**, Xiaopeng Lu et al. [[Paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021W\u002FCLVL\u002Fpapers\u002FLu_CIGLI_Conditional_Image_Generation_From_Language__Image_ICCVW_2021_paper.pdf)] [[Code](https:\u002F\u002Fgithub.com\u002Fvincentlux\u002FCIGLI?utm_source=catalyzex.com)]\n       * (ICCV 2021) **StyleCLIP: Text-Driven Manipulation of StyleGAN Imagery**, Or Patashnik et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.17249)] [[Code](https:\u002F\u002Fgithub.com\u002Forpatashnik\u002FStyleCLIP)]\n       * (arXiv preprint 2021) **Paint by Word**, David Bau et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2103.10951.pdf)] \n       * ⭐(arXiv preprint 2021) **Zero-Shot Text-to-Image Generation**, Aditya Ramesh et al. 
[[Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2102.12092.pdf)] [[Code](https:\u002F\u002Fgithub.com\u002Fopenai\u002FDALL-E)] [[Blog](https:\u002F\u002Fopenai.com\u002Fblog\u002Fdall-e\u002F)] [[Model Card](https:\u002F\u002Fgithub.com\u002Fopenai\u002FDALL-E\u002Fblob\u002Fmaster\u002Fmodel_card.md)] [[Colab](https:\u002F\u002Fcolab.research.google.com\u002Fdrive\u002F1KA2w8bA9Q1HDiZf5Ow_VNOrTaWW4lXXG?usp=sharing)] \n       * (NeurIPS 2020) **Lightweight Generative Adversarial Networks for Text-Guided Image Manipulation**, Bowen Li et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2010.12136.pdf)]\n       * (CVPR 2020) **ManiGAN: Text-Guided Image Manipulation**, Bowen Li et al. [[Paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2020\u002Fpapers\u002FLi_ManiGAN_Text-Guided_Image_Manipulation_CVPR_2020_paper.pdf)] [[Code](https:\u002F\u002Fgithub.com\u002Fmrlibw\u002FManiGAN)]\n       * (ACMMM 2020) **Text-Guided Neural Image Inpainting**, Lisai Zhang et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2004.03212.pdf)] [[Code](https:\u002F\u002Fgithub.com\u002Fidealwhite\u002FTDANet)]\n       * (ACMMM 2020) **Describe What to Change: A Text-guided Unsupervised Image-to-Image Translation Approach**, Yahui Liu et al. [[Paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2008.04200.pdf)]\n       * (NeurIPS 2018) **Text-adaptive generative adversarial networks: Manipulating images with natural language**, Seonghyeon Nam et al. [[Paper](http:\u002F\u002Fpapers.nips.cc\u002Fpaper\u002F7290-text-adaptive-generative-adversarial-networks-manipulating-images-with-natural-language.pdf)] [[Code](https:\u002F\u002Fgithub.com\u002Fwoozzu\u002Ftagan)]\n\n[\u003Cu>\u003C🎯返回顶部>\u003C\u002Fu>](#head-content)\n\n   * \u003Cspan id=\"head-tl2i\"> **文本+布局 → 图像** \u003C\u002Fspan> \n       * (ECCV 2024) **无需训练的布局到图像合成场景生成方法**, 刘佳琪等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.13609)] \n       * (CVPR 2024) **Zero-Painter：无需训练的布局控制文本到图像合成方法**, 玛丽安娜·奥哈尼扬等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.04032)] [[代码](https:\u002F\u002Fgithub.com\u002FPicsart-AI-Research\u002FZero-Painter)]\n       * (CVPR 2024) **MIGC：多实例生成控制器用于文本到图像合成**, 周德伟等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.05408)] [[项目](https:\u002F\u002Fmigcproject.github.io\u002F)] [[代码](https:\u002F\u002Fgithub.com\u002Flimuloo\u002FMIGC)]\n       * (ICLR 2024) **对抗性监督助力布局到图像扩散模型蓬勃发展**, 李雨萌等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.08815)] [[项目](https:\u002F\u002Fyumengli007.github.io\u002FALDM\u002F)] [[代码](https:\u002F\u002Fgithub.com\u002Fboschresearch\u002FALDM)]\n       * (ICCV 2023) **基于注意力调节的密集文本到图像生成方法**, 金允智等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.12964)] [[代码](https:\u002F\u002Fgithub.com\u002Fnaver-ai\u002FDenseDiffusion)]\n       * (arXiv预印本2023) **无需训练的布局控制与交叉注意力引导**, 陈明浩等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.03373)] [[代码](https:\u002F\u002Fgithub.com\u002Fsilent-chen\u002Flayout-guidance)] [[项目](https:\u002F\u002Fsilent-chen.github.io\u002Flayout-guidance\u002F)]\n\n[\u003Cu>\u003C🎯返回顶部>\u003C\u002Fu>](#head-content)\n\n   * \u003Cspan id=\"head-oti2i\"> **其他+文本+图像\u002F视频 → 图像\u002F视频** \u003C\u002Fspan> \n       * (arXiv预印本2024) [💬骨架\u002F草图] **ECNet：高效可控的文本到图像扩散模型**, 李思成等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.18417)]\n       * (ICCV 2023) [💬骨架] **HumanSD：一种原生骨架引导的扩散模型用于人体图像生成**, 居轩等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.04269)] [[项目](https:\u002F\u002Fidea-research.github.io\u002FHumanSD\u002F)] 
[[代码](https:\u002F\u002Fgithub.com\u002FIDEA-Research\u002FHumanSD)] [[视频](https:\u002F\u002Fdrive.google.com\u002Ffile\u002Fd\u002F1Djc2uJS5fmKnKeBnL34FnAAm3YSH20Bb\u002Fview)]\n       * (arXiv预印本2023) [💬声音+语音→机器人绘画] **机器人共感觉：一种声音与情感引导的AI画家**, 维汉·米斯拉等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.04850)]\n       * (arXiv预印本2022) [💬声音] **鲁棒的声音引导图像操控**, 李承贤等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.14114)]\n\n[\u003Cu>\u003C🎯返回顶部>\u003C\u002Fu>](#head-content)\n       \n   * \u003Cspan id=\"head-l2i\"> **布局\u002F掩码 → 图像** \u003C\u002Fspan> \n       * (arXiv预印本2024) **CreatiLayout：用于创意布局到图像生成的孪生多模态扩散Transformer**, 张辉等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.03859)] [[项目](https:\u002F\u002Fcreatilayout.github.io\u002F)] [[代码](https:\u002F\u002Fgithub.com\u002FHuiZhang0812\u002FCreatiLayout)]\n       * (CVPR 2024) [💬实例信息+文本→图像] **InstanceDiffusion：面向图像生成的实例级控制方法**, 王旭东等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.03290)] [[项目](https:\u002F\u002Fpeople.eecs.berkeley.edu\u002F~xdwang\u002Fprojects\u002FInstDiff\u002F)] [[代码](https:\u002F\u002Fgithub.com\u002Ffrank-xwang\u002FInstanceDiffusion)]\n       * (arXiv预印本2023) [💬文本→布局→图像] **LayoutLLM-T2I：从大语言模型中提取布局指导用于文本到图像生成**, 曲雷刚等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.05095)]\n       * (CVPR 2023) [💬掩码+文本→图像] **SceneComposer：任意级别语义图像合成方法**, 曾宇等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.11742)] [[演示](https:\u002F\u002Fforms.microsoft.com\u002Fpages\u002Fresponsepage.aspx?id=Wht7-jR7h0OUrtLBeN7O4fEq8XkaWWJBhiLWWMELo2NUMjJYS0FDS0RISUVBUllMV0FRSzNCOTFTQy4u)]\n       * (CVPR 2023) **自由风格布局到图像合成方法**, 薛涵等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.14412)] [[代码](https:\u002F\u002Fgithub.com\u002Fessunny310\u002FFreestyleNet)]\n       * (CVPR 2023) **LayoutDiffusion：用于布局到图像生成的可控扩散模型**, 郑广聪等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.17189)] [[代码](https:\u002F\u002Fgithub.com\u002FZGCTroy\u002FLayoutDiffusion)]\n       * (沙特国王大学计算机与信息科学期刊) [综述] **基于场景图和布局的图像生成模型：对比分析**, 穆罕默德·乌迈尔·哈桑等 [[论文](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS1319157823000897)] \n       * (CVPR 2022) **复杂场景生成的图像构图建模方法**, 杨作鹏等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.00923)] [[代码](https:\u002F\u002Fgithub.com\u002FJohnDreamer\u002FTwFA)]\n       * (CVPR 2022) **带有全景布局生成的交互式图像合成方法**, 王波等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.02104)] \n       * (CVPR 2021 [内容创作人工智能研讨会](http:\u002F\u002Fvisual.cs.brown.edu\u002Fworkshops\u002Faicc2021\u002F)) **使用Transformer进行高分辨率复杂场景合成方法**, 曼努埃尔·扬等 [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2105.06458.pdf)] \n       * (CVPR 2021) **上下文感知布局到图像生成与增强对象外观方法**, 何森等 [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2103.11897.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fwtliao\u002Flayout2img)] \n\n[\u003Cu>\u003C🎯返回顶部>\u003C\u002Fu>](#head-content)\n\n   * \u003Cspan id=\"head-l2s\"> **标签集 → 语义地图** \u003C\u002Fspan> \n       * (ECCV 2020) **通过SegVAE实现可控图像合成**, 程彦池等 [[论文](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2020\u002Fpapers_ECCV\u002Fpapers\u002F123520154.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fyccyenchicheng\u002FSegVAE)]\n\n[\u003Cu>\u003C🎯返回顶部>\u003C\u002Fu>](#head-content)\n       \n   * \u003Cspan id=\"head-s2i\"> **语音 → 图像** \u003C\u002Fspan> \n       * (IEEE\u002FACM音频、语音与语言处理汇刊-2021) **从语音描述生成图像**, 王新胜等 [[论文](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1109\u002FTASLP.2021.3053391)] [[代码](https:\u002F\u002Fgithub.com\u002Fxinshengwang\u002FS2IGAN)] 
[[项目](https:\u002F\u002Fxinshengwang.github.io\u002Fproject\u002Fs2igan\u002F)]\n       * (INTERSPEECH 2020)**[扩展版本👆] S2IGAN：通过对抗学习生成语音到图像**, 王新胜等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2005.06968)]\n       * (IEEE信号处理专题期刊-2020) **直接语音到图像翻译方法**, 李吉国等 [[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9067083)] [[代码](https:\u002F\u002Fgithub.com\u002Fsmallflyingpig\u002Fspeech-to-image-translation-without-text)] [[项目](https:\u002F\u002Fsmallflyingpig.github.io\u002Fspeech-to-image\u002Fmain)]\n\n[\u003Cu>\u003C🎯返回顶部>\u003C\u002Fu>](#head-content)\n       \n   * \u003Cspan id=\"head-sg2i\"> **场景图 → 图像** \u003C\u002Fspan>  \n       * (arXiv预印本2023) **基于扩散的场景图到图像生成与掩码对比预训练方法**, 杨玲等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.11138)]\n       * (CVPR 2018) **从场景图生成图像**, 贾斯汀·约翰逊等 [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002FCameraReady\u002F0764.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fgoogle\u002Fsg2im)]\n\n[\u003Cu>\u003C🎯返回顶部>\u003C\u002Fu>](#head-content)\n   \n   * \u003Cspan id=\"head-t2vr\"> **文本 → 视觉检索** \u003C\u002Fspan> \n       * (ECIR 2023) **以场景为中心与以对象为中心的图像-文本跨模态检索：一项可重复性研究**, Mariya Hendriksen 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.05174)] [[代码](https:\u002F\u002Fgithub.com\u002Fmariyahendriksen\u002Fecir23-object-centric-vs-scene-centric-CMR)]\n       * (ECIR 2022) **为电商中的类别到图像检索扩展CLIP**, Mariya Hendriksen 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.11294)] [[代码](https:\u002F\u002Fgithub.com\u002Fmariyahendriksen\u002Fecir2022_category_to_image_retrieval)]\n       * (ACMMM 2022) **CAIBC：捕捉超越颜色的全方位信息用于基于文本的人体检索**, Zijie Wang 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2209.05773)] \n       * (AAAI 2022) **用于文本到图像检索的跨模态一致性**, Malihe Alikhani 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2109.11047)] \n       * (ECCV [RWS 2022](https:\u002F\u002Fvap.aau.dk\u002Frws-eccv2022\u002F)) [💬人体检索] **看得更细，看得更多：用于基于文本的人体检索的隐式模态对齐**, Xiujun Shu 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.08608)] [[代码](https:\u002F\u002Fgithub.com\u002FTencentYoutuResearch\u002FPersonRetrieval-IVT)] \n       * (ECCV 2022) [💬文本+草图→视觉检索] **一幅草图胜过千言万语：基于文本和草图的图像检索**, Patsorn Sangkloy 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.03354)] [[项目](https:\u002F\u002Fpatsorn.me\u002Fprojects\u002Ftsbir\u002F)] \n       * (Neurocomputing-2022) **TIPCB：一种简单而有效的基于部件的卷积基准模型用于基于文本的人体搜索**, Yuhao Chen 等人 [[论文](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0925231222004726)] [[代码](https:\u002F\u002Fgithub.com\u002FOrangeYHChen\u002FTIPCB?utm_source=catalyzex.com)] \n       * (arXiv预印本 2021) [💬数据集] **FooDI-ML：一个大型多语言食品、饮料和杂货图像及描述数据集**, David Amat Olóndriz 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2110.02035)] [[代码](https:\u002F\u002Fgithub.com\u002Fglovo\u002Ffoodi-ml-dataset)] \n       * (CVPRW 2021) **TIED：一种用于文本到图像检索的循环一致编码器-解码器模型**, Clint Sebastian 等人 [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021W\u002FAICity\u002Fpapers\u002FSebastian_TIED_A_Cycle_Consistent_Encoder-Decoder_Model_for_Text-to-Image_Retrieval_CVPRW_2021_paper.pdf)] \n       * (CVPR 2021) **T2VLAD：用于文本-视频检索的全局-局部序列对齐**, Xiaohan Wang 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2104.10054.pdf)] \n       * (CVPR 2021) **快速与慢速思考：基于Transformer的高效文本到视觉检索**, Antoine Miech 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2103.16553.pdf)] \n       * (IEEE Access 2019) **查询即GAN：基于注意力的文本到图像生成对抗网络的场景检索**, RINTARO YANAGI 等人 
[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8868179)]\n\n[\u003Cu>\u003C🎯返回顶部>\u003C\u002Fu>](#head-content)\n \n   * \u003Cspan id=\"head-t2m\"> **文本 → 3D\u002F动态\u002F形状\u002F网格\u002F对象...** \u003C\u002Fspan>\n      * (WACV 2026) [💬文本 → 纹理] **CasTex：基于显式纹理图与物理渲染的级联文本到纹理合成**, Mishan Aliev 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.06856)] [[项目](https:\u002F\u002Fthecrazymage.github.io\u002FCasTex\u002F)]\n       * (arXiv 预印本 2024) [💬文本 → 动作] **CrowdMoGen：零样本文本驱动的群体动作生成**, Xinying Guo 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.06188)] [[项目](https:\u002F\u002Fgxyes.github.io\u002Fprojects\u002FCrowdMoGen.html)]\n       * (ACMMM 2024) [💬文本 → 3D] **PlacidDreamer：推进文本到3D生成中的和谐性**, Shuo Huang 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.13976)] [[代码](https:\u002F\u002Fgithub.com\u002FHansenHuang0823\u002FPlacidDreamer)]\n       * (Meta) [💬文本 → 3D] **Meta 3D Gen**, Raphael Bensadoun 等人 [[论文](https:\u002F\u002Fscontent-dus1-1.xx.fbcdn.net\u002Fv\u002Ft39.2365-6\u002F449707112_509645168082163_2193712134508658234_n.pdf?_nc_cat=111&ccb=1-7&_nc_sid=3c67a6&_nc_ohc=TdfUsn5eGzgQ7kNvgEir1_g&_nc_ht=scontent-dus1-1.xx&oh=00_AYCH-Fbi8CL2l3Yc3ehAr-Itl5B6Wbo7KtXeONb8KCJ_mg&oe=668C1291)]\n       * (arXiv 预印本 2024) [💬文本 → 3D] **Meta 3D TextureGen：面向3D物体的快速且一致的纹理生成**, Raphael Bensadoun 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.02430v1)]\n       * (arXiv 预印本 2024) [💬文本 → 3D] **Meta 3D AssetGen：文本到网格生成，具备高质量几何、纹理和PBR材质**, Yawar Siddiqui 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.02445v1)] [[项目](https:\u002F\u002Fassetgen.github.io\u002F)]\n       * (arXiv 预印本 2024) [💬文本 → 3D] **3DStyleGLIP：针对部分定制的文本引导3D神经风格化**, SeungJeh Chung 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.02634v1)]\n       * (arXiv 预印本 2024) [💬文本 → 3D] **LATTE3D：大规模摊销文本到增强3D合成**, Kevin Xie 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.15385)] [[项目](https:\u002F\u002Fresearch.nvidia.com\u002Flabs\u002Ftoronto-ai\u002FLATTE3D\u002F)]\n       * (IEEE 可视化与计算机图形学汇刊) [💬文本 → 动作] **GUESS：逐步丰富合成用于文本驱动的人体动作生成**, Xuehao Gao 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.02142v1)]\n       * (arXiv 预印本 2023) [💬文本 → 4D] **4D-fy：使用混合分数蒸馏采样进行文本到4D生成**, Sherwin Bahmani 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.17984)] [[项目](https:\u002F\u002Fsherwinbahmani.github.io\u002F4dfy\u002F)] [[代码](https:\u002F\u002Fgithub.com\u002Fsherwinbahmani\u002F4dfy)]\n       * (arXiv 预印本 2023) [💬文本 → 3D] **MetaDreamer：高效文本到3D创作，实现几何与纹理解耦**, Lincong Feng 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.10123)] [[项目](https:\u002F\u002Fmetadreamer3d.github.io\u002F)]\n       * (arXiv 预印本 2023) [💬文本 → 3D] **One-2-3-45++：快速单张图像到3D物体，具备一致多视角生成与3D扩散**, Minghua Liu 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.07885)] [[项目](https:\u002F\u002Fsudo-ai-3d.github.io\u002FOne2345plus_page\u002F)]\n       * (NeurIPS 2023) [💬文本 → 3D] **One-2-3-45：任意单张图像在45秒内生成3D网格，无需逐形状优化**, Minghua Liu 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.16928)] [[项目](https:\u002F\u002Fone-2-3-45.github.io\u002F)] [[代码](https:\u002F\u002Fgithub.com\u002FOne-2-3-45\u002FOne-2-3-45)]\n       * (ACMMM 2023) [💬文本+草图 → 3D] **Control3D：迈向可控文本到3D生成**, Yang Chen 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.05461)]\n       * (SIGGRAPH Asia 2023 & TOG) [💬文本 → 3D] **EXIM：一种混合显隐表示用于文本引导的3D形状生成**, Zhengzhe Liu 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.01714v1)] [[代码](https:\u002F\u002Fgithub.com\u002Fliuzhengzhe\u002FEXIM)]\n       * 
(arXiv 预印本 2023) [💬文本 → 3D] **PaintHuman：迈向高保真文本到3D人体纹理化，通过去噪分数蒸馏**, Jianhui Yu 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.09458v1)]\n       * (arXiv 预印本 2023) [💬文本 → 动作] **Fg-T2M：基于扩散模型的细粒度文本驱动人体动作生成**, Yin Wang 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.06284)]\n       * (arXiv 预印本 2023) [💬文本 → 3D] **IT3D：改进的文本到3D生成，具备显式视角合成**, Yiwen Chen 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.11473)] [[代码](https:\u002F\u002Fgithub.com\u002Fbuaacyw\u002FIT3D-text-to-3D)]\n       * (arXiv 预印本 2023) [💬文本 → 3D] **HD-Fusion：利用多重噪声估计实现细节丰富的文本到3D生成**, Jinbo Wu 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.16183)]\n       * (arXiv 预印本 2023) [💬文本 → 3D] **T2TD：基于先验知识指导的文本到3D生成模型**, Weizhi Nie 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.15753)]\n       * (arXiv 预印本 2023) [💬文本 → 3D] **ProlificDreamer：高保真与多样化的文本到3D生成，基于变分分数蒸馏**, Zhengyi Wang 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.16213)] [[项目](https:\u002F\u002Fml.cs.tsinghua.edu.cn\u002Fprolificdreamer\u002F)]\n       * (arXiv 预印本 2023) [💬文本+网格 → 网格] **X-Mesh：迈向快速准确的文本驱动3D风格化，通过动态文本引导**, Yiwei Ma 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.15764)] [[项目](https:\u002F\u002Fxmu-xiaoma666.github.io\u002FProjects\u002FX-Mesh\u002F)] [[代码](https:\u002F\u002Fgithub.com\u002Fxmu-xiaoma666\u002FX-Mesh)]\n       * (arXiv 预印本 2023) [💬文本 → 动作] **T2M-GPT：以离散表示生成文本描述的人体动作**, Jianrong Zhang 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.06052)] [[项目](https:\u002F\u002Fmael-zys.github.io\u002FT2M-GPT\u002F)] [[代码](https:\u002F\u002Fgithub.com\u002FMael-zys\u002FT2M-GPT)] [[Hugging Face](https:\u002F\u002Fhuggingface.co\u002Fvumichien\u002FT2M-GPT)]\n       * (arXiv 预印本 2023) [💬文本 → 3D] **DreamHuman：可动画化的3D虚拟形象，由文本生成**, Nikos Kolotouros 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.09329)] [[项目](https:\u002F\u002Fdream-human.github.io\u002F)]\n       * (arXiv 预印本 2023) [💬文本 → 3D] **ATT3D：摊销文本到3D物体合成**, Jonathan Lorraine 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.07349)] [[项目](https:\u002F\u002Fresearch.nvidia.com\u002Flabs\u002Ftoronto-ai\u002FATT3D\u002F)]\n       * (arXiv 预印本 2022) [💬文本 → 3D] **Dream3D：零样本文本到3D合成，使用3D形状先验与文本到图像扩散模型**, Jiale Xu 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.14704)] [[项目](https:\u002F\u002Fbluestyle97.github.io\u002Fdream3d\u002F)]\n       * (arXiv 预印本 2022) [💬3D生成模型] **DATID-3D：利用文本到图像扩散模型实现多样性保持的领域适应**, Gwanghyun Kim 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.16374)] [[代码](https:\u002F\u002Fgithub.com\u002Fgwang-kim\u002FDATID-3D)] [[项目](https:\u002F\u002Fdatid-3d.github.io\u002F)]\n       * (arXiv 预印本 2022) [💬点云] **Point-E：从复杂提示生成3D点云的系统**, Alex Nichol 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.08751)] [[代码](https:\u002F\u002Fgithub.com\u002Fopenai\u002Fpoint-e)]\n       * (arXiv 预印本 2022) [💬文本 → 3D] **Magic3D：高分辨率文本到3D内容创作**, Chen-Hsuan Lin 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.10440)] [[项目](https:\u002F\u002Fdeepimagination.cc\u002FMagic3D\u002F)]\n       * (arXiv 预印本 2022) [💬文本 → 形状] **Diffusion-SDF：通过体素化扩散生成文本到形状**, Muheng Li 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.03293)] [[代码](https:\u002F\u002Fgithub.com\u002Fttlmh\u002FDiffusion-SDF)]\n       * (NIPS 2022) [💬网格] **TANGO：基于光照分解的文本驱动真实感与鲁棒性3D风格化**, Yongwei Chen 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.11277)] [[项目](https:\u002F\u002Fcyw-3d.github.io\u002Ftango\u002F)] [[代码](https:\u002F\u002Fgithub.com\u002FGorilla-Lab-SCUT\u002Ftango)]\n       * (arXiv 预印本 2022) [💬人体动作生成] 
**人体动作扩散模型**, Guy Tevet 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2209.14916)] [[项目](https:\u002F\u002Fguytevet.github.io\u002Fmdm-page\u002F)] [[代码](https:\u002F\u002Fgithub.com\u002FGuyTevet\u002Fmotion-diffusion-model)]\n       * (arXiv 预印本 2022) [💬人体动作生成] **MotionDiffuse：基于扩散模型的文本驱动人体动作生成**, Mingyuan Zhang 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.15001)] [[项目](https:\u002F\u002Fmingyuan-zhang.github.io\u002Fprojects\u002FMotionDiffuse.html#)]\n       * (arXiv 预印本 2022) [💬3D形状] **ISS：作为文本引导3D形状生成基石的图像**, Zhengzhe Liu 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2209.04145)]\n       * (ECCV 2022) [💬虚拟人] **语义控制下的组合式人机场景交互合成**, Kaifeng Zhao 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.12824)] [[项目](https:\u002F\u002Fzkf1997.github.io\u002FCOINS\u002Findex.html)] [[代码](https:\u002F\u002Fgithub.com\u002Fzkf1997\u002FCOINS)]\n       * (CVPR 2022) [💬3D形状] **迈向隐式文本引导的3D形状生成**, Zhengzhe Liu 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.14622)] [[代码](https:\u002F\u002Fgithub.com\u002Fliuzhengzhe\u002FTowards-Implicit-Text-Guided-Shape-Generation)]\n       * (CVPR 2022) [💬物体] **零样本文本引导物体生成，借助梦境场**, Ajay Jain 等人 [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FJain_Zero-Shot_Text-Guided_Object_Generation_With_Dream_Fields_CVPR_2022_paper.pdf)] [[项目](https:\u002F\u002Fajayj.com\u002Fdreamfields)] [[代码](https:\u002F\u002Fgithub.com\u002Fgoogle-research\u002Fgoogle-research\u002Ftree\u002Fmaster\u002Fdreamfields)]\n       * (CVPR 2022) [💬网格] **Text2Mesh：文本驱动的网格神经风格化**, Oscar Michel 等人 [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FMichel_Text2Mesh_Text-Driven_Neural_Stylization_for_Meshes_CVPR_2022_paper.pdf)] [[项目](https:\u002F\u002Fthreedle.github.io\u002Ftext2mesh\u002F)] [[代码](https:\u002F\u002Fgithub.com\u002Fthreedle\u002Ftext2mesh)]\n       * (CVPR 2022) [💬动作] **从文本生成多样且自然的3D人体动作**, Chuan Guo 等人 [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FGuo_Generating_Diverse_and_Natural_3D_Human_Motions_From_Text_CVPR_2022_paper.pdf)] [[项目](https:\u002F\u002Fericguo5513.github.io\u002Ftext-to-motion\u002F)] [[代码](https:\u002F\u002Fgithub.com\u002FEricGuo5513\u002Ftext-to-motion)]\n       * (CVPR 2022) [💬形状] **CLIP-Forge：迈向零样本文本到形状生成**, Aditya Sanghi 等人 [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FSanghi_CLIP-Forge_Towards_Zero-Shot_Text-To-Shape_Generation_CVPR_2022_paper.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FAutodeskAILab\u002FClip-Forge)]\n       * (arXiv 预印本 2022) [💬动作] **TEMOS：从文本描述生成多样人体动作**, Mathis Petrovich 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.14109)] [[项目](https:\u002F\u002Fmathis.petrovich.fr\u002Ftemos\u002F)] [[代码](https:\u002F\u002Fgithub.com\u002FMathux\u002FTEMOS)]\n\n[\u003Cu>\u003C🎯返回顶部>\u003C\u002Fu>](#head-content)\n   \n   * \u003Cspan id=\"head-t2v\"> **文本 → 视频** \u003C\u002Fspan> \n       * (arXiv预印本 2025) **MotionAgent：通过运动场智能体实现细粒度可控视频生成**, 廖欣瑶等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.03207)] \n       * (arXiv预印本 2024) **VideoTetris：迈向组合式文本到视频生成**, 田晔等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.04277)] [[项目](https:\u002F\u002Fvideotetris.github.io\u002F)] [[代码](https:\u002F\u002Fgithub.com\u002FYangLing0818\u002FVideoTetris)] \n       * (arXiv预印本 2024) **MovieDreamer：用于连贯长视觉序列的层次化生成**, 赵灿宇等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.16655)] 
[[项目](https:\u002F\u002Faim-uofa.github.io\u002FMovieDreamer\u002F)] [[代码](https:\u002F\u002Fgithub.com\u002Faim-uofa\u002FMovieDreamer)] [[演示视频](https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=aubRVOGrKLU)]\n       * 💥💥(OpenAI 2024) **Sora** [[主页](https:\u002F\u002Fopenai.com\u002Fsora)] [[技术报告](https:\u002F\u002Fopenai.com\u002Fresearch\u002Fvideo-generation-models-as-world-simulators)] [[带音频的Sora](https:\u002F\u002Fx.com\u002Felevenlabsio\u002Fstatus\u002F1759240084342059260?s=20)]\n       * (ICLR 2024) **ControlVideo：无需训练的可控文本到视频生成**, 张亚博等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.13077)] [[代码](https:\u002F\u002Fgithub.com\u002FYBYBZhang\u002FControlVideo)]\n       * (arXiv预印本 2024) **MagicVideo-V2：多阶段高审美视频生成**, 王伟敏等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.04468)] [[项目](https:\u002F\u002Fmagicvideov2.github.io\u002F)]\n       * (arXiv预印本 2023) **LAVIE：利用级联潜在扩散模型实现高质量视频生成**, 王耀辉等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.15103)] [[项目](https:\u002F\u002Fvchitect.github.io\u002FLaVie-project\u002F)] [[代码](https:\u002F\u002Fgithub.com\u002FVchitect\u002FLaVie)]\n       * (arXiv预印本 2023) **Emu Video：通过显式图像条件化分解文本到视频生成**, 罗希特·吉尔达尔等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.10709)] [[项目](https:\u002F\u002Femu-video.metademolab.com\u002F)] \n       * (ICCV 2023) **Text2Video-Zero：文本到图像扩散模型是零样本视频生成器**, 列文·哈恰特良等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.13439)] [[项目](https:\u002F\u002Ftext2video-zero.github.io\u002F)] [[视频](https:\u002F\u002Fwww.dropbox.com\u002Fs\u002Fuv90mi2z598olsq\u002FText2Video-Zero.MP4?dl=0)] [[代码](https:\u002F\u002Fgithub.com\u002FPicsart-AI-Research\u002FText2Video-Zero)] [[Hugging Face](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002FPAIR\u002FText2Video-Zero)]\n       * (NeurIPS 2023 数据集与基准) **FETV：开放域文本到视频生成的细粒度评估基准**, 刘元欣等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.01813v1)] [[项目](https:\u002F\u002Fgithub.com\u002Fllyx97\u002FFETV)]\n       * (arXiv预印本 2023) **用于增强文本到视频生成的最佳噪声探索**, 马世杰等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.00949v1)] \n       * (arXiv预印本 2023) **复用与扩散：迭代去噪用于文本到视频生成**, 顾嘉熙等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.03549)] [[项目](https:\u002F\u002Fanonymous0x233.github.io\u002FReuseAndDiffuse\u002F)] \n       * (arXiv预印本 2023) **制作主角：基于专家集合的通用视频编辑**, 赵宇阳等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.08850)] [[代码](https:\u002F\u002Fgithub.com\u002FMake-A-Protagonist\u002FMake-A-Protagonist)] [[项目](https:\u002F\u002Fmake-a-protagonist.github.io\u002F)] \n         * 📚使用主角进行图像编辑、背景编辑和文本到视频编辑\n       * ⭐⭐(CVPR 2023) **对齐你的潜在变量：基于潜在扩散模型的高分辨率视频合成**, 安德烈亚斯·布拉特曼等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.08818)] [[项目](https:\u002F\u002Fresearch.nvidia.com\u002Flabs\u002Ftoronto-ai\u002FVideoLDM\u002F)]\n       * (arXiv预印本 2023) [💬音乐可视化] **生成迪斯科：用于音乐可视化的文本到视频生成**, 刘薇安等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.08551)] \n       * (arXiv预印本 2023) **文本到4D动态场景生成**, 乌里埃尔·辛格等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.11280)] [[项目](https:\u002F\u002Fmake-a-video3d.github.io\u002F)]\n       * (arXiv预印本 2022) **调优视频：单次调优图像扩散模型以实现文本到视频生成**, 吴杰志等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.11565)] [[项目](https:\u002F\u002Ftuneavideo.github.io\u002F)] [[代码](https:\u002F\u002Fgithub.com\u002Fshowlab\u002FTune-A-Video)]\n       * (arXiv预印本 2022) **MagicVideo：基于潜在扩散模型的高效视频生成**, 周大泉等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.11018)] [[项目](https:\u002F\u002Fmagicvideo.github.io\u002F#)] \n       * (arXiv预印本 2022) 
**Phenaki：从开放域文本描述生成可变长度视频**, 鲁本·维列加斯等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.02399)] \n       * (arXiv预印本 2022) **Imagen Video：基于扩散模型的高清视频生成**, 乔纳森·霍等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.02303v1)] [[项目](https:\u002F\u002Fimagen.research.google\u002Fvideo\u002F)] \n       * (arXiv预印本 2022) **文本驱动的视频预测**, 宋雪等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.02872)] \n       * (arXiv预印本 2022) **制作视频：无需文本视频数据的文本到视频生成**, 乌里埃尔·辛格等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2209.14792)] [[项目](https:\u002F\u002Fmakeavideo.studio\u002F)] [[简短阅读](https:\u002F\u002Fwww.louisbouchard.ai\u002Fmake-a-video\u002F)] [[代码](https:\u002F\u002Fgithub.com\u002Flucidrains\u002Fmake-a-video-pytorch)]\n       * (ECCV 2022) [💬故事续写] **StoryDALL-E：为故事续写适配预训练文本到图像转换器**, 阿迪亚莎·马哈拉纳等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2209.06192)] [[代码](https:\u002F\u002Fgithub.com\u002Fadymaharana\u002Fstorydalle)]\n       * (arXiv预印本 2022) [💬故事 → 视频] **单词级细粒度故事可视化**, 李博文等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.02341)] [[代码](https:\u002F\u002Fgithub.com\u002Fmrlibw\u002FWord-Level-Story-Visualization)]\n       * (arXiv预印本 2022) **CogVideo：基于Transformer的大规模文本到视频生成预训练**, 洪文义等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.15868)] [[代码](https:\u002F\u002Fgithub.com\u002FTHUDM\u002FCogVideo)]\n       * (CVPR 2022) **向我展示什么，告诉我如何：基于多模态条件化的视频合成**, Ligong Han 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.02573)] [[代码](https:\u002F\u002Fgithub.com\u002Fsnap-research\u002FMMVID)] [[项目](https:\u002F\u002Fsnap-research.github.io\u002FMMVID\u002F)]\n       * (arXiv预印本 2022) **视频扩散模型**, 乔纳森·霍等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.03458)] [[项目](https:\u002F\u002Fvideo-diffusion.github.io\u002F)]\n       * (arXiv预印本 2021) [❌生成任务] **从文本到视频：高效的片段序列生成**, Yu Xiong 等人 [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2107.11851.pdf)] [[项目](http:\u002F\u002Fwww.xiongyu.me\u002Fprojects\u002Ftranscript2video\u002F)]\n       * (arXiv预印本 2021) **GODIVA：从自然描述生成开放域视频**, 吴晨飞等 [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2104.14806.pdf)] \n       * (arXiv预印本 2021) **文本到视频：基于音韵词典的文本驱动说话头视频合成**, 张思博等 [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2104.14631.pdf)] \n       * (IEEE Access 2020) **TiVGAN：逐步进化生成器实现文本到图像到视频生成**, 金道延等 [[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9171240)] \n       * (IJCAI 2019) **带有判别滤波器生成的条件GAN用于文本到视频合成**, 约格什·巴拉吉等 [[论文](https:\u002F\u002Fwww.ijcai.org\u002FProceedings\u002F2019\u002F0276.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fminrq\u002FCGAN_Text2Video)] \n       * (IJCAI 2019) **IRC-GAN：用于文本到视频生成的内省递归卷积GAN**, 邓康乐等 [[论文](https:\u002F\u002Fwww.ijcai.org\u002FProceedings\u002F2019\u002F0307.pdf)] \n       * (CVPR 2019) [💬故事 → 视频] **StoryGAN：用于故事可视化的顺序条件GAN**, 李一彤等 [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fhtml\u002FLi_StoryGAN_A_Sequential_Conditional_GAN_for_Story_Visualization_CVPR_2019_paper.html)] [[代码](https:\u002F\u002Fgithub.com\u002Fyitong91\u002FStoryGAN)]\n       * (AAAI 2018) **从文本生成视频**, 李一彤等 [[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F12233)] \n       * (ACMMM 2017) **根据你所说创造：从字幕生成视频**, 潘颖威等 [[论文](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3123266.3127905)]\n\n[\u003Cu>\u003C🎯返回顶部>\u003C\u002Fu>](#head-content)\n\n   * \u003Cspan id=\"head-t2music\"> **文本 → 音乐** \u003C\u002Fspan> \n  
     * ⭐(arXiv预印本 2023) **MusicLM：从文本生成音乐**, 安德烈亚·阿戈斯蒂内利等 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.11325)] [[项目](https:\u002F\u002Fgoogle-research.github.io\u002Fseanet\u002Fmusiclm\u002Fexamples\u002F)] [[MusicCaps](https:\u002F\u002Fwww.kaggle.com\u002Fdatasets\u002Fgoogleai\u002Fmusiccaps)]\n\n[\u003Cu>\u003C🎯返回顶部>\u003C\u002Fu>](#head-content)\n\n\n\n## \u003Cspan id=\"head7\"> 联系我 \u003C\u002Fspan>\n\n [![Star History Chart](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FYutong-Zhou-cv_Awesome-Text-to-Image_readme_5b587d094885.png)](https:\u002F\u002Fstar-history.com\u002F#Yutong-Zhou-cv\u002FAwesome-Text-to-Image&Date)\n\n如果您有任何问题或意见，请随时联系[**Yutong**](https:\u002F\u002Felizazhou96.github.io\u002F) ლ(╹◡╹ლ)\n\n## \u003Cspan id=\"head8\"> 贡献者 \u003C\u002Fspan>\n\n![Alt](https:\u002F\u002Frepobeats.axiom.co\u002Fapi\u002Fembed\u002F2a1ae2aebaa287bfbf50a9aafdfde0406c1b0cfe.svg \"Repobeats analytics image\")\n\n\u003Ca href=\"https:\u002F\u002Fgithub.com\u002FYutong-Zhou-cv\u002Fawesome-Text-to-Image\u002Fgraphs\u002Fcontributors\">\n  \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FYutong-Zhou-cv_Awesome-Text-to-Image_readme_332ca1af655f.png\" \u002F>\n\u003C\u002Fa>\n\n> 使用[contrib.rocks](https:\u002F\u002Fcontrib.rocks)制作。","# Awesome-Text-to-Image 快速上手指南\n\n> 本项目为文本生成图像（Text-to-Image）领域的资源集合，包含论文、项目、数据集等。无需安装，直接浏览即可获取相关资源。\n\n## 环境准备\n- 已安装 Git 工具\n\n## 安装步骤\n直接克隆仓库：\n```bash\ngit clone https:\u002F\u002Fgithub.com\u002FYutong-Zhou-cv\u002FAwesome-Text-to-Image.git\n```\n\n如网络受限，也可尝试第三方镜像加速克隆（镜像服务可用性不保证，请以 GitHub 源为准）：\n```bash\ngit clone https:\u002F\u002Fhub.fastgit.org\u002FYutong-Zhou-cv\u002FAwesome-Text-to-Image.git\n```\n\n## 基本使用\n1. 进入项目目录：\n   ```bash\n   cd Awesome-Text-to-Image\n   ```\n\n2. 查看核心资源列表：\n   - 首选查看 `[CVPRW 2023 🎈] Best Collection.md`（最新精选资源）\n   - 其他分类列表位于 `Lists\u002F` 目录下\n\n3. 
直接访问 GitHub 页面获取最新内容：\n   https:\u002F\u002Fgithub.com\u002FYutong-Zhou-cv\u002FAwesome-Text-to-Image","某高校AI实验室的研究员小李正在撰写关于文本生成图像技术的综述论文，需要快速整理最新研究进展和相关资源。\n\n### 没有 Awesome-Text-to-Image 时\n- 每天花费数小时在Google Scholar、arXiv等平台手动搜索，结果杂乱且难以区分核心论文，常需反复筛选\n- 常错过CVPR、ICCV等顶会的最新成果，需逐个查看会议论文集，耗时且易遗漏\n- 找到的代码仓库质量参差不齐，部分项目已停止维护，复现时频繁遇到依赖问题\n- 数据集链接分散在GitHub Issues、Reddit、个人博客等不同平台，平均每个链接需10分钟验证有效性\n\n### 使用 Awesome-Text-to-Image 后\n- 通过“Best Collection”和“Recently Focused Papers”列表，30分钟内精准定位2023年CVPR、ICLR等顶会的20篇关键论文，覆盖最新技术突破\n- 直接访问整理好的数据集页面，所有链接经过验证且附带使用说明，数据准备时间减少80%\n- 项目列表明确标注GitHub star数、更新时间和维护状态，快速筛选高活跃度代码库，复现成功率提升至90%\n- 按主题分类的资源导航让综述章节结构清晰，写作效率提升3倍，提前两周完成论文初稿\n\n核心价值：将文献调研和资源收集的耗时从数周压缩至数小时，让研究者专注创新而非信息整理。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FYutong-Zhou-cv_Awesome-Text-to-Image_32fff393.png","Yutong-Zhou-cv","Sanctuary","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002FYutong-Zhou-cv_a1ef265d.jpg","(ෆ`꒳´ෆ)\r\nPostdoc",null,"Germany","Yutong96Sweet","https:\u002F\u002Felizazhou96.github.io\u002F","https:\u002F\u002Fgithub.com\u002FYutong-Zhou-cv",2431,207,"2026-03-31T06:43:23","MIT",1,"未说明",{"notes":91,"python":89,"dependencies":92},"此为资源集合仓库，非可执行工具，无具体运行环境需求",[89],[13,54,14],[95,96,97,98,99,100,101,102,103,104],"generative-adversarial-network","text-to-image","image-synthesis","image-generation","survey","awseome-list","image-manipulation","text-to-face","multimodal","multimodal-deep-learning","2026-03-27T02:49:30.150509","2026-04-06T07:24:46.421194",[108,113,118,123,128,133],{"id":109,"question_zh":110,"answer_zh":111,"source_url":112},8792,"DALL-E Colab 链接失效如何解决？","DALL-E Colab 链接已更新，具体请参考 [DALL-E Issues#17](https:\u002F\u002Fgithub.com\u002Fopenai\u002FDALL-E\u002Fissues\u002F17) 获取更多生成图像的细节。","https:\u002F\u002Fgithub.com\u002FYutong-Zhou-cv\u002FAwesome-Text-to-Image\u002Fissues\u002F4",{"id":114,"question_zh":115,"answer_zh":116,"source_url":117},8793,"是否有文本到图像生成的调查资源？","有调查博客 https:\u002F\u002Fhackmd.io\u002F@prajwalsingh\u002Fimagesynthesis，已添加到仓库中。","https:\u002F\u002Fgithub.com\u002FYutong-Zhou-cv\u002FAwesome-Text-to-Image\u002Fissues\u002F5",{"id":119,"question_zh":120,"answer_zh":121,"source_url":122},8794,"如何计算 FID 和 IS 指标？","新版本的 FID 和 IS 评估代码已添加到仓库，具体代码在 https:\u002F\u002Fgithub.com\u002Fsenmaoy\u002FInception-Score-FID-on-CUB-and-OXford.git，解决了旧 TensorFlow 代码运行问题。","https:\u002F\u002Fgithub.com\u002FYutong-Zhou-cv\u002FAwesome-Text-to-Image\u002Fissues\u002F7",{"id":124,"question_zh":125,"answer_zh":126,"source_url":127},8791,"如何联系作者？","作者的 email 地址在 README.md 文件末尾。","https:\u002F\u002Fgithub.com\u002FYutong-Zhou-cv\u002FAwesome-Text-to-Image\u002Fissues\u002F1",{"id":129,"question_zh":130,"answer_zh":131,"source_url":132},8795,"如何测试文本到图像生成？","在项目的 Project 部分有演示，最流行的是 Disco Diffusion Colab（https:\u002F\u002Fcolab.research.google.com\u002Fgithub\u002Falembics\u002Fdisco-diffusion\u002Fblob\u002Fmain\u002FDisco_Diffusion.ipynb），并有教程视频：[Quick & Easy Tutorial for Disco Diffusion Google Colab](https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=FA2MNG8D5x0) 和 [Image Generation with CLIP + Diffusion models](https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=Dx2G940Pao8)","https:\u002F\u002Fgithub.com\u002FYutong-Zhou-cv\u002FAwesome-Text-to-Image\u002Fissues\u002F10",{"id":134,"question_zh":135,"answer_zh":136,"source_url":137},8796,"有哪些图像质量评估指标？","CLIPScore 已添加到指标列表中，可用于评估图像质量。具体使用请参考 CLIPScore 论文 https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.08718。","https:\u002F\u002Fgithub.com\u002FYutong-Zhou-cv\u002FAwesome-Text-to-Image\u002Fissues\u002F12",[]]