[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-atfortes--Awesome-Controllable-Diffusion":3,"tool-atfortes--Awesome-Controllable-Diffusion":64},[4,17,26,40,48,56],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":16},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,3,"2026-04-05T11:01:52",[13,14,15],"开发框架","图像","Agent","ready",{"id":18,"name":19,"github_repo":20,"description_zh":21,"stars":22,"difficulty_score":23,"last_commit_at":24,"category_tags":25,"status":16},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",107662,2,"2026-04-03T11:11:01",[13,14,15],{"id":27,"name":28,"github_repo":29,"description_zh":30,"stars":31,"difficulty_score":23,"last_commit_at":32,"category_tags":33,"status":16},2268,"ML-For-Beginners","microsoft\u002FML-For-Beginners","ML-For-Beginners 是由微软推出的一套系统化机器学习入门课程，旨在帮助零基础用户轻松掌握经典机器学习知识。这套课程将学习路径规划为 12 周，包含 26 节精炼课程和 52 道配套测验，内容涵盖从基础概念到实际应用的完整流程，有效解决了初学者面对庞大知识体系时无从下手、缺乏结构化指导的痛点。\n\n无论是希望转型的开发者、需要补充算法背景的研究人员，还是对人工智能充满好奇的普通爱好者，都能从中受益。课程不仅提供了清晰的理论讲解，还强调动手实践，让用户在循序渐进中建立扎实的技能基础。其独特的亮点在于强大的多语言支持，通过自动化机制提供了包括简体中文在内的 50 多种语言版本，极大地降低了全球不同背景用户的学习门槛。此外，项目采用开源协作模式，社区活跃且内容持续更新，确保学习者能获取前沿且准确的技术资讯。如果你正寻找一条清晰、友好且专业的机器学习入门之路，ML-For-Beginners 将是理想的起点。",84991,"2026-04-05T10:45:23",[14,34,35,36,15,37,38,13,39],"数据工具","视频","插件","其他","语言模型","音频",{"id":41,"name":42,"github_repo":43,"description_zh":44,"stars":45,"difficulty_score":10,"last_commit_at":46,"category_tags":47,"status":16},3128,"ragflow","infiniflow\u002Fragflow","RAGFlow 是一款领先的开源检索增强生成（RAG）引擎，旨在为大语言模型构建更精准、可靠的上下文层。它巧妙地将前沿的 RAG 技术与智能体（Agent）能力相结合，不仅支持从各类文档中高效提取知识，还能让模型基于这些知识进行逻辑推理和任务执行。\n\n在大模型应用中，幻觉问题和知识滞后是常见痛点。RAGFlow 通过深度解析复杂文档结构（如表格、图表及混合排版），显著提升了信息检索的准确度，从而有效减少模型“胡编乱造”的现象，确保回答既有据可依又具备时效性。其内置的智能体机制更进一步，使系统不仅能回答问题，还能自主规划步骤解决复杂问题。\n\n这款工具特别适合开发者、企业技术团队以及 AI 研究人员使用。无论是希望快速搭建私有知识库问答系统，还是致力于探索大模型在垂直领域落地的创新者，都能从中受益。RAGFlow 提供了可视化的工作流编排界面和灵活的 API 接口，既降低了非算法背景用户的上手门槛，也满足了专业开发者对系统深度定制的需求。作为基于 Apache 2.0 协议开源的项目，它正成为连接通用大模型与行业专有知识之间的重要桥梁。",77062,"2026-04-04T04:44:48",[15,14,13,38,37],{"id":49,"name":50,"github_repo":51,"description_zh":52,"stars":53,"difficulty_score":10,"last_commit_at":54,"category_tags":55,"status":16},519,"PaddleOCR","PaddlePaddle\u002FPaddleOCR","PaddleOCR 是一款基于百度飞桨框架开发的高性能开源光学字符识别工具包。它的核心能力是将图片、PDF 等文档中的文字提取出来，转换成计算机可读取的结构化数据，让机器真正“看懂”图文内容。\n\n面对海量纸质或电子文档，PaddleOCR 解决了人工录入效率低、数字化成本高的问题。尤其在人工智能领域，它扮演着连接图像与大型语言模型（LLM）的桥梁角色，能将视觉信息直接转化为文本输入，助力智能问答、文档分析等应用场景落地。\n\nPaddleOCR 适合开发者、算法研究人员以及有文档自动化需求的普通用户。其技术优势十分明显：不仅支持全球 100 多种语言的识别，还能在 Windows、Linux、macOS 等多个系统上运行，并灵活适配 CPU、GPU、NPU 
**PaddleOCR** (PaddlePaddle/PaddleOCR) · ★ 74,913 · difficulty 3 · last commit 2026-04-05 · tags: language model, image, development framework, other

PaddleOCR is a high-performance open-source optical character recognition toolkit built on Baidu's PaddlePaddle framework. Its core capability is extracting the text in images, PDFs, and other documents into machine-readable structured data, letting software genuinely "read" mixed text and graphics.

Faced with huge volumes of paper or electronic documents, PaddleOCR removes the inefficiency of manual transcription and the high cost of digitization. In AI systems in particular, it acts as a bridge between images and large language models (LLMs), turning visual information directly into text input and enabling applications such as intelligent Q&A and document analysis.

PaddleOCR suits developers, algorithm researchers, and ordinary users with document-automation needs. Its technical advantages are clear: it recognizes more than 100 languages, runs on Windows, Linux, macOS, and other systems, and adapts flexibly to CPU, GPU, NPU, and other hardware. As a lightweight project with an active community, it serves both quick integration and cutting-edge vision-language research, making it an excellent choice for text-recognition tasks.

**tesseract** (tesseract-ocr/tesseract) · ★ 73,286 · difficulty 2 · last commit 2026-04-03 · tags: development framework, image

Tesseract is a long-established and widely respected open-source optical character recognition (OCR) engine, originally developed at HP Labs, later maintained by Google, and now driven by contributors worldwide. Its core function is turning the text in images into editable, searchable data, solving the problem of extracting text from scans, photos, and PDF documents and serving as a foundational tool for digital archiving and information automation.

Technically, Tesseract has shown strong adaptability. Since version 4 it has included an OCR engine based on long short-term memory (LSTM) neural networks, which markedly improves line-recognition accuracy, while still supporting the legacy character-pattern engine for older needs. Tesseract natively supports UTF-8, recognizes more than 100 languages out of the box, and reads common image formats such as PNG, JPEG, and TIFF. For output it flexibly supports plain text, hOCR, PDF, TSV, and other formats, easing downstream processing.

Tesseract mainly targets developers, researchers, and enterprises building document-processing pipelines. Since it is a command-line tool and library (libtesseract) with no graphical user interface (GUI), it best suits technically proficient users who integrate it into automation scripts or applications; a short usage sketch follows.
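This hedged sketch drives Tesseract through the well-known `pytesseract` wrapper, with a rough PaddleOCR analogue in comments. It assumes the Tesseract binary and an English language pack are installed; `page.png` is a placeholder path.

```python
# Extract text from an image with Tesseract via the pytesseract wrapper.
from PIL import Image
import pytesseract

text = pytesseract.image_to_string(Image.open("page.png"), lang="eng")
print(text)

# Rough PaddleOCR analogue (classic 2.x-style API; check current docs):
# from paddleocr import PaddleOCR
# ocr = PaddleOCR(lang="en")
# for box, (txt, confidence) in ocr.ocr("page.png")[0]:
#     print(txt, confidence)
```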
href=\"https:\u002F\u002Fgithub.com\u002Fatfortes\u002FAwesome-Controllable-Diffusion\u002Fgraphs\u002Fcontributors\">\u003Cimg src=\"https:\u002F\u002Fimg.shields.io\u002Fgithub\u002Fcontributors\u002Fatfortes\u002FAwesome-Controllable-Diffusion?style=for-the-badge\" alt=\"Contributors\">\u003C\u002Fa>\n    \u003Ca href=\"https:\u002F\u002Fgithub.com\u002Fatfortes\u002FAwesome-Controllable-Diffusion\u002Fblob\u002Fmain\u002FREADME.md\">\u003Cimg src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FPapers-70-70?style=for-the-badge\" alt=\"Papers\">\u003C\u002Fa>\n  \u003Ca href=\"https:\u002F\u002Fgithub.com\u002Fatfortes\u002FAwesome-Controllable-Diffusion\u002Fblob\u002Fmain\u002FLICENSE\">\u003Cimg src=\"https:\u002F\u002Fimg.shields.io\u002Fgithub\u002Flicense\u002Fatfortes\u002FAwesome-Controllable-Diffusion?style=for-the-badge\" alt=\"MIT License\">\u003C\u002Fa>\n\u003C\u002Fdiv>\n\n\u003Ch1 align=\"center\">Awesome Controllable Diffusion\u003C\u002Fh1>\n\n\u003Cp align=\"center\">\n    \u003Cb> Papers and Resources on Adding Conditional Controls to Diffusion Models in the Era of AIGC.\u003C\u002Fb>\n\u003C\u002Fp>\n\n\u003Cdetails>\n  \u003Csummary>🗂️ Table of Contents\u003C\u002Fsummary>\n  \u003Col>\n    \u003Cli>\u003Ca href=\"#papers\">📝 Papers\u003C\u002Fa>\u003C\u002Fli>\n      \u003Cul>\n        \u003Cli>\u003Ca href=\"#2025\"> 2025\u003C\u002Fa>\u003C\u002Fli>\n        \u003Cli>\u003Ca href=\"#2024\"> 2024\u003C\u002Fa>\u003C\u002Fli>\n        \u003Cli>\u003Ca href=\"#2023\"> 2023\u003C\u002Fa>\u003C\u002Fli>\n      \u003C\u002Ful>\n    \u003Cli>\u003Ca href=\"#other-resources\">🔗 Other Resources\u003C\u002Fa>\u003C\u002Fli>\n    \u003Cli>\u003Ca href=\"#other-awesome-lists\">🌟 Other Awesome Lists\u003C\u002Fa>\u003C\u002Fli>\n    \u003Cli>\u003Ca href=\"#contributing\">✍️ Contributing\u003C\u002Fa>\u003C\u002Fli>\n  \u003C\u002Fol>\n\u003C\u002Fdetails>\n\n\n\n \n# \u003Ch1 id=\"papers\">📝 Papers\u003Ch1\u002F>\n\n## 2024\n\n1. **[IFAdapter: Instance Feature Control for Grounded Text-to-Image Generation.](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.08240)** 🔥 [[project](https:\u002F\u002Fifadapter.github.io\u002F)] [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.08240)] [[code](https:\u002F\u002Fgithub.com\u002FWUyinwei-hah\u002FIFAdapter)]\n\n    *Yinwei Wu, Xianpan Zhou, Bing Ma, Xuefeng Su, Kai Ma, Xinchao Wang.* Preprint 2024.\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FLayout-a50b5e?style=flat-square)\n\n   \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fatfortes_Awesome-Controllable-Diffusion_readme_7ae291ef47cb.png\" style=\"width:100%\">\n\n1. **[CSGO: Content-Style Composition in Text-to-Image Generation.](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.16766)** [[project](https:\u002F\u002Fcsgo-gen.github.io\u002F)] [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.16766)] [[code](https:\u002F\u002Fgithub.com\u002FinstantX-research\u002FCSGO)]\n\n    *Peng Xing, Haofan Wang, Yanpeng Sun, Qixun Wang, Xu Bai, Hao Ai, Renyuan Huang, Zechao Li.* Preprint 2024.\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FSubject--Driven-orange?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FStyle-ff0000?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FComposition-5218fa?style=flat-square)\n\n1. 
<a name="readme-top"></a>

<div align="center">
  <a href="https://github.com/atfortes/Awesome-Controllable-Diffusion/stargazers"><img src="https://img.shields.io/github/stars/atfortes/Awesome-Controllable-Diffusion?style=for-the-badge" alt="Stargazers"></a>
  <a href="https://github.com/atfortes/Awesome-Controllable-Diffusion/network/members"><img src="https://img.shields.io/github/forks/atfortes/Awesome-Controllable-Diffusion?style=for-the-badge" alt="Forks"></a>
  <a href="https://github.com/atfortes/Awesome-Controllable-Diffusion/graphs/contributors"><img src="https://img.shields.io/github/contributors/atfortes/Awesome-Controllable-Diffusion?style=for-the-badge" alt="Contributors"></a>
  <a href="https://github.com/atfortes/Awesome-Controllable-Diffusion/blob/main/README.md"><img src="https://img.shields.io/badge/Papers-70-70?style=for-the-badge" alt="Papers"></a>
  <a href="https://github.com/atfortes/Awesome-Controllable-Diffusion/blob/main/LICENSE"><img src="https://img.shields.io/github/license/atfortes/Awesome-Controllable-Diffusion?style=for-the-badge" alt="MIT License"></a>
</div>

<h1 align="center">Awesome Controllable Diffusion</h1>

<p align="center">
    <b>Papers and Resources on Adding Conditional Controls to Diffusion Models in the Era of AIGC.</b>
</p>

<details>
  <summary>🗂️ Table of Contents</summary>
  <ol>
    <li><a href="#papers">📝 Papers</a></li>
      <ul>
        <li><a href="#2025">2025</a></li>
        <li><a href="#2024">2024</a></li>
        <li><a href="#2023">2023</a></li>
      </ul>
    <li><a href="#other-resources">🔗 Other Resources</a></li>
    <li><a href="#other-awesome-lists">🌟 Other Awesome Lists</a></li>
    <li><a href="#contributing">✍️ Contributing</a></li>
  </ol>
</details>

<h1 id="papers">📝 Papers</h1>

## 2024

1. **[IFAdapter: Instance Feature Control for Grounded Text-to-Image Generation.](https://arxiv.org/abs/2409.08240)** 🔥 [[project](https://ifadapter.github.io/)] [[paper](https://arxiv.org/abs/2409.08240)] [[code](https://github.com/WUyinwei-hah/IFAdapter)]

    *Yinwei Wu, Xianpan Zhou, Bing Ma, Xuefeng Su, Kai Ma, Xinchao Wang.* Preprint 2024.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Layout-a50b5e?style=flat-square)

    <img src="https://oss.gittoolsai.com/images/atfortes_Awesome-Controllable-Diffusion_readme_7ae291ef47cb.png" style="width:100%">

1. **[CSGO: Content-Style Composition in Text-to-Image Generation.](https://arxiv.org/abs/2408.16766)** [[project](https://csgo-gen.github.io/)] [[paper](https://arxiv.org/abs/2408.16766)] [[code](https://github.com/instantX-research/CSGO)]

    *Peng Xing, Haofan Wang, Yanpeng Sun, Qixun Wang, Xu Bai, Hao Ai, Renyuan Huang, Zechao Li.* Preprint 2024.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Subject--Driven-orange?style=flat-square)![](https://img.shields.io/badge/Style-ff0000?style=flat-square)![](https://img.shields.io/badge/Composition-5218fa?style=flat-square)

1. **[Generative Photomontage.](https://arxiv.org/abs/2408.07116)** [[project](https://lseancs.github.io/generativephotomontage/)] [[paper](https://arxiv.org/abs/2408.07116)] [[code](https://github.com/lseancs/GenerativePhotomontage)]

    *Sean J. Liu, Nupur Kumari, Ariel Shamir, Jun-Yan Zhu.* Preprint 2024.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Subject--Driven-orange?style=flat-square)![](https://img.shields.io/badge/Layout-a50b5e?style=flat-square)![](https://img.shields.io/badge/Composition-5218fa?style=flat-square)

1. **[Sketch2Scene: Automatic Generation of Interactive 3D Game Scenes from User's Casual Sketches.](https://arxiv.org/abs/2408.04567)** [[project](https://xrvisionlabs.github.io/Sketch2Scene/)] [[paper](https://arxiv.org/abs/2408.04567)]

    *Yongzhi Xu, Yonhon Ng, Yifu Wang, Inkyu Sa, Yunfei Duan, Yang Li, Pan Ji, Hongdong Li.* Preprint 2024.

    ![](https://img.shields.io/badge/3D-3cb371?style=flat-square)![](https://img.shields.io/badge/Layout-a50b5e?style=flat-square)

1. **[IPAdapter-Instruct: Resolving Ambiguity in Image-based Conditioning using Instruct Prompts.](https://arxiv.org/abs/2408.03209)** [[project](https://unity-research.github.io/IP-Adapter-Instruct.github.io/)] [[paper](https://arxiv.org/abs/2408.03209)] [[code](https://github.com/unity-research/IP-Adapter-Instruct)]

    *Ciara Rowles, Shimon Vainer, Dante De Nigris, Slava Elizarov, Konstantin Kutsy, Simon Donné.* Preprint 2024.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Subject--Driven-orange?style=flat-square)![](https://img.shields.io/badge/Style-ff0000?style=flat-square)

1. **[ViPer: Visual Personalization of Generative Models via Individual Preference Learning.](https://arxiv.org/abs/2407.17365)** [[project](https://viper.epfl.ch/)] [[paper](https://arxiv.org/abs/2407.17365)] [[code](https://github.com/EPFL-VILAB/ViPer)]

    *Sogand Salehi, Mahdi Shafiei, Teresa Yeo, Roman Bachmann, Amir Zamir.* ECCV'24.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Style-ff0000?style=flat-square)

1. **[Training-free Composite Scene Generation for Layout-to-Image Synthesis.](https://arxiv.org/abs/2407.13609)** [[paper](https://arxiv.org/abs/2407.13609)] [[code](https://github.com/Papple-F/csg)]

    *Jiaqi Liu, Tao Huang, Chang Xu.* ECCV'24.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Layout-a50b5e?style=flat-square)![](https://img.shields.io/badge/Composition-5218fa?style=flat-square)
1. **[SEED-Story: Multimodal Long Story Generation with Large Language Model.](https://arxiv.org/abs/2407.08683)** [[paper](https://arxiv.org/abs/2407.08683)] [[code](https://github.com/TencentARC/SEED-Story)]

    *Shuai Yang, Yuying Ge, Yang Li, Yukang Chen, Yixiao Ge, Ying Shan, Yingcong Chen.* Preprint 2024.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Consistency-ff69b4?style=flat-square)

1. **[Sketch-Guided Scene Image Generation.](https://arxiv.org/abs/2407.06469)** [[paper](https://arxiv.org/abs/2407.06469)]

    *Tianyu Zhang, Xiaoxuan Xie, Xusheng Du, Haoran Xie.* Preprint 2024.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Layout-a50b5e?style=flat-square)

1. **[Instant 3D Human Avatar Generation using Image Diffusion Models.](https://arxiv.org/abs/2406.07516)** [[project](https://www.nikoskolot.com/avatarpopup/)] [[paper](https://arxiv.org/abs/2406.07516)]

    *Nikos Kolotouros, Thiemo Alldieck, Enric Corona, Eduard Gabriel Bazavan, Cristian Sminchisescu.* ECCV'24.

    ![](https://img.shields.io/badge/3D-3cb371?style=flat-square)![](https://img.shields.io/badge/Subject--Driven-orange?style=flat-square)

1. **[Ctrl-X: Controlling Structure and Appearance for Text-To-Image Generation Without Guidance.](https://arxiv.org/abs/2406.07540)** 🔥 [[project](https://genforce.github.io/ctrl-x/)] [[paper](https://arxiv.org/abs/2406.07540)] [[code](https://github.com/genforce/ctrl-x)]

    *Kuan Heng Lin, Sicheng Mo, Ben Klingher, Fangzhou Mu, Bolei Zhou.* Preprint 2024.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Layout-a50b5e?style=flat-square)![](https://img.shields.io/badge/Subject--Driven-orange?style=flat-square)

    <img src="https://oss.gittoolsai.com/images/atfortes_Awesome-Controllable-Diffusion_readme_26d4376b7b4c.png" style="width:100%">

1. **[Zero-Painter: Training-Free Layout Control for Text-to-Image Synthesis.](https://arxiv.org/abs/2406.04032)** [[paper](https://arxiv.org/abs/2406.04032)] [[code](https://github.com/Picsart-AI-Research/Zero-Painter)]

    *Marianna Ohanyan, Hayk Manukyan, Zhangyang Wang, Shant Navasardyan, Humphrey Shi.* CVPR'24.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Layout-a50b5e?style=flat-square)
1. **[pOps: Photo-Inspired Diffusion Operators.](https://arxiv.org/abs/2406.01300)** 🔥 [[project](https://popspaper.github.io/pOps/)] [[paper](https://arxiv.org/abs/2406.01300)] [[code](https://github.com/pOpsPaper/pOps)]

    *Elad Richardson, Yuval Alaluf, Ali Mahdavi-Amiri, Daniel Cohen-Or.* Preprint 2024.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Subject--Driven-orange?style=flat-square)![](https://img.shields.io/badge/Composition-5218fa?style=flat-square)

    <img src="https://oss.gittoolsai.com/images/atfortes_Awesome-Controllable-Diffusion_readme_f3fa4a461e4b.png" style="width:100%">

1. **[RB-Modulation: Training-Free Personalization of Diffusion Models using Stochastic Optimal Control.](https://arxiv.org/abs/2405.17401)** 🔥 [[project](https://rb-modulation.github.io/)] [[paper](https://arxiv.org/abs/2405.17401)] [[code](https://github.com/google/RB-Modulation)]

    *Litu Rout, Yujia Chen, Nataniel Ruiz, Abhishek Kumar, Constantine Caramanis, Sanjay Shakkottai, Wen-Sheng Chu.* Preprint 2024.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Subject--Driven-orange?style=flat-square)![](https://img.shields.io/badge/Style-ff0000?style=flat-square)![](https://img.shields.io/badge/Composition-5218fa?style=flat-square)

    <img src="https://oss.gittoolsai.com/images/atfortes_Awesome-Controllable-Diffusion_readme_a6c3a2e9b9a2.png" style="width:100%">

1. **[FreeCustom: Tuning-Free Customized Image Generation for Multi-Concept Composition.](https://arxiv.org/abs/2405.13870)** [[project](https://aim-uofa.github.io/FreeCustom/)] [[paper](https://arxiv.org/abs/2405.13870)] [[code](https://github.com/aim-uofa/FreeCustom)]

    *Ganggui Ding, Canyu Zhao, Wen Wang, Zhen Yang, Zide Liu, Hao Chen, Chunhua Shen.* CVPR'24.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Subject--Driven-orange?style=flat-square)![](https://img.shields.io/badge/Composition-5218fa?style=flat-square)

1. **[Personalized Residuals for Concept-Driven Text-to-Image Generation.](https://arxiv.org/abs/2405.12978)** [[project](https://cusuh.github.io/personalized-residuals/)] [[paper](https://arxiv.org/abs/2405.12978)]

    *Cusuh Ham, Matthew Fisher, James Hays, Nicholas Kolkin, Yuchen Liu, Richard Zhang, Tobias Hinz.* CVPR'24.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Subject--Driven-orange?style=flat-square)
1. **[Compositional Text-to-Image Generation with Dense Blob Representations.](https://arxiv.org/abs/2405.08246)** 🔥 [[project](https://blobgen-2d.github.io/)] [[paper](https://arxiv.org/abs/2405.08246)]

    *Weili Nie, Sifei Liu, Morteza Mardani, Chao Liu, Benjamin Eckart, Arash Vahdat.* ICML'24.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Layout-a50b5e?style=flat-square)![](https://img.shields.io/badge/Composition-5218fa?style=flat-square)

    <img src="https://oss.gittoolsai.com/images/atfortes_Awesome-Controllable-Diffusion_readme_614a552877f6.png" style="width:100%">

1. **[Customizing Text-to-Image Models with a Single Image Pair.](https://arxiv.org/abs/2405.01536)** [[project](https://paircustomization.github.io/)] [[paper](https://arxiv.org/abs/2405.01536)] [[code](https://github.com/PairCustomization/PairCustomization)]

    *Maxwell Jones, Sheng-Yu Wang, Nupur Kumari, David Bau, Jun-Yan Zhu.* Preprint 2024.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Subject--Driven-orange?style=flat-square)

1. **[StoryDiffusion: Consistent Self-Attention for Long-Range Image and Video Generation.](https://arxiv.org/abs/2405.01434)** [[paper](https://arxiv.org/abs/2405.01434)]

    *Yupeng Zhou, Daquan Zhou, Ming-Ming Cheng, Jiashi Feng, Qibin Hou.* Preprint 2024.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Video-9370db?style=flat-square)![](https://img.shields.io/badge/Consistency-ff69b4?style=flat-square)

1. **[InstantFamily: Masked Attention for Zero-shot Multi-ID Image Generation.](https://arxiv.org/abs/2404.19427)** [[paper](https://arxiv.org/abs/2404.19427)]

    *Chanran Kim, Jeongin Lee, Shichang Joung, Bongmo Kim, Yeul-Min Baek.* Preprint 2024.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Subject--Driven-orange?style=flat-square)![](https://img.shields.io/badge/Composition-5218fa?style=flat-square)

1. **[PuLID: Pure and Lightning ID Customization via Contrastive Alignment.](https://arxiv.org/abs/2404.16022)** [[paper](https://arxiv.org/abs/2404.16022)] [[code](https://github.com/ToTheBeginning/PuLID)]

    *Zinan Guo, Yanze Wu, Zhuowei Chen, Lang Chen, Qian He.* Tech Report 2024.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Subject--Driven-orange?style=flat-square)
1. **[MultiBooth: Towards Generating All Your Concepts in an Image from Text.](https://arxiv.org/abs/2404.14239)** [[project](https://multibooth.github.io/)] [[paper](https://arxiv.org/abs/2404.14239)] [[code](https://github.com/chenyangzhu1/MultiBooth)]

    *Chenyang Zhu, Kai Li, Yue Ma, Chunming He, Li Xiu.* Preprint 2024.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Subject--Driven-orange?style=flat-square)![](https://img.shields.io/badge/Composition-5218fa?style=flat-square)

1. **[StyleBooth: Image Style Editing with Multimodal Instruction.](https://arxiv.org/abs/2404.12154)** [[project](https://ali-vilab.github.io/stylebooth-page/)] [[paper](https://arxiv.org/abs/2404.12154)] [[code](https://github.com/modelscope/scepter)]

    *Zhen Han, Chaojie Mao, Zeyinzi Jiang, Yulin Pan, Jingfeng Zhang.* Preprint 2024.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Style-ff0000?style=flat-square)

1. **[MoMA: Multimodal LLM Adapter for Fast Personalized Image Generation.](https://arxiv.org/abs/2404.05674)** 🔥 [[project](https://moma-adapter.github.io/)] [[paper](https://arxiv.org/abs/2404.05674)] [[code](https://github.com/bytedance/MoMA/tree/main)]

    *Kunpeng Song, Yizhe Zhu, Bingchen Liu, Qing Yan, Ahmed Elgammal, Xiao Yang.* ECCV'24.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Subject--Driven-orange?style=flat-square)

    <img src="https://oss.gittoolsai.com/images/atfortes_Awesome-Controllable-Diffusion_readme_0e58158c478b.png" style="width:100%">

1. **[Prompt Optimizer of Text-to-Image Diffusion Models for Abstract Concept Understanding.](https://arxiv.org/abs/2404.11589)** [[paper](https://arxiv.org/abs/2404.11589)]

    *Zezhong Fan, Xiaohan Li, Chenhao Fang, Topojoy Biswas, Kaushiki Nag, Jianpeng Xu, Kannan Achan.* WWW'24.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Concept--Understanding-8bbe1b?style=flat-square)

1. **[MoA: Mixture-of-Attention for Subject-Context Disentanglement in Personalized Image Generation.](https://arxiv.org/abs/2404.11565)** [[project](https://snap-research.github.io/mixture-of-attention/)] [[paper](https://arxiv.org/abs/2404.11565)] [[code](https://github.com/snap-research/mixture-of-attention)]

    *Kuan-Chieh Wang, Daniil Ostashev, Yuwei Fang, Sergey Tulyakov, Kfir Aberman.* Preprint 2024.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Subject--Driven-orange?style=flat-square)![](https://img.shields.io/badge/Composition-5218fa?style=flat-square)
1. **[MaxFusion: Plug&Play Multi-Modal Generation in Text-to-Image Diffusion Models.](https://arxiv.org/abs/2404.09977)** [[project](https://nithin-gk.github.io/maxfusion.github.io/)] [[paper](https://arxiv.org/abs/2404.09977)] [[code](https://github.com/Nithin-GK/MaxFusion)]

    *Nithin Gopalakrishnan Nair, Jeya Maria Jose Valanarasu, Vishal M Patel.* ECCV'24.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Composition-5218fa?style=flat-square)

1. **[Ctrl-Adapter: An Efficient and Versatile Framework for Adapting Diverse Controls to Any Diffusion Model.](https://arxiv.org/abs/2404.09967)** [[project](https://ctrl-adapter.github.io/)] [[paper](https://arxiv.org/abs/2404.09967)] [[code](https://github.com/HL-hanlin/Ctrl-Adapter)]

    *Han Lin, Jaemin Cho, Abhay Zala, Mohit Bansal.* Preprint 2024.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Video-9370db?style=flat-square)![](https://img.shields.io/badge/Layout-a50b5e?style=flat-square)

1. **[ControlNet++: Improving Conditional Controls with Efficient Consistency Feedback.](https://arxiv.org/abs/2404.07987)** [[project](https://liming-ai.github.io/ControlNet_Plus_Plus/)] [[paper](https://arxiv.org/abs/2404.07987)] [[code](https://github.com/liming-ai/ControlNet_Plus_Plus)]

    *Ming Li, Taojiannan Yang, Huafeng Kuang, Jie Wu, Zhaoning Wang, Xuefeng Xiao, Chen Chen.* ECCV'24.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Layout-a50b5e?style=flat-square)

1. **[Identity Decoupling for Multi-Subject Personalization of Text-to-Image Models.](https://arxiv.org/abs/2404.04243)** [[project](https://mudi-t2i.github.io/)] [[paper](https://arxiv.org/abs/2404.04243)]

    *Sangwon Jang, Jaehyeong Jo, Kimin Lee, Sung Ju Hwang.* Preprint 2024.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Subject--Driven-orange?style=flat-square)![](https://img.shields.io/badge/Composition-5218fa?style=flat-square)

1. **[Concept Weaver: Enabling Multi-Concept Fusion in Text-to-Image Models.](https://arxiv.org/abs/2404.03913)** [[paper](https://arxiv.org/abs/2404.03913)]

    *Gihyun Kwon, Simon Jenni, Dingzeyu Li, Joon-Young Lee, Jong Chul Ye, Fabian Caba Heilbron.* CVPR'24.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Subject--Driven-orange?style=flat-square)![](https://img.shields.io/badge/Composition-5218fa?style=flat-square)
1. **[FlashFace: Human Image Personalization with High-fidelity Identity Preservation.](https://arxiv.org/abs/2403.17008)** [[project](https://jshilong.github.io/flashface-page/)] [[paper](https://arxiv.org/abs/2403.17008)] [[code](https://github.com/ali-vilab/FlashFace)]

    *Shilong Zhang, Lianghua Huang, Xi Chen, Yifei Zhang, Zhi-Fan Wu, Yutong Feng, Wei Wang, Yujun Shen, Yu Liu, Ping Luo.* Preprint 2024.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Subject--Driven-orange?style=flat-square)

1. **[Be Yourself: Bounded Attention for Multi-Subject Text-to-Image Generation.](https://arxiv.org/abs/2403.16990)** [[project](https://omer11a.github.io/bounded-attention/)] [[paper](https://arxiv.org/abs/2403.16990)] [[code](https://github.com/omer11a/bounded-attention)]

    *Omer Dahary, Or Patashnik, Kfir Aberman, Daniel Cohen-Or.* ECCV'24.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Composition-5218fa?style=flat-square)

1. **[Continuous Subject-Specific Attribute Control in T2I Models by Identifying Semantic Directions.](https://arxiv.org/abs/2403.17064)** [[project](https://compvis.github.io/attribute-control/)] [[paper](https://arxiv.org/abs/2403.17064)] [[code](https://github.com/CompVis/attribute-control)]

    *Stefan Andreas Baumann, Felix Krause, Michael Neumayr, Nick Stracke, Vincent Tao Hu, Björn Ommer.* Preprint 2024.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Subject--Driven-orange?style=flat-square)![](https://img.shields.io/badge/Appearance-b78727?style=flat-square)

1. **[Make-Your-3D: Fast and Consistent Subject-Driven 3D Content Generation.](https://arxiv.org/abs/2403.09625)** [[project](https://liuff19.github.io/Make-Your-3D/)] [[paper](https://arxiv.org/abs/2403.09625)] [[code](https://github.com/liuff19/Make-Your-3D)]

    *Fangfu Liu, Hanyang Wang, Weiliang Chen, Haowen Sun, Yueqi Duan.* ECCV'24.

    ![](https://img.shields.io/badge/3D-3cb371?style=flat-square)![](https://img.shields.io/badge/Subject--Driven-orange?style=flat-square)

1. **[FeedFace: Efficient Inference-based Face Personalization via Diffusion Models.](https://openreview.net/forum?id=PqPKBcamy3)** [[paper](https://openreview.net/forum?id=PqPKBcamy3)] [[code](https://github.com/Xiang-cd/FeedFace)]

    *Chendong Xiang, Armando Fortes, Khang Hui Chua, Hang Su, Jun Zhu.* Tiny Papers @ ICLR'24.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Subject--Driven-orange?style=flat-square)
1. **[Multi-LoRA Composition for Image Generation.](https://arxiv.org/abs/2402.16843)** [[project](https://maszhongming.github.io/Multi-LoRA-Composition/)] [[paper](https://arxiv.org/abs/2402.16843)] [[code](https://github.com/maszhongming/Multi-LoRA-Composition)]

    *Ming Zhong, Yelong Shen, Shuohang Wang, Yadong Lu, Yizhu Jiao, Siru Ouyang, Donghan Yu, Jiawei Han, Weizhu Chen.* Preprint 2024.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Composition-5218fa?style=flat-square)

1. **[Gen4Gen: Generative Data Pipeline for Generative Multi-Concept Composition.](https://arxiv.org/abs/2402.15504)** [[project](https://danielchyeh.github.io/Gen4Gen/)] [[paper](https://arxiv.org/abs/2402.15504)] [[code](https://github.com/louisYen/Gen4Gen)]

    *Chun-Hsiao Yeh, Ta-Ying Cheng, He-Yen Hsieh, Chuan-En Lin, Yi Ma, Andrew Markham, Niki Trigoni, H.T. Kung, Yubei Chen.* Tech Report 2024.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Subject--Driven-orange?style=flat-square)![](https://img.shields.io/badge/Composition-5218fa?style=flat-square)

1. **[Visual Style Prompting with Swapping Self-Attention.](https://arxiv.org/abs/2402.12974)** [[project](https://curryjung.github.io/VisualStylePrompt/)] [[paper](https://arxiv.org/abs/2402.12974)] [[code](https://github.com/naver-ai/Visual-Style-Prompting)]

    *Jaeseok Jeong, Junho Kim, Yunjey Choi, Gayoung Lee, Youngjung Uh.* Preprint 2024.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Style-ff0000?style=flat-square)

1. **[RealCompo: Dynamic Equilibrium between Realism and Composition Improves Text-to-Image Diffusion Models.](https://arxiv.org/abs/2402.12908)** [[paper](https://arxiv.org/abs/2402.12908)] [[code](https://github.com/YangLing0818/RealCompo)]

    *Xinchen Zhang, Ling Yang, Yaqi Cai, Zhaochen Yu, Jiake Xie, Ye Tian, Minkai Xu, Yong Tang, Yujiu Yang, Bin Cui.* Preprint 2024.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Composition-5218fa?style=flat-square)

1. **[Direct Consistency Optimization for Compositional Text-to-Image Personalization.](https://arxiv.org/abs/2402.12004)** [[project](https://dco-t2i.github.io/)] [[paper](https://arxiv.org/abs/2402.12004)] [[code](https://github.com/kyungmnlee/dco)]

    *Kyungmin Lee, Sangkyung Kwak, Kihyuk Sohn, Jinwoo Shin.* Preprint 2024.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Subject--Driven-orange?style=flat-square)![](https://img.shields.io/badge/Composition-5218fa?style=flat-square)
1. **[InstanceDiffusion: Instance-level Control for Image Generation.](https://arxiv.org/abs/2402.03290)** [[project](https://people.eecs.berkeley.edu/~xdwang/projects/InstDiff/)] [[paper](https://arxiv.org/abs/2402.03290)] [[code](https://github.com/frank-xwang/InstanceDiffusion)]

    *Xudong Wang, Trevor Darrell, Sai Saketh Rambhatla, Rohit Girdhar, Ishan Misra.* CVPR'24.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Layout-a50b5e?style=flat-square)

1. **[Training-Free Consistent Text-to-Image Generation.](https://arxiv.org/abs/2402.03286)** [[project](https://research.nvidia.com/labs/par/consistory/)] [[paper](https://arxiv.org/abs/2402.03286)]

    *Yoad Tewel, Omri Kaduri, Rinon Gal, Yoni Kasten, Lior Wolf, Gal Chechik, Yuval Atzmon.* SIGGRAPH'24.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Consistency-ff69b4?style=flat-square)

1. **[UNIMO-G: Unified Image Generation through Multimodal Conditional Diffusion.](https://arxiv.org/abs/2401.13388)** 🔥 [[project](https://unimo-ptm.github.io/)] [[paper](https://arxiv.org/abs/2401.13388)]

    *Wei Li, Xue Xu, Jiachen Liu, Xinyan Xiao.* ACL'24.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Subject--Driven-orange?style=flat-square)![](https://img.shields.io/badge/Composition-5218fa?style=flat-square)

    <img src="https://oss.gittoolsai.com/images/atfortes_Awesome-Controllable-Diffusion_readme_f7466ea624a0.png" style="width:100%">

1. **[Mastering Text-to-Image Diffusion: Recaptioning, Planning, and Generating with Multimodal LLMs.](https://arxiv.org/abs/2401.11708)** 🔥 [[paper](https://arxiv.org/abs/2401.11708)] [[code](https://github.com/YangLing0818/RPG-DiffusionMaster)]

    *Ling Yang, Zhaochen Yu, Chenlin Meng, Minkai Xu, Stefano Ermon, Bin Cui.* ICML'24.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Layout-a50b5e?style=flat-square)

    <img src="https://oss.gittoolsai.com/images/atfortes_Awesome-Controllable-Diffusion_readme_e838fc7b44a1.png" style="width:100%">

1. **[InstantID: Zero-shot Identity-Preserving Generation in Seconds.](https://arxiv.org/abs/2401.07519)** 🔥 [[project](https://github.com/InstantID/InstantID)] [[paper](https://arxiv.org/abs/2401.07519)] [[code](https://github.com/InstantID/InstantID)]

    *Qixun Wang, Xu Bai, Haofan Wang, Zekui Qin, Anthony Chen, Huaxia Li, Xu Tang, Yao Hu.* Tech Report 2024.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Subject--Driven-orange?style=flat-square)

    <img src="https://oss.gittoolsai.com/images/atfortes_Awesome-Controllable-Diffusion_readme_d6936de7f7a5.png" style="width:100%">
1. **[PALP: Prompt Aligned Personalization of Text-to-Image Models.](https://arxiv.org/abs/2401.06105)** [[project](https://prompt-aligned.github.io/)] [[paper](https://arxiv.org/abs/2401.06105)]

    *Qixun Wang, Xu Bai, Haofan Wang, Zekui Qin, Anthony Chen.* Preprint 2024.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Subject--Driven-orange?style=flat-square)

1. **[SCEdit: Efficient and Controllable Image Diffusion Generation via Skip Connection Editing.](https://arxiv.org/abs/2312.11392)** [[project](https://scedit.github.io/)] [[paper](https://arxiv.org/abs/2312.11392)] [[code](https://github.com/ali-vilab/SCEdit)]

    *Zeyinzi Jiang, Chaojie Mao, Yulin Pan, Zhen Han, Jingfeng Zhang.* CVPR'24.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Layout-a50b5e?style=flat-square)

1. **[PhotoMaker: Customizing Realistic Human Photos via Stacked ID Embedding.](https://arxiv.org/abs/2312.04461)** 🔥 [[project](https://photo-maker.github.io/)] [[paper](https://arxiv.org/abs/2312.04461)] [[code](https://github.com/TencentARC/PhotoMaker)]

    *Zhen Li, Mingdeng Cao, Xintao Wang, Zhongang Qi, Ming-Ming Cheng, Ying Shan.* CVPR'24.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Subject--Driven-orange?style=flat-square)

    <img src="https://oss.gittoolsai.com/images/atfortes_Awesome-Controllable-Diffusion_readme_704cfc3c9dd3.png" style="width:100%">

1. **[Context Diffusion: In-Context Aware Image Generation.](https://arxiv.org/abs/2312.03584)** [[project](https://ivonajdenkoska.github.io/contextdiffusion/main.html)] [[paper](https://arxiv.org/abs/2312.03584)]

    *Ivona Najdenkoska, Animesh Sinha, Abhimanyu Dubey, Dhruv Mahajan, Vignesh Ramanathan, Filip Radenovic.* ECCV'24.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Layout-a50b5e?style=flat-square)

1. **[Style Aligned Image Generation via Shared Attention.](https://arxiv.org/abs/2312.02133)** 🔥 [[project](https://style-aligned-gen.github.io/)] [[paper](https://arxiv.org/abs/2312.02133)] [[code](https://github.com/google/style-aligned/)]

    *Amir Hertz, Andrey Voynov, Shlomi Fruchter, Daniel Cohen-Or.* CVPR'24.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Style-ff0000?style=flat-square)

    <img src="https://oss.gittoolsai.com/images/atfortes_Awesome-Controllable-Diffusion_readme_e831890c4e04.png" style="width:100%">
1. **[Visual Anagrams: Generating Multi-View Optical Illusions with Diffusion Models.](https://arxiv.org/abs/2311.17919)** [[project](https://dangeng.github.io/visual_anagrams/)] [[paper](https://arxiv.org/abs/2311.17919)] [[code](https://github.com/dangeng/visual_anagrams)]

    *Daniel Geng, Inbum Park, Andrew Owens.* CVPR'24.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Subject--Driven-orange?style=flat-square)![](https://img.shields.io/badge/Illusion-3cb371?style=flat-square)

1. **[MagicPose: Realistic Human Poses and Facial Expressions Retargeting with Identity-aware Diffusion.](https://arxiv.org/abs/2311.12052)** [[project](https://boese0601.github.io/magicdance/)] [[paper](https://arxiv.org/abs/2311.12052)] [[code](https://github.com/Boese0601/MagicDance)]

    *Di Chang, Yichun Shi, Quankai Gao, Jessica Fu, Hongyi Xu, Guoxian Song, Qing Yan, Xiao Yang, Mohammad Soleymani.* ICML'24.

    ![](https://img.shields.io/badge/Video-9370db?style=flat-square)![](https://img.shields.io/badge/Layout-a50b5e?style=flat-square)

1. **[The Chosen One: Consistent Characters in Text-to-Image Diffusion Models.](https://arxiv.org/abs/2311.10093)** [[project](https://omriavrahami.com/the-chosen-one/)] [[paper](https://arxiv.org/abs/2311.10093)] [[code](https://github.com/ZichengDuan/TheChosenOne)]

    *Omri Avrahami, Amir Hertz, Yael Vinker, Moab Arar, Shlomi Fruchter, Ohad Fried, Daniel Cohen-Or, Dani Lischinski.* SIGGRAPH'24.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Consistency-ff69b4?style=flat-square)

1. **[Cross-Image Attention for Zero-Shot Appearance Transfer.](https://arxiv.org/abs/2311.03335)** [[project](https://garibida.github.io/cross-image-attention)] [[paper](https://arxiv.org/abs/2311.03335)] [[code](https://github.com/garibida/cross-image-attention)]

    *Yuval Alaluf, Daniel Garibi, Or Patashnik, Hadar Averbuch-Elor, Daniel Cohen-Or.* SIGGRAPH'24.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Appearance-b78727?style=flat-square)

1. **[Kosmos-G: Generating Images in Context with Multimodal Large Language Models.](https://arxiv.org/abs/2310.02992)** 🔥 [[project](https://xichenpan.github.io/kosmosg)] [[paper](https://arxiv.org/abs/2310.02992)] [[code](https://aka.ms/Kosmos-G)]

    *Xichen Pan, Li Dong, Shaohan Huang, Zhiliang Peng, Wenhu Chen, Furu Wei.* ICLR'24.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Subject--Driven-orange?style=flat-square)![](https://img.shields.io/badge/Composition-5218fa?style=flat-square)

    <img src="https://oss.gittoolsai.com/images/atfortes_Awesome-Controllable-Diffusion_readme_a3b0bc2d7f2c.png" style="width:100%">
1. **[InstantBooth: Personalized Text-to-Image Generation without Test-Time Finetuning.](https://arxiv.org/abs/2304.03411)** [[paper](https://arxiv.org/abs/2304.03411)]

    *Jing Shi, Wei Xiong, Zhe Lin, Hyun Joon Jung.* CVPR'24.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Subject--Driven-orange?style=flat-square)

## 2023

1. **[ZipLoRA: Any Subject in Any Style by Effectively Merging LoRAs.](https://arxiv.org/abs/2311.13600)** [[project](https://ziplora.github.io/)] [[paper](https://arxiv.org/abs/2311.13600)]

    *Viraj Shah, Nataniel Ruiz, Forrester Cole, Erika Lu, Svetlana Lazebnik, Yuanzhen Li, Varun Jampani.* Preprint 2023.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Subject--Driven-orange?style=flat-square)![](https://img.shields.io/badge/Style-ff0000?style=flat-square)

1. **[IP-Adapter: Text Compatible Image Prompt Adapter for Text-to-Image Diffusion Models.](https://arxiv.org/abs/2308.06721)** 🔥 [[project](https://ip-adapter.github.io/)] [[paper](https://arxiv.org/abs/2308.06721)]

    *Hu Ye, Jun Zhang, Sibo Liu, Xiao Han, Wei Yang.* Tech Report 2023.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Subject--Driven-orange?style=flat-square)

    <img src="https://oss.gittoolsai.com/images/atfortes_Awesome-Controllable-Diffusion_readme_45b0f77eb686.png" style="width:100%">

1. **[Zero-shot spatial layout conditioning for text-to-image diffusion models.](https://arxiv.org/abs/2306.13754)**

    *Guillaume Couairon, Marlène Careil, Matthieu Cord, Stéphane Lathuilière, Jakob Verbeek.* ICCV'23.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Layout-a50b5e?style=flat-square)

1. **[Controlling Text-to-Image Diffusion by Orthogonal Finetuning.](https://arxiv.org/abs/2306.07280)** [[project](https://oft.wyliu.com/)] [[paper](https://arxiv.org/abs/2306.07280)] [[code](https://github.com/Zeju1997/oft)]

    *Zeju Qiu, Weiyang Liu, Haiwen Feng, Yuxuan Xue, Yao Feng, Zhen Liu, Dan Zhang, Adrian Weller, Bernhard Schölkopf.* NeurIPS'23.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Layout-a50b5e?style=flat-square)![](https://img.shields.io/badge/Subject--Driven-orange?style=flat-square)

1. **[Face0: Instantaneously Conditioning a Text-to-Image Model on a Face.](https://arxiv.org/abs/2306.06638)** [[paper](https://arxiv.org/abs/2306.06638)]

    *Dani Valevski, Danny Wasserman, Yossi Matias, Yaniv Leviathan.* SIGGRAPH Asia'23.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Subject--Driven-orange?style=flat-square)
1. **[StyleDrop: Text-to-Image Generation in Any Style.](https://arxiv.org/abs/2306.00983)** 🔥 [[project](https://styledrop.github.io/)] [[paper](https://arxiv.org/abs/2306.00983)]

    *Kihyuk Sohn, Nataniel Ruiz, Kimin Lee, Daniel Castro Chin, Irina Blok, Huiwen Chang, Jarred Barber, Lu Jiang, Glenn Entis, Yuanzhen Li, Yuan Hao, Irfan Essa, Michael Rubinstein, Dilip Krishnan.* NeurIPS'23.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Style-ff0000?style=flat-square)

    <img src="https://oss.gittoolsai.com/images/atfortes_Awesome-Controllable-Diffusion_readme_436c5e62b42e.png" style="width:100%">

1. **[BLIP-Diffusion: Pre-trained Subject Representation for Controllable Text-to-Image Generation and Editing.](https://arxiv.org/abs/2305.14720)** 🔥 [[project](https://dxli94.github.io/BLIP-Diffusion-website/)] [[paper](https://arxiv.org/abs/2305.14720)] [[code](https://github.com/salesforce/LAVIS/tree/main/projects/blip-diffusion)]

    *Dongxu Li, Junnan Li, Steven C.H. Hoi.* NeurIPS'23.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Subject--Driven-orange?style=flat-square)

    <img src="https://oss.gittoolsai.com/images/atfortes_Awesome-Controllable-Diffusion_readme_8a49018b5661.png" style="width:100%">

1. **[Subject-driven Text-to-Image Generation via Apprenticeship Learning.](https://arxiv.org/abs/2304.00186)** [[paper](https://arxiv.org/abs/2304.00186)]

    *Wenhu Chen, Hexiang Hu, Yandong Li, Nataniel Ruiz, Xuhui Jia, Ming-Wei Chang, William W. Cohen.* NeurIPS'23.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Subject--Driven-orange?style=flat-square)

1. **[T2I-Adapter: Learning Adapters to Dig out More Controllable Ability for Text-to-Image Diffusion Models.](https://arxiv.org/abs/2302.08453)** 🔥 [[paper](https://arxiv.org/abs/2302.08453)] [[code](https://github.com/TencentARC/T2I-Adapter)]

    *Chong Mou, Xintao Wang, Liangbin Xie, Yanze Wu, Jian Zhang, Zhongang Qi, Ying Shan, Xiaohu Qie.* Tech Report 2023.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Layout-a50b5e?style=flat-square)

    <img src="https://oss.gittoolsai.com/images/atfortes_Awesome-Controllable-Diffusion_readme_080d5c19756c.png" style="width:100%">
1. **[Adding Conditional Control to Text-to-Image Diffusion Models.](https://arxiv.org/abs/2302.05543)** 🔥 [[paper](https://arxiv.org/abs/2302.05543)] [[code](https://github.com/lllyasviel/ControlNet)]

    *Lvmin Zhang, Anyi Rao, Maneesh Agrawala.* ICCV'23.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Layout-a50b5e?style=flat-square)

    <img src="https://oss.gittoolsai.com/images/atfortes_Awesome-Controllable-Diffusion_readme_53c620896888.png" style="width:100%">

1. **[GLIGEN: Open-Set Grounded Text-to-Image Generation.](https://arxiv.org/abs/2301.07093)** 🔥 [[project](https://gligen.github.io/)] [[paper](https://arxiv.org/abs/2301.07093)] [[code](https://github.com/gligen/GLIGEN)]

    *Yuheng Li, Haotian Liu, Qingyang Wu, Fangzhou Mu, Jianwei Yang, Jianfeng Gao, Chunyuan Li, Yong Jae Lee.* CVPR'23.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Layout-a50b5e?style=flat-square)

1. **[Multi-Concept Customization of Text-to-Image Diffusion.](https://arxiv.org/abs/2212.04488)** [[project](https://www.cs.cmu.edu/~custom-diffusion/)] [[paper](https://arxiv.org/abs/2212.04488)] [[code](https://github.com/adobe-research/custom-diffusion)]

    *Nupur Kumari, Bingliang Zhang, Richard Zhang, Eli Shechtman, Jun-Yan Zhu.* CVPR'23.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Subject--Driven-orange?style=flat-square)![](https://img.shields.io/badge/Composition-5218fa?style=flat-square)

1. **[DreamBooth: Fine Tuning Text-to-Image Diffusion Models for Subject-Driven Generation.](https://arxiv.org/abs/2208.12242)** 🔥 [[project](https://dreambooth.github.io/)] [[paper](https://arxiv.org/abs/2208.12242)]

    *Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, Kfir Aberman.* CVPR'23.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Subject--Driven-orange?style=flat-square)

    <img src="https://oss.gittoolsai.com/images/atfortes_Awesome-Controllable-Diffusion_readme_5e3dc1920671.png" style="width:100%">

<p align="right" style="font-size: 14px; color: #555; margin-top: 20px;">
    <a href="#readme-top" style="text-decoration: none; color: #007bff; font-weight: bold;">
        ↑ Back to Top ↑
    </a>
</p>

<h1 id="other-resources">🔗 Other Resources</h1>

1. **[Regional Prompter](https://github.com/hako-mikan/sd-webui-regional-prompter)** Set a prompt for each divided region.

<p align="right" style="font-size: 14px; color: #555; margin-top: 20px;">
    <a href="#readme-top" style="text-decoration: none; color: #007bff; font-weight: bold;">
        ↑ Back to Top ↑
    </a>
</p>

<h1 id="other-awesome-lists">🌟 Other Awesome Lists</h1>
1. **[Awesome-LLM-Reasoning](https://github.com/atfortes/Awesome-LLM-Reasoning)** Collection of papers and resources on Reasoning in Large Language Models.

1. **[Awesome-Controllable-T2I-Diffusion-Models](https://github.com/PRIV-Creation/Awesome-Controllable-T2I-Diffusion-Models)** A collection of resources on controllable generation with text-to-image diffusion models.

<p align="right" style="font-size: 14px; color: #555; margin-top: 20px;">
    <a href="#readme-top" style="text-decoration: none; color: #007bff; font-weight: bold;">
        ↑ Back to Top ↑
    </a>
</p>

<h1 id="contributing">✍️ Contributing</h1>

- Add a new paper or update an existing paper, thinking about which category the work should belong to.
- Use the same format as existing entries to describe the work (a sample entry is shown below).
- Add the abstract link of the paper (`/abs/` format if it is an arXiv publication).

**Don't worry if you do something wrong, it will be fixed for you!**
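For reference, this is the exact format of an existing entry (the ControlNet entry from the 2023 section):

```markdown
1. **[Adding Conditional Control to Text-to-Image Diffusion Models.](https://arxiv.org/abs/2302.05543)** 🔥 [[paper](https://arxiv.org/abs/2302.05543)] [[code](https://github.com/lllyasviel/ControlNet)]

    *Lvmin Zhang, Anyi Rao, Maneesh Agrawala.* ICCV'23.

    ![](https://img.shields.io/badge/Image-blue?style=flat-square)![](https://img.shields.io/badge/Layout-a50b5e?style=flat-square)
```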
\u003Cli>\u003Ca href=\"#2024\"> 2024 年\u003C\u002Fa>\u003C\u002Fli>\n        \u003Cli>\u003Ca href=\"#2023\"> 2023 年\u003C\u002Fa>\u003C\u002Fli>\n      \u003C\u002Ful>\n    \u003Cli>\u003Ca href=\"#other-resources\">🔗 其他资源\u003C\u002Fa>\u003C\u002Fli>\n    \u003Cli>\u003Ca href=\"#other-awesome-lists\">🌟 其他精彩列表\u003C\u002Fa>\u003C\u002Fli>\n    \u003Cli>\u003Ca href=\"#contributing\">✍️ 贡献\u003C\u002Fa>\u003C\u002Fli>\n  \u003C\u002Fol>\n\u003C\u002Fdetails>\n\n\u003Ch1 id=\"papers\">📝 论文\u003C\u002Fh1>\n\n## 2024 年\n\n1. **[IFAdapter：用于接地文本到图像生成的实例特征控制。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.08240)** 🔥 [[项目](https:\u002F\u002Fifadapter.github.io\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.08240)] [[代码](https:\u002F\u002Fgithub.com\u002FWUyinwei-hah\u002FIFAdapter)]\n\n    *吴银伟、周先潘、马冰、苏雪峰、马凯、王新超。* 预印本 2024 年。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F图像-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F布局-a50b5e?style=flat-square)\n\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fatfortes_Awesome-Controllable-Diffusion_readme_7ae291ef47cb.png\" style=\"width:100%\">\n\n1. **[CSGO：文本到图像生成中的内容-风格组合。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.16766)** [[项目](https:\u002F\u002Fcsgo-gen.github.io\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.16766)] [[代码](https:\u002F\u002Fgithub.com\u002FinstantX-research\u002FCSGO)]\n\n    *邢鹏、王浩凡、孙延鹏、王奇勋、白旭、艾浩、黄仁远、李泽超。* 预印本 2024 年。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F图像-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F主题驱动-orange?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F风格-ff0000?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F组合-5218fa?style=flat-square)\n\n1. **[生成式照片蒙太奇。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.07116)** [[项目](https:\u002F\u002Flseancs.github.io\u002Fgenerativephotomontage\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.07116)] [[代码](https:\u002F\u002Fgithub.com\u002Flseancs\u002FGenerativePhotomontage)]\n\n    *肖恩·J·刘、努普尔·库玛丽、阿里埃尔·沙米尔、朱俊彦。* 预印本 2024 年。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F图像-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F主题驱动-orange?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F布局-a50b5e?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F组合-5218fa?style=flat-square)\n\n1. **[Sketch2Scene：根据用户随意草图自动生成交互式 3D 游戏场景。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.04567)** [[项目](https:\u002F\u002Fxrvisionlabs.github.io\u002FSketch2Scene\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.04567)]\n\n    *徐永志、吴勇汉、王义夫、萨英奎、段云飞、李阳、季攀、李洪东。* 预印本 2024 年。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F3D-3cb371?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F布局-a50b5e?style=flat-square)\n\n1. 
**[IPAdapter-Instruct：利用指令提示解决基于图像的条件模糊性。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.03209)** [[项目](https:\u002F\u002Funity-research.github.io\u002FIP-Adapter-Instruct.github.io\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.03209)] [[代码](https:\u002F\u002Fgithub.com\u002Funity-research\u002FIP-Adapter-Instruct)]\n\n    *西阿拉·罗尔斯、希蒙·韦纳、丹特·德尼格里斯、斯拉瓦·埃利扎罗夫、康斯坦丁·库茨伊、西蒙·多内。* 预印本 2024 年。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F图像-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F主题驱动-orange?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F风格-ff0000?style=flat-square)\n\n1. **[ViPer：通过个体偏好学习对生成模型进行视觉个性化。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.17365)** [[项目](https:\u002F\u002Fviper.epfl.ch\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.17365)] [[代码](https:\u002F\u002Fgithub.com\u002FEPFL-VILAB\u002FViPer)]\n\n    *索甘德·萨莱希、马赫迪·沙菲伊、特蕾莎·叶欧、罗曼·巴赫曼、阿米尔·扎米尔。* ECCV'24。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F图像-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F风格-ff0000?style=flat-square)\n\n1. **[无需训练的复合场景生成用于布局到图像合成。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.13609)** [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.13609)] [[代码](https:\u002F\u002Fgithub.com\u002FPapple-F\u002Fcsg)]\n\n    *刘佳琪、黄涛、许昌。* ECCV'24。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F图像-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F布局-a50b5e?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F组合-5218fa?style=flat-square)\n\n1. **[SEED-Story：使用大型语言模型进行多模态长篇故事生成。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.08683)** [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.08683)] [[代码](https:\u002F\u002Fgithub.com\u002FTencentARC\u002FSEED-Story)]\n\n    *杨帅、葛宇莹、李阳、陈宇康、葛一肖、山英、陈英聪。* 预印本 2024 年。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F图像-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F一致性-ff69b4?style=flat-square)\n\n1. **[草图引导的场景图像生成。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.06469)** [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.06469)]\n\n    *张天宇、谢晓轩、杜旭升、谢浩然。* 预印本 2024 年。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F图像-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F布局-a50b5e?style=flat-square)\n\n1. **[使用图像扩散模型即时生成 3D 人体化身。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.07516)** [[项目](https:\u002F\u002Fwww.nikoskolot.com\u002Favatarpopup\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.07516)]\n\n    *尼科斯·科洛托罗斯、蒂莫·阿尔迪克、恩里克·科罗纳、爱德华·加布里埃尔·巴扎万、克里斯蒂安·斯明奇塞斯库。* ECCV'24。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F3D-3cb371?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F主题驱动-orange?style=flat-square)\n\n1. 
**[Ctrl-X：无需指导的文本到图像生成中的结构与外观控制。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.07540)** 🔥 [[项目](https:\u002F\u002Fgenforce.github.io\u002Fctrl-x\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.07540)] [[代码](https:\u002F\u002Fgithub.com\u002Fgenforce\u002Fctrl-x)]\n\n    *林宽衡、莫思成、本·克林格、穆方舟、周博磊。* 预印本 2024年。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FLayout-a50b5e?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FSubject–Driven-orange?style=flat-square)\n\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fatfortes_Awesome-Controllable-Diffusion_readme_26d4376b7b4c.png\" style=\"width:100%\">\n\n1. **[Zero-Painter：用于文本到图像合成的免训练布局控制。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.04032)** [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.04032)] [[代码](https:\u002F\u002Fgithub.com\u002FPicsart-AI-Research\u002FZero-Painter)]\n\n    *玛丽安娜·奥哈尼扬、海克·马努基扬、张阳王、桑特·纳瓦萨尔迪扬、亨弗莱·施伊。* CVPR'24。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FLayout-a50b5e?style=flat-square)\n\n1. **[pOps：受照片启发的扩散算子。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.01300)** 🔥 [[项目](https:\u002F\u002Fpopspaper.github.io\u002FpOps\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.01300)] [[代码](https:\u002F\u002Fgithub.com\u002FpOpsPaper\u002FpOps)]\n\n    *埃拉德·理查森、尤瓦尔·阿拉卢夫、阿里·马赫达维-阿米里、丹尼尔·科恩-奥尔。* 预印本 2024年。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FSubject–Driven-orange?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FComposition-5218fa?style=flat-square)\n\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fatfortes_Awesome-Controllable-Diffusion_readme_f3fa4a461e4b.png\" style=\"width:100%\">\n\n1. **[RB调制：利用随机最优控制对扩散模型进行免训练个性化。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.17401)** [[项目](https:\u002F\u002Frb-modulation.github.io\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.17401)] [[代码](https:\u002F\u002Fgithub.com\u002Fgoogle\u002FRB-Modulation)]\n\n    *利图·劳特、陈宇佳、纳塔尼尔·鲁伊斯、阿比舍克·库马尔、康斯坦丁·卡拉马尼斯、桑杰·沙科泰、朱文生。* 预印本 2024年。🔥\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FSubject–Driven-orange?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FStyle-ff0000?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FComposition-5218fa?style=flat-square)\n\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fatfortes_Awesome-Controllable-Diffusion_readme_a6c3a2e9b9a2.png\" style=\"width:100%\">\n\n1. **[FreeCustom：用于多概念组合的免调优定制化图像生成。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.13870)** [[项目](https:\u002F\u002Faim-uofa.github.io\u002FFreeCustom\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.13870)] [[代码](https:\u002F\u002Fgithub.com\u002Faim-uofa\u002FFreeCustom)]\n\n    *丁刚贵、赵灿宇、王文、杨振、刘子德、陈浩、沈春华。* CVPR'24。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FSubject–Driven-orange?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FComposition-5218fa?style=flat-square)\n\n1. 
**[面向概念驱动的文本到图像生成的个性化残差。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.12978)** [[项目](https:\u002F\u002Fcusuh.github.io\u002Fpersonalized-residuals\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.12978)]\n\n    *库苏·哈姆、马修·费舍尔、詹姆斯·海斯、尼古拉斯·科尔金、刘雨辰、理查德·张、托比亚斯·欣茨。* CVPR'24。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FSubject–Driven-orange?style=flat-square)\n\n1. **[基于密集块表示的组合式文本到图像生成。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.08246)** 🔥 [[项目](https:\u002F\u002Fblobgen-2d.github.io\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.08246)]\n\n    *聂伟力、刘思飞、莫特扎·马尔达尼、刘超、本杰明·埃卡特、阿拉什·瓦赫达特。* ICML'24。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FLayout-a50b5e?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FComposition-5218fa?style=flat-square)\n\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fatfortes_Awesome-Controllable-Diffusion_readme_614a552877f6.png\" style=\"width:100%\">\n\n1. **[仅用一对图像即可定制文本到图像模型。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.01536)** [[项目](https:\u002F\u002Fpaircustomization.github.io\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.01536)] [[代码](https:\u002F\u002Fgithub.com\u002FPairCustomization\u002FPairCustomization)]\n\n    *麦克斯韦尔·琼斯、王圣宇、努普尔·库玛丽、大卫·鲍、朱俊彦。* 预印本 2024年。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FSubject–Driven-orange?style=flat-square)\n\n1. **[StoryDiffusion：用于长序列图像和视频生成的一致性自注意力机制。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.01434)** [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.01434)]\n\n    *周宇鹏、周大泉、程明明、冯家世、侯启斌。* 预印本 2024年。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FVideo-9370db?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FConsistency-ff69b4?style=flat-square)\n\n1. **[InstantFamily：基于掩码注意力的零样本多身份图像生成。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.19427)** [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.19427)]\n\n    *金婵兰、李正仁、郑始昌、金奉模、白悦敏。* 预印本 2024年。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FSubject–Driven-orange?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FComposition-5218fa?style=flat-square)\n\n1. **[PuLID：通过对比对齐实现纯净且快速的身份定制。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.16022)** [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.16022)] [[代码](https:\u002F\u002Fgithub.com\u002FToTheBeginning\u002FPuLID)]\n\n    *郭子楠、吴言泽、陈卓伟、陈朗、何倩。* 技术报告 2024年。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FSubject–Driven-orange?style=flat-square)\n\n1. 
**[MultiBooth：从文本生成包含所有概念的图像。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.14239)** [[项目](https:\u002F\u002Fmultibooth.github.io\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.14239)] [[代码](https:\u002F\u002Fgithub.com\u002Fchenyangzhu1\u002FMultiBooth)]\n\n    *朱晨阳、李凯、马月、何春明、修丽。* 预印本 2024年。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FSubject–Driven-orange?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FComposition-5218fa?style=flat-square)\n\n1. **[StyleBooth：通过多模态指令编辑图像风格。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.12154)** [[项目](https:\u002F\u002Fali-vilab.github.io\u002Fstylebooth-page\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.12154)] [[代码](https:\u002F\u002Fgithub.com\u002Fmodelscope\u002Fscepter)]\n\n    *韩震、毛超杰、蒋泽音、潘玉琳、张景峰。* 预印本 2024年。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FStyle-ff0000?style=flat-square)\n\n1. **[MoMA：用于快速个性化图像生成的多模态LLM适配器。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.05674)** 🔥 [[项目](https:\u002F\u002Fmoma-adapter.github.io\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.05674)] [[代码](https:\u002F\u002Fgithub.com\u002Fbytedance\u002FMoMA\u002Ftree\u002Fmain)]\n\n    *Kunpeng Song, Yizhe Zhu, Bingchen Liu, Qing Yan, Ahmed Elgammal, Xiao Yang.* ECCV'24。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FSubject–Driven-orange?style=flat-square)\n\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fatfortes_Awesome-Controllable-Diffusion_readme_0e58158c478b.png\" style=\"width:100%\">\n\n1. **[用于抽象概念理解的文生图扩散模型提示优化器。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.11589)** [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.11589)]\n\n    *Zezhong Fan, Xiaohan Li, Chenhao Fang, Topojoy Biswas, Kaushiki Nag, Jianpeng Xu, Kannan Achan.* WWW'24。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FConcept–Understanding-8bbe1b?style=flat-square)\n\n1. **[MoA：用于个性化图像生成中主体与上下文解耦的注意力混合机制。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.11565)** [[项目](https:\u002F\u002Fsnap-research.github.io\u002Fmixture-of-attention\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.11565)] [[代码](https:\u002F\u002Fgithub.com\u002Fsnap-research\u002Fmixture-of-attention)]\n\n    *Kuan-Chieh Wang, Daniil Ostashev, Yuwei Fang, Sergey Tulyakov, Kfir Aberman.* 2024年预印本。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FSubject–Driven-orange?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FComposition-5218fa?style=flat-square)\n\n1. **[MaxFusion：文生图扩散模型中的即插即用多模态生成。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.09977)** [[项目](https:\u002F\u002Fnithin-gk.github.io\u002Fmaxfusion.github.io\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.09977)] [[代码](https:\u002F\u002Fgithub.com\u002FNithin-GK\u002FMaxFusion)]\n\n    *Nithin Gopalakrishnan Nair, Jeya Maria Jose Valanarasu, Vishal M Patel.* ECCV'24。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FComposition-5218fa?style=flat-square)\n\n1. 
**[Ctrl-Adapter：一种高效且通用的框架，可将多种控制方式适配到任何扩散模型。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.09967)** [[项目](https:\u002F\u002Fctrl-adapter.github.io\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.09967)] [[代码](https:\u002F\u002Fgithub.com\u002FHL-hanlin\u002FCtrl-Adapter)]\n\n    *Han Lin, Jaemin Cho, Abhay Zala, Mohit Bansal.* 2024年预印本。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FVideo-9370db?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FLayout-a50b5e?style=flat-square)\n\n1. **[ControlNet++：通过高效的连贯性反馈改进条件控制。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.07987)** [[项目](https:\u002F\u002Fliming-ai.github.io\u002FControlNet_Plus_Plus\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.07987)] [[代码](https:\u002F\u002Fgithub.com\u002Fliming-ai\u002FControlNet_Plus_Plus)]\n\n    *Ming Li, Taojiannan Yang, Huafeng Kuang, Jie Wu, Zhaoning Wang, Xuefeng Xiao, Chen Chen.* ECCV'24。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FLayout-a50b5e?style=flat-square)\n\n1. **[面向多主体个性化的文生图模型身份解耦技术。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.04243)** [[项目](https:\u002F\u002Fmudi-t2i.github.io\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.04243)]\n\n    *Sangwon Jang, Jaehyeong Jo, Kimin Lee, Sung Ju Hwang.* 2024年预印本。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FSubject–Driven-orange?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FComposition-5218fa?style=flat-square)\n\n1. **[Concept Weaver：实现文生图模型中的多概念融合。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.03913)** [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.03913)]\n\n    *Gihyun Kwon, Simon Jenni, Dingzeyu Li, Joon-Young Lee, Jong Chul Ye, Fabian Caba Heilbron.* CVPR'24。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FSubject–Driven-orange?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FComposition-5218fa?style=flat-square)\n\n1. **[FlashFace：高保真度身份保留的人像个性化生成。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.17008)** [[项目](https:\u002F\u002Fjshilong.github.io\u002Fflashface-page\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.17008)] [[代码](https:\u002F\u002Fgithub.com\u002Fali-vilab\u002FFlashFace)]\n\n    *Shilong Zhang, Lianghua Huang, Xi Chen, Yifei Zhang, Zhi-Fan Wu, Yutong Feng, Wei Wang, Yujun Shen, Yu Liu, Ping Luo.* 2024年预印本。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FSubject–Driven-orange?style=flat-square)\n\n1. **[做你自己：用于多主体文生图生成的有界注意力机制。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.16990)** [[项目](https:\u002F\u002Fomer11a.github.io\u002Fbounded-attention\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.16990)] [[代码](https:\u002F\u002Fgithub.com\u002Fomer11a\u002Fbounded-attention)]\n\n    *Omer Dahary, Or Patashnik, Kfir Aberman, Daniel Cohen-Or.* ECCV'24。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FComposition-5218fa?style=flat-square)\n\n1. 
**[通过识别语义方向实现T2I模型中的连续主体特异性属性控制。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.17064)** [[项目](https:\u002F\u002Fcompvis.github.io\u002Fattribute-control\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.17064)] [[代码](https:\u002F\u002Fgithub.com\u002FCompVis\u002Fattribute-control)]\n\n    *Stefan Andreas Baumann, Felix Krause, Michael Neumayr, Nick Stracke, Vincent Tao Hu, Björn Ommer.* 2024年预印本。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FSubject–Driven-orange?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FAppearance-b78727?style=flat-square)\n\n1. **[Make-Your-3D：快速且一致的主体驱动型3D内容生成。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.09625)** [[项目](https:\u002F\u002Fliuff19.github.io\u002FMake-Your-3D\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.09625)] [[代码](https:\u002F\u002Fgithub.com\u002Fliuff19\u002FMake-Your-3D)]\n\n    *Fangfu Liu, Hanyang Wang, Weiliang Chen, Haowen Sun, Yueqi Duan.* ECCV'24。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F3D-3cb371?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FSubject–Driven-orange?style=flat-square)\n\n1. **[FeedFace：基于扩散模型的高效推理式人脸个性化。](https:\u002F\u002Fopenreview.net\u002Fforum?id=PqPKBcamy3)** [[论文](https:\u002F\u002Fopenreview.net\u002Fforum?id=PqPKBcamy3)] [[代码](https:\u002F\u002Fgithub.com\u002FXiang-cd\u002FFeedFace)]\n\n    *Chendong Xiang, Armando Fortes, Khang Hui Chua, Hang Su, Jun Zhu.* Tiny Papers @ ICLR'24。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FSubject–Driven-orange?style=flat-square)\n\n1. **[用于图像生成的多LoRA组合。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.16843)** [[项目](https:\u002F\u002Fmaszhongming.github.io\u002FMulti-LoRA-Composition\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.16843)] [[代码](https:\u002F\u002Fgithub.com\u002Fmaszhongming\u002FMulti-LoRA-Composition)]\n\n    *钟明、沈烨龙、王硕航、陆亚东、焦一竹、欧阳思儒、于东汉、韩家伟、陈伟柱。* 预印本 2024年。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FComposition-5218fa?style=flat-square)\n\n1. **[Gen4Gen：面向生成式多概念组合的生成式数据流水线。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.15504)** [[项目](https:\u002F\u002Fdanielchyeh.github.io\u002FGen4Gen\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.15504)] [[代码](https:\u002F\u002Fgithub.com\u002FlouisYen\u002FGen4Gen)]\n\n    *叶春孝、程大英、谢荷妍、林传恩、马毅、安德鲁·马卡姆、尼基·特里戈尼、H.T. 康格、陈宇贝。* 技术报告 2024年。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FSubject--Driven-orange?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FComposition-5218fa?style=flat-square)\n\n1. **[通过交换式自注意力进行视觉风格提示。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.12974)** [[项目](https:\u002F\u002Fcurryjung.github.io\u002FVisualStylePrompt\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.12974)] [[代码](https:\u002F\u002Fgithub.com\u002Fnaver-ai\u002FVisual-Style-Prompting)]\n\n    *郑在锡、金俊浩、崔允洁、李佳莹、禹英中。* 预印本 2024年。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FStyle-ff0000?style=flat-square)\n\n1. 
**[RealCompo：现实感与构图之间的动态平衡提升文本到图像扩散模型。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.12908)** [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.12908)] [[代码](https:\u002F\u002Fgithub.com\u002FYangLing0818\u002FRealCompo)]\n\n    *张新晨、杨凌、蔡雅琪、俞兆辰、谢嘉克、田野、徐敏凯、唐勇、杨宇久、崔斌。* 预印本 2024年。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FComposition-5218fa?style=flat-square)\n\n1. **[用于组合型文本到图像个性化的直接一致性优化。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.12004)** [[项目](https:\u002F\u002Fdco-t2i.github.io\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.12004)] [[代码](https:\u002F\u002Fgithub.com\u002Fkyungmnlee\u002Fdco)]\n\n    *李京民、郭尚京、孙基赫、申振宇。* 预印本 2024年。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FSubject--Driven-orange?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FComposition-5218fa?style=flat-square)\n\n1. **[InstanceDiffusion：图像生成中的实例级控制。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.03290)** [[项目](https:\u002F\u002Fpeople.eecs.berkeley.edu\u002F~xdwang\u002Fprojects\u002FInstDiff\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.03290)] [[代码](https:\u002F\u002Fgithub.com\u002Ffrank-xwang\u002FInstanceDiffusion)]\n\n    *王旭东、特雷弗·达雷尔、赛·萨凯斯·兰巴特拉、罗希特·吉达尔、伊山·米斯拉。* CVPR'24。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FLayout-a50b5e?style=flat-square)\n\n1. **[无需训练的一致性文本到图像生成。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.03286)** [[项目](https:\u002F\u002Fresearch.nvidia.com\u002Flabs\u002Fpar\u002Fconsistory\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.03286)]\n\n    *约阿德·特韦尔、奥姆里·卡杜里、里农·加尔、约尼·卡斯滕、利奥尔·沃尔夫、加尔·切奇克、尤瓦尔·阿茨蒙。* SIGGRAPH'24。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FConsistency-ff69b4?style=flat-square)\n\n1. **[UNIMO-G：通过多模态条件扩散实现统一的图像生成。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.13388)** 🔥 [[项目](https:\u002F\u002Funimo-ptm.github.io\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.13388)]\n\n    *李伟、许雪、刘嘉晨、肖欣燕。* ACL'24。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FSubject--Driven-orange?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FComposition-5218fa?style=flat-square)\n\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fatfortes_Awesome-Controllable-Diffusion_readme_f7466ea624a0.png\" style=\"width:100%\">\n\n1. **[掌握文本到图像扩散：利用多模态LLM进行重新描述、规划和生成。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.11708)** 🔥 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.11708)] [[代码](https:\u002F\u002Fgithub.com\u002FYangLing0818\u002FRPG-DiffusionMaster)]\n\n    *杨凌、俞兆辰、孟晨琳、徐敏凯、斯特凡诺·埃尔蒙、崔斌。* ICML'24。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FLayout-a50b5e?style=flat-square)\n\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fatfortes_Awesome-Controllable-Diffusion_readme_e838fc7b44a1.png\" style=\"width:100%\">\n\n1. 
**[InstantID：零样本身份保留生成，几秒钟内完成。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.07519)** [[项目](https:\u002F\u002Fgithub.com\u002FInstantID\u002FInstantID)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.07519)] [[代码](https:\u002F\u002Fgithub.com\u002FInstantID\u002FInstantID)]\n\n    *王启勋、白旭、王浩帆、秦泽奎、安东尼·陈、李华夏、唐旭、胡耀。* 技术报告 2024年。🔥\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FSubject--Driven-orange?style=flat-square)\n\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fatfortes_Awesome-Controllable-Diffusion_readme_d6936de7f7a5.png\" style=\"width:100%\">\n\n1. **[PALP：文本到图像模型的提示对齐个性化。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.06105)** [[项目](https:\u002F\u002Fprompt-aligned.github.io\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.06105)]\n\n    *摩阿布·阿拉尔、安德烈·沃伊诺夫、阿米尔·赫兹、奥姆里·阿夫拉米、什洛米·弗鲁赫特、雅埃尔·普里奇、丹尼尔·科恩-奥尔、阿里埃尔·沙米尔。* 预印本 2024年。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FSubject--Driven-orange?style=flat-square)\n\n1. **[SCEdit：通过跳过连接编辑实现高效可控的图像扩散生成。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.11392)** [[项目](https:\u002F\u002Fscedit.github.io\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.11392)] [[代码](https:\u002F\u002Fgithub.com\u002Fali-vilab\u002FSCEdit)]\n\n    *蒋泽音、毛超杰、潘玉林、韩震、张景峰。* CVPR'24。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FLayout-a50b5e?style=flat-square)\n\n1. **[PhotoMaker：通过堆叠ID嵌入定制逼真的人像照片。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.04461)** [[项目](https:\u002F\u002Fphoto-maker.github.io\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.04461)] [[代码](https:\u002F\u002Fgithub.com\u002FTencentARC\u002FPhotoMaker)]\n\n    *李振、曹明登、王新涛、齐仲刚、程明明、单颖。* CVPR'24。🔥\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FSubject--Driven-orange?style=flat-square)\n\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fatfortes_Awesome-Controllable-Diffusion_readme_704cfc3c9dd3.png\" style=\"width:100%\">\n\n1. **[上下文扩散：上下文感知的图像生成。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.03584)** [[项目](https:\u002F\u002Fivonajdenkoska.github.io\u002Fcontextdiffusion\u002Fmain.html)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.03584)]\n\n    *伊沃娜·纳杰登科斯卡、阿尼梅什·辛哈、阿比曼纽·杜贝、德鲁夫·马哈詹、维格内什·拉马纳坦、菲利普·拉德诺维奇。* ECCV'24。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FLayout-a50b5e?style=flat-square)\n\n1. **[通过共享注意力实现风格对齐的图像生成。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.02133)** 🔥 [[项目](https:\u002F\u002Fstyle-aligned-gen.github.io\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.02133)] [[代码](https:\u002F\u002Fgithub.com\u002Fgoogle\u002Fstyle-aligned\u002F)]\n\n    *阿米尔·赫兹、安德烈·沃伊诺夫、什洛米·弗鲁赫特、丹尼尔·科恩-奥尔。* CVPR'24。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FStyle-ff0000?style=flat-square)\n\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fatfortes_Awesome-Controllable-Diffusion_readme_e831890c4e04.png\" style=\"width:100%\">\n\n1. 
**[视觉字谜：利用扩散模型生成多视角光学幻象。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.17919)** [[项目](https:\u002F\u002Fdangeng.github.io\u002Fvisual_anagrams\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.17919)] [[代码](https:\u002F\u002Fgithub.com\u002Fdangeng\u002Fvisual_anagrams)]\n\n    *丹尼尔·耿、朴仁范、安德鲁·欧文斯。* CVPR'24。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FSubject–Driven-orange?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FIllusion-3cb371?style=flat-square)\n\n1. **[MagicPose：基于身份感知扩散模型的真实人体姿态与面部表情重定向。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.12052)** [[项目](https:\u002F\u002Fboese0601.github.io\u002Fmagicdance\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.12052)] [[代码](https:\u002F\u002Fgithub.com\u002FBoese0601\u002FMagicDance)]\n\n    *迪昌、史一春、高权凯、杰西卡·傅、徐宏毅、宋国贤、严青、杨晓、穆罕默德·索莱曼尼。* ICML'24。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FVideo-9370db?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FLayout-a50b5e?style=flat-square)\n\n1. **[被选中的人：文本到图像扩散模型中的一致性角色。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.10093)** [[项目](https:\u002F\u002Fomriavrahami.com\u002Fthe-chosen-one\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.10093)] [[代码](https:\u002F\u002Fgithub.com\u002FZichengDuan\u002FTheChosenOne)]\n\n    *奥姆里·阿夫拉米、阿米尔·赫兹、雅埃尔·温克尔、摩阿布·阿拉尔、什洛米·弗鲁赫特、欧哈德·弗里德、丹尼尔·科恩-奥尔、丹尼·利希金斯基。* SIGGRAPH'24。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FConsistency-ff69b4?style=flat-square)\n\n1. **[用于零样本外观迁移的跨图像注意力。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.03335)** [[项目](https:\u002F\u002Fgaribida.github.io\u002Fcross-image-attention)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.03335)] [[代码](https:\u002F\u002Fgithub.com\u002Fgaribida\u002Fcross-image-attention)]\n\n    *尤瓦尔·阿拉卢夫、丹尼尔·加里比、奥尔·帕塔什尼克、哈达尔·阿韦尔布赫-埃洛尔、丹尼尔·科恩-奥尔。* SIGGRAPH'24。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FAppearance-b78727?style=flat-square)\n\n1. **[Kosmos-G：利用多模态大型语言模型在上下文中生成图像](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.02992)** 🔥 [[项目](https:\u002F\u002Fxichenpan.github.io\u002Fkosmosg)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.02992)] [[代码](https:\u002F\u002Faka.ms\u002FKosmos-G)]\n\n    *潘熙晨、董力、黄绍涵、彭志良、陈文虎、魏福儒。* ICLR'24。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FSubject–Driven-orange?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FComposition-5218fa?style=flat-square)\n\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fatfortes_Awesome-Controllable-Diffusion_readme_a3b0bc2d7f2c.png\" style=\"width:100%\">\n\n1. **[InstantBooth：无需测试时微调的个性化文本到图像生成。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.03411)**  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.03411)]\n\n    *石静、熊伟、林哲、郑贤俊。* CVPR'24。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FSubject–Driven-orange?style=flat-square)\n\n\n\n## 2023年\n\n1. 
**[ZipLoRA：通过有效合并LoRA实现任意主题、任意风格的生成。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.13600)** [[项目](https:\u002F\u002Fziplora.github.io\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.13600)]\n\n    *维拉吉·沙阿、纳塔尼尔·鲁伊斯、福雷斯特·科尔、艾丽卡·卢、斯维特兰娜·拉泽布尼克、李元振、瓦伦·詹帕尼。* 2023年预印本。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FSubject–Driven-orange?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FStyle-ff0000?style=flat-square)\n\n1. **[IP-Adapter：兼容文本的图像提示适配器，用于文本到图像扩散模型。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.06721)** 🔥 [[项目](https:\u002F\u002Fip-adapter.github.io\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.06721)] [[代码]()]\n\n    *胡叶、张军、刘思博、韩晓、杨伟。* 2023年技术报告。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FSubject–Driven-orange?style=flat-square)\n\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fatfortes_Awesome-Controllable-Diffusion_readme_45b0f77eb686.png\" style=\"width:100%\">\n\n1. **[文本到图像扩散模型的零样本空间布局条件化。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.13754)**\n\n    *纪尧姆·库瓦龙、玛琳·卡雷尔、马蒂厄·科尔德、斯蒂芬·拉图耶尔、雅各布·费尔贝克。* ICCV'23。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FLayout-a50b5e?style=flat-square)\n\n1. **[通过正交微调控制文本到图像扩散。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.07280)**  [[项目](https:\u002F\u002Foft.wyliu.com\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.07280)] [[代码](https:\u002F\u002Fgithub.com\u002FZeju1997\u002Foft)]\n\n    *邱泽宇、刘伟阳、冯海文、薛宇轩、冯瑶、刘震、张丹、阿德里安·韦勒、伯恩哈德·舍尔科普夫。* NeurIPS'23。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FLayout-a50b5e?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FSubject–Driven-orange?style=flat-square)\n\n1. **[Face0：将文本到图像模型瞬间条件化为一张人脸。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.06638)** [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.06638)]\n\n    *达尼·瓦列夫斯基、丹尼·瓦瑟曼、约西·马蒂亚斯、亚尼夫·列维坦。* SIGGRAPH Asia'23。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FSubject–Driven-orange?style=flat-square)\n\n1. **[StyleDrop：以任意风格生成文本到图像。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.00983)** 🔥 [[项目](https:\u002F\u002Fstyledrop.github.io\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.00983)]\n\n    *苏基赫、纳塔尼尔·鲁伊斯、金民李、丹尼尔·卡斯特罗·钦、伊琳娜·布洛克、常慧雯、贾瑞德·巴伯、江璐、格伦·恩蒂斯、李元振、袁浩、伊尔凡·埃萨、迈克尔·鲁宾斯坦、迪利普·克里希南。* NeurIPS'23。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FStyle-ff0000?style=flat-square)\n\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fatfortes_Awesome-Controllable-Diffusion_readme_436c5e62b42e.png\" style=\"width:100%\">\n\n1. 
**[BLIP-Diffusion：用于可控文本到图像生成和编辑的预训练主体表示。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.14720)** 🔥 [[项目](https:\u002F\u002Fdxli94.github.io\u002FBLIP-Diffusion-website\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.14720)] [[代码](https:\u002F\u002Fgithub.com\u002Fsalesforce\u002FLAVIS\u002Ftree\u002Fmain\u002Fprojects\u002Fblip-diffusion)]\n\n    *李东旭、李俊楠、史蒂文·C·H·霍伊.* NeurIPS'23。\n   \n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FSubject--Driven-orange?style=flat-square)\n\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fatfortes_Awesome-Controllable-Diffusion_readme_8a49018b5661.png\" style=\"width:100%\">\n\n1. **[基于学徒学习的主体驱动型文本到图像生成。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.00186)** [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.00186)]\n\n    *陈文虎、胡恒祥、李彦东、纳塔尼尔·鲁伊斯、贾旭辉、张明伟、威廉·W·科恩.* NeurIPS'23。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FSubject--Driven-orange?style=flat-square)\n\n1. **[T2I-Adapter：学习适配器以挖掘文本到图像扩散模型的更多可控能力。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.08453)** 🔥 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.08453)] [[代码](https:\u002F\u002Fgithub.com\u002FTencentARC\u002FT2I-Adapter)]\n\n    *牟冲、王新涛、谢良斌、吴延泽、张健、齐中刚、单颖、切晓虎.* 2023年技术报告。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FLayout-a50b5e?style=flat-square)\n\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fatfortes_Awesome-Controllable-Diffusion_readme_080d5c19756c.png\" style=\"width:100%\">\n\n1. **[向文本到图像扩散模型添加条件控制。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.05543)** 🔥 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.05543)] [[代码](https:\u002F\u002Fgithub.com\u002Flllyasviel\u002FControlNet)]\n\n    *张吕敏、饶安怡、马尼什·阿格拉瓦拉.* ICCV'23。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FLayout-a50b5e?style=flat-square)\n\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fatfortes_Awesome-Controllable-Diffusion_readme_53c620896888.png\" style=\"width:100%\">\n\n1. **[GLIGEN：开放集接地文本到图像生成。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.07093)** 🔥 [[项目](https:\u002F\u002Fgligen.github.io\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.07093)] [[代码](https:\u002F\u002Fgithub.com\u002Fgligen\u002FGLIGEN)]\n\n    *李宇恒、刘浩天、吴庆阳、穆方舟、杨建伟、高建峰、李春元、李永在.* CVPR'23。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FLayout-a50b5e?style=flat-square)\n\n1. **[文本到图像扩散的多概念定制。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.04488)** [[项目](https:\u002F\u002Fwww.cs.cmu.edu\u002F~custom-diffusion\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.04488)] [[代码](https:\u002F\u002Fgithub.com\u002Fadobe-research\u002Fcustom-diffusion)]\n\n    *努普尔·库玛丽、张炳亮、理查德·张、埃利·谢赫特曼、朱俊彦.* CVPR'23。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FSubject--Driven-orange?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FComposition-5218fa?style=flat-square)\n\n1. 
**[DreamBooth：针对主体驱动型生成对文本到图像扩散模型进行微调。](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.12242)** 🔥 [[项目](https:\u002F\u002Fdreambooth.github.io\u002F)] [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.12242)]\n\n    *纳塔尼尔·鲁伊斯、李远振、瓦伦·詹帕尼、雅埃尔·普里奇、迈克尔·鲁宾斯坦、克菲尔·阿伯曼.* CVPR'23。\n\n    ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FImage-blue?style=flat-square)![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FSubject--Driven-orange?style=flat-square)\n\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fatfortes_Awesome-Controllable-Diffusion_readme_5e3dc1920671.png\" style=\"width:100%\">\n\n\u003Cp align=\"right\" style=\"font-size: 14px; color: #555; margin-top: 20px;\">\n    \u003Ca href=\"#readme-top\" style=\"text-decoration: none; color: #007bff; font-weight: bold;\">\n        ↑ 返回顶部 ↑\n    \u003C\u002Fa>\n\u003C\u002Fp>\n\n\u003Ch1 id=\"other-resources\">🔗 其他资源\u003C\u002Fh1>\n\n1. **[区域提示器](https:\u002F\u002Fgithub.com\u002Fhako-mikan\u002Fsd-webui-regional-prompter)**  将提示词分别应用到画面中划分出的各个区域。\n\n\u003Cp align=\"right\" style=\"font-size: 14px; color: #555; margin-top: 20px;\">\n    \u003Ca href=\"#readme-top\" style=\"text-decoration: none; color: #007bff; font-weight: bold;\">\n        ↑ 返回顶部 ↑\n    \u003C\u002Fa>\n\u003C\u002Fp>\n\n\u003Ch1 id=\"other-awesome-lists\">🌟 其他精彩列表\u003C\u002Fh1>\n\n1. **[Awesome-LLM-Reasoning](https:\u002F\u002Fgithub.com\u002Fatfortes\u002FAwesome-LLM-Reasoning)**  大型语言模型推理领域的论文与资源合集。\n\n1. **[Awesome-Controllable-T2I-Diffusion-Models](https:\u002F\u002Fgithub.com\u002FPRIV-Creation\u002FAwesome-Controllable-T2I-Diffusion-Models)**  文本到图像扩散模型可控生成相关资源合集。\n\n\u003Cp align=\"right\" style=\"font-size: 14px; color: #555; margin-top: 20px;\">\n    \u003Ca href=\"#readme-top\" style=\"text-decoration: none; color: #007bff; font-weight: bold;\">\n        ↑ 返回顶部 ↑\n    \u003C\u002Fa>\n\u003C\u002Fp>\n\n\u003Ch1 id=\"contributing\">✍️ 贡献\u003C\u002Fh1>\n\n- 添加一篇新论文或更新现有论文，并思考该工作应归入哪个类别。\n- 使用与现有条目相同的格式来描述该工作。\n- 添加论文的摘要链接（如果是 arXiv 出版物，则使用 `\u002Fabs\u002F` 格式）。\n\n**即使你犯了错误，也会有人帮你修正！**\n\n## 贡献者\n\n\u003Ca href=\"https:\u002F\u002Fgithub.com\u002Fatfortes\u002FAwesome-Controllable-Diffusion\u002Fgraphs\u002Fcontributors\">\n  \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fatfortes_Awesome-Controllable-Diffusion_readme_056f6f7ef868.png\" \u002F>\n\u003C\u002Fa>\n\n## 星标历史\n\n[![星标历史图表](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fatfortes_Awesome-Controllable-Diffusion_readme_9872c843e440.png)](https:\u002F\u002Fstar-history.com\u002F#atfortes\u002FAwesome-Controllable-Generation&Timeline)","# Awesome-Controllable-Diffusion 快速上手指南\n\n**Awesome-Controllable-Diffusion** 并非一个单一的可执行软件或 Python 包，而是一个**精选资源列表（Awesome List）**，汇集了为扩散模型（Diffusion Models）添加条件控制（如布局、风格、主体一致性等）的前沿论文、项目代码和开源实现。\n\n本指南将指导你如何利用该列表中的资源，快速在本地运行一个典型的可控生成项目（以列表中热门的 **IPAdapter** 或 **ControlNet** 类项目为例）。\n\n## 1. 环境准备\n\n在开始之前，请确保你的开发环境满足以下要求：\n\n*   **操作系统**: Linux (推荐 Ubuntu 20.04\u002F22.04) 或 macOS (M1\u002FM2\u002FM3 芯片需特定配置)。Windows 用户建议使用 WSL2。\n*   **硬件要求**:\n    *   **GPU**: NVIDIA GPU (推荐 RTX 3090\u002F4090 或更高)，显存建议 **16GB+** (运行复杂控制任务如 3D 或多主体生成时)。\n    *   **CUDA**: 版本 11.8 或 12.1+。\n*   **前置依赖**:\n    *   Python 3.10 或 3.11\n    *   Git\n    *   Conda 或 Mamba (推荐用于环境管理)\n\n> **国内加速建议**:\n> *   使用清华源或阿里源配置 `pip` 和 `conda`。\n> *   模型权重下载推荐使用 [ModelScope (魔搭)](https:\u002F\u002Fmodelscope.cn\u002F) 或 [Wisemodel (始智 AI)](https:\u002F\u002Fwisemodel.cn\u002F) 作为 HuggingFace 的替代镜像。
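\n\n环境与依赖安装完成后（安装步骤见下文第 2 节），可以用下面这个最小脚本自检 GPU 是否可用、显存是否达到上述建议。该脚本仅为通用示意，假设使用 NVIDIA GPU 并已安装 CUDA 版 PyTorch：\n\n```python\n# 环境自检示意（假设 NVIDIA GPU + CUDA 版 PyTorch）\nimport torch\n\nprint('PyTorch 版本:', torch.__version__)\nprint('CUDA 可用:', torch.cuda.is_available())\nif torch.cuda.is_available():\n    props = torch.cuda.get_device_properties(0)\n    print('GPU:', props.name)\n    # 复杂控制任务（3D、多主体）建议 16GB 以上显存\n    print(f'显存: {props.total_memory \u002F 1024**3:.1f} GiB')\n```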
\n\n## 2. 安装步骤\n\n由于该仓库包含多个独立项目，以下步骤以克隆仓库并运行其中一个典型项目（例如 **IPAdapter** 或列表中的其他具体实现）为例。\n\n### 2.1 克隆资源列表\n首先获取该 Awesome 列表，以便查阅最新的项目链接：\n\n```bash\ngit clone https:\u002F\u002Fgithub.com\u002Fatfortes\u002FAwesome-Controllable-Diffusion.git\ncd Awesome-Controllable-Diffusion\n```\n\n### 2.2 选择并克隆具体项目\n在 `README.md` 的 \"Papers\" 部分选择一个你感兴趣的项目（例如 2024 年的 **IPAdapter-Instruct** 或 **Ctrl-X**）。假设我们选择 **IPAdapter-Instruct**：\n\n```bash\n# 进入你希望存放项目的目录\ncd ..\ngit clone https:\u002F\u002Fgithub.com\u002Funity-research\u002FIP-Adapter-Instruct.git\ncd IP-Adapter-Instruct\n```\n\n### 2.3 创建虚拟环境并安装依赖\n大多数项目都提供 `requirements.txt`。建议使用国内镜像源加速安装：\n\n```bash\n# 创建 conda 环境\nconda create -n ctrl-diff python=3.10 -y\nconda activate ctrl-diff\n\n# 配置 pip 国内镜像 (清华大学)\npip config set global.index-url https:\u002F\u002Fpypi.tuna.tsinghua.edu.cn\u002Fsimple\n\n# 安装基础依赖\npip install torch torchvision torchaudio --index-url https:\u002F\u002Fdownload.pytorch.org\u002Fwhl\u002Fcu118\npip install -r requirements.txt\n```\n\n> **注意**: 如果项目依赖特定的 Diffusers 版本或 Gradio 版本，请严格参照该项目根目录下的 `README` 指示进行调整。\n\n### 2.4 下载预训练模型\n根据项目说明下载基础扩散模型（如 SDXL 或 SD1.5）及特定的控制适配器权重。\n**国内开发者推荐**: 检查项目是否提供了 ModelScope 下载链接，若无，可使用 `huggingface-cli` 配合镜像站下载。\n\n```bash\n# 示例：使用 huggingface-cli 下载 (需配置 HF_ENDPOINT)\nexport HF_ENDPOINT=https:\u002F\u002Fhf-mirror.com\nhuggingface-cli download unity-research\u002FIP-Adapter-Instruct --local-dir .\u002Fmodels\n```\n\n## 3. 基本使用\n\n安装完成后，通常可以通过命令行脚本或 Web UI 进行测试。以下是基于典型项目的通用使用流程。\n\n### 3.1 命令行推理 (Inference)\n大多数项目提供一个 `infer.py` 或 `generate.py` 脚本。以下是一个通用的调用示例（具体参数请参考所选项目的文档）：\n\n```bash\npython infer.py \\\n    --prompt \"A cyberpunk cat sitting on a neon roof, high detail\" \\\n    --image_path \".\u002Fassets\u002Freference_cat.jpg\" \\\n    --control_type \"style\" \\\n    --output_dir \".\u002Foutputs\" \\\n    --steps 30 \\\n    --guidance_scale 7.5\n```\n\n*   `--prompt`: 文本提示词。\n*   `--image_path`: 参考图像（用于风格迁移、主体保持或布局控制）。\n*   `--control_type`: 控制类型（如 `layout`, `style`, `subject` 等，视具体项目支持情况而定）。\n\n此类脚本内部的大致实现可参考下文 3.4 节的最小示意。\n\n### 3.2 启动 Web UI (如果支持)\n许多现代可控生成项目集成了 Gradio 界面，方便调试：\n\n```bash\npython app.py\n```\n\n运行后，终端会显示一个本地地址（通常是 `http:\u002F\u002F127.0.0.1:7860`），在浏览器中打开即可上传参考图、输入提示词并生成图像。\n\n### 3.3 探索更多项目\n回到 `Awesome-Controllable-Diffusion` 目录，查看 `README.md` 中的分类标签：\n*   **Layout**: 布局控制 (如 *Zero-Painter*, *Ctrl-X*)\n*   **Subject-Driven**: 主体驱动 (如 *IPAdapter*, *FreeCustom*)\n*   **Style**: 风格迁移 (如 *CSGO*, *ViPer*)\n*   **3D**: 三维生成 (如 *Sketch2Scene*)\n\n根据需要切换到对应项目的仓库重复上述“安装”与“使用”步骤。
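\n\n### 3.4 附：最小 Python 推理示意\n\n为直观说明 3.1 节中 `infer.py` 这类脚本内部大致在做什么，下面给出一个基于 diffusers 的最小示意，以列表中最经典的 **ControlNet**（Canny 边缘条件）为例实现布局控制。注意：其中的模型 ID 与文件路径均为常见示例，并非某个具体项目的官方用法，实际请以所选项目的文档为准：\n\n```python\n# 最小示意：SD1.5 + ControlNet(Canny)，用参考图的边缘作为布局条件\n# 假设已安装 diffusers、transformers、accelerate、opencv-python\nimport os\n\nimport cv2\nimport numpy as np\nimport torch\nfrom PIL import Image\nfrom diffusers import ControlNetModel, StableDiffusionControlNetPipeline\n\n# 加载控制网络与基础模型（fp16 以节省显存）\ncontrolnet = ControlNetModel.from_pretrained(\n    'lllyasviel\u002Fsd-controlnet-canny', torch_dtype=torch.float16\n)\npipe = StableDiffusionControlNetPipeline.from_pretrained(\n    'runwayml\u002Fstable-diffusion-v1-5', controlnet=controlnet, torch_dtype=torch.float16\n).to('cuda')\n\n# 从参考图提取 Canny 边缘，作为布局条件（路径为假设示例）\nref = np.array(Image.open('.\u002Fassets\u002Freference_cat.jpg').convert('RGB'))\nedges = cv2.Canny(ref, 100, 200)\ncondition = Image.fromarray(np.stack([edges] * 3, axis=-1))\n\n# 文本提示与条件图像共同引导去噪过程\nresult = pipe(\n    'A cyberpunk cat sitting on a neon roof, high detail',\n    image=condition,\n    num_inference_steps=30,\n    guidance_scale=7.5,\n).images[0]\n\nos.makedirs('.\u002Foutputs', exist_ok=True)\nresult.save('.\u002Foutputs\u002Fcontrolnet_canny.png')\n```\n\nIP-Adapter、T2I-Adapter 等其他控制方式在 diffusers 中也有类似的管线封装，核心思路一致：把额外的条件信号（参考图、布局、风格嵌入等）注入扩散模型的去噪过程。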
","某独立游戏开发者试图将手绘的草图快速转化为风格统一、布局可控的 3D 游戏场景概念图，以加速前期美术设计流程。\n\n### 没有 Awesome-Controllable-Diffusion 时\n- **布局失控**：直接使用基础扩散模型生成图像时，AI 完全忽略草图中的建筑位置和道路走向，导致生成的场景构图与原始设计意图严重偏离。\n- **风格割裂**：难以在保持特定像素艺术或赛博朋克风格的同时，精准植入自定义的角色或道具，每次调整都需要重新训练模型，耗时数小时。\n- **资源分散**：需要在 GitHub、ArXiv 和各类论坛中盲目搜索 ControlNet、IP-Adapter 等最新控制技术，缺乏系统整理，极易错过如 Sketch2Scene 这类针对 3D 场景生成的关键论文。\n- **迭代低效**：修改局部细节（如更换窗户样式）往往引发整体画面崩坏，无法实现“指哪打哪”的精细化控制，导致废稿率极高。\n\n### 使用 Awesome-Controllable-Diffusion 后\n- **精准还原布局**：通过列表中集成的 Sketch2Scene 和 IFAdapter 技术，开发者能直接将粗糙草图转化为结构严谨的 3D 场景，完美保留原始设计的空间逻辑。\n- **灵活风格组合**：利用 CSGO 等资源，轻松实现内容与风格的解耦控制，既能固定角色形象，又能一键切换多种美术风格，无需重复训练。\n- **前沿技术直达**：依托该清单对 2024-2025 年最新论文（如 Generative Photomontage）的系统收录，开发者能迅速定位并应用最适合场景合成的 SOTA 算法。\n- **高效局部编辑**：借助成熟的条件控制方案，可单独调整场景中的光照、材质或物体位置，大幅降低试错成本，将概念图产出效率提升数倍。\n\nAwesome-Controllable-Diffusion 将零散的前沿控制技术转化为系统化的生产力，让创作者从“抽卡式”生成进化为真正的“导演式”创作。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fatfortes_Awesome-Controllable-Diffusion_7ae291ef.png","atfortes","Armando Fortes","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002Fatfortes_b81cb213.jpg","PhD candidate in MMLab@NTU. Prev: Tsinghua @thu-ml, Técnico Lisboa.","Nanyang Technological University","Singapore",null,"atfortes19","atfortes.github.io","https:\u002F\u002Fgithub.com\u002Fatfortes",505,29,"2026-04-04T08:19:38","MIT",5,"","未说明",{"notes":94,"python":92,"dependencies":95},"该仓库是一个论文和资源列表（Awesome List），用于汇总可控扩散模型的相关研究，本身不是一个可直接运行的软件工具，因此没有具体的运行环境、依赖库或硬件需求。用户需根据列表中各个具体项目（如 IFAdapter, CSGO 等）的独立仓库查阅其特定的安装和运行要求。",[],[14],[98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,6,115,116],"papers","controlnet","customization","dall-e","diffusion-models","dreambooth","latent-diffusion","midjourney","personalization","stable-diffusion","text-to-image","generative-art","image-synthesis","ip-adapter","t2i-adapter","flux","multi-concept","style-transfer","subject-driven-generation","2026-03-27T02:49:30.150509","2026-04-06T07:12:57.293904",[],[]]