[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-scraed--LanPaint":3,"tool-scraed--LanPaint":61},[4,18,28,37,45,53],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":17},4358,"openclaw","openclaw\u002Fopenclaw","OpenClaw 是一款专为个人打造的本地化 AI 助手，旨在让你在自己的设备上拥有完全可控的智能伙伴。它打破了传统 AI 助手局限于特定网页或应用的束缚，能够直接接入你日常使用的各类通讯渠道，包括微信、WhatsApp、Telegram、Discord、iMessage 等数十种平台。无论你在哪个聊天软件中发送消息，OpenClaw 都能即时响应，甚至支持在 macOS、iOS 和 Android 设备上进行语音交互，并提供实时的画布渲染功能供你操控。\n\n这款工具主要解决了用户对数据隐私、响应速度以及“始终在线”体验的需求。通过将 AI 部署在本地，用户无需依赖云端服务即可享受快速、私密的智能辅助，真正实现了“你的数据，你做主”。其独特的技术亮点在于强大的网关架构，将控制平面与核心助手分离，确保跨平台通信的流畅性与扩展性。\n\nOpenClaw 非常适合希望构建个性化工作流的技术爱好者、开发者，以及注重隐私保护且不愿被单一生态绑定的普通用户。只要具备基础的终端操作能力（支持 macOS、Linux 及 Windows WSL2），即可通过简单的命令行引导完成部署。如果你渴望拥有一个懂你",349277,3,"2026-04-06T06:32:30",[13,14,15,16],"Agent","开发框架","图像","数据工具","ready",{"id":19,"name":20,"github_repo":21,"description_zh":22,"stars":23,"difficulty_score":24,"last_commit_at":25,"category_tags":26,"status":17},9989,"n8n","n8n-io\u002Fn8n","n8n 是一款面向技术团队的公平代码（fair-code）工作流自动化平台，旨在让用户在享受低代码快速构建便利的同时，保留编写自定义代码的灵活性。它主要解决了传统自动化工具要么过于封闭难以扩展、要么完全依赖手写代码效率低下的痛点，帮助用户轻松连接 400 多种应用与服务，实现复杂业务流程的自动化。\n\nn8n 特别适合开发者、工程师以及具备一定技术背景的业务人员使用。其核心亮点在于“按需编码”：既可以通过直观的可视化界面拖拽节点搭建流程，也能随时插入 JavaScript 或 Python 代码、调用 npm 包来处理复杂逻辑。此外，n8n 原生集成了基于 LangChain 的 AI 能力，支持用户利用自有数据和模型构建智能体工作流。在部署方面，n8n 提供极高的自由度，支持完全自托管以保障数据隐私和控制权，也提供云端服务选项。凭借活跃的社区生态和数百个现成模板，n8n 让构建强大且可控的自动化系统变得简单高效。",184740,2,"2026-04-19T23:22:26",[16,14,13,15,27],"插件",{"id":29,"name":30,"github_repo":31,"description_zh":32,"stars":33,"difficulty_score":10,"last_commit_at":34,"category_tags":35,"status":17},10095,"AutoGPT","Significant-Gravitas\u002FAutoGPT","AutoGPT 是一个旨在让每个人都能轻松使用和构建 AI 的强大平台，核心功能是帮助用户创建、部署和管理能够自动执行复杂任务的连续型 AI 智能体。它解决了传统 AI 应用中需要频繁人工干预、难以自动化长流程工作的痛点，让用户只需设定目标，AI 即可自主规划步骤、调用工具并持续运行直至完成任务。\n\n无论是开发者、研究人员，还是希望提升工作效率的普通用户，都能从 AutoGPT 中受益。开发者可利用其低代码界面快速定制专属智能体；研究人员能基于开源架构探索多智能体协作机制；而非技术背景用户也可直接选用预置的智能体模板，立即投入实际工作场景。\n\nAutoGPT 的技术亮点在于其模块化“积木式”工作流设计——用户通过连接功能块即可构建复杂逻辑，每个块负责单一动作，灵活且易于调试。同时，平台支持本地自托管与云端部署两种模式，兼顾数据隐私与使用便捷性。配合完善的文档和一键安装脚本，即使是初次接触的用户也能在几分钟内启动自己的第一个 AI 智能体。AutoGPT 正致力于降低 AI 应用门槛，让人人都能成为 AI 的创造者与受益者。",183572,"2026-04-20T04:47:55",[13,36,27,14,15],"语言模型",{"id":38,"name":39,"github_repo":40,"description_zh":41,"stars":42,"difficulty_score":10,"last_commit_at":43,"category_tags":44,"status":17},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,"2026-04-05T11:01:52",[14,15,13],{"id":46,"name":47,"github_repo":48,"description_zh":49,"stars":50,"difficulty_score":24,"last_commit_at":51,"category_tags":52,"status":17},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 
### gemini-cli (`google-gemini/gemini-cli`)
*Stars: 100,752 · Difficulty: 2 · Last commit: 2026-04-10 · Tags: Plugin, Agent, Image, Dev Framework*

gemini-cli is Google's open-source AI command-line tool that integrates the Gemini large models directly into the terminal. For developers who work in the command line, it offers the shortest path from prompt to model response, with no window switching. It addresses the pain of frequent context switches during development, letting you handle code understanding, generation, debugging, and automated ops tasks within the familiar terminal, whether querying a large codebase, generating an app from a sketch, or running complex Git operations through natural-language instructions. It suits software engineers, DevOps staff, and technical researchers. Highlights include a context window of up to 1 million tokens with strong reasoning, built-in tools for Google Search, file operations, and shell command execution, and support for MCP (Model Context Protocol), which lets users flexibly add custom integrations such as image generation. A personal Google account includes a free usage quota, and the project is fully open source under Apache 2.0.

## LanPaint (`scraed/LanPaint`)

*Difficulty: 2 · Status: ready*

**Description:** High quality training free inpaint for every stable diffusion model. Supports ComfyUI.

**Summary:** LanPaint is a high-quality inpainting sampler for the Stable Diffusion model family that improves a model's native inpainting with no additional training. It targets the usual failure modes of conventional inpainting in complex scenes (hard edges, incoherent content, blurred detail) by letting the AI "think" through several iterations before generating the final pixels, trading extra computation for more natural, realistic results. The tool suits creators, designers, and researchers who want to push past a model's native inpainting limits; ordinary users can get professional-grade image editing through ComfyUI workflows, and developers get a clean interface for integrating it into diffusion applications. Its core technical highlight is the distinctive "Think Mode", an algorithm based on asymptotically exact conditional sampling. Rather than relying on expensive retraining, it optimizes the sampling process so the model fully understands how the masked region relates to its surroundings before denoising. LanPaint ships as a ComfyUI extension, was among the first to support new architectures such as Z-Image and Z-Image-Base, and has extended to video inpainting and outpainting based on Wan 2.2, demonstrating strong generality and forward-looking design.

---

<div align="center">

# LanPaint: Universal Inpainting Sampler with "Think Mode"
[![TMLR PDF](https://img.shields.io/badge/TMLR-PDF-8A2BE2?logo=openreview&logoColor=white)](https://openreview.net/pdf?id=JPC8JyOUSW)
[![Python Benchmark](https://img.shields.io/badge/🐍-Python_Benchmark-3776AB?logo=python)](https://github.com/scraed/LanPaintBench)
[![ComfyUI Extension](https://img.shields.io/badge/ComfyUI-Extension-7B5DFF)](https://github.com/comfyanonymous/ComfyUI)
[![Hugging Face](https://img.shields.io/badge/Hugging%20Face-yellow?logo=huggingface&logoColor=white)](https://huggingface.co/charrywhite/LanPaint)
[![Blog](https://img.shields.io/badge/📝-Blog-9cf)](https://scraed.github.io/scraedBlog/)
[![GitHub stars](https://img.shields.io/github/stars/scraed/LanPaint)](https://github.com/scraed/LanPaint/stargazers)
[![Discord](https://img.shields.io/badge/Discord-5865F2?style=for-the-badge&logo=discord&logoColor=white)](https://discord.gg/yN5wYDE6W4)
</div>
Universally applicable inpainting ability for every model. The LanPaint sampler lets the model "think" through multiple iterations before denoising, enabling you to invest more computation time for superior inpainting quality.

This is the official implementation of ["LanPaint: Training-Free Diffusion Inpainting with Asymptotically Exact and Fast Conditional Sampling"](https://arxiv.org/abs/2502.03491), accepted by TMLR.

This repository is the ComfyUI extension.

Diffusers support: [LanPaint-Diffusers](https://github.com/charrywhite/LanPaint-diffusers) by [@charrywhite](https://github.com/charrywhite/)

Benchmark code for reproducing the paper: [LanPaintBench](https://github.com/scraed/LanPaintBench).
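Conceptually, the sampler wraps each denoising step in an inner refinement loop on the masked region. The sketch below illustrates that outer/inner loop structure in PyTorch-style Python; it is a schematic under stated assumptions (a sigma-parameterized denoiser `model(x, sigma)` and a generic score-based Langevin-style update), not LanPaint's actual implementation.

```python
import torch

def denoise_step(model, x, sigma, sigma_next):
    # Generic Euler step for a sigma-parameterized denoiser (illustrative).
    d = (x - model(x, sigma)) / sigma
    return x + (sigma_next - sigma) * d

def langevin_refine(model, x, mask, sigma, step_size=0.3):
    # One Langevin-style update applied only to the masked region:
    # drift toward the model's denoised estimate, plus matched noise.
    denoised = model(x, sigma)
    score = (denoised - x) / sigma**2
    noise = torch.randn_like(x)
    update = step_size * sigma**2 * score + (2 * step_size) ** 0.5 * sigma * noise
    return x + mask * update

def inpaint_sample(model, x_T, known_latent, mask, sigmas, think_steps=5):
    """Schematic "think mode" loop: at every noise level, pin the known region
    to the re-noised original latent, refine the masked region for
    `think_steps` iterations, then take one ordinary denoising step."""
    x = x_T
    for i in range(len(sigmas) - 1):
        sigma = sigmas[i]
        # Keep the known region consistent with the original image.
        x = mask * x + (1 - mask) * (known_latent + sigma * torch.randn_like(x))
        for _ in range(think_steps):
            x = langevin_refine(model, x, mask, sigma)
        x = denoise_step(model, x, sigma, sigmas[i + 1])
    return x
```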
## Citation

```
@article{
zheng2025lanpaint,
title={LanPaint: Training-Free Diffusion Inpainting with Asymptotically Exact and Fast Conditional Sampling},
author={Candi Zheng and Yuan Lan and Yang Wang},
journal={Transactions on Machine Learning Research},
issn={2835-8856},
year={2025},
url={https://openreview.net/forum?id=JPC8JyOUSW},
note={}
}
```

**🎉 NEW 2026: Join our Discord!**

[Join our Discord](https://discord.gg/yN5wYDE6W4) to share experiences, discuss features, and explore future development.

`v1.5.0` fixes an important hidden bug that reduced performance and could blur images (especially with `z-image-base`), and also boosts overall LanPaint performance across other models.

If your inpainting results have weird (glowing / broken) mask boundaries, check this [issue](https://github.com/scraed/LanPaint/issues/80).

**🎬 NEW: LanPaint now supports inpainting and outpainting based on Z-Image!**

| Original | Masked | Inpainted |
|:--------:|:------:|:---------:|
| ![Original Z-image](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_b8f88b2623b2.png) | ![Masked Z-image](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_1a083a9802c6.png) | ![Inpainted Z-image](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_6e7bce9bca2b.png) |

**🎬 NEW: LanPaint now supports Z-Image-Base too!**

| Original | Masked | Inpainted |
|:--------:|:------:|:---------:|
| ![Original Z-image-base](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_2d534da66100.png) | ![Masked Z-image-base](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_8ad1135a7885.png) | ![Inpainted Z-image-base](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_8eff94514c24.png) |

**🎬 NEW: LanPaint now supports video inpainting and outpainting based on Wan 2.2!**

<div align="center">

| Original Video | Mask (edit T-shirt text) | Inpainted Result |
|:--------------:|:----:|:----------------:|
| ![Original](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_6e24810e9357.gif) | ![Mask](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_5655fb9d9e09.png) | ![Result](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_6cbf9584785b.gif) |

*Video Inpainting Example: 81 frames with temporal consistency*

</div>

Check our latest [Wan 2.2 Video Examples](#video-examples-beta), [Wan 2.2 Image Examples](#example-wan22-inpaintlanpaint-k-sampler-5-steps-of-thinking), and [Qwen Image Edit 2509](#example-qwen-edit-2509-inpaint) support.

## Table of Contents
- [Features](#features)
- [Quickstart](#quickstart)
- [How to Use Examples](#how-to-use-examples)
- [Video Examples (Beta)](#video-examples-beta)
  - [Wan 2.2 Video Inpainting](#wan-22-video-inpainting)
  - [Wan 2.2 5B Video Inpainting](#wan-22-5b-video-inpainting)
  - [Wan 2.2 Video Outpainting](#wan-22-video-outpainting)
  - [Resource Consumption](#resource-consumption)
- [Image Examples](#image-examples)
  - [Flux.2.Dev](#example-flux2dev-inpaintlanpaint-k-sampler-5-steps-of-thinking)
  - [Flux 2 klein](#example-flux-2-klein-inpaintlanpaint-k-sampler-2-steps-of-thinking)
  - [Z-image](#example-z-image-inpaintlanpaint-k-sampler-5-steps-of-thinking)
  - [Z-image-base](#example-z-image-base-inpaintlanpaint-k-sampler-3-steps-of-thinking)
  - [Hunyuan T2I](#example-hunyuan-t2i-inpaintlanpaint-k-sampler-5-steps-of-thinking)
  - [Wan 2.2 T2I](#example-wan22-inpaintlanpaint-k-sampler-5-steps-of-thinking)
  - [Wan 2.2 T2I with reference](#example-wan22-partial-inpaintlanpaint-k-sampler-5-steps-of-thinking)
  - [Qwen Image Edit 2511 / 2509](#example-qwen-edit-2509-inpaint)
  - [Qwen Image Edit 2508](#example-qwen-edit-2508-inpaint)
  - [Qwen Image](#example-qwen-image-inpaintlanpaint-k-sampler-5-steps-of-thinking)
  - [HiDream](#example-hidream-inpaint-lanpaint-k-sampler-5-steps-of-thinking)
  - [SD 3.5](#example-sd-35-inpaintlanpaint-k-sampler-5-steps-of-thinking)
  - [Flux](#example-flux-inpaintlanpaint-k-sampler-5-steps-of-thinking)
  - [SDXL](#example-sdxl-0-character-consistency-side-view-generation-lanpaint-k-sampler-5-steps-of-thinking)
- [Usage](#usage)
  - [Basic Sampler](#basic-sampler)
  - [Advanced Sampler](#lanpaint-ksampler-advanced)
  - [Tuning Guide](#lanpaint-ksampler-advanced-tuning-guide)
- [Community Showcase](#community-showcase-)
- [FAQ](#faq)
- [Updates](#updates)
- [ToDo](#todo)
- [Citation](#citation)
## Features

- **Universal Compatibility** – Works instantly with almost any model (**Z-image, Z-image-base, Hunyuan, Wan 2.2, Qwen Image/Edit, HiDream, SD 3.5, Flux-series, SDXL, SD 1.5, or custom LoRAs**) and ControlNet.
![Inpainting Result 13](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_bca68cb2be22.jpg)
- **No Training Needed** – Works out of the box with your existing model.
- **Easy to Use** – Same workflow as the standard ComfyUI KSampler.
- **Flexible Masking** – Supports any mask shape, size, or position for inpainting/outpainting.
- **No Workarounds** – Generates 100% new content (no blending or smoothing) without relying on partial denoising.
- **Beyond Inpainting** – You can even use it as a simple way to generate consistent characters.

**Warning**: LanPaint has degraded performance on distillation models, such as Flux.dev, due to an issue similar to the [one affecting LoRA training](https://medium.com/@zhiwangshi28/why-flux-lora-so-hard-to-train-and-how-to-overcome-it-a0c70bc59eaf). Please use low Flux guidance (1.0-2.0) to mitigate this [issue](https://github.com/scraed/LanPaint/issues/30).

## Quickstart

1. **Install ComfyUI**: Follow the official [ComfyUI installation guide](https://docs.comfy.org/get_started) to set up ComfyUI on your system, and ensure your ComfyUI version is > 0.3.11.
2. **Install ComfyUI-Manager**: Add [ComfyUI-Manager](https://github.com/ltdrdata/ComfyUI-Manager) for easy extension management.
3. **Install LanPaint Nodes**:
   - **Via ComfyUI-Manager**: Search for "[LanPaint](https://registry.comfy.org/publishers/scraed/nodes/LanPaint)" in the manager and install it directly.
   - **Manually**: Click "Install via Git URL" in ComfyUI-Manager and input the GitHub repository link:
     ```
     https://github.com/scraed/LanPaint.git
     ```
     Alternatively, clone this repository into the `ComfyUI/custom_nodes` folder.
4. **Restart ComfyUI**: Restart ComfyUI to load the LanPaint nodes.

Once installed, you'll find the LanPaint nodes under the "sampling" category in ComfyUI. Use them just like the default KSampler for high-quality inpainting!

## How to Use Examples

1. Navigate to the **example** folder (e.g., example_1) and download all pictures.
2. Drag **InPainted_Drag_Me_to_ComfyUI.png** into ComfyUI to load the workflow.
3. Download the required model (e.g., by clicking **Model Used in This Example**).
4. Load the model in ComfyUI.
5. Upload **Masked_Load_Me_in_Loader.png** to the **"Load image"** node in the **"Mask image for inpainting"** group (second from left), or to the **Prepare Image** node.
6. Queue the task; you will get inpainted results from LanPaint. Some examples also give you inpainted results from the following methods for comparison:
   - **[VAE Encode for Inpainting](https://comfyanonymous.github.io/ComfyUI_examples/inpaint/)**
   - **[Set Latent Noise Mask](https://comfyui-wiki.com/en/tutorial/basic/how-to-inpaint-an-image-in-comfyui)**
## Video Examples (Beta)

LanPaint now supports video inpainting with Wan 2.2, enabling you to seamlessly inpaint masked regions across video frames while maintaining temporal consistency.

**Note:** LanPaint supports video inpainting for longer sequences (e.g., 81 frames), but processing time increases significantly (see the [Resource Consumption](#resource-consumption) section for details) and performance may become unstable. For optimal results and stability, we recommend limiting video inpainting to **40 frames or fewer**.

### Wan 2.2 Video Inpainting

*Example: Wan2.2 t2v 14B, 480p video (11:6), 40 frames, LanPaint K Sampler, 2 steps of thinking*

| Original Video | Mask (Add a white hat) | Inpainted Result |
|:--------------:|:----:|:----------------:|
| ![Original Video](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_9742a2569a18.gif) | ![Mask](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_8395f2e6ccb1.png) | ![Inpainted Result](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_42c493031c5e.gif) |

[View Workflow & Masks](https://github.com/scraed/LanPaint/tree/master/examples/Example_17)

You need to follow the ComfyUI version of the [Wan2.2 T2V workflow](https://docs.comfy.org/tutorials/video/wan/wan2_2) to download and install the T2V model.

### Wan 2.2 5B Video Inpainting

Similar to Wan 2.2 14B, with a slightly different workflow. [View Workflow & Masks](https://github.com/scraed/LanPaint/tree/master/examples/Example_17)

### Wan 2.2 Video Outpainting

Extend your videos beyond their original boundaries with LanPaint's video outpainting capability based on Wan 2.2. This feature allows you to expand the canvas of your videos while maintaining coherent motion and context.

*Example: Wan2.2 t2v 14B, 480p video (1:1 outpainted to 11:6), 40 frames, LanPaint K Sampler, 2 steps of thinking*

| Original Video | Mask (Expand to 880x480) | Outpainted Result |
|:--------------:|:----:|:-----------------:|
| ![Original Video](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_8a20e4643417.gif) | ![Mask](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_5408315fb21f.png) | ![Outpainted Result](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_2af219946b2d.gif) |

[View Workflow & Masks](https://github.com/scraed/LanPaint/tree/master/examples/Example_19)

You need to follow the ComfyUI version of the [Wan2.2 T2V workflow](https://docs.comfy.org/tutorials/video/wan/wan2_2) to download and install the T2V model.
### Resource Consumption

| Processing Mode | Resolution | Frames Processed | VRAM Required | Total Runtime (20 steps) |
|:----------------|:-----------|:-----------------|:--------------|:-------------------------|
| **Inpainting** | 880×480 (11:6) | 40 frames | 39.8 GB | **05:37 min** |
| **Inpainting** | 480×480 (1:1) | 40 frames | 38.0 GB | **05:35 min** |
| **Outpainting** | 880×480 (11:6) | 40 frames | 40.2 GB | **05:36 min** |
| **Inpainting** | 880×480 (11:6) | 81 frames | 43.3 GB | **16:23 min** |
| **Inpainting** | 480×480 (1:1) | 81 frames | 39.8 GB | **14:25 min** |
| **Outpainting** | 880×480 (11:6) | 81 frames | 42.6 GB | **13:46 min** |

<sub>**Test Platform**: All tests were conducted on an NVIDIA RTX Pro 6000.<br>
**Model Used**: `wan2.2_t2v_low_noise_14B_fp8_scaled.safetensors` and `wan2.2_t2v_high_noise_14B_fp8_scaled.safetensors`.<br>
**Processing Steps**: 20 sampling steps × 2 (LanPaint thinking steps).</sub>

**Note:** The VRAM is required by the model, not by LanPaint. To further reduce VRAM requirements, we recommend generating fewer frames and loading CLIP on the CPU.
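For planning runs, a useful rule of thumb from the sampler notes further below is that thinking depth multiplies sampling time roughly linearly (NumSteps = 5 is about 5× slower than plain sampling). The helper below is an illustrative back-of-envelope estimator; the per-step time and the linear scaling are assumptions, not benchmarks.

```python
def estimate_runtime_minutes(minutes_per_plain_step: float,
                             steps: int = 20,
                             think_steps: int = 2) -> float:
    """Rough planning estimate: LanPaint's inner "thinking" iterations
    multiply per-step model work roughly by the thinking depth.
    Linear scaling is an approximation, not a benchmark."""
    slowdown = max(1.0, float(think_steps))
    return steps * minutes_per_plain_step * slowdown

# e.g. 20 steps at ~0.14 min per plain step with 2 thinking steps
# gives ~5.6 minutes, the same order as the 05:37 entries above.
print(round(estimate_runtime_minutes(0.14, steps=20, think_steps=2), 1))
```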
## Image Examples

### Example Hunyuan T2I: InPaint (LanPaint K Sampler, 5 steps of thinking)
We are excited to announce that LanPaint now supports inpainting with Hunyuan text-to-image generation.

[View Workflow & Masks](https://github.com/scraed/LanPaint/tree/master/examples/Example_20)

You need to follow the ComfyUI version of the [Hunyuan workflow](https://docs.comfy.org/tutorials/video/hunyuan-video#hunyuan-text-to-video-workflow) to download and install the model.

### Example Wan2.2: InPaint (LanPaint K Sampler, 5 steps of thinking)
We are excited to announce that LanPaint now supports Wan2.2 text-to-image generation with the Wan2.2 T2V model.

![Inpainting Result 45](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_e2c6626581c0.jpg)
[View Workflow & Masks](https://github.com/scraed/LanPaint/tree/master/examples/Example_15)

You need to follow the ComfyUI version of the [Wan2.2 T2V workflow](https://docs.comfy.org/tutorials/video/wan/wan2_2) to download and install the T2V model.

### Example Z-image: InPaint (LanPaint K Sampler, 5 steps of thinking)
LanPaint also supports inpainting with the Z-image text-to-image model.

<details open>
<summary>View Original / Masked / Inpainted Comparison</summary>

| Original | Masked | Inpainted |
|:--------:|:------:|:---------:|
| ![Original Z-image](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_b8f88b2623b2.png) | ![Masked Z-image](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_1a083a9802c6.png) | ![Inpainted Z-image](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_6e7bce9bca2b.png) |

</details>

[View Workflow & Masks](https://github.com/scraed/LanPaint/tree/master/examples/Example_21)

<details open>
<summary>View Z-image Outpainting (Original / Masked / Outpainted)</summary>

| Original | Masked | Outpainted |
|:--------:|:------:|:----------:|
| ![Original Z-image Outpaint](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_b8f88b2623b2.png) | ![Masked Z-image Outpaint](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_63665bfc6b77.png) | ![Outpainted Z-image](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_23db3cd49631.png) |

</details>

[View Outpaint Workflow & Masks](https://github.com/scraed/LanPaint/tree/master/examples/Example_22)

You can download the Z-image model for ComfyUI from [Z-image](https://docs.comfy.org/zh-CN/tutorials/image/z-image/z-image-turbo).

### Example Z-image-base: InPaint (LanPaint K Sampler, 3 steps of thinking)
LanPaint also supports inpainting with the Z-image-base model.

**Warning (stability)**: Z-image-base can easily diverge with LanPaint. Start with a **small `LanPaint_StepSize`** and **fewer thinking iterations** (lower `LanPaint_NumSteps`), and increase gradually only if results stay stable.
<details open>
<summary>View Original / Masked / Inpainted Comparison</summary>

| Original | Masked | Inpainted |
|:--------:|:------:|:---------:|
| ![Original Z-image-base](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_2d534da66100.png) | ![Masked Z-image-base](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_8ad1135a7885.png) | ![Inpainted Z-image-base](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_8eff94514c24.png) |

</details>

[View Workflow & Masks](https://github.com/scraed/LanPaint/tree/master/examples/Example_25)

Workflow template (JSON): [Z_image_base_Inpaint.json](https://github.com/scraed/LanPaint/blob/master/example_workflows/Z_image_base_Inpaint.json)

### Example Wan2.2: Partial InPaint (LanPaint K Sampler, 5 steps of thinking)
Sometimes we don't want to inpaint completely new content, but rather let the inpainted image reference the original image. One option is to inpaint with an edit model such as Qwen Image Edit. Another option is to perform a partial inpaint: let the diffusion process start at some middle step rather than from step 0, as sketched below.

![Inpainting Result 46](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_31287cea0914.jpg)
[View Workflow & Masks](https://github.com/scraed/LanPaint/tree/master/examples/Example_16)

You need to follow the ComfyUI version of the [Wan2.2 T2V workflow](https://docs.comfy.org/tutorials/video/wan/wan2_2) to download and install the T2V model.
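The "start at some middle step" idea is the same mechanism as img2img denoise strength: re-noise the original latent to an intermediate noise level and denoise only from there. Below is a minimal sketch of that mechanism, assuming a sigma schedule sorted high to low; the names and slicing convention are illustrative, not the exact behavior of LanPaint's nodes.

```python
import torch

def partial_inpaint_start(original_latent, sigmas, denoise_strength=0.6):
    """Start denoising at an intermediate step so the result keeps a
    resemblance to the original image. denoise_strength=1.0 means a full
    inpaint from pure noise; smaller values preserve more of the original.
    Illustrative sketch, assuming `sigmas` is sorted high to low."""
    start = int(len(sigmas) * (1.0 - denoise_strength))  # skip the earliest steps
    sigma_start = sigmas[start]
    # Re-noise the original latent up to the chosen noise level.
    x_start = original_latent + sigma_start * torch.randn_like(original_latent)
    return x_start, sigmas[start:]
```

Running the sampler on `x_start` with the truncated schedule reproduces the partial-inpaint behavior described above.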
### Example Qwen Edit 2509: InPaint
Check our latest updated [Masked Qwen Edit Workflow](https://github.com/scraed/LanPaint/tree/master/examples/Example_14) for Qwen Image Edit 2509. Download the model at [Qwen Image Edit 2509 Comfy](https://huggingface.co/Comfy-Org/Qwen-Image-Edit_ComfyUI/tree/main/split_files/diffusion_models). This workflow also supports Qwen Image Edit 2511.

![Qwen Result 3](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_23a219b23183.jpg)

### Example Qwen Edit 2508: InPaint
![Qwen Result 2](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_1bfddfbdc592.jpg)
Check the [Masked Qwen Edit Workflow](https://github.com/scraed/LanPaint/tree/master/examples/Example_14). You need to follow the ComfyUI version of the [Qwen Image Edit workflow](https://docs.comfy.org/tutorials/image/qwen/qwen-image-edit) to download and install the model.

### Example Qwen Image: InPaint (LanPaint K Sampler, 5 steps of thinking)

![Inpainting Result 14](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_7bf252dae2f2.jpg)
[View Workflow & Masks](https://github.com/scraed/LanPaint/tree/master/examples/Example_11)

You need to follow the ComfyUI version of the [Qwen Image workflow](https://docs.comfy.org/tutorials/image/qwen/qwen-image) to download and install the model.

The following examples use a random seed of 0 to generate a batch of 4 images for variance demonstration and fair comparison. (Note: generating 4 images may exceed your GPU memory; adjust the batch size as necessary.)

![Qwen Result 1](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_1243eb4150a5.jpg)
Also check the [Qwen Inpaint Workflow](https://github.com/scraed/LanPaint/tree/master/examples/Example_13) and the [Qwen Outpaint Workflow](https://github.com/scraed/LanPaint/tree/master/examples/Example_12). You need to follow the ComfyUI version of the [Qwen Image workflow](https://docs.comfy.org/tutorials/image/qwen/qwen-image) to download and install the model.

### Example HiDream: InPaint (LanPaint K Sampler, 5 steps of thinking)
![Inpainting Result 8](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_4406455a65ca.jpg)
[View Workflow & Masks](https://github.com/scraed/LanPaint/tree/master/examples/Example_8)

You need to follow the ComfyUI version of the [HiDream workflow](https://docs.comfy.org/tutorials/image/hidream/hidream-i1) to download and install the model.

### Example HiDream: OutPaint (LanPaint K Sampler, 5 steps of thinking)
![Outpainting Result](https://github.com/scraed/LanPaint/blob/master/examples/InpaintChara_13(1).jpg)
[View Workflow & Masks](https://github.com/scraed/LanPaint/tree/master/examples/Example_10)

You need to follow the ComfyUI version of the [HiDream workflow](https://docs.comfy.org/tutorials/image/hidream/hidream-i1) to download and install the model. Thanks to [Amazon90](https://github.com/Amazon90) for providing this example.
### Example SD 3.5: InPaint (LanPaint K Sampler, 5 steps of thinking)
![Inpainting Result 8](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_4411df386ed1.jpg)
[View Workflow & Masks](https://github.com/scraed/LanPaint/tree/master/examples/Example_9)

You need to follow the ComfyUI version of the [SD 3.5 workflow](https://comfyui-wiki.com/en/tutorial/advanced/stable-diffusion-3-5-comfyui-workflow) to download and install the model.

### Example Flux.2.Dev: InPaint (LanPaint K Sampler, 5 steps of thinking)

<details open>
<summary>View Original / Masked / Inpainted Comparison</summary>

| Original | Masked | Inpainted |
|:--------:|:------:|:---------:|
| ![Original Flux.2.Dev](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_fffd5c3053e6.png) | ![Masked Flux.2.Dev](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_0aa1cba2099e.png) | ![Inpainted Flux.2.Dev](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_3dc6861abfc8.png) |

</details>

[View Workflow & Masks](https://github.com/scraed/LanPaint/tree/master/examples/Example_23)

[Model Used in This Example](https://huggingface.co/Comfy-Org/flux2-dev)

(Note: Prompt First mode is disabled on Flux.2.Dev, as it does not use CFG guidance.)

### Example Flux 2 klein: InPaint (LanPaint K Sampler, 2 steps of thinking)

<details open>
<summary>View Original / Masked / Inpainted Comparison</summary>

| Original | Masked | Inpainted |
|:--------:|:------:|:---------:|
| ![Original Flux 2 klein](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_3e811bb58ee3.png) | ![Masked Flux 2 klein](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_e529767d595f.png) | ![Inpainted Flux 2 klein](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_96af6019bc13.png) |

</details>

[View Workflow & Masks](https://github.com/scraed/LanPaint/tree/master/examples/Example_24)

[Model Used in This Example](https://docs.comfy.org/zh-CN/tutorials/flux/flux-2-klein). If you have quality problems on ComfyUI 0.11 or 0.12, check [this issue](https://github.com/scraed/LanPaint/issues/80).

### Example Flux: InPaint (LanPaint K Sampler, 5 steps of thinking)
![Inpainting Result 7](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_0e646e6e734f.jpg)
[View Workflow & Masks](https://github.com/scraed/LanPaint/tree/master/examples/Example_7)
[Model Used in This Example](https://huggingface.co/Comfy-Org/flux1-dev/blob/main/flux1-dev-fp8.safetensors)
(Note: Prompt First mode is disabled on Flux, as it does not use CFG guidance.)
### Example SDXL 0: Character Consistency (Side View Generation) (LanPaint K Sampler, 5 steps of thinking)
![Inpainting Result 6](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_5bababe6b780.jpg)
[View Workflow & Masks](https://github.com/scraed/LanPaint/tree/master/examples/Example_6)
[Model Used in This Example](https://civitai.com/models/1188071?modelVersionId=1408658)

(Trick 1: You can emphasize the character by copying its image multiple times in Photoshop. Here one extra copy was made.)

(Trick 2: Use prompts like multiple views, multiple angles, clone, turnaround. Use LanPaint's Prompt First mode (not supported on Flux).)

(Trick 3: Remember that LanPaint can inpaint: mask the inconsistent regions and try again!)

### Example SDXL 1: Basket to Basketball (LanPaint K Sampler, 2 steps of thinking)
![Inpainting Result 1](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_f7fd47cdd3c5.jpg)
[View Workflow & Masks](https://github.com/scraed/LanPaint/tree/master/examples/Example_1)
[Model Used in This Example](https://civitai.com/models/1188071?modelVersionId=1408658)
### Example SDXL 2: White Shirt to Blue Shirt (LanPaint K Sampler, 5 steps of thinking)
![Inpainting Result 2](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_75ebd2248af3.jpg)
[View Workflow & Masks](https://github.com/scraed/LanPaint/tree/master/examples/Example_2)
[Model Used in This Example](https://civitai.com/models/1188071?modelVersionId=1408658)
### Example SDXL 3: Smile to Sad (LanPaint K Sampler, 5 steps of thinking)
![Inpainting Result 3](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_f3a31f61ead2.jpg)
[View Workflow & Masks](https://github.com/scraed/LanPaint/tree/master/examples/Example_3)
[Model Used in This Example](https://civitai.com/models/133005/juggernaut-xl)
### Example SDXL 4: Damage Restoration (LanPaint K Sampler, 5 steps of thinking)
![Inpainting Result 4](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_5ec31a79f791.jpg)
[View Workflow & Masks](https://github.com/scraed/LanPaint/tree/master/examples/Example_4)
[Model Used in This Example](https://civitai.com/models/133005/juggernaut-xl)
### Example SDXL 5: Huge Damage Restoration (LanPaint K Sampler, 20 steps of thinking)
![Inpainting Result 5](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_bccbad30ff8e.jpg)
[View Workflow & Masks](https://github.com/scraed/LanPaint/tree/master/examples/Example_5)
[Model Used in This Example](https://civitai.com/models/133005/juggernaut-xl)

See more use cases, such as inpainting on [fine-tuned models](https://github.com/scraed/LanPaint/issues/12#issuecomment-2938662021) and [face swapping](https://github.com/scraed/LanPaint/issues/12#issuecomment-2938723501), thanks to [Amazon90](https://github.com/Amazon90).

## Usage

**Workflow Setup**
Same as the default ComfyUI KSampler: simply replace it with the LanPaint KSampler nodes. The inpainting workflow is the same as the [SetLatentNoiseMask](https://comfyui-wiki.com/zh/comfyui-nodes/latent/inpaint/set-latent-noise-mask) inpainting workflow.

**Note**
- LanPaint requires binary masks (values of 0 or 1) without opacity or smoothing; see the sketch after this list. To ensure compatibility, set the mask's **opacity and hardness to maximum** in your mask editor. During inpainting, any mask with smoothing or gradients is automatically converted to a binary mask.
- LanPaint relies heavily on your text prompts to guide inpainting: explicitly describe the content you want generated in the masked area. If results show artifacts or mismatched elements, counteract them with targeted negative prompts.
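The binarization mentioned in the first note amounts to thresholding. Here is a minimal PyTorch sketch, assuming masks are float tensors in [0, 1]; the 0.5 cutoff is an assumption, not LanPaint's documented threshold.

```python
import torch

def binarize_mask(mask: torch.Tensor, threshold: float = 0.5) -> torch.Tensor:
    """Collapse a soft mask (feathered edges, partial opacity) into the hard
    0/1 values LanPaint expects. The 0.5 threshold is an assumption."""
    return (mask >= threshold).to(mask.dtype)

soft = torch.tensor([[0.0, 0.2, 0.7],
                     [1.0, 0.5, 0.1]])
print(binarize_mask(soft))  # tensor([[0., 0., 1.], [1., 1., 0.]])
```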
## Basic Sampler
![Samplers](https://oss.gittoolsai.com/images/scraed_LanPaint_readme_7dea117b23cd.jpg)

- LanPaint KSampler: The most basic, easiest-to-use sampler for inpainting.
- LanPaint KSampler (Advanced): Full control of all parameters.

### LanPaint KSampler
Simplified interface with recommended defaults:

- Steps: 20-50. More steps give more "thinking" and better results.
- LanPaint NumSteps: The number of thinking iterations before denoising. Recommended: 5 for most tasks (which makes sampling about 5× slower than without thinking); use 10 for more challenging tasks.
- LanPaint Prompt mode: Image First mode or Prompt First mode. Image First focuses on the image and inpaints from the image context (possibly ignoring the prompt), while Prompt First focuses more on the prompt. Use Prompt First for tasks like character consistency. (Technically, Prompt First mode changes the CFG scale to a negative value in the BIG score to emphasize the prompt, which costs some image quality.)

### LanPaint KSampler (Advanced)
Full parameter control:

**Key Parameters**

| Parameter | Range | Description |
|-----------|-------|-------------|
| `Steps` | 0-100 | Total diffusion sampling steps. Higher means better inpainting. Recommended: 20-50. |
| `LanPaint_NumSteps` | 0-20 | Reasoning iterations per denoising step ("thinking depth"). Easy tasks: 2-5; hard tasks: 5-10. |
| `LanPaint_Lambda` | 0.1-50 | Content alignment strength (higher = stricter). Recommended: 4.0-10.0. |
| `LanPaint_StepSize` | 0.1-1.0 | Step size of each thinking step. Recommended: 0.1-0.5. |
| `LanPaint_Beta` | 0.1-2.0 | Step-size ratio between the masked and unmasked regions. A small value can compensate for high lambda values. Recommended: 1.0. |
| `LanPaint_Friction` | 0.0-100.0 | Friction of the Langevin dynamics. Higher is slower but more stable; lower is faster but less stable. Recommended: 10.0-20.0. |
| `LanPaint_EarlyStop` | 0-10 | Stops LanPaint iterations this many steps before the final sampling step. Helps remove artifacts in some cases. Recommended: 1-5. |
| `LanPaint_PromptMode` | Image First / Prompt First | Image First focuses on the image context and may ignore the prompt; Prompt First focuses more on the prompt. |

For detailed descriptions of each parameter, hover over the corresponding input field to view a tooltip with additional information.
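For orientation, the recommended ranges above can be collected into starting presets. The dictionaries below are illustrative values derived from the table, not defaults shipped by LanPaint.

```python
# Illustrative starting points derived from the table above;
# not defaults shipped by LanPaint.
EASY_TASK = {
    "steps": 20,
    "LanPaint_NumSteps": 3,     # easy tasks: 2-5
    "LanPaint_Lambda": 4.0,
    "LanPaint_StepSize": 0.3,
    "LanPaint_Beta": 1.0,
    "LanPaint_Friction": 15.0,
    "LanPaint_EarlyStop": 1,
    "LanPaint_PromptMode": "Image First",
}

HARD_TASK = {
    **EASY_TASK,
    "steps": 50,
    "LanPaint_NumSteps": 10,    # deeper thinking for harder content
    "LanPaint_Lambda": 8.0,     # stricter alignment with the known region
    "LanPaint_StepSize": 0.2,   # smaller steps for stability at high lambda
}
```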
### LanPaint Mask Blend
This node blends the original image with the inpainted image based on the mask. It is useful if you want the unmasked region to match the original image pixel-perfectly.

## LanPaint KSampler (Advanced) Tuning Guide
For challenging inpainting tasks:

1️⃣ **Boost Quality**
Increase the **total number of sampling steps** (very important!), **LanPaint_NumSteps** (thinking iterations), or **LanPaint_Lambda** if the inpainted result does not meet your expectations.

2️⃣ **Boost Speed**
Decrease **LanPaint_NumSteps** to accelerate generation! If you want better results but still need fewer steps, consider:
- **Increasing LanPaint_StepSize** to speed up the thinking process.
- **Decreasing LanPaint_Friction** to make the Langevin dynamics converge faster.

3️⃣ **Fix Instability**
If the results have weird textures, try:
- Increasing **LanPaint_Friction** to make the Langevin dynamics more stable.
- Reducing **LanPaint_StepSize** to use a smaller step size.
- Reducing **LanPaint_Beta** if you are using a high lambda value.

A toy illustration of the friction/step-size trade-off follows these notes.

⚠️ **Notes**:
- For effective tuning, **fix the seed** and adjust parameters incrementally while observing the results. This helps isolate the impact of each setting. It is better to tune on a batch of images to avoid overfitting to a single image.
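The friction and step-size advice mirrors generic underdamped Langevin dynamics, where friction damps the velocity: high friction is stable but slow to converge, and large steps are fast but can diverge. The toy one-dimensional example below (a quadratic energy with a generic discretization) illustrates that trade-off; it is not LanPaint's solver.

```python
import torch

def langevin_toy(steps=2000, step_size=0.1, friction=10.0):
    """Toy underdamped Langevin dynamics on the energy E(x) = x^2 / 2.
    High friction damps the velocity (stable but slow); a large step size
    can diverge. Generic illustration, not LanPaint's solver."""
    x = torch.tensor(5.0)  # start far from the energy minimum at 0
    v = torch.tensor(0.0)  # velocity
    dt = step_size
    for _ in range(steps):
        grad = x  # dE/dx for E(x) = x^2 / 2
        noise = torch.randn(()) * (2.0 * friction * dt) ** 0.5
        v = v - friction * v * dt - grad * dt + noise
        x = x + v * dt
    return x.item()

print(langevin_toy(step_size=0.1, friction=10.0))            # fluctuates near 0: stable but slow
print(langevin_toy(steps=200, step_size=2.5, friction=0.5))  # diverges: prints nan or a huge value
```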
## Community Showcase [](#community-showcase-)

Discover how the community is using LanPaint! Here are some user-created tutorials:

- [Ai绘画进阶148-三大王炸！庆祝高允贞出道6周年！T8即将直播？当AI绘画学会深度思考？！万能修复神器LanPaint，万物皆可修！-T8 Comfyui教程](https://www.youtube.com/watch?v=Z4DSTv3UPJo)
- [Ai绘画进阶151-真相了！T8竟是个AI？！LanPaint进阶（二），人物一致性，多视角实验性测试，新参数讲解，工作流分享-T8 Comfyui教程](https://www.youtube.com/watch?v=landiRhvF3k)
- [重绘和三视图角色一致性解决新方案！LanPaint节点尝试](https://www.youtube.com/watch?v=X0WbXdm6FA0)
- [ComfyUI: HiDream with Perturbation Upscale, LanPaint Inpainting (Workflow Tutorial)](https://www.youtube.com/watch?v=2-mGe4QVIIw&t=2785s)
- [ComfyUI必备LanPaint插件超详细使用教程](https://plugin.aix.ink/archives/lanpaint)

Submit a PR to add your tutorial/video here, or open an [Issue](https://github.com/scraed/LanPaint/issues) with details!

## FAQ
[Working together with crop&stitch](https://github.com/scraed/LanPaint/issues/46)

## Updates
- 2026/03/02
    - `v1.5.0`: Fixed a hidden bug that hurt performance and caused image blur (especially on `z-image-base`), and improved overall LanPaint performance on other models too.
- 2026/01/30
    - Added Z-image-base documentation and Example_25 workflow images.
- 2025/08/08
    - Added Qwen Image support.
- 2025/06/21
    - Updated the algorithm with enhanced stability and outpainting performance.
    - Added an outpainting example.
    - Supports Sampler Custom (thanks to [MINENEMA](https://github.com/MINENEMA)).
- 2025/06/04
    - Added more sampler support.
    - Added early stopping to the advanced sampler.
- 2025/05/28
    - Major update to the Langevin solver. It is now much faster and more stable.
    - Greatly simplified the parameters of the advanced sampler.
    - Fixed a performance issue on Flux and SD 3.5.
- 2025/04/16
    - Added primary HiDream support.
- 2025/03/22
    - Added primary Flux support.
    - Added Tease Mode.
- 2025/03/10
    - LanPaint has received a major update! All examples now use the LanPaint K Sampler, offering a simplified interface with enhanced performance and stability.
- 2025/03/06
    - Bug fix for the "str not callable" and unpack errors. Big thanks to [jamesWalker55](https://github.com/jamesWalker55) and [EricBCoding](https://github.com/EricBCoding).

## ToDo
- Try implementing a detailer.
- ~~Provide inference code without the GUI.~~ See our local Python benchmark code, [LanPaintBench](https://github.com/scraed/LanPaintBench).

## Citation

```
@article{
zheng2025lanpaint,
title={LanPaint: Training-Free Diffusion Inpainting with Asymptotically Exact and Fast Conditional Sampling},
author={Candi Zheng and Yuan Lan and Yang Wang},
journal={Transactions on Machine Learning Research},
issn={2835-8856},
year={2025},
url={https://openreview.net/forum?id=JPC8JyOUSW},
note={}
}
```
Z-image](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_b8f88b2623b2.png) | ![遮罩后的 Z-image](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_1a083a9802c6.png) | ![修复后的 Z-image](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_6e7bce9bca2b.png) |\n\n**🎬 新功能：LanPaint 现在也支持 Z-Image-Base！**\n\n| 原图 | 遮罩 | 修复后 |\n|:--------:|:------:|:---------:|\n| ![原始 Z-image-base](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_2d534da66100.png) | ![遮罩后的 Z-image-base](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_8ad1135a7885.png) | ![修复后的 Z-image-base](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_8eff94514c24.png) |\n\n\n**🎬 新功能：LanPaint 现在支持基于 Wan 2.2 的视频修复和外扩！**\n\n\u003Cdiv align=\"center\">\n\n| 原始视频 | 遮罩（编辑 T 恤文字） | 修复结果 |\n|:--------------:|:----:|:----------------:|\n| ![原始](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_6e24810e9357.gif) | ![遮罩](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_5655fb9d9e09.png) | ![结果](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_6cbf9584785b.gif) |\n\n*视频修复示例：81 帧，具有时间一致性*\n\n\u003C\u002Fdiv>\n\n请查看我们最新的 [Wan 2.2 视频示例](#video-examples-beta)、[Wan 2.2 图像示例](#example-wan22-inpaintlanpaint-k-sampler-5-steps-of-thinking)，以及对 [Qwen Image Edit 2509](#example-qwen-edit-2509-inpaint) 的支持。\n  \n\n## 目录\n- [功能](#features)\n- [快速入门](#quickstart)\n- [如何使用示例](#how-to-use-examples)\n- [视频示例（测试版）](#video-examples-beta)\n  - [Wan 2.2 视频修复](#wan-22-video-inpainting)\n  - [Wan 2.2 5B 视频修复](#wan-22-5b-video-inpainting)\n  - [Wan 2.2 视频外扩](#wan-22-video-outpainting)\n  - [资源消耗](#resource-consumption)\n- [图像示例](#image-examples)\n  - [Flux.2.Dev](#example-flux2dev-inpaintlanpaint-k-sampler-5-steps-of-thinking)\n  - [Flux 2 klein](#example-flux-2-klein-inpaintlanpaint-k-sampler-2-steps-of-thinking)\n  - [Z-image](#example-z-image-inpaintlanpaint-k-sampler-5-steps-of-thinking)\n  - [Z-image-base](#example-z-image-base-inpaintlanpaint-k-sampler-3-steps-of-thinking)\n  - [Hunyuan T2I](#example-hunyuan-t2i-inpaintlanpaint-k-sampler-5-steps-of-thinking)\n  - [Wan 2.2 T2I](#example-wan22-inpaintlanpaint-k-sampler-5-steps-of-thinking)\n  - [Wan 2.2 T2I 带参考](#example-wan22-partial-inpaintlanpaint-k-sampler-5-steps-of-thinking)\n  - [Qwen Image Edit 2511 2509](#example-qwen-edit-2509-inpaint)\n  - [Qwen Image Edit 2508](#example-qwen-edit-2508-inpaint)\n  - [Qwen Image](#example-qwen-image-inpaintlanpaint-k-sampler-5-steps-of-thinking)\n  - [HiDream](#example-hidream-inpaint-lanpaint-k-sampler-5-steps-of-thinking)\n  - [SD 3.5](#example-sd-35-inpaintlanpaint-k-sampler-5-steps-of-thinking)\n  - [Flux](#example-flux-inpaintlanpaint-k-sampler-5-steps-of-thinking)\n  - [SDXL](#example-sdxl-0-character-consistency-side-view-generation-lanpaint-k-sampler-5-steps-of-thinking)\n- [使用方法](#usage)\n  - [基础采样器](#basic-sampler)\n  - [高级采样器](#lanpaint-ksampler-advanced)\n  - [调优指南](#lanpaint-ksampler-advanced-tuning-guide)\n- [社区展示](#community-showcase-)\n- [常见问题](#faq)\n- [更新](#updates)\n- [待办事项](#todo)\n- [引用](#citation)\n\n## 功能特性\n\n- **通用兼容性** – 可立即与几乎任何模型（**Z-image、Z-image-base、Hunyuan、Wan 2.2、Qwen Image\u002FEdit、HiDream、SD 3.5、Flux系列、SDXL、SD 1.5 或自定义 LoRA**）及 ControlNet 配合使用。  \n![修复结果 13](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_bca68cb2be22.jpg) \n- **无需训练** – 直接与您现有的模型配合使用，开箱即用。  \n- **易于使用** – 
工作流程与标准 ComfyUI KSampler 完全相同。  \n- **灵活的掩码处理** – 支持任意形状、大小和位置的掩码，用于图像修复或扩展。  \n- **无需变通方法** – 生成 100% 全新内容（无混合或平滑处理），不依赖部分去噪技术。  \n- **超越图像修复** – 您甚至可以将其用作生成一致角色的简单方式。\n\n**警告**：由于与 [LoRA 训练类似的问题](https:\u002F\u002Fmedium.com\u002F@zhiwangshi28\u002Fwhy-flux-lora-so-hard-to-train-and-how-to-overcome-it-a0c70bc59eaf)，LanPaint 在蒸馏模型上性能有所下降，例如 Flux.dev。请使用较低的 flux 引导值（1.0–2.0）来缓解此问题[参见此处](https:\u002F\u002Fgithub.com\u002Fscraed\u002FLanPaint\u002Fissues\u002F30)。\n\n## 快速入门\n\n1. **安装 ComfyUI**：按照官方[ComfyUI 安装指南](https:\u002F\u002Fdocs.comfy.org\u002Fget_started)在您的系统上设置 ComfyUI。或确保您的 ComfyUI 版本 > 0.3.11。\n2. **安装 ComfyUI 管理器**：添加[ComfyUI 管理器](https:\u002F\u002Fgithub.com\u002Fltdrdata\u002FComfyUI-Manager)，以便轻松管理扩展插件。  \n3. **安装 LanPaint 节点**：  \n   - **通过 ComfyUI 管理器**：在管理器中搜索“[LanPaint](https:\u002F\u002Fregistry.comfy.org\u002Fpublishers\u002Fscraed\u002Fnodes\u002FLanPaint)”并直接安装。  \n   - **手动安装**：在 ComfyUI 管理器中点击“通过 Git URL 安装”，并输入以下 GitHub 仓库链接：  \n     ```\n     https:\u002F\u002Fgithub.com\u002Fscraed\u002FLanPaint.git\n     ```  \n     或者将此仓库克隆到 `ComfyUI\u002Fcustom_nodes` 文件夹中。  \n4. **重启 ComfyUI**：重启 ComfyUI 以加载 LanPaint 节点。  \n\n安装完成后，您将在 ComfyUI 的“采样”类别下找到 LanPaint 节点。像使用默认的 KSampler 一样使用它们，即可实现高质量的图像修复！\n\n\n## 使用示例说明：\n\n1. 导航至 **example** 文件夹（例如 example_1），下载所有图片。  \n2. 将 **InPainted_Drag_Me_to_ComfyUI.png** 拖入 ComfyUI，以加载工作流。  \n3. 下载所需的模型（例如点击“本示例所用模型”）。  \n4. 在 ComfyUI 中加载该模型。  \n5. 将 **Masked_Load_Me_in_Loader.png** 上传至 **“用于图像修复的掩码图像”** 组中的 **“加载图像”** 节点（从左数第二个），或 **“准备图像”** 节点。  \n7. 提交任务队列，您将获得 LanPaint 的修复结果。部分示例还提供了以下方法的修复结果作为对比：\n   - **[VAE 编码用于图像修复](https:\u002F\u002Fcomfyanonymous.github.io\u002FComfyUI_examples\u002Finpaint\u002F)**\n   - **[设置潜在噪声掩码](https:\u002F\u002Fcomfyui-wiki.com\u002Fen\u002Ftutorial\u002Fbasic\u002Fhow-to-inpaint-an-image-in-comfyui)**\n\n## 视频示例（测试版）\n\nLanPaint 现已支持使用 Wan 2.2 进行视频修复，能够在保持时间一致性的同时，无缝修复视频帧中的遮挡区域。\n\n**注意**：LanPaint 支持较长序列的视频修复（例如 81 帧），但处理时间会显著增加（详情请参阅[资源消耗](#resource-consumption)部分），且性能可能变得不稳定。为获得最佳效果和稳定性，我们建议将视频修复限制在**40 帧或更少**。\n\n### Wan 2.2 视频修复 \n\n*示例：Wan2.2 t2v 14B，480p 视频（1:6），40 帧，LanPaint K Sampler，2 步思考*\n\n| 原始视频 | 掩码（添加一顶白帽） | 修复结果 |\n|:--------------:|:----:|:----------------:|\n| ![原始视频](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_9742a2569a18.gif) | ![掩码](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_8395f2e6ccb1.png) | ![修复结果](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_42c493031c5e.gif) |\n\n[查看工作流与掩码](https:\u002F\u002Fgithub.com\u002Fscraed\u002FLanPaint\u002Ftree\u002Fmaster\u002Fexamples\u002FExample_17)\n\n您需要遵循 ComfyUI 版本的[Wan2.2 T2V 工作流](https:\u002F\u002Fdocs.comfy.org\u002Ftutorials\u002Fvideo\u002Fwan\u002Fwan2_2)，以下载并安装 T2V 模型。\n\n### Wan 2.2 5B 视频修复 \n\n与 Wan 2.2 14B 类似，但工作流略有不同。[查看工作流与掩码](https:\u002F\u002Fgithub.com\u002Fscraed\u002FLanPaint\u002Ftree\u002Fmaster\u002Fexamples\u002FExample_17)\n\n### Wan 2.2 视频扩展\n\n借助基于 Wan 2.2 的 LanPaint 视频扩展功能，您可以将视频画面延伸至原始边界之外。此功能允许您扩展视频画布，同时保持流畅的运动和连贯的上下文。\n\n*示例：Wan2.2 t2v 14B，480p 视频（1:1 扩展至 11:6），40 帧，LanPaint K Sampler，2 步思考*\n\n| 原始视频 | 掩码（扩展至 880x480） | 扩展结果 |\n|:--------------:|:----:|:-----------------:|\n| ![原始视频](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_8a20e4643417.gif) | ![掩码](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_5408315fb21f.png) | ![扩展结果](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_2af219946b2d.gif) 
|\n\n[查看工作流与掩码](https:\u002F\u002Fgithub.com\u002Fscraed\u002FLanPaint\u002Ftree\u002Fmaster\u002Fexamples\u002FExample_19)\n\n您需要遵循 ComfyUI 版本的[Wan2.2 T2V 工作流](https:\u002F\u002Fdocs.comfy.org\u002Ftutorials\u002Fvideo\u002Fwan\u002Fwan2_2)，以下载并安装 T2V 模型。\n\n### 资源消耗\n\n\n\u003Ctable>\n\u003Cthead>\n\u003Ctr>\n\u003Cth align=\"left\">处理模式\u003C\u002Fth>\n\u003Cth align=\"left\">分辨率\u003C\u002Fth>\n\u003Cth align=\"left\">处理帧数\u003C\u002Fth>\n\u003Cth align=\"left\">所需显存\u003C\u002Fth>\n\u003Cth align=\"left\">总运行时间（20步）\u003C\u002Fth>\n\u003C\u002Ftr>\n\u003C\u002Fthead>\n\u003Ctbody>\n\u003Ctr style=\"background-color: #e8f4f8;\">\n\u003Ctd>\u003Cstrong>修复填充\u003C\u002Fstrong>\u003C\u002Ftd>\n\u003Ctd>880×480（11:6）\u003C\u002Ftd>\n\u003Ctd>40帧\u003C\u002Ftd>\n\u003Ctd>39.8 GB\u003C\u002Ftd>\n\u003Ctd>\u003Cstrong>05:37 分钟\u003C\u002Fstrong>\u003C\u002Ftd>\n\u003C\u002Ftr>\n\u003Ctr style=\"background-color: #e8f4f8;\">\n\u003Ctd>\u003Cstrong>修复填充\u003C\u002Fstrong>\u003C\u002Ftd>\n\u003Ctd>480×480（1:1）\u003C\u002Ftd>\n\u003Ctd>40帧\u003C\u002Ftd>\n\u003Ctd>38.0 GB\u003C\u002Ftd>\n\u003Ctd>\u003Cstrong>05:35 分钟\u003C\u002Fstrong>\u003C\u002Ftd>\n\u003C\u002Ftr>\n\u003Ctr style=\"background-color: #e8f4f8;\">\n\u003Ctd>\u003Cstrong>扩展填充\u003C\u002Fstrong>\u003C\u002Ftd>\n\u003Ctd>880×480（11:6）\u003C\u002Ftd>\n\u003Ctd>40帧\u003C\u002Ftd>\n\u003Ctd>40.2 GB\u003C\u002Ftd>\n\u003Ctd>\u003Cstrong>05:36 分钟\u003C\u002Fstrong>\u003C\u002Ftd>\n\u003C\u002Ftr>\n\u003Ctr style=\"background-color: #fff4e6;\">\n\u003Ctd>\u003Cstrong>修复填充\u003C\u002Fstrong>\u003C\u002Ftd>\n\u003Ctd>880×480（11:6）\u003C\u002Ftd>\n\u003Ctd>81帧\u003C\u002Ftd>\n\u003Ctd>43.3 GB\u003C\u002Ftd>\n\u003Ctd>\u003Cstrong>16:23 分钟\u003C\u002Fstrong>\u003C\u002Ftd>\n\u003C\u002Ftr>\n\u003Ctr style=\"background-color: #fff4e6;\">\n\u003Ctd>\u003Cstrong>修复填充\u003C\u002Fstrong>\u003C\u002Ftd>\n\u003Ctd>480×480（1:1）\u003C\u002Ftd>\n\u003Ctd>81帧\u003C\u002Ftd>\n\u003Ctd>39.8 GB\u003C\u002Ftd>\n\u003Ctd>\u003Cstrong>14:25 分钟\u003C\u002Fstrong>\u003C\u002Ftd>\n\u003C\u002Ftr>\n\u003Ctr style=\"background-color: #fff4e6;\">\n\u003Ctd>\u003Cstrong>扩展填充\u003C\u002Fstrong>\u003C\u002Ftd>\n\u003Ctd>880×480（11:6）\u003C\u002Ftd>\n\u003Ctd>81帧\u003C\u002Ftd>\n\u003Ctd>42.6 GB\u003C\u002Ftd>\n\u003Ctd>\u003Cstrong>13:46 分钟\u003C\u002Fstrong>\u003C\u002Ftd>\n\u003C\u002Ftr>\n\u003C\u002Ftbody>\n\u003C\u002Ftable>\n\n\u003Csub>**测试平台**：所有测试均在NVIDIA RTX Pro 6000上进行。\u003Cbr>\n**所用模型**：`wan2.2_t2v_low_noise_14B_fp8_scaled.safetensors`和`wan2.2_t2v_high_noise_14B_fp8_scaled.safetensors`。\u003Cbr>\n**处理步骤**：20次采样步骤 × 2（LanPaint的思考步骤）。\u003C\u002Fsub>\n\n**注**：显存需求由模型而非LanPaint本身决定。为进一步降低显存占用，建议减少生成帧数，并将CLIP加载至CPU。\n\n## 图像示例\n\n### Hunyuan T2I 示例：修复填充（LanPaint K采样器，5步思考）\n我们很高兴地宣布，LanPaint现已支持使用Hunyuan文本到图像生成模型进行修复填充。\n\n[查看工作流与掩码](https:\u002F\u002Fgithub.com\u002Fscraed\u002FLanPaint\u002Ftree\u002Fmaster\u002Fexamples\u002FExample_20)\n\n\n您需要按照ComfyUI版本的[Hunyuan工作流](https:\u002F\u002Fdocs.comfy.org\u002Ftutorials\u002Fvideo\u002Fhunyuan-video#hunyuan-text-to-video-workflow)下载并安装该模型。\n\n### Wan2.2 示例：修复填充（LanPaint K采样器，5步思考）\n我们很高兴地宣布，LanPaint现已支持使用Wan2.2 T2V模型进行Wan2.2文本到图像生成。\n\n![修复填充结果45](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_e2c6626581c0.jpg)  \n[查看工作流与掩码](https:\u002F\u002Fgithub.com\u002Fscraed\u002FLanPaint\u002Ftree\u002Fmaster\u002Fexamples\u002FExample_15)\n\n\n您需要按照ComfyUI版本的[Wan2.2 T2V工作流](https:\u002F\u002Fdocs.comfy.org\u002Ftutorials\u002Fvideo\u002Fwan\u002Fwan2_2)下载并安装T2V模型。\n\n### Z-image 
示例：修复填充（LanPaint K采样器，5步思考）\nLanPaint还支持使用Z-image文本到图像模型进行修复填充。\n\n\u003Cdetails open>\n\u003Csummary>查看原图\u002F掩码图\u002F修复填充图对比\u003C\u002Fsummary>\n\n| 原图 | 掩码图 | 修复填充图 |\n|:--------:|:------:|:---------:|\n| ![原Z-image](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_b8f88b2623b2.png) | ![掩码Z-image](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_1a083a9802c6.png) | ![修复填充Z-image](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_6e7bce9bca2b.png) |\n\n\u003C\u002Fdetails>\n\n[查看工作流与掩码](https:\u002F\u002Fgithub.com\u002Fscraed\u002FLanPaint\u002Ftree\u002Fmaster\u002Fexamples\u002FExample_21)\n\n\u003Cdetails open>\n\u003Csummary>查看Z-image扩展填充（原图\u002F掩码图\u002F扩展填充图）\u003C\u002Fsummary>\n\n| 原图 | 掩码图 | 扩展填充图 |\n|:--------:|:------:|:----------:|\n| ![原Z-image扩展填充](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_b8f88b2623b2.png) | ![掩码Z-image扩展填充](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_63665bfc6b77.png) | ![扩展填充Z-image](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_23db3cd49631.png) |\n\n\u003C\u002Fdetails>\n\n[查看扩展填充工作流与掩码](https:\u002F\u002Fgithub.com\u002Fscraed\u002FLanPaint\u002Ftree\u002Fmaster\u002Fexamples\u002FExample_22)\n\n您可以从[Z-image](https:\u002F\u002Fdocs.comfy.org\u002Fzh-CN\u002Ftutorials\u002Fimage\u002Fz-image\u002Fz-image-turbo)下载适用于ComfyUI的Z-image模型。\n\n### Z-image-base 示例：修复填充（LanPaint K采样器，3步思考）\nLanPaint也支持使用Z-image-base模型进行修复填充。\n\n**警告（稳定性）**：Z-image-base与LanPaint结合时容易发散。请从**较小的`LanPaint_StepSize`**和**较少的思考次数**（较低的`LanPaint_NumSteps`）开始，仅在稳定的情况下逐步增加。\n\n\u003Cdetails open>\n\u003Csummary>查看原图\u002F掩码图\u002F修复填充图对比\u003C\u002Fsummary>\n\n| 原图 | 掩码图 | 修复填充图 |\n|:--------:|:------:|:---------:|\n| ![原Z-image-base](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_2d534da66100.png) | ![掩码Z-image-base](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_8ad1135a7885.png) | ![修复填充Z-image-base](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_8eff94514c24.png) |\n\n\u003C\u002Fdetails>\n\n[查看工作流与掩码](https:\u002F\u002Fgithub.com\u002Fscraed\u002FLanPaint\u002Ftree\u002Fmaster\u002Fexamples\u002FExample_25)\n\n工作流模板（JSON）：[Z_image_base_Inpaint.json](https:\u002F\u002Fgithub.com\u002Fscraed\u002FLanPaint\u002Fblob\u002Fmaster\u002Fexample_workflows\u002FZ_image_base_Inpaint.json)\n\n### Wan2.2 示例：部分修复填充（LanPaint K采样器，5步思考）\n有时我们并不希望完全替换原有内容，而是让修复后的图像保留一些原始图像的特征。实现这一目标的一种方法是使用编辑模型（如 Qwen Image Edit）进行修复填充；另一种方法则是进行部分修复填充：让扩散过程从中间步骤开始，而不是从第0步开始（概念示意见下文代码）。\n\n![修复填充结果46](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_31287cea0914.jpg)  \n[查看工作流与掩码](https:\u002F\u002Fgithub.com\u002Fscraed\u002FLanPaint\u002Ftree\u002Fmaster\u002Fexamples\u002FExample_16)\n\n您需要按照ComfyUI版本的[Wan2.2 T2V工作流](https:\u002F\u002Fdocs.comfy.org\u002Ftutorials\u002Fvideo\u002Fwan\u002Fwan2_2)下载并安装T2V模型。
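\n\n在标准的 KSampler 式界面中，“从中间步骤开始”大致对应将去噪强度（denoise）设为小于 1.0。下面用一小段 Python 做概念示意（假设一种常见的线性截断语义；各前端对 denoise 的具体实现细节可能略有差异，变量名仅为说明用途）：\n\n```python\n# 概念示意：部分修复填充 = 跳过噪声调度前段的高噪声步骤\ntotal_steps = 20   # 总采样步数\ndenoise = 0.7      # 1.0 表示完全重绘；数值越小，保留的原图特征越多\n\nskipped = round(total_steps * (1 - denoise))  # 被跳过的前期步数\nprint(f'从第 {skipped} 步开始去噪，实际执行 {total_steps - skipped} 步')\n# 输出：从第 6 步开始去噪，实际执行 14 步\n```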
\n\n### Qwen Edit 2509 示例：修复填充\n请查看我们最新更新的[Masked Qwen Edit 工作流](https:\u002F\u002Fgithub.com\u002Fscraed\u002FLanPaint\u002Ftree\u002Fmaster\u002Fexamples\u002FExample_14)，适用于Qwen Image Edit 2509。您可以在[Qwen Image Edit 2509 Comfy](https:\u002F\u002Fhuggingface.co\u002FComfy-Org\u002FQwen-Image-Edit_ComfyUI\u002Ftree\u002Fmain\u002Fsplit_files\u002Fdiffusion_models)下载该模型。此工作流同样支持Qwen Image Edit 2511。\n\n![Qwen 结果3](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_23a219b23183.jpg)\n\n### 示例 Qwen Edit 2508：InPaint\n![Qwen 结果 2](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_1bfddfbdc592.jpg) \n请查看 [Masked Qwen Edit 工作流](https:\u002F\u002Fgithub.com\u002Fscraed\u002FLanPaint\u002Ftree\u002Fmaster\u002Fexamples\u002FExample_14)。您需要按照 ComfyUI 版本的 [Qwen 图像编辑工作流](https:\u002F\u002Fdocs.comfy.org\u002Ftutorials\u002Fimage\u002Fqwen\u002Fqwen-image-edit) 下载并安装模型。\n\n### 示例 Qwen 图像：InPaint（LanPaint K 采样器，5 步思考）\n\n![修补结果 14](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_7bf252dae2f2.jpg)  \n[查看工作流与掩码](https:\u002F\u002Fgithub.com\u002Fscraed\u002FLanPaint\u002Ftree\u002Fmaster\u002Fexamples\u002FExample_11)\n\n您需要按照 ComfyUI 版本的 [Qwen 图像工作流](https:\u002F\u002Fdocs.comfy.org\u002Ftutorials\u002Fimage\u002Fqwen\u002Fqwen-image) 下载并安装模型。\n\n以下示例使用随机种子 0 生成一批 4 张图像，用于展示方差并进行公平比较。（注意：生成 4 张图像可能会超出您的 GPU 显存；请根据需要调整批次大小。）\n\n![Qwen 结果 1](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_1243eb4150a5.jpg) \n同时请查看 [Qwen Inpaint 工作流](https:\u002F\u002Fgithub.com\u002Fscraed\u002FLanPaint\u002Ftree\u002Fmaster\u002Fexamples\u002FExample_13) 和 [Qwen Outpaint 工作流](https:\u002F\u002Fgithub.com\u002Fscraed\u002FLanPaint\u002Ftree\u002Fmaster\u002Fexamples\u002FExample_12)。您需要按照 ComfyUI 版本的 [Qwen 图像工作流](https:\u002F\u002Fdocs.comfy.org\u002Ftutorials\u002Fimage\u002Fqwen\u002Fqwen-image) 下载并安装模型。\n\n### 示例 HiDream：InPaint（LanPaint K 采样器，5 步思考）\n![修补结果 8](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_4406455a65ca.jpg)  \n[查看工作流与掩码](https:\u002F\u002Fgithub.com\u002Fscraed\u002FLanPaint\u002Ftree\u002Fmaster\u002Fexamples\u002FExample_8)\n\n您需要按照 ComfyUI 版本的 [HiDream 工作流](https:\u002F\u002Fdocs.comfy.org\u002Ftutorials\u002Fimage\u002Fhidream\u002Fhidream-i1) 下载并安装模型。\n\n### 示例 HiDream：OutPaint（LanPaint K 采样器，5 步思考）\n![外扩结果](https:\u002F\u002Fgithub.com\u002Fscraed\u002FLanPaint\u002Fblob\u002Fmaster\u002Fexamples\u002FInpaintChara_13(1).jpg)  \n[查看工作流与掩码](https:\u002F\u002Fgithub.com\u002Fscraed\u002FLanPaint\u002Ftree\u002Fmaster\u002Fexamples\u002FExample_10)\n\n您需要按照 ComfyUI 版本的 [HiDream 工作流](https:\u002F\u002Fdocs.comfy.org\u002Ftutorials\u002Fimage\u002Fhidream\u002Fhidream-i1) 下载并安装模型。感谢 [Amazon90](https:\u002F\u002Fgithub.com\u002FAmazon90) 提供此示例。\n\n### 示例 SD 3.5：InPaint（LanPaint K 采样器，5 步思考）\n![修补结果 9](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_4411df386ed1.jpg)  \n[查看工作流与掩码](https:\u002F\u002Fgithub.com\u002Fscraed\u002FLanPaint\u002Ftree\u002Fmaster\u002Fexamples\u002FExample_9)\n\n您需要按照 ComfyUI 版本的 [SD 3.5 工作流](https:\u002F\u002Fcomfyui-wiki.com\u002Fen\u002Ftutorial\u002Fadvanced\u002Fstable-diffusion-3-5-comfyui-workflow) 下载并安装模型。\n\n### 示例 Flux.2.Dev：InPaint（LanPaint K 采样器，5 步思考）\n\n\u003Cdetails open>\n\u003Csummary>查看原图 \u002F 掩码图 \u002F 修补图对比\u003C\u002Fsummary>\n\n| 原图 | 掩码图 | 修补图 |\n|:--------:|:------:|:---------:|\n| ![原图 Flux.2.Dev](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_fffd5c3053e6.png) | ![掩码图 Flux.2.Dev](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_0aa1cba2099e.png) | ![修补图 Flux.2.Dev](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_3dc6861abfc8.png) |\n\n\u003C\u002Fdetails>\n\n[查看工作流与掩码](https:\u002F\u002Fgithub.com\u002Fscraed\u002FLanPaint\u002Ftree\u002Fmaster\u002Fexamples\u002FExample_23)\n\n[本示例所用模型](https:\u002F\u002Fhuggingface.co\u002FComfy-Org\u002Fflux2-dev)\n\n（注：Flux.2.Dev 上未启用“提示优先”模式，因为它不使用 CFG 引导。）
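\n\n顺带说明“CFG 引导”与“提示优先”模式的关系：经典的 classifier-free guidance 会把有条件与无条件两次噪声预测沿差值方向线性外推，而按上文说明，提示优先模式相当于调整这一外推系数。下面是通用 CFG 公式的 Python 示意（与 LanPaint 的 BIG 分数实现无关，仅帮助理解为什么不做 CFG 的蒸馏模型没有该模式）：\n\n```python\ndef cfg_combine(eps_uncond, eps_cond, scale):\n    # 经典 classifier-free guidance：沿（有条件 - 无条件）方向外推\n    # scale=1 等价于纯条件预测；蒸馏模型（如 Flux.dev）通常跳过这一步\n    return eps_uncond + scale * (eps_cond - eps_uncond)\n```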
\n\n### 示例 Flux 2 klein：InPaint（LanPaint K 采样器，2 步思考）\n\n\u003Cdetails open>\n\u003Csummary>查看原图 \u002F 掩码图 \u002F 修补图对比\u003C\u002Fsummary>\n\n| 原图 | 掩码图 | 修补图 |\n|:--------:|:------:|:---------:|\n| ![原图 Flux 2 klein](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_3e811bb58ee3.png) | ![掩码图 Flux 2 klein](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_e529767d595f.png) | ![修补图 Flux 2 klein](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_96af6019bc13.png) |\n\n\u003C\u002Fdetails>\n\n[查看工作流与掩码](https:\u002F\u002Fgithub.com\u002Fscraed\u002FLanPaint\u002Ftree\u002Fmaster\u002Fexamples\u002FExample_24)\n\n[本示例所用模型](https:\u002F\u002Fdocs.comfy.org\u002Fzh-CN\u002Ftutorials\u002Fflux\u002Fflux-2-klein)。如果您在 Comfy 0.11 和 0.12 上遇到质量问题，请查看[此问题](https:\u002F\u002Fgithub.com\u002Fscraed\u002FLanPaint\u002Fissues\u002F80)。\n\n### 示例 Flux：InPaint（LanPaint K 采样器，5 步思考）\n![修补结果 7](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_0e646e6e734f.jpg)  \n[查看工作流与掩码](https:\u002F\u002Fgithub.com\u002Fscraed\u002FLanPaint\u002Ftree\u002Fmaster\u002Fexamples\u002FExample_7)\n[本示例所用模型](https:\u002F\u002Fhuggingface.co\u002FComfy-Org\u002Fflux1-dev\u002Fblob\u002Fmain\u002Fflux1-dev-fp8.safetensors) \n（注：Flux 上未启用“提示优先”模式，因为它不使用 CFG 引导。）\n\n### 示例 SDXL 0：角色一致性（侧视图生成）（LanPaint K 采样器，5 步思考）\n![修补结果 6](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_5bababe6b780.jpg)  \n[查看工作流与掩码](https:\u002F\u002Fgithub.com\u002Fscraed\u002FLanPaint\u002Ftree\u002Fmaster\u002Fexamples\u002FExample_6)\n[本示例所用模型](https:\u002F\u002Fcivitai.com\u002Fmodels\u002F1188071?modelVersionId=1408658) \n\n（技巧 1：您可以通过 Photoshop 多次复制角色图像来强化角色特征。这里我额外制作了一张副本。）\n\n（技巧 2：使用诸如多视角、多角度、克隆、转身等提示词，并使用 LanPaint 的“提示优先”模式（不支持 Flux）。）\n\n（技巧 3：请记住，LanPaint 可以进行修补：遮盖不一致的区域并再次尝试！）\n\n### 示例 SDXL 1：篮子变篮球（LanPaint K 采样器，2 步思考）\n![修补结果 1](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_f7fd47cdd3c5.jpg)  \n[查看工作流与掩码](https:\u002F\u002Fgithub.com\u002Fscraed\u002FLanPaint\u002Ftree\u002Fmaster\u002Fexamples\u002FExample_1) \n[本示例所用模型](https:\u002F\u002Fcivitai.com\u002Fmodels\u002F1188071?modelVersionId=1408658) \n\n### 示例 SDXL 2：白衬衫变蓝衬衫（LanPaint K 采样器，5 步思考）\n![修补结果 2](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_75ebd2248af3.jpg)  \n[查看工作流与掩码](https:\u002F\u002Fgithub.com\u002Fscraed\u002FLanPaint\u002Ftree\u002Fmaster\u002Fexamples\u002FExample_2)\n[本示例所用模型](https:\u002F\u002Fcivitai.com\u002Fmodels\u002F1188071?modelVersionId=1408658)\n\n### 示例 SDXL 3：微笑变悲伤（LanPaint K 采样器，5 步思考）\n![修补结果 3](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_f3a31f61ead2.jpg)  \n[查看工作流与掩码](https:\u002F\u002Fgithub.com\u002Fscraed\u002FLanPaint\u002Ftree\u002Fmaster\u002Fexamples\u002FExample_3)\n[本示例所用模型](https:\u002F\u002Fcivitai.com\u002Fmodels\u002F133005\u002Fjuggernaut-xl)\n\n### 示例 SDXL 4：损伤修复（LanPaint K采样器，5步思考）\n![修复结果 4](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_5ec31a79f791.jpg)  \n[查看工作流与掩码](https:\u002F\u002Fgithub.com\u002Fscraed\u002FLanPaint\u002Ftree\u002Fmaster\u002Fexamples\u002FExample_4)\n[本示例使用的模型](https:\u002F\u002Fcivitai.com\u002Fmodels\u002F133005\u002Fjuggernaut-xl)\n\n### 示例 SDXL 5：大规模损伤修复（LanPaint K采样器，20步思考）\n![修复结果 5](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_bccbad30ff8e.jpg)  
\n[查看工作流与掩码](https:\u002F\u002Fgithub.com\u002Fscraed\u002FLanPaint\u002Ftree\u002Fmaster\u002Fexamples\u002FExample_5)\n[本示例使用的模型](https:\u002F\u002Fcivitai.com\u002Fmodels\u002F133005\u002Fjuggernaut-xl)\n\n更多用例，例如在[微调模型](https:\u002F\u002Fgithub.com\u002Fscraed\u002FLanPaint\u002Fissues\u002F12#issuecomment-2938662021)上进行修复，以及[人脸替换](https:\u002F\u002Fgithub.com\u002Fscraed\u002FLanPaint\u002Fissues\u002F12#issuecomment-2938723501)，感谢[Amazon90](https:\u002F\u002Fgithub.com\u002FAmazon90)。\n\n## 使用方法\n\n**工作流设置**  \n与默认的 ComfyUI KSampler 相同——只需将节点替换为 LanPaint KSampler 即可。修复工作流与 [SetLatentNoiseMask](https:\u002F\u002Fcomfyui-wiki.com\u002Fzh\u002Fcomfyui-nodes\u002Flatent\u002Finpaint\u002Fset-latent-noise-mask) 的修复工作流一致。\n\n**注意**\n- LanPaint 需要二值掩码（值为 0 或 1），不能带有透明度或平滑处理。为确保兼容性，请在您的掩码编辑器中将掩码的**透明度和硬度设置为最大值**。在修复过程中，任何带有平滑或渐变的掩码都会自动转换为二值掩码（阈值化的概念示意见下方代码）。\n- LanPaint 在很大程度上依赖文本提示来指导修复过程——请明确描述您希望在掩码区域内生成的内容。如果结果出现伪影或不匹配的元素，请使用有针对性的负面提示来纠正。
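\n\n下面用一段最小的 PyTorch 代码演示上述“二值化”的含义（仅为概念示意，并非 LanPaint 的内部实现；函数名为说明用途而取）：\n\n```python\nimport torch\n\ndef binarize_mask(mask: torch.Tensor, threshold: float = 0.5) -> torch.Tensor:\n    # 把带羽化或半透明的掩码压成 0\u002F1 二值掩码（示意实现）\n    # mask：取值范围 [0, 1] 的张量，形状任意（如 ComfyUI 中常见的 [B, H, W]）\n    return (mask > threshold).to(mask.dtype)\n\n# 用法示例：带渐变边缘的掩码经阈值化后只剩 0 和 1\nsoft = torch.tensor([[0.0, 0.2, 0.5, 0.8, 1.0]])\nprint(binarize_mask(soft))  # tensor([[0., 0., 0., 1., 1.]])\n```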
\n\n## 基础采样器\n![采样器](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_readme_7dea117b23cd.jpg)  \n\n- LanPaint KSampler：最基础且易于使用的修复采样器。\n- LanPaint KSampler（高级）：全面控制所有参数。\n\n### LanPaint KSampler\n简化界面，配有推荐默认值：\n\n- 步数：20–50。步数越多，“思考”越充分，效果越好。\n- LanPaint NumSteps：去噪前的思考轮次。大多数任务建议设置为 5（即比无思考采样慢 5 倍）；对于更具挑战性的任务，可设置为 10。\n- LanPaint Prompt 模式：分为图像优先模式与提示优先模式。图像优先模式更注重图像本身，会根据图像上下文进行修复（可能忽略提示）；提示优先模式则更强调提示内容，建议在需要保持角色一致性等任务时使用。（从技术层面讲，提示优先模式会将 BIG 分数中的 CFG 缩放系数调整为负值以强化提示，但这可能会降低图像质量。）\n\n### LanPaint KSampler（高级）\n完全参数控制：\n**关键参数**\n\n| 参数 | 范围 | 描述 |\n|------|------|------|\n| `Steps` | 0–100 | 扩散采样的总步数。数值越高，修复效果越好。建议设置为 20–50。 |\n| `LanPaint_NumSteps` | 0–20 | 每个去噪步骤中的推理迭代次数（“思考深度”）。简单任务：2–5；困难任务：5–10。 |\n| `LanPaint_Lambda` | 0.1–50 | 内容对齐强度（数值越高，约束越严格）。建议设置为 4.0–10.0。 |\n| `LanPaint_StepSize` | 0.1–1.0 | 每次思考步骤的大小。建议设置为 0.1–0.5。 |\n| `LanPaint_Beta` | 0.1–2.0 | 掩码区域与非掩码区域之间的步长比例。较小的值可以抵消较高的 Lambda 值。建议设置为 1.0。 |\n| `LanPaint_Friction` | 0.0–100.0 | 朗之万动力学的摩擦力。数值越高，速度越慢但越稳定；数值越低，速度越快但越不稳定。建议设置为 10.0–20.0。 |\n| `LanPaint_EarlyStop` | 0–10 | 在最终采样步骤之前停止 LanPaint 迭代，有助于在某些情况下去除伪影。建议设置为 1–5。 |\n| `LanPaint_PromptMode` | 图像优先 \u002F 提示优先 | 图像优先模式侧重于图像上下文，可能忽略提示；提示优先模式则更注重提示内容。 |\n\n如需了解每个参数的详细说明，只需将鼠标悬停在相应输入字段上，即可查看包含附加信息的工具提示。\n\n### LanPaint Mask Blend\n此节点根据掩码将原始图像与修复后的图像混合。如果您希望未掩码区域的像素与原始图像完全一致，此节点非常有用。\n\n## LanPaint KSampler（高级）调优指南\n针对具有挑战性的修复任务：\n\n1️⃣ **提升质量**：\n如果修复结果未达到预期，请增加**总采样步数**（非常重要！）、**LanPaint_NumSteps**（思考迭代次数）或**LanPaint_Lambda**。\n\n2️⃣ **提升速度**：\n减少**LanPaint_NumSteps**可加快生成速度！若在减少思考步数的同时仍希望保持效果，可考虑：\n    - **增大 LanPaint_StepSize**，以加快思考过程。\n    - **降低 LanPaint_Friction**，以使朗之万动力学更快收敛。\n\n3️⃣ **解决不稳定问题**：\n如果发现结果纹理异常，可尝试：\n- 提高**LanPaint_Friction**以增强朗之万动力学的稳定性（参见上表：摩擦力越高越稳定）。\n- 减小**LanPaint_StepSize**以使用更小的步长。\n- 如果使用了较高的 Lambda 值，可适当降低**LanPaint_Beta**。\n\n⚠️ **注意事项**：\n- 为有效调优，请**固定种子**，并逐项调整参数，同时观察结果，这有助于隔离每个设置的影响。最好使用一批图像进行测试，以免过度拟合单张图像。\n\n## 社区展示\n\n看看社区是如何使用 LanPaint 的吧！以下是一些用户创建的教程：\n\n- [AI绘画进阶148-三大王炸！庆祝高允贞出道6周年！T8即将直播？当AI绘画学会深度思考？！万能修复神器LanPaint，万物皆可修！-T8 Comfyui教程](https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=Z4DSTv3UPJo)\n- [AI绘画进阶151-真相了！T8竟是个AI？！LanPaint进阶（二），人物一致性，多视角实验性测试，新参数讲解，工作流分享-T8 Comfyui教程](https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=landiRhvF3k)\n- [重绘和三视图角色一致性解决新方案！LanPaint节点尝试](https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=X0WbXdm6FA0)\n- [ComfyUI: HiDream with Perturbation Upscale, LanPaint Inpainting (Workflow Tutorial)](https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=2-mGe4QVIIw&t=2785s)\n- [ComfyUI必备LanPaint插件超详细使用教程](https:\u002F\u002Fplugin.aix.ink\u002Farchives\u002Flanpaint)\n\n欢迎提交 PR 将您的教程\u002F视频添加到这里，或通过[Issue](https:\u002F\u002Fgithub.com\u002Fscraed\u002FLanPaint\u002Fissues)提供详细信息！\n\n## 常见问题解答\n- [与裁剪拼接（crop & stitch）协同工作](https:\u002F\u002Fgithub.com\u002Fscraed\u002FLanPaint\u002Fissues\u002F46)\n\n## 更新\n- 2026年3月2日\n    - `v1.5.0`：修复了一个隐藏的性能瓶颈问题，该问题会导致图像模糊（尤其是在 `z-image-base` 上），同时也提升了其他模型上 LanPaint 的整体性能。\n- 2026年1月30日\n    - 添加 Z-image-base 的文档及 Example_25 工作流示例。\n- 2025年8月8日\n    - 增加对 Qwen 图像模型的支持。\n- 2025年6月21日\n    - 更新算法，提升稳定性和外扩绘图性能。\n    - 添加外扩绘图示例。\n    - 支持自定义采样器（感谢 [MINENEMA](https:\u002F\u002Fgithub.com\u002FMINENEMA)）。\n- 2025年6月4日\n    - 增加更多采样器支持。\n    - 在高级采样器中加入提前停止功能。\n- 2025年5月28日\n    - 对 Langevin 求解器进行重大更新，现在速度更快、稳定性更高。\n    - 大幅简化了高级采样器的参数设置。\n    - 修复了 Flux 和 SD 3.5 上的性能问题。\n- 2025年4月16日\n    - 增加对 HiDream 的初步支持。\n- 2025年3月22日\n    - 增加对 Flux 的初步支持。\n    - 新增“戏弄模式”。\n- 2025年3月10日\n    - LanPaint 进行了一次重大更新！所有示例现均采用 LanPaint K 采样器，界面更加简洁，性能和稳定性也得到了提升。\n- 2025年3月6日\n    - 修复了字符串不可调用错误和解包错误。特别感谢 [jamesWalker55](https:\u002F\u002Fgithub.com\u002FjamesWalker55) 和 [EricBCoding](https:\u002F\u002Fgithub.com\u002FEricBCoding)。\n\n## 待办事项\n- 尝试实现细节增强模块。\n- ~~提供无 GUI 的推理代码。~~ 请参阅我们的本地 Python 基准测试代码 [LanPaintBench](https:\u002F\u002Fgithub.com\u002Fscraed\u002FLanPaintBench)。\n\n\n## 引用\n\n```\n@article{zheng2025lanpaint,\ntitle={LanPaint: Training-Free Diffusion Model Inpainting with Asymptotically Exact and Fast Conditional Sampling},\nauthor={Candi Zheng and Yuan Lan and Yang Wang},\njournal={Transactions on Machine Learning Research},\nissn={2835-8856},\nyear={2025},\nurl={https:\u002F\u002Fopenreview.net\u002Fforum?id=JPC8JyOUSW},\nnote={}\n}\n```","# LanPaint 快速上手指南\n\nLanPaint 是一个通用的扩散模型修复（Inpainting）采样器，支持“思考模式”（Think Mode）。它允许模型在去噪前进行多次迭代思考，无需重新训练即可显著提升各类模型（如 Flux、SDXL、Wan 2.2、Z-Image 等）的图像和视频修复质量。\n\n## 1. 环境准备\n\n在开始之前，请确保您的系统满足以下要求：\n\n*   **操作系统**: Windows, Linux 或 macOS\n*   **Python**: 3.10 或更高版本（推荐与 ComfyUI 环境一致）\n*   **核心依赖**: \n    *   **ComfyUI**: 版本需大于 `0.3.11`\n    *   **ComfyUI-Manager**: 用于便捷管理节点插件\n*   **硬件建议**: 推荐使用 NVIDIA GPU (显存建议 8GB 以上，视频修复需更大显存)\n\n> **注意**：本工具主要作为 ComfyUI 的自定义节点运行。\n\n## 2. 安装步骤\n\n您可以通过 **ComfyUI-Manager** 进行一键安装，这是最推荐的方式。\n\n### 方法一：通过 ComfyUI-Manager 安装（推荐）\n\n1.  启动 ComfyUI，点击右侧菜单中的 **\"Manager\"** 按钮。\n2.  选择 **\"Install Custom Nodes\"**。\n3.  在搜索框中输入 `LanPaint`。\n4.  找到由 `scraed` 发布的 **[LanPaint]** 节点，点击 **\"Install\"**。\n5.  安装完成后，**重启 ComfyUI** 以加载新节点。\n\n### 方法二：手动 Git 安装\n\n如果您无法使用 Manager 或需要特定版本，可使用命令行手动安装：\n\n```bash\n# 进入 ComfyUI 的 custom_nodes 目录\ncd ComfyUI\u002Fcustom_nodes\n\n# 克隆仓库\ngit clone https:\u002F\u002Fgithub.com\u002Fscraed\u002FLanPaint.git\n\n# 安装 Python 依赖 (如有 requirements.txt)\ncd LanPaint\npip install -r requirements.txt\n\n# 返回上级目录并重启 ComfyUI\ncd ..\n# 重启 ComfyUI 服务\n```\n\n> **国内加速提示**：如果 `git clone` 速度较慢，可使用国内镜像源：\n> ```bash\n> git clone https:\u002F\u002Fgitee.com\u002Fmirrors\u002FLanPaint.git  # 若存在镜像\n> # 或使用通用加速代理\n> git clone https:\u002F\u002Fghproxy.com\u002Fhttps:\u002F\u002Fgithub.com\u002Fscraed\u002FLanPaint.git\n> ```\n\n## 3. 基本使用\n\n安装重启后，您将在 ComfyUI 的节点列表中找到 LanPaint 相关节点。其使用逻辑与原生的 `KSampler` 高度一致。\n\n### 最简单的工作流示例\n\n1.  **加载节点**：\n    在画布空白处双击，搜索并添加 **`LanPaint KSampler`** 节点（位于 `sampling` 分类下）。\n\n2.  **连接线路**：\n    *   **Model**: 连接您的主模型加载器输出。\n    *   **Positive\u002FNegative**: 连接提示词编码输出。\n    *   **Latent Image**: 连接包含蒙版（Mask）的潜空间图像。\n        *   *关键步骤*：确保您的输入图像已通过 `VAE Encode (for Inpainting)` 或类似节点处理，并正确连接了 **Mask** 通道。\n    *   **LATENT 输出**: 将采样结果连接到 `VAE Decode` 以解码查看。\n\n3.  
**参数设置**：\n    *   **steps**: 总去噪步数（例如 20）。\n    *   **think_steps** (核心参数): “思考”步数。\n        *   设为 `0`：退化为普通采样。\n        *   设为 `2` - `5`：推荐值。模型会在正式去噪前进行多次条件采样迭代，显著提升修复边缘的自然度和内容一致性。\n        *   *注意*：增加此参数会增加计算时间。\n    *   **cfg**: 引导系数。对于 Flux 等蒸馏模型，建议使用较低的值 (1.0 - 2.0) 以避免画面模糊。\n\n4.  **运行生成**：\n    点击 **\"Queue Prompt\"**，即可看到经过 LanPaint 优化后的修复结果。\n\n### 进阶提示\n*   **视频修复**：支持 Wan 2.2 等视频模型。工作流类似，但需确保输入为视频帧序列的 Latent，并注意帧数限制（建议 40 帧以内以保证稳定性）。\n*   **模型兼容性**：适用于 Flux, SDXL, SD3.5, Hunyuan, Z-Image 等几乎所有主流扩散模型，无需额外训练 LoRA。","一位电商设计师急需为促销海报移除模特衣服上过时的品牌 Logo，并自然替换为新的活动标语，同时必须保持衣物褶皱和光影的完美连贯。\n\n### 没有 LanPaint 时\n- **边缘融合生硬**：传统重绘采样器往往无法理解掩码周围的复杂纹理，导致修补区域与原始衣物之间出现明显的接缝或光晕。\n- **结构逻辑断裂**：直接重绘容易忽略衣物的物理褶皱走向，新生成的文字像浮在表面，缺乏随布料起伏的真实立体感。\n- **模型兼容性差**：为了获得较好效果，不得不专门寻找支持重绘的微调模型，无法直接利用手头现有的高质量通用大模型。\n- **反复试错成本高**：需要多次调整蒙版羽化值和重绘幅度，耗费大量时间微调参数仍难以达到“无痕”效果。\n\n### 使用 LanPaint 后\n- **智能“思考”上下文**：LanPaint 的\"Think Mode\"让模型在去噪前进行多步推理，精准捕捉周围像素特征，实现了掩码边界的天衣无缝融合。\n- **完美保留几何结构**：生成的新标语严格遵循原有衣物的褶皱形态和光照方向，仿佛原本就印在衣服上一样自然逼真。\n- **通用模型即插即用**：无需重新训练或切换特定模型，直接在 ComfyUI 中调用任意 Stable Diffusion 模型即可实现电影级的重绘质量。\n- **一次生成即达标**：凭借渐进式精确采样技术，大幅减少了重复生成的次数，将原本半小时的修图工作缩短至几分钟。\n\nLanPaint 通过让模型在重绘前深度“思考”，彻底解决了复杂场景下局部编辑的结构断层问题，让任何开源模型都能瞬间具备专业级的图像修复能力。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fscraed_LanPaint_51bd2325.png","scraed","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002Fscraed_c4b10697.png","PostDoc@HKUST","Hong Kong University of Science and Technology",null,"czhengac@connect.ust.hk","https:\u002F\u002Fscraed.github.io\u002F","https:\u002F\u002Fgithub.com\u002Fscraed",[82,86],{"name":83,"color":84,"percentage":85},"Python","#3572A5",98.8,{"name":87,"color":88,"percentage":89},"JavaScript","#f1e05a",1.2,1108,38,"2026-04-19T09:40:04","GPL-3.0","未说明 (基于 ComfyUI，通常支持 Windows, Linux, macOS)","必需 (NVIDIA GPU 推荐)。显存需求取决于具体模型和视频帧数：视频修复建议限制在 40 帧以内以保证稳定性，长序列处理资源消耗显著增加。具体型号未说明，但需支持所选扩散模型（如 Wan 2.2, Flux, SDXL 等）的运行需求。","未说明 (视频处理和多步“思考”模式会显著增加内存占用)",{"notes":98,"python":99,"dependencies":100},"1. 本工具主要作为 ComfyUI 的扩展节点运行，需先安装 ComfyUI 及 ComfyUI-Manager。\n2. 支持多种模型（Z-image, Wan 2.2, Flux, SDXL 等），无需重新训练。\n3. 视频修复功能处于 Beta 阶段，处理长序列（如 81 帧）耗时显著且可能不稳定，建议限制在 40 帧以内。\n4. 在蒸馏模型（如 Flux.dev）上性能可能下降，建议使用较低的 guidance scale (1.0-2.0)。\n5. v1.5.0 版本修复了可能导致图像模糊的隐藏 bug。","未说明 (依赖宿主 ComfyUI 环境，通常要求 Python 3.8+)",[101,102,103,104],"ComfyUI (>0.3.11)","ComfyUI-Manager","torch (版本依模型而定)","diffusers (可选，通过 LanPaint-Diffusers)",[15,106],"视频",[108,109,110,111,112,113,114,115],"diffusion-models","generative-ai","image-editing","inpainting","inpainting-algorithm","video-editing","diffusion-inpainting","generative-model","2026-03-27T02:49:30.150509","2026-04-20T19:33:12.048513",[119,124,129,134,139,144],{"id":120,"question_zh":121,"answer_zh":122,"source_url":123},45457,"为什么使用 LanPaint 进行图像修复时，整个图像的质量会下降（出现噪点或分辨率降低的感觉）？","这是因为底层的扩散模型要求图像尺寸必须是 8 的倍数。如果图像尺寸不符合该要求，会导致质量问题。解决方案是使用“自动调整大小（auto resize）”节点，将图像和蒙版调整为 8 的倍数后再输入到 LanPaint 节点中。开发者也计划在未来版本中加入错误提示以强制此要求。","https:\u002F\u002Fgithub.com\u002Fscraed\u002FLanPaint\u002Fissues\u002F27",{"id":125,"question_zh":126,"answer_zh":127,"source_url":128},45458,"生成的图像在蒙版区域周围有明显的接缝，或者不遵循提示词，该如何解决？","这通常是由于 ComfyUI 中的 [Set Latent Noise Mask] 节点行为异常导致的（表现得像 VAEEncode for inpainting）。LanPaint 依赖此节点，因此继承了该问题。解决方法包括：1. 更新 ComfyUI 到最新版本；2. 
检查是否有其他自定义节点干扰了该节点的行为。修复该基础节点的问题后，LanPaint 即可正常工作。","https:\u002F\u002Fgithub.com\u002Fscraed\u002FLanPaint\u002Fissues\u002F11",{"id":130,"question_zh":131,"answer_zh":132,"source_url":133},45459,"在使用 Qwen-Edit 等模型时遇到报错：\"Input and output must have the same number of spatial dimensions...\"（空间维度不匹配），如何解决？","这是一个已知的维度对齐问题，特别是在使用蒙版与 'Set Latent Noise' 时。维护者已在 LanPaint 节点内部修复了此问题，无需直接修改 ComfyUI 代码。请将 LanPaint 插件更新至版本 1.4.8 或更高版本即可解决该错误。","https:\u002F\u002Fgithub.com\u002Fscraed\u002FLanPaint\u002Fissues\u002F64",{"id":135,"question_zh":136,"answer_zh":137,"source_url":138},45460,"是否提供不包含 ComfyUI 界面的纯 Python 本地测试演示代码？","是的，本地 Python 代码已经发布。请查看项目根目录下的 README 文件获取详细信息和代码链接。此外，解释当前算法细节的新版论文也已发布，相关的 Python 基准测试代码近期也会放出。","https:\u002F\u002Fgithub.com\u002Fscraed\u002FLanPaint\u002Fissues\u002F7",{"id":140,"question_zh":141,"answer_zh":142,"source_url":143},45461,"LanPaint 是否支持单张图像的修复任务？运行速度如何优化？","支持单张图像修复。如果发现处理速度过慢（例如在 4090 上需要数十分钟），可以尝试结合“裁剪与缝合（crop & stitch）”的工作流来优化性能。社区用户反馈使用特定的工作流配置可以显著减少迭代时间。建议参考社区分享的使用了 crop & stitch 技术的工作流文件。","https:\u002F\u002Fgithub.com\u002Fscraed\u002FLanPaint\u002Fissues\u002F46",{"id":145,"question_zh":146,"answer_zh":147,"source_url":148},45462,"LanPaint 除了基础的图像修复（Inpainting），还支持哪些高级应用场景？","LanPaint 功能强大，不仅限于基础修复。用户已成功将其用于：1. 图像外绘（Outpainting），扩展图像边界；2. 同时执行修复和换脸（Face Swap）任务。通过调整参数（如添加早期停止参数），还可以探索更多用例。具体效果可参考社区分享的案例图片。","https:\u002F\u002Fgithub.com\u002Fscraed\u002FLanPaint\u002Fissues\u002F12",[150,155,160,165,170,175,180,185,190,195,200,205,210,215,220,225,230,235,240,245],{"id":151,"version":152,"summary_zh":153,"released_at":154},360376,"1.5.3","- 缩小了信息显示区域，使其不再随节点大小调整，并避免浪费空间。","2026-04-11T03:15:14",{"id":156,"version":157,"summary_zh":158,"released_at":159},360377,"1.5.2","- 修复高级自定义采样器中的 ValueError: 解包值不足 错误\r\n- 修复 Flux Klein 图像修复工作流的缩放 bug","2026-03-26T09:14:08",{"id":161,"version":162,"summary_zh":163,"released_at":164},360378,"1.5.0","- 修复了一个重要的隐藏 bug，该 bug会降低性能并可能导致图像模糊（尤其是在使用 `z-image-base` 时），同时还能提升 LanPaint 在其他模型上的整体性能。\n- 修复了 LanPaint Sampler Custom（高级）中“解包值不足”的问题。","2026-03-02T14:32:21",{"id":166,"version":167,"summary_zh":168,"released_at":169},360379,"1.4.13","- 由 godnight10061 提供的提前停止逻辑。 - 修复 Flux Klein 示例工作流。","2026-02-02T15:54:42",{"id":171,"version":172,"summary_zh":173,"released_at":174},360380,"1.4.11","- 修复通义形状错误","2026-01-27T09:01:36",{"id":176,"version":177,"summary_zh":178,"released_at":179},360381,"1.4.10","- 修复了与 NaN 数字和掩码形状相关的 bug。","2026-01-13T14:19:31",{"id":181,"version":182,"summary_zh":183,"released_at":184},360382,"1.4.8","- 修复 ComfyUI > 6.0.0 版本中 Qwen 编辑和 Wan 2.2 的形状错误","2025-12-25T15:37:30",{"id":186,"version":187,"summary_zh":188,"released_at":189},360383,"1.4.7","- 修复在 z-image inpaint 示例工作流中使用自定义图像时的图像缩放错误。","2025-12-15T10:17:16",{"id":191,"version":192,"summary_zh":193,"released_at":194},360384,"1.4.6","- 当图片尺寸与预期不符时，提供更清晰的错误报告","2025-12-06T02:27:50",{"id":196,"version":197,"summary_zh":198,"released_at":199},360385,"1.4.5","- z 图像支持","2025-12-04T06:38:15",{"id":201,"version":202,"summary_zh":203,"released_at":204},360386,"1.4.4","- Fix blurry problem on Wan 2.2","2025-11-22T17:12:26",{"id":206,"version":207,"summary_zh":208,"released_at":209},360387,"1.4.3","- Support Wan 2.2 5B\r\n- No longer copy the original image\u002Fvideo back during denoising process. 
Will slightly improve performance.","2025-11-17T08:13:01",{"id":211,"version":212,"summary_zh":213,"released_at":214},360388,"1.4.2","- Fix bug with latest comfyUI's change in CFG sampling","2025-10-26T15:31:22",{"id":216,"version":217,"summary_zh":218,"released_at":219},360389,"1.4.1","- Supports Hunyuan T2I inpainting\r\n- Adjust default LanPaint step size from 0.15 to 0.2 to improve performance","2025-10-16T08:11:56",{"id":221,"version":222,"summary_zh":223,"released_at":224},360390,"1.4.0","Add beta video inpainting\u002Foutpainting for Wan 2.2","2025-10-02T15:56:59",{"id":226,"version":227,"summary_zh":228,"released_at":229},360391,"1.3.2","- Add Wan 2.2 T2I support","2025-09-17T08:06:47",{"id":231,"version":232,"summary_zh":233,"released_at":234},360392,"1.3.1","- Add example workflows","2025-08-25T03:38:55",{"id":236,"version":237,"summary_zh":238,"released_at":239},360393,"1.3.0","* Support Qwen Image Edit\r\n* Performance optimization: 200% faster than previous version.","2025-08-21T16:28:07",{"id":241,"version":242,"summary_zh":243,"released_at":244},360394,"1.2.0","Add Qwen image support","2025-08-08T06:14:39",{"id":246,"version":247,"summary_zh":248,"released_at":249},360395,"1.1.0","- Update the algorithm with enhanced stability and outpaint performance.\r\n- Add outpaint example\r\n- Supports Sampler Custom (Thanks to [MINENEMA](https:\u002F\u002Fgithub.com\u002FMINENEMA))","2025-06-21T06:19:09"]