[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-UMass-Embodied-AGI--3D-VLA":3,"tool-UMass-Embodied-AGI--3D-VLA":64},[4,17,27,35,43,56],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":16},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,3,"2026-04-05T11:01:52",[13,14,15],"开发框架","图像","Agent","ready",{"id":18,"name":19,"github_repo":20,"description_zh":21,"stars":22,"difficulty_score":23,"last_commit_at":24,"category_tags":25,"status":16},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",140436,2,"2026-04-05T23:32:43",[13,15,26],"语言模型",{"id":28,"name":29,"github_repo":30,"description_zh":31,"stars":32,"difficulty_score":23,"last_commit_at":33,"category_tags":34,"status":16},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",107662,"2026-04-03T11:11:01",[13,14,15],{"id":36,"name":37,"github_repo":38,"description_zh":39,"stars":40,"difficulty_score":23,"last_commit_at":41,"category_tags":42,"status":16},3704,"NextChat","ChatGPTNextWeb\u002FNextChat","NextChat 是一款轻量且极速的 AI 助手，旨在为用户提供流畅、跨平台的大模型交互体验。它完美解决了用户在多设备间切换时难以保持对话连续性，以及面对众多 AI 模型不知如何统一管理的痛点。无论是日常办公、学习辅助还是创意激发，NextChat 都能让用户随时随地通过网页、iOS、Android、Windows、MacOS 或 Linux 端无缝接入智能服务。\n\n这款工具非常适合普通用户、学生、职场人士以及需要私有化部署的企业团队使用。对于开发者而言，它也提供了便捷的自托管方案，支持一键部署到 Vercel 或 Zeabur 等平台。\n\nNextChat 的核心亮点在于其广泛的模型兼容性，原生支持 Claude、DeepSeek、GPT-4 及 Gemini Pro 等主流大模型，让用户在一个界面即可自由切换不同 AI 能力。此外，它还率先支持 MCP（Model Context Protocol）协议，增强了上下文处理能力。针对企业用户，NextChat 提供专业版解决方案，具备品牌定制、细粒度权限控制、内部知识库整合及安全审计等功能，满足公司对数据隐私和个性化管理的高标准要求。",87618,"2026-04-05T07:20:52",[13,26],{"id":44,"name":45,"github_repo":46,"description_zh":47,"stars":48,"difficulty_score":23,"last_commit_at":49,"category_tags":50,"status":16},2268,"ML-For-Beginners","microsoft\u002FML-For-Beginners","ML-For-Beginners 是由微软推出的一套系统化机器学习入门课程，旨在帮助零基础用户轻松掌握经典机器学习知识。这套课程将学习路径规划为 12 周，包含 26 节精炼课程和 52 
道配套测验，内容涵盖从基础概念到实际应用的完整流程，有效解决了初学者面对庞大知识体系时无从下手、缺乏结构化指导的痛点。\n\n无论是希望转型的开发者、需要补充算法背景的研究人员，还是对人工智能充满好奇的普通爱好者，都能从中受益。课程不仅提供了清晰的理论讲解，还强调动手实践，让用户在循序渐进中建立扎实的技能基础。其独特的亮点在于强大的多语言支持，通过自动化机制提供了包括简体中文在内的 50 多种语言版本，极大地降低了全球不同背景用户的学习门槛。此外，项目采用开源协作模式，社区活跃且内容持续更新，确保学习者能获取前沿且准确的技术资讯。如果你正寻找一条清晰、友好且专业的机器学习入门之路，ML-For-Beginners 将是理想的起点。",84991,"2026-04-05T10:45:23",[14,51,52,53,15,54,26,13,55],"数据工具","视频","插件","其他","音频",{"id":57,"name":58,"github_repo":59,"description_zh":60,"stars":61,"difficulty_score":10,"last_commit_at":62,"category_tags":63,"status":16},3128,"ragflow","infiniflow\u002Fragflow","RAGFlow 是一款领先的开源检索增强生成（RAG）引擎，旨在为大语言模型构建更精准、可靠的上下文层。它巧妙地将前沿的 RAG 技术与智能体（Agent）能力相结合，不仅支持从各类文档中高效提取知识，还能让模型基于这些知识进行逻辑推理和任务执行。\n\n在大模型应用中，幻觉问题和知识滞后是常见痛点。RAGFlow 通过深度解析复杂文档结构（如表格、图表及混合排版），显著提升了信息检索的准确度，从而有效减少模型“胡编乱造”的现象，确保回答既有据可依又具备时效性。其内置的智能体机制更进一步，使系统不仅能回答问题，还能自主规划步骤解决复杂问题。\n\n这款工具特别适合开发者、企业技术团队以及 AI 研究人员使用。无论是希望快速搭建私有知识库问答系统，还是致力于探索大模型在垂直领域落地的创新者，都能从中受益。RAGFlow 提供了可视化的工作流编排界面和灵活的 API 接口，既降低了非算法背景用户的上手门槛，也满足了专业开发者对系统深度定制的需求。作为基于 Apache 2.0 协议开源的项目，它正成为连接通用大模型与行业专有知识之间的重要桥梁。",77062,"2026-04-04T04:44:48",[15,14,13,26,54],{"id":65,"github_repo":66,"name":67,"description_en":68,"description_zh":69,"ai_summary_zh":69,"readme_en":70,"readme_zh":71,"quickstart_zh":72,"use_case_zh":73,"hero_image_url":74,"owner_login":75,"owner_name":76,"owner_avatar_url":77,"owner_bio":78,"owner_company":79,"owner_location":79,"owner_email":79,"owner_twitter":79,"owner_website":80,"owner_url":81,"languages":82,"stars":91,"forks":92,"last_commit_at":93,"license":79,"difficulty_score":10,"env_os":94,"env_gpu":95,"env_ram":94,"env_deps":96,"category_tags":110,"github_topics":79,"view_count":10,"oss_zip_url":79,"oss_zip_packed_at":79,"status":16,"created_at":111,"updated_at":112,"faqs":113,"releases":114},625,"UMass-Embodied-AGI\u002F3D-VLA","3D-VLA","[ICML 2024] 3D-VLA: A 3D Vision-Language-Action Generative World Model","3D-VLA 是一款面向三维物理世界的视觉 - 语言 - 动作生成式世界模型框架。它打破了传统二维模型的局限，将感知、推理与行动深度融合，模拟人类在真实环境中的认知过程。\n\n当前许多 AI 系统难以理解复杂的三维空间结构，限制了机器人在现实场景中的操作能力。3D-VLA 通过生成式技术有效解决了这一痛点。它基于 3D-LLM 构建，利用交互令牌与环境互动，并训练具身扩散模型来精准预测目标图像和点云。这种设计让智能体不仅能“看懂”场景，还能规划出符合三维逻辑的行动路径。\n\n该项目非常适合从事具身智能、机器人学习及多模态大模型的研究人员与开发者。开源内容涵盖预训练代码、目标生成扩散模型以及详细的模型说明。如果你希望探索如何让 AI 真正理解三维空间并执行复杂任务，3D-VLA 提供了一个强有力的基础架构。其最新模型已发布在 Hugging Face，方便直接进行推理测试。作为 ICML 2024 的录用成果，它为下一代具身智能研究提供了重要参考。","\u003Cbr\u002F>\n\u003Cp align=\"center\">\n  \u003Ch1 align=\"center\">\u003Ca style=\"color:#61a5c2;\">3D\u003C\u002Fa>-\u003Ca style=\"color:#94D2BD;\">V\u003C\u002Fa>\u003Ca style=\"color:#EE9B00;\">L\u003C\u002Fa>\u003Ca style=\"color:#CA6502;\">A\u003C\u002Fa>: A 3D Vision-Language-Action Generative World Model\u003C\u002Fh1>\n  \u003Cp align=\"center\">\n    ICML 2024\n  \u003C\u002Fp>\n  \u003Cp align=\"center\">\n    \u003Ca href=\"https:\u002F\u002Fhaoyuzhen.com\">Haoyu Zhen\u003C\u002Fa>,\n    \u003Ca href=\"\">Xiaowen Qiu\u003C\u002Fa>,\n    \u003Ca href=\"https:\u002F\u002Fpeihaochen.github.io\">Peihao Chen\u003C\u002Fa>,\n    \u003Ca href=\"https:\u002F\u002Fgithub.com\u002FYang-Chincheng\">Jincheng Yang\u003C\u002Fa>,\n    \u003Ca href=\"https:\u002F\u002Fcakeyan.github.io\">Xin Yan\u003C\u002Fa>,\n    \u003Ca href=\"https:\u002F\u002Fyilundu.github.io\">Yilun Du\u003C\u002Fa>,\n    \u003Ca href=\"https:\u002F\u002Fevelinehong.github.io\">Yining Hong\u003C\u002Fa>,\n    \u003Ca href=\"https:\u002F\u002Fpeople.csail.mit.edu\u002Fganchuang\">Chuang Gan\u003C\u002Fa>\n  \u003C\u002Fp>\n  \u003Cp align=\"center\">\n    \u003Ca 
href=\"https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.09631\">\n      \u003Cimg src='https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FPaper-PDF-red?style=flat&logo=arXiv&logoColor=red' alt='Paper PDF'>\n    \u003C\u002Fa>\n    \u003Ca href='https:\u002F\u002Fvis-www.cs.umass.edu\u002F3dvla' style='padding-left: 0.5rem;'>\n      \u003Cimg src='https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FProject-Page-blue?style=flat&logo=Google%20chrome&logoColor=blue' alt='Project Page'>\n    \u003C\u002Fa>\n  \u003C\u002Fp>\n\u003C\u002Fp>\n\n\u003C!-- TABLE OF CONTENTS -->\n\u003Cdetails open=\"open\" style='padding: 10px; border-radius:5px 30px 30px 5px; border-style: solid; border-width: 1px;'>\n  \u003Csummary>Tabel of Contents\u003C\u002Fsummary>\n  \u003Col>\n    \u003Cli>\n      \u003Ca href=\"#method\">Method\u003C\u002Fa>\n    \u003C\u002Fli>\n    \u003Cli>\n      \u003Ca href=\"#installation\">Installation\u003C\u002Fa>\n    \u003C\u002Fli>\n    \u003Cli>\n      \u003Ca href=\"#embodied-diffusion-models\">Embodied Diffusion Models\u003C\u002Fa>\n      \u003Cul>\n        \u003Cli>\u003Ca href=\"#goal-image-generation\">Goal Image Generation\u003C\u002Fa>\u003C\u002Fli>\n      \u003C\u002Ful>\n      \u003Cul>\n        \u003Cli>\u003Ca href=\"#goal-point-cloud-generation\">Goal Point Cloud Generation\u003C\u002Fa>\u003C\u002Fli>\n      \u003C\u002Ful>\n    \u003C\u002Fli>\n    \u003Cli>\n      \u003Ca href=\"#multimodal-large-language-model\">Multimodal Large Language Model\u003C\u002Fa>\n      \u003Cul>\n        \u003Cli>\u003Ca href=\"#pretrain-3d-vla\">Pretrain 3D-VLA\u003C\u002Fa>\u003C\u002Fli>\n      \u003C\u002Ful>\n    \u003C\u002Fli>\n    \u003Cli>\n      \u003Ca href=\"#citation\">Citation\u003C\u002Fa>\n    \u003C\u002Fli>\n    \u003Cli>\n      \u003Ca href=\"#acknowledgement\">Acknowledgement\u003C\u002Fa>\n    \u003C\u002Fli>\n  \u003C\u002Fol>\n\u003C\u002Fdetails>\n\n## News 📢\n\n- [2024\u002F10] All the diffusion models are released on Hugging Face! You could find the models [here](https:\u002F\u002Fhuggingface.co\u002Fanyezhy). We also provide a [Model Card](docs\u002Fmodel_card.md) for the detailed information. Running the inference code will automatically download the latest models.\n- [2024\u002F07] 3D-VLA Pretraining code and goal image generation LDM checkpoint are released.\n- [2024\u002F06] Training and inference code for goal generation diffusion models are released.\n- [2024\u002F05] 3D-VLA is accepted to ICML 2024!\n- [2024\u002F03] [Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.09631) is on arXiv.\n\n## Method\n\n3D-VLA is a framework that connects vision-language-action (VLA) models to the 3D physical world. Unlike traditional 2D models, 3D-VLA integrates 3D perception, reasoning, and action through a generative world model, similar to human cognitive processes. It is built on the [3D-LLM](https:\u002F\u002Fvis-www.cs.umass.edu\u002F3dllm\u002F) and uses interaction tokens to engage with the environment. 
Embodied diffusion models are trained and aligned with the LLM to predict goal images and point clouds.\n\n\u003Cp align=\"center\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FUMass-Embodied-AGI_3D-VLA_readme_58ac8300c73e.png\" alt=\"Logo\" width=\"80%\">\n\u003C\u002Fp>\n\n## Installation\n\n```bash\nconda create -n 3dvla python=3.9\nconda activate 3dvla\npip install -r requirements.txt\n```\n\nWe will update the file structure and the installation process in the future.\n\nWe provide a [model card](docs\u002Fmodel_card.md) for the 3D-VLA model. The model card includes the task description, model description, and training datasets.\n\n## Embodied Diffusion Models\n\n### Goal Image Generation\nTrain the goal image latent diffusion model with the following command. If you want to include depth information, you can add `--include_depth` to the command in the `train_ldm.sh` file.\n```bash\nbash launcher\u002Ftrain_ldm.sh [NUM_GPUS] [NUM_NODES]\n```\n\nThen you can generate the goal images. The results will be saved in the `lavis\u002Foutput\u002FLDM\u002Fpix2pix\u002Fresults` folder.\n```bash\npython inference_ldm_goal_image.py \\\n    --ckpt_folder lavis\u002Foutput\u002FLDM\u002Fpix2pix\u002Fruns (--include_depth)\n```\n\nWe have released our model on Hugging Face: [goal-image](https:\u002F\u002Fhuggingface.co\u002Fanyezhy\u002F3dvla-diffusion) and [goal-depth](https:\u002F\u002Fhuggingface.co\u002Fanyezhy\u002F3dvla-diffusion-depth). A simple demo can be run using the following command:\n\n```bash\npython inference_ldm_goal_image.py \\\n    --ckpt_folder anyezhy\u002F3dvla-diffusion \\\n    --image docs\u002Fcans.png --text \"knock pepsi can over\" \\\n    --save_path result.png\n\npython inference_ldm_goal_image.py \\\n    --ckpt_folder anyezhy\u002F3dvla-diffusion-depth --include_depth \\\n    --image docs\u002Fbottle.png --text \"move water bottle near sponge\" \\\n    --save_path result.png\n```\n\n### Goal Point Cloud Generation\nWe have implemented [xFormers](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fxformers) for the goal point cloud diffusion model. You can install it to accelerate training and inference.\n\nTrain the goal point cloud diffusion model (finetuning the pretrained Point-E model).\n```bash\nbash launcher\u002Ftrain_pe.sh [NUM_GPUS] [NUM_NODES]\n```\n\nWe have released our model on Hugging Face: [goal-point-cloud](https:\u002F\u002Fhuggingface.co\u002Fanyezhy\u002F3dvla-diffusion-pointcloud).\nRun inference on the goal point cloud with the following command. 
If you want to use multiple GPUs, use `torchrun --nproc_per_node=[NUM_GPUS] --master_port=[PORT] inference_pe_goal_pcd.py` instead.\n  ```bash\n  python inference_pe_goal_pcd.py \\\n    --input_npy docs\u002Fpoint_cloud.npy --text \"close bottom drawer\" \\\n    --output_dir SAVE_PATH\n\n  python inference_pe_goal_pcd.py \\\n    --input_npy docs\u002Fmoney.npy \\\n    --text \"put the money away in the safe on the bottom shelf\"\n  ```\n\n## Multimodal Large Language Model\n\n### Pretrain 3D-VLA\nTrain our 3D-VLA model:\n```bash\nbash launcher\u002Ftrain_llm.sh [NUM_GPUS] [NUM_NODES]\n```\n\n## Citation\n```\n@article{zhen20243dvla,\n  author = {Zhen, Haoyu and Qiu, Xiaowen and Chen, Peihao and Yang, Jincheng and Yan, Xin and Du, Yilun and Hong, Yining and Gan, Chuang},\n  title = {3D-VLA: 3D Vision-Language-Action Generative World Model},\n  journal = {arXiv preprint arXiv:2403.09631},\n  year = {2024},\n}\n```\n\n## Acknowledgement\nHere we would like to thank the following resources for their great work:\n- [SAM](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fsegment-anything), [ConceptFusion](https:\u002F\u002Fgithub.com\u002Fconcept-fusion\u002Fconcept-fusion) and [3D-CLR](https:\u002F\u002Fgithub.com\u002Fevelinehong\u002F3D-CLR-Official) for Data Processing.\n- [Diffusers](https:\u002F\u002Fgithub.com\u002Fhuggingface\u002Fdiffusers), [InstructPix2Pix](https:\u002F\u002Fgithub.com\u002Ftimothybrooks\u002Finstruct-pix2pix), [StableDiffusion](https:\u002F\u002Fgithub.com\u002FStability-AI\u002FStableDiffusion) and [Point-E](https:\u002F\u002Fgithub.com\u002Fopenai\u002Fpoint-e) for the Diffusion Model.\n- [LAVIS](https:\u002F\u002Fgithub.com\u002Fsalesforce\u002FLAVIS) and [3D-LLM](https:\u002F\u002Fgithub.com\u002FUMass-Foundation-Model\u002F3D-LLM) for the Codebase and Architecture.\n- [OpenX](https:\u002F\u002Frobotics-transformer-x.github.io) for Dataset.\n- [RLBench](https:\u002F\u002Fgithub.com\u002Fstepjam\u002FRLBench) and [Hiveformer](https:\u002F\u002Fgithub.com\u002Fvlc-robot\u002Fhiveformer) for Evaluation.\n","\u003Cbr\u002F>\n\u003Cp align=\"center\">\n  \u003Ch1 align=\"center\">\u003Ca style=\"color:#61a5c2;\">3D\u003C\u002Fa>-\u003Ca style=\"color:#94D2BD;\">V\u003C\u002Fa>\u003Ca style=\"color:#EE9B00;\">L\u003C\u002Fa>\u003Ca style=\"color:#CA6502;\">A\u003C\u002Fa>: 一种 3D 视觉 - 语言 - 动作生成式世界模型\u003C\u002Fh1>\n  \u003Cp align=\"center\">\n    ICML 2024\n  \u003C\u002Fp>\n  \u003Cp align=\"center\">\n    \u003Ca href=\"https:\u002F\u002Fhaoyuzhen.com\">Haoyu Zhen\u003C\u002Fa>,\n    \u003Ca href=\"\">Xiaowen Qiu\u003C\u002Fa>,\n    \u003Ca href=\"https:\u002F\u002Fpeihaochen.github.io\">Peihao Chen\u003C\u002Fa>,\n    \u003Ca href=\"https:\u002F\u002Fgithub.com\u002FYang-Chincheng\">Jincheng Yang\u003C\u002Fa>,\n    \u003Ca href=\"https:\u002F\u002Fcakeyan.github.io\">Xin Yan\u003C\u002Fa>,\n    \u003Ca href=\"https:\u002F\u002Fyilundu.github.io\">Yilun Du\u003C\u002Fa>,\n    \u003Ca href=\"https:\u002F\u002Fevelinehong.github.io\">Yining Hong\u003C\u002Fa>,\n    \u003Ca href=\"https:\u002F\u002Fpeople.csail.mit.edu\u002Fganchuang\">Chuang Gan\u003C\u002Fa>\n  \u003C\u002Fp>\n  \u003Cp align=\"center\">\n    \u003Ca href=\"https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.09631\">\n      \u003Cimg src='https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FPaper-PDF-red?style=flat&logo=arXiv&logoColor=red' alt='论文 PDF'>\n    \u003C\u002Fa>\n    \u003Ca href='https:\u002F\u002Fvis-www.cs.umass.edu\u002F3dvla' style='padding-left: 0.5rem;'>\n      \u003Cimg 
src='https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FProject-Page-blue?style=flat&logo=Google%20chrome&logoColor=blue' alt='项目页面'>\n    \u003C\u002Fa>\n  \u003C\u002Fp>\n\u003C\u002Fp>\n\n\u003C!-- TABLE OF CONTENTS -->\n\u003Cdetails open=\"open\" style='padding: 10px; border-radius:5px 30px 30px 5px; border-style: solid; border-width: 1px;'>\n  \u003Csummary>目录\u003C\u002Fsummary>\n  \u003Col>\n    \u003Cli>\n      \u003Ca href=\"#method\">方法\u003C\u002Fa>\n    \u003C\u002Fli>\n    \u003Cli>\n      \u003Ca href=\"#installation\">安装\u003C\u002Fa>\n    \u003C\u002Fli>\n    \u003Cli>\n      \u003Ca href=\"#embodied-diffusion-models\">具身扩散模型\u003C\u002Fa>\n      \u003Cul>\n        \u003Cli>\u003Ca href=\"#goal-image-generation\">目标图像生成\u003C\u002Fa>\u003C\u002Fli>\n      \u003C\u002Ful>\n      \u003Cul>\n        \u003Cli>\u003Ca href=\"#goal-point-cloud-generation\">目标点云生成\u003C\u002Fa>\u003C\u002Fli>\n      \u003C\u002Ful>\n    \u003C\u002Fli>\n    \u003Cli>\n      \u003Ca href=\"#multimodal-large-language-model\">多模态大语言模型\u003C\u002Fa>\n      \u003Cul>\n        \u003Cli>\u003Ca href=\"#pretrain-3d-vla\">预训练 3D-VLA\u003C\u002Fa>\u003C\u002Fli>\n      \u003C\u002Ful>\n    \u003C\u002Fli>\n    \u003Cli>\n      \u003Ca href=\"#citation\">引用\u003C\u002Fa>\n    \u003C\u002Fli>\n    \u003Cli>\n      \u003Ca href=\"#acknowledgement\">致谢\u003C\u002Fa>\n    \u003C\u002Fli>\n  \u003C\u002Fol>\n\u003C\u002Fdetails>\n\n## 新闻 📢\n\n- [2024\u002F10] 所有扩散模型已在 Hugging Face 上发布！你可以在 [这里](https:\u002F\u002Fhuggingface.co\u002Fanyezhy) 找到这些模型。我们还提供了 [模型卡片](docs\u002Fmodel_card.md) 以获取详细信息。运行推理代码将自动下载最新模型。\n- [2024\u002F07] 3D-VLA 预训练代码和目标图像生成的 LDM 检查点已发布。\n- [2024\u002F06] 目标生成扩散模型的训练和推理代码已发布。\n- [2024\u002F05] 3D-VLA 已被 ICML 2024 录用！\n- [2024\u002F03] [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.09631) 已上传至 arXiv。\n\n## 方法\n\n3D-VLA 是一个将视觉 - 语言 - 动作 (VLA) 模型连接到 3D 物理世界的框架。与传统 2D 模型不同，3D-VLA 通过生成式世界模型整合了 3D 感知、推理和动作，类似于人类的认知过程。它基于 [3D-LLM](https:\u002F\u002Fvis-www.cs.umass.edu\u002F3dllm\u002F) 构建，并使用交互 token 与环境进行交互。具身扩散模型经过训练并与大语言模型 (LLM) 对齐，以预测目标图像和点云。\n\n\u003Cp align=\"center\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FUMass-Embodied-AGI_3D-VLA_readme_58ac8300c73e.png\" alt=\"Logo\" width=\"80%\">\n\u003C\u002Fp>\n\n## 安装\n\n```bash\nconda create -n 3dvla python=3.9\nconda activate 3dvla\npip install -r requirements.txt\n```\n\n我们将来会更新文件结构和安装流程。\n\n我们为 3D-VLA 模型提供了一个 [模型卡片](docs\u002Fmodel_card.md)。模型卡片包含任务描述、模型描述以及训练数据集。\n\n## 具身扩散模型\n\n### 目标图像生成\n使用以下命令训练目标图像潜在扩散模型。如果您想包含深度信息，可以在 `train_ldm.sh` 文件的命令中添加 `--include_depth`。\n```bash\nbash launcher\u002Ftrain_ldm.sh [NUM_GPUS] [NUM_NODES]\n```\n\n然后您可以生成目标图像。结果将保存在 `lavis\u002Foutput\u002FLDM\u002Fpix2pix\u002Fresults` 文件夹中。\n```bash\npython inference_ldm_goal_image.py \\\n    --ckpt_folder lavis\u002Foutput\u002FLDM\u002Fpix2pix\u002Fruns (--include_depth)\n```\n\n我们已在 Hugging Face 上发布了我们的模型：[goal-image](https:\u002F\u002Fhuggingface.co\u002Fanyezhy\u002F3dvla-diffusion) 和 [goal-depth](https:\u002F\u002Fhuggingface.co\u002Fanyezhy\u002F3dvla-diffusion-depth)。可以使用以下命令运行简单的演示：\n\n```bash\npython inference_ldm_goal_image.py \\\n    --ckpt_folder anyezhy\u002F3dvla-diffusion \\\n    --image docs\u002Fcans.png --text \"knock pepsi can over\" \\\n    --save_path result.png\n\npython inference_ldm_goal_image.py \\\n    --ckpt_folder anyezhy\u002F3dvla-diffusion-depth --include_depth \\\n    --image docs\u002Fbottle.png --text \"move water bottle near sponge\" \\\n    --save_path result.png\n```\n\n### 
目标点云生成\n我们为目标点云扩散模型实现了 [xFormers](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fxformers)。您可以安装它并加速训练和推理过程。\n\n训练目标点云扩散模型（微调预训练的 Point-E 模型）。\n```bash\nbash launcher\u002Ftrain_pe.sh [NUM_GPUS] [NUM_NODES]\n```\n\n我们已在 Hugging Face 上发布了我们的模型：[goal-point-cloud](https:\u002F\u002Fhuggingface.co\u002Fanyezhy\u002F3dvla-diffusion-pointcloud)。使用以下命令推理目标点云。如果您想使用多个 GPU，请改用 `torchrun --nproc_per_node=[NUM_GPUS] --master_port=[PORT] inference_pe_goal_pcd.py`。\n  ```bash\n  python inference_pe_goal_pcd.py \\\n    --input_npy docs\u002Fpoint_cloud.npy --text \"close bottom drawer\" \\\n    --output_dir SAVE_PATH\n\n  python inference_pe_goal_pcd.py \\\n    --input_npy docs\u002Fmoney.npy \\\n    --text \"put the money away in the safe on the bottom shelf\"\n  ```\n\n## 多模态大语言模型\n\n### 预训练 3D-VLA\n训练我们的 3D-VLA 模型：\n```bash\nbash launcher\u002Ftrain_llm.sh [NUM_GPUS] [NUM_NODES]\n```\n\n## 引用\n```\n@article{zhen20243dvla,\n  author = {Zhen, Haoyu and Qiu, Xiaowen and Chen, Peihao and Yang, Jincheng and Yan, Xin and Du, Yilun and Hong, Yining and Gan, Chuang},\n  title = {3D-VLA: 3D Vision-Language-Action Generative World Model},\n  journal = {arXiv preprint arXiv:2403.09631},\n  year = {2024},\n}\n```\n\n## 致谢\n在此，我们要感谢以下资源及其杰出的工作：\n- [SAM](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fsegment-anything)、[ConceptFusion](https:\u002F\u002Fgithub.com\u002Fconcept-fusion\u002Fconcept-fusion) 和 [3D-CLR](https:\u002F\u002Fgithub.com\u002Fevelinehong\u002F3D-CLR-Official) 用于数据处理。\n- [Diffusers](https:\u002F\u002Fgithub.com\u002Fhuggingface\u002Fdiffusers)、[InstructPix2Pix](https:\u002F\u002Fgithub.com\u002Ftimothybrooks\u002Finstruct-pix2pix)、[StableDiffusion](https:\u002F\u002Fgithub.com\u002FStability-AI\u002FStableDiffusion) 和 [Point-E](https:\u002F\u002Fgithub.com\u002Fopenai\u002Fpoint-e) 用于扩散模型。\n- [LAVIS](https:\u002F\u002Fgithub.com\u002Fsalesforce\u002FLAVIS) 和 [3D-LLM](https:\u002F\u002Fgithub.com\u002FUMass-Foundation-Model\u002F3D-LLM) 用于代码库和架构。\n- [OpenX](https:\u002F\u002Frobotics-transformer-x.github.io) 用于数据集。\n- [RLBench](https:\u002F\u002Fgithub.com\u002Fstepjam\u002FRLBench) 和 [Hiveformer](https:\u002F\u002Fgithub.com\u002Fvlc-robot\u002Fhiveformer) 用于评估。","# 3D-VLA 快速上手指南\n\n**3D-VLA** 是一个将视觉 - 语言 - 动作（VLA）模型连接到 3D 物理世界的生成式世界模型框架。它基于 3D-LLM，通过生成式模型实现 3D 感知、推理和动作预测，支持目标图像生成和目标点云生成等任务。该工具已收录于 ICML 2024。\n\n---\n\n## 1. 环境准备\n\n*   **操作系统**: Linux \u002F macOS \u002F Windows (推荐 Linux 用于训练)\n*   **Python 版本**: 3.9\n*   **依赖管理**: Conda\n*   **硬件要求**: 建议配备 NVIDIA GPU 以运行扩散模型和训练任务。\n\n---\n\n## 2. 安装步骤\n\n请在终端中执行以下命令创建虚拟环境并安装依赖：\n\n```bash\nconda create -n 3dvla python=3.9\nconda activate 3dvla\npip install -r requirements.txt\n```\n\n> **注意**: 推理代码会自动从 Hugging Face 下载预训练模型，请确保网络可访问 `huggingface.co`。如需加速下载，可配置国内镜像源。\n\n---\n\n## 3. 
基本使用（推理示例）\n\n本工具提供了预训练模型，您可以直接运行推理代码进行体验。\n\n### 3.1 目标图像生成 (Goal Image Generation)\n\n使用预训练的扩散模型生成目标图像。\n\n```bash\npython inference_ldm_goal_image.py \\\n    --ckpt_folder anyezhy\u002F3dvla-diffusion \\\n    --image docs\u002Fcans.png --text \"knock pepsi can over\" \\\n    --save_path result.png\n```\n\n若需包含深度信息（Depth），请使用以下命令：\n\n```bash\npython inference_ldm_goal_image.py \\\n    --ckpt_folder anyezhy\u002F3dvla-diffusion-depth --include_depth \\\n    --image docs\u002Fbottle.png --text \"move water bottle near sponge\" \\\n    --save_path result.png\n```\n\n### 3.2 目标点云生成 (Goal Point Cloud Generation)\n\n使用预训练的 Point-E 微调模型生成目标点云。\n\n```bash\npython inference_pe_goal_pcd.py \\\n    --input_npy docs\u002Fpoint_cloud.npy --text \"close bottom drawer\" \\\n    --output_dir SAVE_PATH\n```\n\n---\n\n## 4. 引用信息\n\n如果您在研究中使用此代码或模型，请引用：\n\n```bibtex\n@article{zhen20243dvla,\n  author = {Zhen, Haoyu and Qiu, Xiaowen and Chen, Peihao and Yang, Jincheng and Yan, Xin and Du, Yilun and Hong, Yining and Gan, Chuang},\n  title = {3D-VLA: 3D Vision-Language-Action Generative World Model},\n  journal = {arXiv preprint arXiv:2403.09631},\n  year = {2024},\n}\n```\n\n更多详细信息请访问 [项目主页](https:\u002F\u002Fvis-www.cs.umass.edu\u002F3dvla) 或查看 [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.09631)。","某物流仓储中心正在部署智能分拣机器人，要求它能根据自然语言指令（如“把左边那瓶水拿下来”）从杂乱堆叠的货架上精准抓取指定商品。\n\n### 没有 3D-VLA 时\n- 依赖传统 2D 摄像头，系统难以准确判断物体间的深度遮挡关系，导致抓取路径规划错误。\n- 机器人无法将模糊的自然语言指令转化为具体的三维空间坐标，需要人工预先标定每个物品位置。\n- 面对新商品或货架布局微调时，必须收集大量新数据进行全量重训，迭代周期长达数周。\n- 缺乏对物理环境的动态预测能力，机械臂在执行过程中容易发生碰撞或抓空。\n\n### 使用 3D-VLA 后\n- 3D-VLA 直接融合点云与图像信息，实时构建高精度的三维环境模型，彻底解决深度感知盲区。\n- 通过多模态大语言模型理解语义，自动生成目标物体的理想状态图像与点云，精确锁定抓取位姿。\n- 基于生成式世界模型模拟动作结果，能预判机械臂运动轨迹是否安全，大幅降低试错成本。\n- 具备强大的泛化能力，即使面对未见过的商品类别，也能通过语言描述快速适配新的抓取任务。\n\n3D-VLA 让机器人真正具备了在复杂三维环境中理解人类指令并安全、灵活执行任务的核心能力。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FUMass-Embodied-AGI_3D-VLA_52de1334.png","UMass-Embodied-AGI","UMass Embodied AGI Group","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002FUMass-Embodied-AGI_bd6a3d2e.png","Embodied AGI Group at University of Massachusetts Amherst",null,"https:\u002F\u002Fembodied-agi.cs.umass.edu\u002F","https:\u002F\u002Fgithub.com\u002FUMass-Embodied-AGI",[83,87],{"name":84,"color":85,"percentage":86},"Python","#3572A5",99.6,{"name":88,"color":89,"percentage":90},"Shell","#89e051",0.4,619,24,"2026-04-01T01:48:19","未说明","需要 NVIDIA GPU (xFormers 加速)，支持多卡训练，具体显存要求未说明",{"notes":97,"python":98,"dependencies":99},"使用 conda 创建 python=3.9 虚拟环境；首次运行代码会自动从 Hugging Face 下载模型文件；点云生成任务建议安装 xFormers 以加速；支持多 GPU 分布式训练与推理","3.9",[100,101,102,103,104,105,106,107,108,109],"torch","xFormers","LAVIS","Diffusers","3D-LLM","Point-E","SAM","InstructPix2Pix","StableDiffusion","transformers",[26,14,15,54],"2026-03-27T02:49:30.150509","2026-04-06T08:46:05.982571",[],[]]