[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-mbzuai-oryx--groundingLMM":3,"tool-mbzuai-oryx--groundingLMM":64},[4,17,27,35,43,56],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":16},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,3,"2026-04-05T11:01:52",[13,14,15],"开发框架","图像","Agent","ready",{"id":18,"name":19,"github_repo":20,"description_zh":21,"stars":22,"difficulty_score":23,"last_commit_at":24,"category_tags":25,"status":16},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",138956,2,"2026-04-05T11:33:21",[13,15,26],"语言模型",{"id":28,"name":29,"github_repo":30,"description_zh":31,"stars":32,"difficulty_score":23,"last_commit_at":33,"category_tags":34,"status":16},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",107662,"2026-04-03T11:11:01",[13,14,15],{"id":36,"name":37,"github_repo":38,"description_zh":39,"stars":40,"difficulty_score":23,"last_commit_at":41,"category_tags":42,"status":16},3704,"NextChat","ChatGPTNextWeb\u002FNextChat","NextChat 是一款轻量且极速的 AI 助手，旨在为用户提供流畅、跨平台的大模型交互体验。它完美解决了用户在多设备间切换时难以保持对话连续性，以及面对众多 AI 模型不知如何统一管理的痛点。无论是日常办公、学习辅助还是创意激发，NextChat 都能让用户随时随地通过网页、iOS、Android、Windows、MacOS 或 Linux 端无缝接入智能服务。\n\n这款工具非常适合普通用户、学生、职场人士以及需要私有化部署的企业团队使用。对于开发者而言，它也提供了便捷的自托管方案，支持一键部署到 Vercel 或 Zeabur 等平台。\n\nNextChat 的核心亮点在于其广泛的模型兼容性，原生支持 Claude、DeepSeek、GPT-4 及 Gemini Pro 等主流大模型，让用户在一个界面即可自由切换不同 AI 能力。此外，它还率先支持 MCP（Model Context Protocol）协议，增强了上下文处理能力。针对企业用户，NextChat 提供专业版解决方案，具备品牌定制、细粒度权限控制、内部知识库整合及安全审计等功能，满足公司对数据隐私和个性化管理的高标准要求。",87618,"2026-04-05T07:20:52",[13,26],{"id":44,"name":45,"github_repo":46,"description_zh":47,"stars":48,"difficulty_score":23,"last_commit_at":49,"category_tags":50,"status":16},2268,"ML-For-Beginners","microsoft\u002FML-For-Beginners","ML-For-Beginners 是由微软推出的一套系统化机器学习入门课程，旨在帮助零基础用户轻松掌握经典机器学习知识。这套课程将学习路径规划为 12 周，包含 26 节精炼课程和 52 
- **ML-For-Beginners** (`microsoft/ML-For-Beginners`, ~85k stars): Microsoft's structured introduction to classic machine learning. The 12-week curriculum of 26 lessons and 52 quizzes takes beginners from core concepts to practical applications, pairing clear theory with hands-on practice. Automated translation provides versions in more than 50 languages, including Simplified Chinese, and the open, community-driven project is actively maintained.
- **ragflow** (`infiniflow/ragflow`, ~77k stars): A leading open-source retrieval-augmented generation (RAG) engine that builds a more accurate, reliable context layer for large language models and combines RAG with agent capabilities. Deep parsing of complex document structures (tables, charts, mixed layouts) improves retrieval accuracy and reduces hallucinations, while the built-in agent mechanism lets the system plan multi-step solutions rather than only answer questions. It offers a visual workflow editor and flexible APIs for developers, enterprise teams, and AI researchers, and is released under Apache 2.0.

---

# mbzuai-oryx/groundingLMM

[CVPR 2024 🔥] Grounding Large Multimodal Model (GLaMM), the first-of-its-kind model capable of generating natural language responses that are seamlessly integrated with object segmentation masks.

groundingLMM (GLaMM) is a multimodal model accepted to CVPR 2024 that is the first to integrate natural-language responses with object segmentation masks. When a user refers to a specific object in an image (say, "the red cup on the left"), the model not only answers but also outlines the corresponding region directly on the image, addressing the difficulty traditional models have in tying text precisely to pixel locations. It accepts both image and region inputs, unifies phrase grounding, referring-expression segmentation, and visual dialogue through end-to-end training, and is trained on the GranD dataset (7.5M unique concepts, 810M region annotations). It is suited to researchers and developers building high-precision visual-interaction systems for intelligent design, robot vision, or medical-image analysis.

# GLaMM <img src="https://oss.gittoolsai.com/images/mbzuai-oryx_groundingLMM_readme_e2eeb2921862.png" height="40">: Pixel Grounding Large Multimodal Model [CVPR 2024]
<p align="center">
    <img src="https://i.imgur.com/waxVImv.png" alt="Oryx Video-ChatGPT">
</p>
#### [Hanoona Rasheed](https://www.hanoonarasheed.com/)\*, [Muhammad Maaz](https://www.mmaaz60.com)\*, [Sahal Shaji](https://www.linkedin.com/in/sahalshajim), [Abdelrahman Shaker](https://amshaker.github.io), [Salman Khan](https://salman-h-khan.github.io/), [Hisham Cholakkal](https://scholar.google.ae/citations?user=bZ3YBRcAAAAJ&hl=fr), [Rao M. Anwer](https://scholar.google.fi/citations?user=_KlvMVoAAAAJ&hl=en), [Eric Xing](https://www.cs.cmu.edu/~epxing), [Ming-Hsuan Yang](https://scholar.google.com.pk/citations?user=p9-ohHsAAAAJ&hl=en) and [Fahad Khan](https://sites.google.com/view/fahadkhans/home)

#### **Mohamed bin Zayed University of AI, Australian National University, Aalto University, Carnegie Mellon University, University of California - Merced, Linköping University, Google Research**

[![paper](https://img.shields.io/badge/arXiv-Paper-blue.svg)](https://arxiv.org/abs/2311.03356)
[![Dataset](https://img.shields.io/badge/Dataset-Access-<COLOR>)](https://grounding-anything.com)
[![Demo](https://img.shields.io/badge/Online-Demo-red)](https://glamm.mbzuai-oryx.ngrok.app)
[![Website](https://img.shields.io/badge/Project-Website-87CEEB)](https://mbzuai-oryx.github.io/groundingLMM)
[![video](https://img.shields.io/badge/Video-Presentation-F9D371)](https://www.youtube.com/watch?v=0dZ4dlNIGTY)

---

## 📢 Latest Updates
- **Nov-07-24**: VideoGLaMM is released. It extends the grounded conversation generation task to videos 🎥! Check it out at [VideoGLaMM](https://mbzuai-oryx.github.io/VideoGLaMM/) 🔥🔥
- **Mar-21-24**: We're excited to announce the release of the [GranD](https://grounding-anything.com) dataset and the [GranD Automated Annotation Pipeline](docs/GranD.md#preparing-the-pretraining-annotations-from-grand-) 🔥
- **Feb-27-24**: We're thrilled to share that GLaMM has been accepted to CVPR 2024! 🎊
- **Dec-27-23**: GLaMM training and evaluation code, pretrained checkpoints, and the GranD-f dataset are released [click for details](#-dive-deeper-inside-glamms-training-and-evaluation) 🔥🔥
- **Nov-29-23**: The GLaMM online interactive demo is released [demo link](https://glamm.mbzuai-oryx.ngrok.app). 🔥
- **Nov-07-23**: The GLaMM paper is released [arXiv link](https://arxiv.org/abs/2311.03356). 🌟
- 🌟 **Featured**: GLaMM is highlighted at the top of AK's [Daily Papers](https://huggingface.co/papers?date=2023-11-07) page on HuggingFace! 🌟

---

## <img src="https://oss.gittoolsai.com/images/mbzuai-oryx_groundingLMM_readme_e2eeb2921862.png" height="40"> GLaMM Overview

Grounding Large Multimodal Model (GLaMM) is an end-to-end trained LMM which provides visual grounding capabilities with the flexibility to process both image and region inputs. This enables the new unified task of Grounded Conversation Generation that combines phrase grounding, referring expression segmentation, and vision-language conversations. Equipped with detailed region understanding, pixel-level grounding, and conversational abilities, GLaMM can interact with visual inputs provided by the user at multiple levels of granularity.

---

## 🏆 Contributions

- **GLaMM Introduction.** We present the Grounding Large Multimodal Model (GLaMM), the first-of-its-kind model capable of generating natural language responses that are seamlessly integrated with object segmentation masks.

- **Novel Task & Evaluation.** We propose the new task of Grounded Conversation Generation (GCG). We also introduce a comprehensive evaluation protocol for this task.

- **GranD Dataset Creation.** We create GranD, the Grounding-anything Dataset: a large-scale, densely annotated dataset with 7.5M unique concepts grounded in 810M regions.

---

## 🚀 Dive Deeper: Inside GLaMM's Training and Evaluation

Delve into the core of GLaMM with our detailed guides on the model's training and evaluation methodologies.
- [**Installation**](./docs/install.md): Provides a guide to setting up the conda environment for running GLaMM training, evaluation, and the demo.

- [**Datasets**](./docs/datasets.md): Provides detailed instructions to download and arrange the datasets required for training and evaluation.

- [**GranD**](./docs/GranD.md): Provides detailed instructions to download the GranD dataset and run the automated annotation pipeline.

- [**Model Zoo**](./docs/model_zoo.md): Provides download links for all pretrained GLaMM checkpoints.

- [**Training**](./docs/training.md): Provides instructions for training GLaMM for its various capabilities, including Grounded Conversation Generation (GCG), region-level captioning, and referring expression segmentation.

- [**Evaluation**](./docs/evaluation.md): Outlines the procedures for evaluating GLaMM with the pretrained checkpoints, covering Grounded Conversation Generation (GCG), region-level captioning, and referring expression segmentation, as reported in our paper.

- [**Demo**](./docs/offline_demo.md): Guides you through setting up a local demo that showcases GLaMM's functionality.

## 👁️💬 GLaMM: Grounding Large Multimodal Model

The components of GLaMM are cohesively designed to handle both textual and optional visual prompts (image level and region of interest), allowing for interaction at multiple levels of granularity and generating grounded text responses.

<p align="center">
  <img src="images/glamm/model_arch.png" alt="GLaMM Architectural Overview">
</p>

---
## 🔍 Grounding-anything Dataset (GranD)

The [Grounding-anything](https://grounding-anything.com/) dataset (GranD) is a large-scale dataset with an automated annotation pipeline for detailed region-level understanding and segmentation masks. GranD comprises 7.5M unique concepts anchored in a total of 810M regions, each with a segmentation mask.

<p align="center">
  <img src="https://oss.gittoolsai.com/images/mbzuai-oryx_groundingLMM_readme_8735d74809ad.png" alt="Dataset Annotation Pipeline">
</p>

---
Below we present some examples from the GranD dataset.

<p align="center">
  <img src="https://oss.gittoolsai.com/images/mbzuai-oryx_groundingLMM_readme_9c03249e0da1.png" alt="GranD Dataset Sample">
</p>

<p align="center">
  <img src="https://oss.gittoolsai.com/images/mbzuai-oryx_groundingLMM_readme_ad3d89f39ecd.png" alt="GranD Dataset Sample">
</p>

---

## 📚 Building GranD-f for Grounded Conversation Generation

The [GranD-f](https://grounding-anything.com/GranD-f) dataset is designed for the GCG task and provides about 214K grounded image-text pairs of higher quality for the fine-tuning stage.

<p align="center">
  <img src="https://oss.gittoolsai.com/images/mbzuai-oryx_groundingLMM_readme_4384852285e1.png" alt="GranD-f Dataset Sample">
</p>

---

## 🤖 Grounded Conversation Generation (GCG)

GCG is a task that produces image-level captions tied to segmentation masks, strengthening the model's visual grounding while it captions in natural language.

<p align="center">
  <img src="https://oss.gittoolsai.com/images/mbzuai-oryx_groundingLMM_readme_d16425491b6f.png" alt="Results_GCG">
</p>

<p align="center">
  <img src="https://oss.gittoolsai.com/images/mbzuai-oryx_groundingLMM_readme_0ebfcc88a6bf.png" alt="GCG_Table">
</p>
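In GCG, each grounded phrase in a caption is paired with a segmentation mask. Purely as an illustration of what "captions tied to masks" means in data terms, a record could be shaped like the sketch below; this is a hypothetical schema, not the repository's actual annotation or output format.

```python
# Hypothetical record shape for a grounded caption. NOT the repository's actual
# format; it only illustrates how caption phrases map to segmentation masks.
from dataclasses import dataclass, field
from typing import List

@dataclass
class GroundedPhrase:
    phrase: str    # e.g. "a red cup"
    start: int     # character offset of the phrase within the caption
    end: int       # end offset (exclusive)
    mask_rle: str  # segmentation mask, e.g. in a run-length encoding

@dataclass
class GroundedCaption:
    image_id: str
    caption: str                                                   # full image-level caption
    phrases: List[GroundedPhrase] = field(default_factory=list)    # one mask per grounded phrase
```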
---

## 🚀 Downstream Applications

### 🎯 Referring Expression Segmentation

Our model excels at creating segmentation masks from text-based referring expressions.

<p align="center">
  <img src="https://oss.gittoolsai.com/images/mbzuai-oryx_groundingLMM_readme_14867d25bddd.png" alt="Results_RefSeg">
</p>

<p align="center">
  <img src="https://oss.gittoolsai.com/images/mbzuai-oryx_groundingLMM_readme_cccc7238bbf1.png" alt="Table_RefSeg">
</p>

---

### 🖼️ Region-Level Captioning

GLaMM generates detailed region-specific captions and answers reasoning-based visual questions.

<p align="center">
  <img src="https://oss.gittoolsai.com/images/mbzuai-oryx_groundingLMM_readme_bcb24ffd280c.png" alt="Results_RegionCap">
</p>

<p align="center">
  <img src="https://oss.gittoolsai.com/images/mbzuai-oryx_groundingLMM_readme_4c1207ab6be4.png" alt="Table_RegionCap">
</p>

---

### 📷 Image Captioning

Comparing favorably to specialized models, GLaMM provides high-quality image captioning.

<p align="center">
  <img src="https://oss.gittoolsai.com/images/mbzuai-oryx_groundingLMM_readme_bc802f0087e3.png" alt="Results_Cap">
</p>

---

## 💬 Conversational-Style Question Answering

GLaMM engages in detailed, region-specific, grounded conversations, highlighting its adaptability in intricate visual-language interactions and its ability to retain the reasoning capabilities inherent to LLMs.

<p align="center">
  <img src="https://oss.gittoolsai.com/images/mbzuai-oryx_groundingLMM_readme_8c55d68cd09e.png" alt="Results_Conv">
</p>

---

<p align="center">
  <img src="https://oss.gittoolsai.com/images/mbzuai-oryx_groundingLMM_readme_1f8538aaf212.png" alt="Results_Conv">
</p>

---

## 📜 Citation
```bibtex
@article{hanoona2023GLaMM,
        title={GLaMM: Pixel Grounding Large Multimodal Model},
        author={Rasheed, Hanoona and Maaz, Muhammad and Shaji, Sahal and Shaker, Abdelrahman and Khan, Salman and Cholakkal, Hisham and Anwer, Rao M. and Xing, Eric and Yang, Ming-Hsuan and Khan, Fahad S.},
        journal={The IEEE/CVF Conference on Computer Vision and Pattern Recognition},
        year={2024}
}
```

---
## 🙏 Acknowledgement
We are thankful to LLaVA, GPT4ROI, and LISA for releasing their models and code as open-source contributions.

---
[<img src="https://oss.gittoolsai.com/images/mbzuai-oryx_groundingLMM_readme_45d2297f2f63.png" width="200" height="100">](https://www.ival-mbzuai.com)
[<img src="https://oss.gittoolsai.com/images/mbzuai-oryx_groundingLMM_readme_f7ee9d1ef19f.png" width="100" height="100">](https://github.com/mbzuai-oryx)
[<img src="https://oss.gittoolsai.com/images/mbzuai-oryx_groundingLMM_readme_5538daa7b5d2.png" width="360" height="85">](https://mbzuai.ac.ae)
---

## Quickstart

### Requirements
- **OS**: Linux (Ubuntu 20.04+) or Windows WSL2
- **GPU**: NVIDIA with at least 16 GB of VRAM; RTX 3090 or better recommended
- **CUDA**: 11.8+ (install beforehand)
- **Python**: 3.8+

### Installation
```bash
git clone https://github.com/mbzuai-oryx/groundingLMM.git
cd groundingLMM
conda create -n glamm python=3.10 -y && conda activate glamm
# PyTorch CUDA 11.8 wheels come from the PyTorch index; remaining dependencies
# are installed from the Tsinghua PyPI mirror.
pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118
pip install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple
mkdir -p checkpoints && wget https://hf-mirror.com/mbzuai-oryx/GLaMM/resolve/main/glamm_v1.0.pth -P checkpoints
```
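After installation, it can help to confirm that PyTorch actually sees a CUDA device meeting the requirements above before running the demo. Below is a minimal sketch; `check_env.py` is a hypothetical helper, not part of the repository, and assumes the PyTorch install from the previous step.

```python
# check_env.py: quick sanity check of the CUDA setup (hypothetical helper,
# not included in the groundingLMM repository).
import torch

def main() -> None:
    if not torch.cuda.is_available():
        raise SystemExit("CUDA is not available; GLaMM inference needs an NVIDIA GPU.")
    props = torch.cuda.get_device_properties(torch.cuda.current_device())
    vram_gb = props.total_memory / 1024**3
    print(f"GPU: {props.name} | VRAM: {vram_gb:.1f} GB | CUDA build: {torch.version.cuda}")
    if vram_gb < 16:
        print("Warning: less than 16 GB of VRAM; the demo may run out of memory.")

if __name__ == "__main__":
    main()
```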
### Basic usage
```bash
python demo.py --model checkpoints/glamm_v1.0.pth --image examples/sample.jpg --prompt "Describe the contents of this image"
```
The demo prints a description annotated with image regions; the image path and prompt can be customized.

## Example use case

A clothing e-commerce platform's customer-service team handles thousands of user-uploaded try-on photos every day and needs to automatically identify garment parts and generate size-adjustment suggestions.

### Without groundingLMM
- Conventional object detection only yields rectangular boxes and cannot distinguish details such as collars from cuffs, so suggestions go wrong (e.g., a cuff mistaken for a collar)
- Manual annotation takes about 3 minutes per image; the backlog grows and users wait more than 24 hours
- Stains or damage in specific regions go undetected, leading to frequent after-sales disputes
- With several people in one photo, garments cannot be attributed to the right person; up to 40% of suggestions are mixed up

### With groundingLMM
- Pixel-level segmentation localizes collars, cuffs, and other parts precisely, enabling concrete suggestions such as "enlarge the left cuff by 10%"
- Processing speeds up to about 5 seconds per image, and user wait time drops to under 5 minutes
- Damaged regions are highlighted automatically with repair suggestions, cutting after-sales disputes by about 60%
- Each person's garments are segmented independently in group photos, raising suggestion accuracy to about 95%

groundingLMM upgrades visual understanding from "recognizing objects" to "understanding scene details", removing the manual-annotation bottleneck and letting the system genuinely read every pixel in an image.

## Project metadata
- **Repository**: `mbzuai-oryx/groundingLMM` · ~950 stars · 53 forks · Apache-2.0 license
- **Languages**: Python 99.6%, Shell 0.4%
- **Owner**: ORYX (`mbzuai-oryx`), "A Library for Large Vision-Language Models", https://ival-mbzuai.com
- **Topics**: foundation-models, lmm, vision-and-language, vision-language-model, llm-agent
- **Environment notes**: conda is recommended for environment management; the model weights must be downloaded before the first run

## FAQ

**When will the code, models, and data be released?**
The dataset and code are already released. Dataset access: https://github.com/mbzuai-oryx/groundingLMM/blob/main/docs/GranD.md. Training and evaluation code: https://github.com/mbzuai-oryx/groundingLMM?tab=readme-ov-file#-dive-deeper-inside-glamms-training-and-evaluation (source: https://github.com/mbzuai-oryx/groundingLMM/issues/1)

**What are the steps for running the GranD automated annotation pipeline?**
First create the environments, e.g. `conda create --name grand_env_1 --file requirements_grand_env_1.txt`, and repeat for all of them (grand_env_1 through grand_env_9, plus grand_env_utils). Then run the pipeline scripts with these arguments: IMG_DIR (image directory), PRED_DIR (directory where predictions are saved), CKPT_DIR (checkpoint directory), SAM_ANNOTATIONS_DIR (SAM annotation directory). A scripted version of the environment-creation step is sketched after this FAQ. (source: https://github.com/mbzuai-oryx/groundingLMM/issues/35)

**How do I train the model from scratch?**
The model is initialized from LLaVA-1.5. Set the `--pretrained` argument to False when training; this is configured in train.py. (source: https://github.com/mbzuai-oryx/groundingLMM/issues/31)

**Why are the demo's captions simple, and why can't I reproduce the detailed results from the paper?**
The demo uses a fine-tuned model that does not support phrase grounding. The results in the paper come from a specific model that has not been released with the demo. Refer to the quantitative results in the paper and the repository for the model's capabilities. (source: https://github.com/mbzuai-oryx/groundingLMM/issues/25)
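The environment-creation step from the GranD pipeline FAQ above can be scripted. The sketch below only automates the `conda create` loop; the requirements-file names beyond `requirements_grand_env_1.txt` are assumed to follow the same naming pattern and are not confirmed by the material quoted here.

```python
# setup_grand_envs.py: scripts the conda environment creation described in the
# GranD pipeline FAQ. The requirements-file naming beyond grand_env_1 is an
# assumption; adjust it to the files actually shipped in the repository.
import subprocess

ENV_NAMES = [f"grand_env_{i}" for i in range(1, 10)] + ["grand_env_utils"]

for name in ENV_NAMES:
    req_file = f"requirements_{name}.txt"
    print(f"Creating conda environment '{name}' from {req_file} ...")
    # Equivalent to: conda create --yes --name <name> --file <req_file>
    subprocess.run(
        ["conda", "create", "--yes", "--name", name, "--file", req_file],
        check=True,
    )
```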