[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-Hedlen--awesome-segment-anything":3,"tool-Hedlen--awesome-segment-anything":61},[4,18,26,36,44,53],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":17},4358,"openclaw","openclaw\u002Fopenclaw","OpenClaw 是一款专为个人打造的本地化 AI 助手，旨在让你在自己的设备上拥有完全可控的智能伙伴。它打破了传统 AI 助手局限于特定网页或应用的束缚，能够直接接入你日常使用的各类通讯渠道，包括微信、WhatsApp、Telegram、Discord、iMessage 等数十种平台。无论你在哪个聊天软件中发送消息，OpenClaw 都能即时响应，甚至支持在 macOS、iOS 和 Android 设备上进行语音交互，并提供实时的画布渲染功能供你操控。\n\n这款工具主要解决了用户对数据隐私、响应速度以及“始终在线”体验的需求。通过将 AI 部署在本地，用户无需依赖云端服务即可享受快速、私密的智能辅助，真正实现了“你的数据，你做主”。其独特的技术亮点在于强大的网关架构，将控制平面与核心助手分离，确保跨平台通信的流畅性与扩展性。\n\nOpenClaw 非常适合希望构建个性化工作流的技术爱好者、开发者，以及注重隐私保护且不愿被单一生态绑定的普通用户。只要具备基础的终端操作能力（支持 macOS、Linux 及 Windows WSL2），即可通过简单的命令行引导完成部署。如果你渴望拥有一个懂你",349277,3,"2026-04-06T06:32:30",[13,14,15,16],"Agent","开发框架","图像","数据工具","ready",{"id":19,"name":20,"github_repo":21,"description_zh":22,"stars":23,"difficulty_score":10,"last_commit_at":24,"category_tags":25,"status":17},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,"2026-04-05T11:01:52",[14,15,13],{"id":27,"name":28,"github_repo":29,"description_zh":30,"stars":31,"difficulty_score":32,"last_commit_at":33,"category_tags":34,"status":17},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 
代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",160411,2,"2026-04-18T23:33:24",[14,13,35],"语言模型",{"id":37,"name":38,"github_repo":39,"description_zh":40,"stars":41,"difficulty_score":32,"last_commit_at":42,"category_tags":43,"status":17},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",109154,"2026-04-18T11:18:24",[14,15,13],{"id":45,"name":46,"github_repo":47,"description_zh":48,"stars":49,"difficulty_score":32,"last_commit_at":50,"category_tags":51,"status":17},6121,"gemini-cli","google-gemini\u002Fgemini-cli","gemini-cli 是一款由谷歌推出的开源 AI 命令行工具，它将强大的 Gemini 大模型能力直接集成到用户的终端环境中。对于习惯在命令行工作的开发者而言，它提供了一条从输入提示词到获取模型响应的最短路径，无需切换窗口即可享受智能辅助。\n\n这款工具主要解决了开发过程中频繁上下文切换的痛点，让用户能在熟悉的终端界面内直接完成代码理解、生成、调试以及自动化运维任务。无论是查询大型代码库、根据草图生成应用，还是执行复杂的 Git 操作，gemini-cli 都能通过自然语言指令高效处理。\n\n它特别适合广大软件工程师、DevOps 人员及技术研究人员使用。其核心亮点包括支持高达 100 万 token 的超长上下文窗口，具备出色的逻辑推理能力；内置 Google 搜索、文件操作及 Shell 命令执行等实用工具；更独特的是，它支持 MCP（模型上下文协议），允许用户灵活扩展自定义集成，连接如图像生成等外部能力。此外，个人谷歌账号即可享受免费的额度支持，且项目基于 Apache 2.0 
协议完全开源，是提升终端工作效率的理想助手。",100752,"2026-04-10T01:20:03",[52,13,15,14],"插件",{"id":54,"name":55,"github_repo":56,"description_zh":57,"stars":58,"difficulty_score":10,"last_commit_at":59,"category_tags":60,"status":17},4487,"LLMs-from-scratch","rasbt\u002FLLMs-from-scratch","LLMs-from-scratch 是一个基于 PyTorch 的开源教育项目，旨在引导用户从零开始一步步构建一个类似 ChatGPT 的大型语言模型（LLM）。它不仅是同名技术著作的官方代码库，更提供了一套完整的实践方案，涵盖模型开发、预训练及微调的全过程。\n\n该项目主要解决了大模型领域“黑盒化”的学习痛点。许多开发者虽能调用现成模型，却难以深入理解其内部架构与训练机制。通过亲手编写每一行核心代码，用户能够透彻掌握 Transformer 架构、注意力机制等关键原理，从而真正理解大模型是如何“思考”的。此外，项目还包含了加载大型预训练权重进行微调的代码，帮助用户将理论知识延伸至实际应用。\n\nLLMs-from-scratch 特别适合希望深入底层原理的 AI 开发者、研究人员以及计算机专业的学生。对于不满足于仅使用 API，而是渴望探究模型构建细节的技术人员而言，这是极佳的学习资源。其独特的技术亮点在于“循序渐进”的教学设计：将复杂的系统工程拆解为清晰的步骤，配合详细的图表与示例，让构建一个虽小但功能完备的大模型变得触手可及。无论你是想夯实理论基础，还是为未来研发更大规模的模型做准备",90106,"2026-04-06T11:19:32",[35,15,13,14],{"id":62,"github_repo":63,"name":64,"description_en":65,"description_zh":66,"ai_summary_zh":66,"readme_en":67,"readme_zh":68,"quickstart_zh":69,"use_case_zh":70,"hero_image_url":71,"owner_login":72,"owner_name":73,"owner_avatar_url":74,"owner_bio":75,"owner_company":76,"owner_location":77,"owner_email":78,"owner_twitter":75,"owner_website":79,"owner_url":80,"languages":75,"stars":81,"forks":82,"last_commit_at":83,"license":84,"difficulty_score":32,"env_os":85,"env_gpu":86,"env_ram":86,"env_deps":87,"category_tags":90,"github_topics":91,"view_count":32,"oss_zip_url":75,"oss_zip_packed_at":75,"status":17,"created_at":102,"updated_at":103,"faqs":104,"releases":150},9390,"Hedlen\u002Fawesome-segment-anything","awesome-segment-anything","Tracking and collecting papers\u002Fprojects\u002Fothers related to Segment Anything.","awesome-segment-anything 是一个专注于追踪和整理\"Segment Anything\"（SAM）模型生态的开源资源库。随着 SAM 在计算机视觉领域引发突破性进展，相关研究呈爆发式增长，开发者往往难以高效获取最新成果。该资源库正是为了解决这一信息分散痛点而生，它系统性地汇总了全球范围内与 SAM 相关的学术论文、衍生项目及应用案例。\n\n内容覆盖极为广泛，不仅包含基础模型论文，还深入细分至医疗影像分割、视频目标跟踪、图像修复、3D 重建、遥感分析乃至机器人交互等十多个前沿方向。其独特亮点在于极高的更新频率与结构化分类，从理论分析到代码实现，甚至包含了前端 JS SDK 等实用工具，帮助用户快速定位所需资源。无论是希望跟进最新算法的研究人员，还是寻求将 SAM 
集成到实际产品中的开发者，都能在此找到极具价值的参考。对于设计师或对 AI 视觉技术感兴趣的学习者，这里也是探索“万物皆可分割”技术边界的理想窗口。通过持续维护与社区共享，awesome-segment-anything 致力于成为连接 SAM 理论基础与创新应用的重要桥梁。","\n\u003Cdiv align=\"center\">\n\u003Cbr>\n\u003Cimage src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_4cbbd9b616db.png\", width=\"600px\", height=\"287px\">\n\u003Cbr>\n\u003C\u002Fdiv>\n\u003C!-- ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_4cbbd9b616db.png) -->\n\n# Awesome Segment Anything [![Awesome](https:\u002F\u002Fcdn.rawgit.com\u002Fsindresorhus\u002Fawesome\u002Fd7305f38d29fed78fa85652e3a63e154dd8e8829\u002Fmedia\u002Fbadge.svg)](https:\u002F\u002Fgithub.com\u002Fsindresorhus\u002Fawesome)\nSegment Anything has led to a new breakthrough in the field of Computer Vision (CV), and this repository will continue to track and summarize the research progress of Segment Anything in various fields, including Papers\u002FProjects, etc. \n\nIf you find this repository helpful, please consider Stars ⭐ or Sharing ⬆️. Thanks.\n\n## News\n```\n- 2024.8.16 Add Segment Anything2 and SaLIP.\n- 2023.8.29: Update some recent works.\n- 2023.5.20: Update document structure and add a robotic-related article. 
Happy 520 Day！\n- 2023.5.4: Add SEEM.\n- 2023.4.18: Add job Inpainting Anything and SAM-Track.\n- 2023.4.12: An initial version of recent papers or projects.\n```\n\n## Contents\n\n- [Basemodel Papers](#basemodel-papers) \n- [Derivative Papers](#derivative-papers)\n  - [Analysis and Expansion of SAM](#analysis-and-expansion-of-sam)\n  - [Medical Image Segmentation](#medical-image-segmentation)\n  - [Inpainting](#inpainting)\n  - [Camouflaged Object Detection](#camouflaged-object-detection)\n  - [Video Frame Interpolation](#video-frame-interpolation)\n  - [Low Level Vision](#low-level-vision)\n  - [Image Matting](#image-matting)\n  - [Robotic](#robotic)\n  - [Bioinformatics](#bioinformatics)\n  - [3D](#3d)\n  - [Remote Sensing](#remote-sensing)\n  - [Tracking](#tracking)\n  - [Audio-visual Localization and Segmentation](#audio-visual-localization-and-segmentation)\n  - [Adversarial Attacks](#adversarial-attacks)\n- [Derivative Projects](#derivative-projects) \n  - [Image Segmentation task](#image-segmentation-task)\n  - [Video Segmentation task](#video-segmentation-task)\n  - [Medical image Segmentation task](#medical-image-segmentation-task)\n  - [Inpainting task](#inpainting-task)\n  - [3D task](#3d-task)\n  - [Image Generation task](#image-generation-task)\n  - [Remote Sensing task](#remote-sensing-task)\n  - [Moving Object Detection task](#moving-object-detection-task)\n  - [OCR task](#ocr-task)\n- [front-end framework](#front-end-framework)\n   - [JS SDK for SAM](#samjs)\n\n## Papers\u002FProjects\n### Basemodel Papers\n| Title |Presentation| Paper page | Project page | Code base | Affiliation| Description|\n|:---:|:---:|:---:|:---:| :---:| :---:|:---:|\n| CLIP | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_6616cf5de1ef.png) | [arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.00020) | 
[Colab](https:\u002F\u002Fcolab.research.google.com\u002Fgithub\u002Fopenai\u002Fclip\u002Fblob\u002Fmaster\u002Fnotebooks\u002FInteracting_with_CLIP.ipynb) | [Code](https:\u002F\u002Fgithub.com\u002Fopenai\u002FCLIP) | OpenAI | Contrastive Language-Image Pre-Training.| \n| OWL-ViT | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_a188b8992be7.gif)| [ECCV2022](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.06230) | - | [Code](https:\u002F\u002Fgithub.com\u002Fgoogle-research\u002Fscenic\u002Ftree\u002Fmain\u002Fscenic\u002Fprojects\u002Fowl_vit) | Google | A open-vocabulary object detector. | \n| OvSeg | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_ccd7bd4c7a73.gif) | [CVPR2023](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.04150) | [Project](https:\u002F\u002Fjeff-liangf.github.io\u002Fprojects\u002Fovseg\u002F) | [Code](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fov-seg) | META | Segment an image into semantic regions according to text descriptions.| \n| Painter | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_76cb44b68e76.jpg) | [CVPR2023](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.02499) | - | [Code](https:\u002F\u002Fgithub.com\u002Fbaaivision\u002FPainter) | BAAI | A Generalist Painter for In-Context Visual Learning.| \n| Grounding DINO | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_ab6e4790a438.png)| [arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.05499) | [Colab](https:\u002F\u002Fcolab.research.google.com\u002Fgithub\u002Froboflow-ai\u002Fnotebooks\u002Fblob\u002Fmain\u002Fnotebooks\u002Fzero-shot-object-detection-with-grounding-dino.ipynb) &[Huggingface](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002FShilongLiu\u002FGrounding_DINO_demo) | [Code](https:\u002F\u002Fgithub.com\u002FIDEA-Research\u002FGroundingDINO) | 
IDEA | A stronger open-set object detector|\n| Segment Anything | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_198eb5158aa3.png)![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_8b3b879859a2.jpg)| [arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.02643) | [Project page](https:\u002F\u002Fsegment-anything.com\u002F) | [Code](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fsegment-anything) | Meta | A stronger Large model which can be used to generate masks for all objects in an image.| \n| SegGPT | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_8e2112165621.png)| [arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.03284) | [Project page](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002FBAAI\u002FSegGPT) | [Code](https:\u002F\u002Fgithub.com\u002Fbaaivision\u002FPainter) | BAAI | Segmenting Everything In Context based on Painter.|\n| Segment Everything Everywhere All at Once (SEEM) | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_a2f2edc48c74.png) |[arXiv](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2304.06718.pdf) | [Project Page](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002Fxdecoder\u002FSEEM) | [Code](https:\u002F\u002Fgithub.com\u002FUX-Decoder\u002FSegment-Everything-Everywhere-All-At-Once)| Microsoft | Semantic Segmentation with various prompt types.|\n| Segment Everything2  | ![img]([https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_a2f2edc48c74.png](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fsegment-anything-2\u002Fblob\u002Fmain\u002Fassets\u002Fmodel_diagram.png?raw=true)) 
|[Paper]([https:\u002F\u002Farxiv.org\u002Fpdf\u002F2304.06718.pdf](https:\u002F\u002Fscontent-fmx1-1.xx.fbcdn.net\u002Fv\u002Ft39.2365-6\u002F453626691_1879405402541497_3155007177325245432_n.pdf?_nc_cat=106&ccb=1-7&_nc_sid=3c67a6&_nc_ohc=lkNRHLYBebIQ7kNvgHl-Sjg&_nc_ht=scontent-fmx1-1.xx&oh=00_AYBDsA5Jo0xafWqN9cTfq7tklJ9QHxbyyzLnvg5qCaG6kw&oe=66C4AD0C)) | [Project Page]([https:\u002F\u002Fhuggingface.co\u002Fspaces\u002Fxdecoder\u002FSEEM](https:\u002F\u002Fai.meta.com\u002Fsam2\u002F)) | [Code]([https:\u002F\u002Fgithub.com\u002FUX-Decoder\u002FSegment-Everything-Everywhere-All-At-Once](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fsegment-anything-2?tab=readme-ov-file))| Meta | A foundation model towards solving promptable visual segmentation in images and videos..|\n\n### Derivative Papers\n\n#### Analysis and Expansion of SAM\n| Title | Presentation| Paper page | Project page | Code base | Affiliation| Description|\n|:---:|:---:|:---:|:---:| :---:| :---:|:---:|\n| CLIP_Surgery | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_d2ca55efc0ac.jpg)| [arXiv](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2304.05653.pdf) |[Demo](https:\u002F\u002Fgithub.com\u002Fxmed-lab\u002FCLIP_Surgery\u002Fblob\u002Fmaster\u002Fdemo.ipynb)| [Code](https:\u002F\u002Fgithub.com\u002Fxmed-lab\u002FCLIP_Surgery) | HKUST | This work about SAM based on CLIP's explainability to achieve text to mask without manual points.|\n|GenSAM | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_10725dfc182f.png) | [arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.07374) | [Project Page](https:\u002F\u002Flwpyh.github.io\u002FGenSAM\u002F) | [Code](https:\u002F\u002Fgithub.com\u002FjyLin8100\u002FGenSAM) | QMUL | This work relaxes the requirement for instance-specific prompts in SAM.|\n| Segment Anything Is Not Always Perfect | 
![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_850a20f36aa9.png) | [arXiv](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2304.05750.pdf) | - | - | Samsung | This paper analyzes and discusses the benefits and limitations of SAM.|\n| PerSAM | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_dd1556df478d.png) | [arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.03048) | [Project Page](https:\u002F\u002Fhuggingface.co\u002Fpapers\u002F2305.03048) | [Code](https:\u002F\u002Fgithub.com\u002FZrrSkywalker\u002FPersonalize-SAM) | - |Segment Anything with specific concepts. |\n| Matcher: Segment Anything with One Shot Using All-Purpose Feature Matching | ![img1](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_ba26c6e7791f.png) | [arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.13310) | - | [Code](https:\u002F\u002Fgithub.com\u002Faim-uofa\u002FMatcher) | - | One shot semantic segmentation by integrating an all-purpose feature extraction model and a class-agnostic segmentation model. |\n| Segment Anything in High Quality |![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_ea4fd28017b3.png) | [arXiv](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.01567.pdf) | [Project Page](https:\u002F\u002Fhuggingface.co\u002Fpapers\u002F2306.01567) | - | ETH Zürich & HKUST | HQ-SAM: improve segmentation quality of SAM using learnable High-Quality Output Token. 
|\n|Detect Any Shadow: Segment Anything for Video Shadow Detection| ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_d09371984205.png) | [arXiv](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2305.16698.pdf) | - | [Code](https:\u002F\u002Fgithub.com\u002Fharrytea\u002FDetect-AnyShadow) | University of Science and Technology of China | Use SAM to detect initial frames then use an LSTM network for subsequent frames. |\n| Fast Segment Anything | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_3fa3a830f635.png) | [arXiv](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.12156v1.pdf) | [Project Page](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002FAn-619\u002FFastSAM) | [Code](https:\u002F\u002Fgithub.com\u002Fcasia-iva-lab\u002Ffastsam) | - | Reformulate the architecture and improve the speed of SAM. | \n| MobileSAM (Faster Segment Anything) | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_56bbdbc95c6f.jpg) | [arXiv](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.14289.pdf) | [Project Page](https:\u002F\u002Fhuggingface.co\u002Fpapers\u002F2306.14289) | [Code](https:\u002F\u002Fgithub.com\u002FChaoningZhang\u002FMobileSAM) | Kyung Hee University | make SAM mobile-friendly by replacing the heavyweight image encoder with a lightweight one.|\n| FoodSAM (Any Food Segmentation) | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_24593d488c52.jpg) | [arxiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.05938) | [Project Page](https:\u002F\u002Fstarhiking.github.io\u002FFoodSAM_Page\u002F) | [Code](https:\u002F\u002Fgithub.com\u002Fjamesjg\u002FFoodSAM) | UCAS | semantic, instance, panoptic, interactive segmentation on food image.|\n| DefectSAM | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_a7d19e693778.png) | 
[arxiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.10245) | - | [Code](https:\u002F\u002Fgithub.com\u002Fbozhenhhu\u002FDefectSAM) | ZJU, Westlake, UESTC, etc. | infrared thermal images, defect detection.|\n| SlimSAM | ![img](https:\u002F\u002Fgithub.com\u002Fczg1225\u002FSlimSAM\u002Fblob\u002Fmaster\u002Fimages\u002Fpaper\u002Fintro.PNG) | [arxiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.05284) | - | [Code](https:\u002F\u002Fgithub.com\u002Fczg1225\u002FSlimSAM) | NUS | 0.1% Data Makes Segment Anything Slim.|\n\n#### Medical Image Segmentation\n| Title | Presentation| Paper page | Project page | Code base | Affiliation| Description|\n|:---:|:---:|:---:|:---:| :---:| :---:|:---:|\n| Segment Anything Model (SAM) for Digital Pathology | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_91ecbefa603e.png) | [arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.04155) | - | - | - | SAM + Tumor segmentation\u002FTissue segmentation\u002FCell nuclei segmentation. |\n| Segment Anything in Medical Images | ![img1](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_a92b3051b92e.png)|[arXiv](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2304.12306.pdf)| - |[Code](https:\u002F\u002Fgithub.com\u002Fbowang-lab\u002FMedSAM) | - | A step-by-step tutorial with a small dataset to help you quickly utilize SAM.|\n| SAM Fails to Segment Anything? 
| ![img1](https:\u002F\u002Fcamo.githubusercontent.com\u002Fbae32a4f7f7b6cf23aafde7f574ef96544b87f59196c9aa125f93240bb178b36\u002F68747470733a2f2f7469616e72756e2d6368656e2e6769746875622e696f2f53414d2d41646170746f722f7374617469632f696d616765732f706f6c79702e6a7067)|[arXiv](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2304.09148.pdf)| - |[Code](https:\u002F\u002Fgithub.com\u002Ftianrun-chen\u002FSAM-Adapter-PyTorch) | - | SAM-adapter: Adapting SAM in Underperformed Scenes: Camouflage, Shadow, Medical Image Segmentation, and More.|\n| Segment Anything Model for Medical Image Analysis: an Experimental Study | ![img1](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_4ec1ffd8e332.png) | [arXiv](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2304.10517.pdf) | - | - | - | Thorough experiments evaluating how SAM performs on 19 medical image datasets. |\n| Medical-SAM-Adapter | ![img1](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_123550b16926.jpeg) | [arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.12620.pdf) | - | [Code](https:\u002F\u002Fgithub.com\u002FKidsWithTokens\u002FMedical-SAM-Adapter) | - | A project to finetune SAM using Adaption for the Medical Imaging. 
|\n| SAM-Med2d | ![img1](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_7b4aa30354d5.png) | [arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.16184) | - | [Code](https:\u002F\u002Fgithub.com\u002FOpenGVLab\u002FSAM-Med2D) | Sichuan University & Shanghai AI Laboratory | The most comprehensive studies on applying SAM to medical 2D images |\n| ScribblePrompt-SAM | ![img1](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_e3fa4283512e.gif) | [arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.07381) | [Project Page](https:\u002F\u002Fscribbleprompt.csail.mit.edu\u002F) | [Code](https:\u002F\u002Fgithub.com\u002Fhalleewong\u002FScribblePrompt) | MIT & MGH | Fine-tuned SAM on 65 biomedical imaging datasets with scribble, click, and bounding box inputs |\n| SaLIP | - | [arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.06362) | [Project Page](https:\u002F\u002Fscribbleprompt.csail.mit.edu\u002F) | [Code](https:\u002F\u002Fgithub.com\u002Faleemsidra\u002FSaLIP) | - | Test-Time Adaptation with SaLIP: A Cascade of SAM and CLIP for Zero-shot\nMedical Image Segmentation. 
|\n#### Bioimage Analysis\n| Title | Presentation| Paper page | Project page | Code base | Affiliation| Description|\n|:---:|:---:|:---:|:---:| :---:| :---:|:---:|\n| Segment Anything for Microscopy | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_f80eda0c25c7.png) | [bioRxiv](https:\u002F\u002Fdoi.org\u002F10.1101\u002F2023.08.21.554208) | [Demo](https:\u002F\u002Fcomputational-cell-analytics.github.io\u002Fmicro-sam\u002Fmicro_sam.html#installation) | [Code](https:\u002F\u002Fgithub.com\u002Fcomputational-cell-analytics\u002Fmicro-sam) | University of Göttingen, Germany | Segment Anything for Microscopy implements automatic and interactive annotation for microscopy data. It is built on top of Segment Anything and specializes it for microscopy and other bio-imaging data. Its core components are: \u003Cul>\u003Cli>The `micro_sam` tools for interactive data annotation with napari.\u003C\u002Fli>\u003Cli>The `micro_sam` library to apply Segment Anything to 2d and 3d data or fine-tune it on your data.\u003C\u002Fli>\u003Cli>The `micro_sam` models that are fine-tuned on publicly available microscopy data.\u003C\u002Fli> Our goal is to build fast and interactive annotation tools for microscopy data |\n\n#### Inpainting\n| Title | Presentation| Paper page | Project page | Code base | Affiliation| Description|\n|:---:|:---:|:---:|:---:| :---:| :---:|:---:|\n| Inpaint Anything | ![img1](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_707dae910137.png)|[arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.06790)| - |[Code](https:\u002F\u002Fgithub.com\u002Fgeekyutao\u002FInpaint-Anything) | USTC & EIT | SAM + Inpainting, which is able to remove the object smoothly.|\n| SAM + Stable Diffusion for Text-to-Image Inpainting | ![img1](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_ff8861173ce3.png) | - | 
[Project](https:\u002F\u002Fwww.comet.com\u002Fexamples\u002Fdemo-text-to-inpainting-sam-stablediffusion\u002Fview\u002FbRnI022tXQUdKGsVCFmjFRRtT\u002F) | [Code](https:\u002F\u002Fcolab.research.google.com\u002Fdrive\u002F1B7L4cork9UFTtIB02EntjiZRLYuqJS2b#scrollTo=LtZghyHoJabf) | comet | Grounding DINO + SAM + Stable Diffusion |\n\n#### Camouflaged Object Detection\n| Title | Presentation| Paper page | Project page | Code base | Affiliation| Description|\n|:---:|:---:|:---:|:---:| :---:| :---:|:---:|\n| SAMCOD | - | [arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.04709) | - | [Code](https:\u002F\u002Fgithub.com\u002Fluckybird1994\u002FSAMCOD) | - | SAM + Camouflaged object detection (COD) task.|\n\n#### Video Frame Interpolation\n| Title | Presentation| Paper page | Project page | Code base | Affiliation| Description|\n|:---:|:---:|:---:|:---:| :---:| :---:|:---:|\n| Clearer Frames, Anytime: Resolving Velocity Ambiguity in Video Frame Interpolation | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_10660d03788b.gif) | [arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.08007) | [Project Page](https:\u002F\u002Fzzh-tech.github.io\u002FInterpAny-Clearer\u002F) & [Interactive Demo](http:\u002F\u002Fai4sports.opengvlab.com\u002Finterpany-clearer\u002F) | [Code](https:\u002F\u002Fgithub.com\u002Fzzh-tech\u002FInterpAny-Clearer) | Shanghai AI Laboratory & Snap Inc. | Editable video frame interpolation with SAM. 
|\n\n#### Low Level Vision\n| Title | Presentation| Paper page | Project page | Code base | Affiliation| Description|\n|:---:|:---:|:---:|:---:| :---:| :---:|:---:|\n| Segment Anything in Video Super-resolution | ![img1](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_57d361496527.png)|[arXiv](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2305.06524.pdf)| - | - | - | The first step to use SAM for low-level vision.|\n| SAM-IQA | ![img1](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_6691507f1056.png)|[arXiv](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2307.04455.pdf)| - | [Code](https:\u002F\u002Fgithub.com\u002FHedlen\u002FSAM-IQA) | Megvii | The first to introduce the SAM in IQA and demonstrate its strong generalization ability in this domain.|\n\n#### Image Matting\n| Title | Presentation| Paper page | Project page | Code base | Affiliation| Description|\n|:---:|:---:|:---:|:---:| :---:| :---:|:---:|\n|Matte Anything|![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_b20d92614357.png)![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_2db493dbb787.gif)|[arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.04121)| - | [Code](https:\u002F\u002Fgithub.com\u002Fhustvl\u002FMatte-Anything)| HUST Vision Lab| An interactive natural image matting system with excellent performance for both opaque and transparent objects |\n| Matting Anything | ![img1](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_4443fdff6ac5.png) | [arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.05399) | [Project page](https:\u002F\u002Fhuggingface.co\u002Fpapers\u002F2306.05399) | [Code](https:\u002F\u002Fgithub.com\u002FSHI-Labs\u002FMatting-Anything) | SHI Labs | Leverage feature maps from SAM and adopts a Mask-to-Matte module to predict the alpha matte. 
|\n\n#### Robotic\n| Title | Presentation| Paper page | Project page | Code base | Affiliation| Description|\n|:---:|:---:|:---:|:---:| :---:| :---:|:---:|\n| Instruct2Act | ![img1](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_3110daa6f48d.png)|[arXiv](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2305.11176.pdf)| - | [Code](https:\u002F\u002Fgithub.com\u002FOpenGVLab\u002FInstruct2Act) | OpenGVLab | A SAM application in the Robotic field.|\n\n#### Bioinformatics\n| Title | Presentation| Paper page | Project page | Code base | Affiliation| Description|\n|:---:|:---:|:---:|:---:| :---:| :---:|:---:|\n| IAMSAM | ![img1](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_acc02e84f41d.png)|[bioRxiv](https:\u002F\u002Fwww.biorxiv.org\u002Fcontent\u002F10.1101\u002F2023.05.25.542052v1)| - | [Code](https:\u002F\u002Fgithub.com\u002Fportrai-io\u002FIAMSAM) | Portrai Inc. | A SAM application for the analysis of Spatial Transcriptomics.|  \n  \n#### 3D\n| Title | Presentation| Paper page | Project page | Code base | Affiliation| Description|\n|:---:|:---:|:---:|:---:| :---:| :---:|:---:|\n| Point-SAM| ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_a35bf7bf169d.gif)|[arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.17741)| [Page](https:\u002F\u002Fpoint-sam.github.io) | [Code](https:\u002F\u002Fgithub.com\u002Fzyc00\u002FPoint-SAM) | UCSD | An open-world 3D native promptable point-cloud segmentation method.|\n| SAMPro3D | ![img2](https:\u002F\u002Fgithub.com\u002FGAP-LAB-CUHK-SZ\u002FSAMPro3D\u002Fblob\u002Fmain\u002Ffigures\u002Fteaser_ori.jpg)|[arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.17707)| [Page](https:\u002F\u002Fmutianxu.github.io\u002Fsampro3d\u002F) | [Code](https:\u002F\u002Fgithub.com\u002FGAP-LAB-CUHK-SZ\u002FSAMPro3D) | CUHKSZ, MSRA |A novel method to segment any 3D indoor scenes by applying the SAM to 2D 
frames, without needing any training, tuning, distillation or 3D pretrained networks.|\n| Seal | ![img1](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_2714a19e265f.gif)|[arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.09347)| [Page](https:\u002F\u002Fldkong.com\u002FSeal) | [Code](https:\u002F\u002Fgithub.com\u002Fyouquanl\u002FSegment-Any-Point-Cloud) | - | A framework capable of leveraging 2D vision foundation models for self-supervised learning on large-scale 3D point clouds.|\n| TomoSAM | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_7082f470a63c.png) | [arXiv](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.08609.pdf) | [Video Tutorial](https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=4nXCYrvBSjk) | [Code](https:\u002F\u002Fgithub.com\u002Ffsemerar\u002FSlicerTomoSAM) | - | An extension of 3D Slicer using the SAM to aid the segmentation of 3D data from tomography or other imaging techniques. |\n| SegmentAnythingin3D | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_3764dc6e98d6.png) | [arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.12308.pdf) | [Project](https:\u002F\u002Fjumpat.github.io\u002FSA3D\u002F) | [Code](https:\u002F\u002Fgithub.com\u002FJumpat\u002FSegmentAnythingin3D) | - | A novel framework to Segment Anything in 3D, named SA3D. 
|\n\n#### Remote Sensing\n| Title | Presentation| Paper page | Project page | Code base | Affiliation| Description|\n|:---:|:---:|:---:|:---:| :---:| :---:|:---:|\n| RSPrompter | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_3deb40921df2.jpg) | [arXiv](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.16269.pdf) | [Project Page](https:\u002F\u002Fkyanchen.github.io\u002FRSPrompter\u002F) | [Code](https:\u002F\u002Fgithub.com\u002FKyanChen\u002FRSPrompter) | Beihang University | An automated instance segmentation approach for remote sensing images based on the SAM. |\n| SAM-CD | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_befef9435725.png) | [arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.01429) | - | [Code](https:\u002F\u002Fgithub.com\u002FggsDing\u002FSAM-CD) | PLA Information Engineering University | A sample-efficient change detection framework that employs SAM as the visual encoder. |\n| SAM-Road: Segment Anything Model for Road Network Graph Extraction | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_29921ad44f39.png) | [arXiv](http:\u002F\u002Farxiv.org\u002Fabs\u002F2403.16051) | - | [Code](https:\u002F\u002Fgithub.com\u002Fhtcr\u002Fsam_road) | Carnegie Mellon University | A simple and fast method applying SAM for vectorized large-scale road network graph extraction. It reaches state-of-the-art accuracy while being 40 times faster. 
|\n\n#### Tracking\n| Title | Presentation| Paper page | Project page | Code base | Affiliation| Description|\n|:---:|:---:|:---:|:---:| :---:| :---:|:---:|\n| Follow Anything | ![img1](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_b7a7a3f33c79.png) | [arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.05737) | [Page](https:\u002F\u002Fhuggingface.co\u002Fpapers\u002F2308.05737) | [Code](https:\u002F\u002Fgithub.com\u002Falaamaalouf\u002FFollowAnything) | MIT, Harvard University | an open-vocabulary and multimodal model to detects, tracks, and follows any objects in real-time.|\n| Track-Anything | [Video](https:\u002F\u002Fgithub.com\u002Fgaomingqi\u002FTrack-Anything\u002Fraw\u002Fmaster\u002Fassets\u002Favengers.gif) | [arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.11968) | - | [Code](https:\u002F\u002Fgithub.com\u002Fgaomingqi\u002FTrack-Anything) | MIT, Harvard University | an open-vocabulary and multimodal model to detects, tracks, and follows any objects in real-time.|\n| SAM-Track | [Video](https:\u002F\u002Fcamo.githubusercontent.com\u002F149f974fc6e13f3764e30d843880fa1e15e0fbecf607f905805d84290ec87155\u002F68747470733a2f2f7265732e636c6f7564696e6172792e636f6d2f6d6172636f6d6f6e74616c62616e6f2f696d6167652f75706c6f61642f76313638313731333039352f766964656f5f746f5f6d61726b646f776e2f696d616765732f796f75747562652d2d555068747066316b3648412d63303562353861633665623463343730303833316232623330373063643430332e6a7067) | [arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.06558) | - | [Code](https:\u002F\u002Fgithub.com\u002Fz-x-yang\u002FSegment-and-Track-Anything) | MIT, Harvard University | A framework called Segment And Track Anything (SAMTrack) that allows users to precisely and effectively segment and track any object in a video.|\n\n#### Audio-visual Localization and Segmentation\n| Title | Presentation| Paper page | Project page | Code base | Affiliation| Description|\n|:---:|:---:|:---:|:---:| :---:| 
:---:|:---:|\n| AV-SAM | ![img1](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_cad01fec11f2.png) | [arXiv](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2305.01836.pdf) | - | [Code](https:\u002F\u002Fgithub.com\u002Falaamaalouf\u002FFollowAnything) | CMU | A simple yet effective audio-visual localization and segmentation framework based on the SAM.|\n\n#### Adversarial Attacks\n| Title | Presentation| Paper page | Project page | Code base | Affiliation|                                               Description                                               |\n|:---:|:---:|:---:|:---:| :---:| :---:|:-------------------------------------------------------------------------------------------------------:|\n| Attack-SAM | - | [arXiv](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2305.00866.pdf) | - | - | KAIST | The first work to conduct a comprehensive investigation on how to attack SAM with adversarial examples. |\n\n#### Multimedia Forensics\n| Title | Presentation| Paper page | Project page | Code base | Affiliation|                                               Description                                               |\n|:---:|:---:|:---:|:---:| :---:| :---:|:-------------------------------------------------------------------------------------------------------:|\n| SAFIRE: Segment Any Forged Image Region | ![safire_image](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_8e6a1d636ba5.png) | [arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.08197) | - | [Code](https:\u002F\u002Fgithub.com\u002Fmjkwon2021\u002FSAFIRE) | KAIST | Extends SAM's point prompting capability to image forensics, enabling precise source-aware segmentation for forgery localization. 
|\n\n### Derivative Projects\n#### Image Segmentation task\n| Title | Presentation|  Project page | Code base | Affiliation| Description|\n|:---:|:---:|:---:|:---:| :---:| :---:|\n| Grounded Segment Anything | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_3447d7544f77.png)|[Colab](https:\u002F\u002Fgithub.com\u002Fcamenduru\u002Fgrounded-segment-anything-colab) & [Huggingface](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002Fyizhangliu\u002FGrounded-Segment-Anything) | [Code](https:\u002F\u002Fgithub.com\u002FIDEA-Research\u002FGrounded-Segment-Anything) | - | Combining Grounding DINO and Segment Anything| - | \n| GroundedSAM Anomaly Detection | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_6d76861fa86f.png) | - | [Code](https:\u002F\u002Fgithub.com\u002Fcaoyunkang\u002FGroundedSAM-zero-shot-anomaly-detection)| - | Grounding DINO + SAM to segment any anomaly. |\n| Semantic Segment Anything | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_3b0b9346cb00.png) |- | [Code](https:\u002F\u002Fgithub.com\u002Ffudan-zvg\u002FSemantic-Segment-Anything) | Fudan | A dense category annotation engine. |\n| Magic Copy | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_2927be26e40b.png) | - |[Code](https:\u002F\u002Fgithub.com\u002Fkevmo314\u002Fmagic-copy) | - | Magic Copy is a Chrome extension that uses SAM. 
|\n| YOLO-World + EfficientViT SAM | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_bdec792d3ab9.png) | 🤗 [HuggingFace Space](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002Fcurt-park\u002Fyolo-world-with-efficientvit-sam) | [Code](https:\u002F\u002Fgithub.com\u002FCurt-Park\u002Fyolo-world-with-efficientvit-sam) | - | Efficient open-vocabulary object detection and segmentation with YOLO-World + EfficientViT SAM |\n| Segment Anything with Clip | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_0111c2fb057b.png) | 🤗 [HuggingFace Space](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002Fcurt-park\u002Fsegment-anything-with-clip) |[Code](https:\u002F\u002Fgithub.com\u002FCurt-Park\u002Fsegment-anything-with-clip) | -  | SAM + CLIP| \n| SAM-Clip | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_b2ae2caeb471.png) | - |[Code](https:\u002F\u002Fgithub.com\u002Fmaxi-w\u002FCLIP-SAM) | - | SAM + CLIP.|\n| Prompt Segment Anything | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_f60e78129ebd.jpg)| - | [Code](https:\u002F\u002Fgithub.com\u002FRockeyCoss\u002FPrompt-Segment-Anything)| - | SAM + Zero-shot Instance Segmentation.|\n| RefSAM | - | - |[Code](https:\u002F\u002Fgithub.com\u002Fhelblazer811\u002FRefSAM) | - | Evaluating the basic performance of SAM on the Referring Image segmentation task.| \n| SAM-RBox | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_34dfe912a346.png) | - |[Code](https:\u002F\u002Fgithub.com\u002FLi-Qingyun\u002Fsam-mmrotate) | - | An implementation of SAM for generating rotated bounding boxes with MMRotate.|\n| Open Vocabulary Segment Anything | ![img1](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_d7cf9404daf7.png)| - 
|[Code](https:\u002F\u002Fgithub.com\u002Fngthanhtin\u002Fowlvit_segment_anything) | - | An interesting demo by combining OWL-ViT of Google and SAM.|\n| SegDrawer |![img1](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_82a6f07b21c1.gif)![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_231cc16b056e.gif) | - |[Code](https:\u002F\u002Fgithub.com\u002Flujiazho\u002FSegDrawer) | - | Simple static web-based mask drawer, supporting semantic drawing with SAM.|\n| AnyLabeling |![](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_be08dd2a82ab.png) | [YoutubeDemo](https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=5qVJiYNX5Kk) |[Code](https:\u002F\u002Fgithub.com\u002Fvietanhdev\u002Fanylabeling) | - | SAM + Labelme + LabelImg + Auto-labeling.|\n| ISAT with segment anything |![](https:\u002F\u002Fgithub.com\u002FyatengLG\u002FISAT_with_segment_anything\u002Fblob\u002Fmaster\u002Fdisplay\u002F%E6%A0%87%E6%B3%A8.gif) | [YoutubeDemo](https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=yLdZCPmX-Bc) [BiliBili Demo](https:\u002F\u002Fwww.bilibili.com\u002Fvideo\u002FBV1or4y1R7EJ\u002F) | [Code](https:\u002F\u002Fgithub.com\u002FyatengLG\u002FISAT_with_segment_anything) | - | Labeling tool by SAM(segment anything model),supports SAM, sam-hq, MobileSAM EdgeSAM etc.|\n| Annotation Anything Pipeline |![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_51f6c904020d.png) | - |[Code](https:\u002F\u002Fgithub.com\u002FYuqifan1117\u002FAnnotation-anything-pipeline) | - | GPT + SAM.|\n| Roboflow Annotate |![roboflow-sam-optimized-faster](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_e50256f01305.gif) | [App](https:\u002F\u002Fapp.roboflow.com) |[Blog](https:\u002F\u002Fblog.roboflow.com\u002Flabel-data-segment-anything-model-sam\u002F) | Roboflow | SAM-assisted labeling 
for training computer vision models.|\n| SALT |![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_731124b0c7c4.gif) | - |[Code](https:\u002F\u002Fgithub.com\u002Fanuragxel\u002Fsalt) | - | A tool that adds a basic interface for image labeling and saves the generated masks in COCO format.|\n| SAM U Specify |![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_e48f9ecef610.png) | - |[Code](https:\u002F\u002Fgithub.com\u002FMaybeShewill-CV\u002Fsegment-anything-u-specify) | - | Use SAM and CLIP model to segment unique instances you want.|\n| SAM web UI |![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_c8c392f2390f.gif) | [App](https:\u002F\u002Fsegment-anything-webui.vercel.app\u002F) |[Code](https:\u002F\u002Fgithub.com\u002FKingfish404\u002Fsegment-anything-webui\u002F) | - | This is a new web interface for the SAM.|\n| Finetune Anything | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_e3121beec052.png) | - | [Code](https:\u002F\u002Fgithub.com\u002Fziqi-jin\u002Ffinetune-anything) | - |A class-aware one-stage tool for training fine-tuning models based on SAM.|\n| NanoSAM | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_b1e545ea7545.jpg) | - | [Code](https:\u002F\u002Fgithub.com\u002FNVIDIA-AI-IOT\u002Fnanosam) | NVIDIA |A distilled Segment Anything (SAM) model capable of running real-time with NVIDIA TensorRT.|\n| Segment-Anything-UI |  ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_7cb9749e8509.png) | - | [Code](https:\u002F\u002Fgithub.com\u002Fbranislavhesko\u002Fsegment-anything-ui) | - | A PySide6 based annotation tool for Segment Anything. 
|\n| Segment-Anything-2-UI | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_76b71579fce5.png) | - | [Code](https:\u002F\u002Fgithub.com\u002Fbranislavhesko\u002Fsegment-anything-2-ui) | - | A PySide6 based annotation tool for Segment Anything 2. Video tracking with multiple objects. |\n\n#### Video Segmentation task\n| Title | Presentation| Project page | Code base | Affiliation| Description|\n|:---:|:---:|:---:|:---:| :---:| :---:| \n| MetaSeg | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_a393590cefb3.gif) |[HuggingFace](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002FArtGAN\u002FSegment-Anything-Video) |[Code](https:\u002F\u002Fgithub.com\u002Fkadirnar\u002Fsegment-anything-video) | - | SAM + Video. |\n| SAM-Track | [Video](https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=UPhtpf1k6HA&feature=youtu.be&themeRefresh=1) |[YoutubeDemo](https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=Xyd54AngvV8) |[Code](https:\u002F\u002Fgithub.com\u002Fz-x-yang\u002FSegment-and-Track-Anything) | Zhejiang University | This project, which is based on SAM and DeAOT, focuses on segmenting and tracking objects in videos. 
|\n\n#### Medical image Segmentation task\n| Title | Presentation|  Project page | Code base | Affiliation| Description|\n|:---:|:---:|:---:|:---:| :---:| :---:| \n| SAM in Napari |[Video](https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=OPE1Xnw487E)|- |[Code](https:\u002F\u002Fgithub.com\u002FMIC-DKFZ\u002Fnapari-sam) | - | Segment anything with Napari integration of SAM.|\n| SAM Medical Imaging |![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_ba9a89a17f70.png)|- |[Code](https:\u002F\u002Fgithub.com\u002Famine0110\u002FSAM-Medical-Imaging) | - | SAM for Medical Imaging.|\n\n#### Inpainting task\n| Title | Presentation|  Project page | Code base | Affiliation| Description|\n|:---:|:---:|:---:|:---:| :---:| :---:| \n| SegAnythingPro | ![img](https:\u002F\u002Fcamo.githubusercontent.com\u002F7d5fb67ffcd6c209cf22ffe302d95b3b46d36b92116fe216022bf2a359c4b588\u002F68747470733a2f2f6a6968756c61622e636f6d2f676f646c792f666765722f2d2f7261772f6d61696e2f696d616765732f323032332f30342f31315f31325f345f34325f32303233303431313132303433392e706e67)|- |[Code](https:\u002F\u002Fgithub.com\u002Fjinfagang\u002FDisappear) | - | SAM + Inpainting\u002FReplacing.|\n\n\n#### 3D task\n| Title | Presentation|  Project page | Code base | Affiliation| Description|\n|:---:|:---:|:---:|:---:| :---:| :---:| \n| 3D-Box | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_a7ee4c79894b.png)|- |[Code](https:\u002F\u002Fgithub.com\u002Fdvlab-research\u002F3D-Box-Segment-Anything) | - | SAM is extended to 3D perception by combining it with VoxelNeXt.|\n| Anything 3DNovel View | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_14d431a6b777.jpeg)|- |[Code](https:\u002F\u002Fgithub.com\u002FAnything-of-anything\u002FAnything-3D) | - | SAM + [Zero 1-to-3](https:\u002F\u002Fgithub.com\u002Fcvlab-columbia\u002Fzero123).|\n| Any 3DFace | 
![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_c9e341ea5a14.jpg)![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_bbad962d158a.gif)|- |[Code](https:\u002F\u002Fgithub.com\u002FAnything-of-anything\u002FAnything-3D) | - | SAM + [HRN](https:\u002F\u002Fyounglbw.github.io\u002FHRN-homepage\u002F).|\n| Segment Anything 3D | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_eb708b851b38.png) | - | [Code](https:\u002F\u002Fgithub.com\u002FPointcept\u002FSegmentAnything3D) | Pointcept | Extending Segment Anything to 3D perception by transferring the segmentation information of 2D images to 3D space|\n\n#### Image Generation task\n| Title | Presentation| Project page | Code base | Affiliation| Description|\n|:---:|:---:|:---:|:---:| :---:| :---:| \n| Edit Anything | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_c0e0cab7f21d.jpg) | - |[Code](https:\u002F\u002Fgithub.com\u002Fsail-sg\u002FEditAnything) | - | Edit and Generate Anything in an image.|\n| Image Edit Anything |![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_976b735b0350.png)| - |[Code](https:\u002F\u002Fgithub.com\u002Ffeizc\u002FIEA) | - | Stable Diffusion + SAM.|\n| SAM for Stable Diffusion Webui |![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_7e4f2ffe8d54.png)| - |[Code](https:\u002F\u002Fgithub.com\u002Fcontinue-revolution\u002Fsd-webui-segment-anything) | - | Stable Diffusion + SAM.|\n\n#### Remote Sensing task\n| Title | Presentation| Project page | Code base | Affiliation| Description|\n|:---:|:---:|:---:|:---:| :---:| :---:| \n| Earth Observation Tools | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_dbf62cc39f6f.png) 
|[Colab](https:\u002F\u002Fcolab.research.google.com\u002Fdrive\u002F1RC1V68tD1O-YissBq9nOvS2PHEjAsFkA?usp=share_link) |[Code](https:\u002F\u002Fgithub.com\u002Faliaksandr960\u002Fsegment-anything-eo) | - | SAM + Remote Sensing. |\n\n#### Moving Object Detection task\n| Title | Presentation| Project page | Code base | Affiliation| Description|\n|:---:|:---:|:---:|:---:| :---:| :---:| \n| Moving Object Detection | ![img](https:\u002F\u002Fcamo.githubusercontent.com\u002Fcd073471951017a15cd445062d196242a446eb20acd90b2afa1728f239465fc7\u002F687474703a2f2f7777772e616368616c646176652e636f6d2f70726f6a656374732f616e797468696e672d746861742d6d6f7665732f766964656f732f5a584e36412d747261636b65642d776974682d6f626a6563746e6573732d7472696d6d65642e676966) | - |[Code](https:\u002F\u002Fgithub.com\u002Fachalddave\u002Fsegment-any-moving) | - | SAM + Moving Object Detection. |\n\n\n#### OCR task\n| Title | Presentation| Project page | Code base | Affiliation| Description|\n|:---:|:---:|:---:|:---:| :---:| :---:| \n| OCR-SAM | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_3f22cf5d87c6.png) | [Blog](https:\u002F\u002Fwww.zhihu.com\u002Fquestion\u002F593914819\u002Fanswer\u002F2976012032)|[Code](https:\u002F\u002Fgithub.com\u002Fyeungchenwa\u002FOCR-SAM) | - | Optical Character Recognition with SAM. 
|\n\n### front-end framework\n\n#### SAMJS\n| Title | Presentation| Project page | Code base | Affiliation| Description|\n|:---:|:---:|:---:|:---:| :---:| :---:| \n| SAMJS | ![samjs](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_36fd19ec1a80.png) | [demo](http:\u002F\u002Fsamjs.antv.vision\u002Fdemos)|[Code](https:\u002F\u002Fgithub.com\u002Fantvis\u002FSAMJS) | - | JS SDK for SAM, Support remote sensing data segmentation and vectorization|\n\n## Acknowledgement\nSome of the presentations in this repository are borrowed from the original author, and we are very thankful for their contribution.\n\n## License\nThis project is released under the MIT license. Please see the [LICENSE](LICENSE) file for more information.\n","\u003Cdiv align=\"center\">\n\u003Cbr>\n\u003Cimage src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_4cbbd9b616db.png\", width=\"600px\", height=\"287px\">\n\u003Cbr>\n\u003C\u002Fdiv>\n\u003C!-- ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_4cbbd9b616db.png) -->\n\n# 令人惊叹的 Segment Anything [![Awesome](https:\u002F\u002Fcdn.rawgit.com\u002Fsindresorhus\u002Fawesome\u002Fd7305f38d29fed78fa85652e3a63e154dd8e8829\u002Fmedia\u002Fbadge.svg)](https:\u002F\u002Fgithub.com\u002Fsindresorhus\u002Fawesome)\nSegment Anything 在计算机视觉（CV）领域取得了新的突破，本仓库将持续跟踪并总结 Segment Anything 在各个领域的研究进展，包括论文、项目等。\n\n如果您觉得这个仓库有帮助，请考虑给它点个 Star ⭐ 或分享出去 ⬆️。谢谢！\n\n## 新闻\n```\n- 2024.8.16 增加 Segment Anything2 和 SaLIP。\n- 2023.8.29：更新了一些最新工作。\n- 2023.5.20：更新文档结构，并添加了一篇与机器人相关的文章。祝大家520快乐！\n- 2023.5.4：增加 SEEM。\n- 2023.4.18：增加 Inpainting Anything 和 SAM-Track 工作。\n- 2023.4.12：发布了近期论文或项目的初版。\n```\n\n## 目录\n\n- [基础模型论文](#basemodel-papers) \n- [衍生论文](#derivative-papers)\n  - [SAM 的分析与扩展](#analysis-and-expansion-of-sam)\n  - [医学图像分割](#medical-image-segmentation)\n  - [修复填充](#inpainting)\n  - [伪装目标检测](#camouflaged-object-detection)\n  - 
[视频帧插值](#video-frame-interpolation)\n  - [低层视觉](#low-level-vision)\n  - [图像抠图](#image-matting)\n  - [机器人](#robotic)\n  - [生物信息学](#bioinformatics)\n  - [3D](#3d)\n  - [遥感](#remote-sensing)\n  - [跟踪](#tracking)\n  - [视听定位与分割](#audio-visual-ocalization-and-segmentation)\n  - [对抗攻击](#adversarial-attacks)\n- [衍生项目](#derivative-projects) \n  - [图像分割任务](#image-segmentation-task)\n  - [视频分割任务](#video-segmentation-task)\n  - [医学图像分割任务](#medical-image-segmentation-task)\n  - [修复填充任务](#inpainting-task)\n  - [3D 任务](#3d-task)\n  - [图像生成任务](#image-generation-task)\n  - [遥感任务](#remote-sensing-task)\n  - [移动物体检测任务](#moving-object-detection-task)\n  - [OCR 任务](#ocr-task)\n- [前端框架](#front-end-framework)\n   - [适用于 SAM 的 JS SDK](#samjs)\n\n## 论文\u002F项目\n### 基础模型论文\n| 标题 |演示图| 论文链接 | 项目页面 | 代码库 | 所属机构| 描述|\n|:---:|:---:|:---:|:---:| :---:| :---:|:---:|\n| CLIP | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_6616cf5de1ef.png) | [arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.00020) | [Colab](https:\u002F\u002Fcolab.research.google.com\u002Fgithub\u002Fopenai\u002Fclip\u002Fblob\u002Fmaster\u002Fnotebooks\u002FInteracting_with_CLIP.ipynb) | [代码](https:\u002F\u002Fgithub.com\u002Fopenai\u002FCLIP) | OpenAI | 对比语言-图像预训练。| \n| OWL-ViT | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_a188b8992be7.gif)| [ECCV2022](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.06230) | - | [代码](https:\u002F\u002Fgithub.com\u002Fgoogle-research\u002Fscenic\u002Ftree\u002Fmain\u002Fscenic\u002Fprojects\u002Fowl_vit) | Google | 一种开放词汇的目标检测器。| \n| OvSeg | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_ccd7bd4c7a73.gif) | [CVPR2023](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.04150) | [项目](https:\u002F\u002Fjeff-liangf.github.io\u002Fprojects\u002Fovseg\u002F) | [代码](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fov-seg) | META | 
根据文本描述将图像分割为语义区域。| \n| Painter | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_76cb44b68e76.jpg) | [CVPR2023](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.02499) | - | [代码](https:\u002F\u002Fgithub.com\u002Fbaaivision\u002FPainter) | BAAI | 一种用于上下文视觉学习的通用画家。| \n| Grounding DINO | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_ab6e4790a438.png)| [arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.05499) | [Colab](https:\u002F\u002Fcolab.research.google.com\u002Fgithub\u002Froboflow-ai\u002Fnotebooks\u002Fblob\u002Fmain\u002Fnotebooks\u002Fzero-shot-object-detection-with-grounding-dino.ipynb) & [Huggingface](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002FShilongLiu\u002FGrounding_DINO_demo) | [代码](https:\u002F\u002Fgithub.com\u002FIDEA-Research\u002FGroundingDINO) | IDEA | 一个更强大的开放集目标检测器|\n| Segment Anything | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_198eb5158aa3.png)![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_8b3b879859a2.jpg)| [arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.02643) | [项目页面](https:\u002F\u002Fsegment-anything.com\u002F) | [代码](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fsegment-anything) | Meta | 一个功能更强大的大型模型，可用于为图像中的所有对象生成掩码。| \n| SegGPT | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_8e2112165621.png)| [arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.03284) | [项目页面](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002FBAAI\u002FSegGPT) | [代码](https:\u002F\u002Fgithub.com\u002Fbaaivision\u002FPainter) | BAAI | 基于 Painter，在上下文中对一切进行分割。|\n| Segment Everything Everywhere All at Once (SEEM) | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_a2f2edc48c74.png) 
|[arXiv](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2304.06718.pdf) | [项目页面](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002Fxdecoder\u002FSEEM) | [代码](https:\u002F\u002Fgithub.com\u002FUX-Decoder\u002FSegment-Everything-Everywhere-All-At-Once)| Microsoft | 基于多种提示类型的语义分割。|\n| Segment Everything2  | ![img]([https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_a2f2edc48c74.png](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fsegment-anything-2\u002Fblob\u002Fmain\u002Fassets\u002Fmodel_diagram.png?raw=true)) |[论文]([https:\u002F\u002Farxiv.org\u002Fpdf\u002F2304.06718.pdf](https:\u002F\u002Fscontent-fmx1-1.xx.fbcdn.net\u002Fv\u002Ft39.2365-6\u002F453626691_1879405402541497_3155007177325245432_n.pdf?_nc_cat=106&ccb=1-7&_nc_sid=3c67a6&_nc_ohc=lkNRHLYBebIQ7kNvgHl-Sjg&_nc_ht=scontent-fmx1-1.xx&oh=00_AYBDsA5Jo0xafWqN9cTfq7tklJ9QHxbyyzLnvg5qCaG6kw&oe=66C4AD0C)) | [项目页面]([https:\u002F\u002Fhuggingface.co\u002Fspaces\u002Fxdecoder\u002FSEEM](https:\u002F\u002Fai.meta.com\u002Fsam2\u002F)) | [代码]([https:\u002F\u002Fgithub.com\u002FUX-Decoder\u002FSegment-Everything-Everywhere-All-At-Once](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fsegment-anything-2?tab=readme-ov-file))| Meta | 一个用于解决图像和视频中可提示视觉分割的基础模型..|\n\n### Derivative Papers\n\n#### SAM 的分析与扩展\n| 标题 | 演示图 | 论文页 | 项目页 | 代码库 | 所属机构 | 描述 |\n|:---:|:---:|:---:|:---:| :---:| :---:|:---:|\n| CLIP_Surgery | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_d2ca55efc0ac.jpg)| [arXiv](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2304.05653.pdf) |[Demo](https:\u002F\u002Fgithub.com\u002Fxmed-lab\u002FCLIP_Surgery\u002Fblob\u002Fmaster\u002Fdemo.ipynb)| [Code](https:\u002F\u002Fgithub.com\u002Fxmed-lab\u002FCLIP_Surgery) | 香港科技大学 | 该工作基于 CLIP 的可解释性，实现无需手动标注点的文本到掩码转换。|\n|GenSAM | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_10725dfc182f.png) | 
[arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.07374) | [Project Page](https:\u002F\u002Flwpyh.github.io\u002FGenSAM\u002F) | [Code](https:\u002F\u002Fgithub.com\u002FjyLin8100\u002FGenSAM) | 伦敦玛丽女王大学 | 该工作放宽了对 SAM 中特定实例提示的要求。|\n| Segment Anything Is Not Always Perfect | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_850a20f36aa9.png) | [arXiv](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2304.05750.pdf) | - | - | 三星 | 本文分析并讨论了 SAM 的优势与局限性。|\n| PerSAM | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_dd1556df478d.png) | [arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.03048) | [Project Page](https:\u002F\u002Fhuggingface.co\u002Fpapers\u002F2305.03048) | [Code](https:\u002F\u002Fgithub.com\u002FZrrSkywalker\u002FPersonalize-SAM) | - | 针对特定概念进行分割。|\n| Matcher: 使用通用特征匹配实现单次拍摄的万物分割 | ![img1](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_ba26c6e7791f.png) | [arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.13310) | - | [Code](https:\u002F\u002Fgithub.com\u002Faim-uofa\u002FMatcher) | - | 通过整合通用特征提取模型和类无关分割模型，实现单次拍摄的语义分割。|\n| 高质量万物分割 |![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_ea4fd28017b3.png) | [arXiv](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.01567.pdf) | [Project Page](https:\u002F\u002Fhuggingface.co\u002Fpapers\u002F2306.01567) | - | 苏黎世联邦理工学院 & 香港科技大学 | HQ-SAM：利用可学习的高质量输出令牌提升 SAM 的分割质量。|\n| Detect Any Shadow：用于视频阴影检测的万物分割 | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_d09371984205.png) | [arXiv](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2305.16698.pdf) | - | [Code](https:\u002F\u002Fgithub.com\u002Fharrytea\u002FDetect-AnyShadow) | 中国科学技术大学 | 使用 SAM 检测初始帧，随后利用 LSTM 网络处理后续帧。|\n| 快速万物分割 | 
![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_3fa3a830f635.png) | [arXiv](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.12156v1.pdf) | [Project Page](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002FAn-619\u002FFastSAM) | [Code](https:\u002F\u002Fgithub.com\u002Fcasia-iva-lab\u002Ffastsam) | - | 重新设计架构并提升 SAM 的速度。|\n| MobileSAM（更快速的万物分割） | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_56bbdbc95c6f.jpg) | [arXiv](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.14289.pdf) | [Project Page](https:\u002F\u002Fhuggingface.co\u002Fpapers\u002F2306.14289) | [Code](https:\u002F\u002Fgithub.com\u002FChaoningZhang\u002FMobileSAM) | 庆熙大学 | 通过用轻量级图像编码器替换重量级编码器，使 SAM 更适合移动端应用。|\n| FoodSAM（任意食物分割） | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_24593d488c52.jpg) | [arxiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.05938) | [Project Page](https:\u002F\u002Fstarhiking.github.io\u002FFoodSAM_Page\u002F) | [Code](https:\u002F\u002Fgithub.com\u002Fjamesjg\u002FFoodSAM) | 中国科学院大学 | 对食物图像进行语义、实例、全景及交互式分割。|\n| DefectSAM | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_a7d19e693778.png) | [arxiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.10245) | - | [Code](https:\u002F\u002Fgithub.com\u002Fbozhenhhu\u002FDefectSAM) | 浙江大学、西湖大学、电子科技大学等 | 针对红外热成像进行缺陷检测。|\n| SlimSAM | ![img](https:\u002F\u002Fgithub.com\u002Fczg1225\u002FSlimSAM\u002Fblob\u002Fmaster\u002Fimages\u002Fpaper\u002Fintro.PNG) | [arxiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.05284) | - | [Code](https:\u002F\u002Fgithub.com\u002Fczg1225\u002FSlimSAM) | 国立新加坡大学 | 仅需 0.1% 数据即可实现精简版万物分割。\n\n#### 医学图像分割\n| 标题 | 演示 | 论文页 | 项目页 | 代码库 | 所属机构 | 描述 |\n|:---:|:---:|:---:|:---:| :---:| :---:|:---:|\n| 数字病理中的 Segment Anything Model (SAM) | 
![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_91ecbefa603e.png) | [arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.04155) | - | - | - | SAM + 肿瘤分割\u002F组织分割\u002F细胞核分割。 |\n| 医学图像中的 Segment Anything | ![img1](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_a92b3051b92e.png)|[arXiv](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2304.12306.pdf)| - |[代码](https:\u002F\u002Fgithub.com\u002Fbowang-lab\u002FMedSAM) | - | 带有小型数据集的分步教程，帮助您快速使用 SAM。|\n| SAM 是否无法分割任何内容？ | ![img1](https:\u002F\u002Fcamo.githubusercontent.com\u002Fbae32a4f7f7b6cf23aafde7f574ef96544b87f59196c9aa125f93240bb178b36\u002F68747470733a2f2f7469616e72756e2d6368656e2e6769746875622e696f2f53414D2d41646170746F722f7374617469632f696d616765732f706F6C79702e6A7067)|[arXiv](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2304.09148.pdf)| - |[代码](https:\u002F\u002Fgithub.com\u002Ftianrun-chen\u002FSAM-Adapter-PyTorch) | - | SAM适配器：在表现不佳的场景中适配 SAM：伪装、阴影、医学图像分割等。|\n| 用于医学图像分析的 Segment Anything Model：一项实验研究 | ![img1](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_4ec1ffd8e332.png) | [arXiv](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2304.10517.pdf) | - | - | - | 对 SAM 在 19 个医学图像数据集上的表现进行的全面实验。 |\n| Medical-SAM-Adapter | ![img1](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_123550b16926.jpeg) | [arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.12620.pdf) | - | [代码](https:\u002F\u002Fgithub.com\u002FKidsWithTokens\u002FMedical-SAM-Adapter) | - | 一个使用适配技术对医学影像进行微调的项目。 |\n| SAM-Med2d | ![img1](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_7b4aa30354d5.png) | [arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.16184) | - | [代码](https:\u002F\u002Fgithub.com\u002FOpenGVLab\u002FSAM-Med2D) | 四川大学 & 上海人工智能实验室 | 关于将 SAM 应用于医学 2D 图像的最全面研究 |\n| ScribblePrompt-SAM | 
![img1](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_e3fa4283512e.gif) | [arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.07381) | [项目页](https:\u002F\u002Fscribbleprompt.csail.mit.edu\u002F) | [代码](https:\u002F\u002Fgithub.com\u002Fhalleewong\u002FScribblePrompt) | MIT & MGH | 使用涂鸦、点击和边界框输入，在 65 个生物医学成像数据集上微调 SAM |\n| SaLIP | - | [arXiv]([https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.07381](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.06362)) | [项目页](https:\u002F\u002Fscribbleprompt.csail.mit.edu\u002F) | [代码]([https:\u002F\u002Fgithub.com\u002Fhalleewong\u002FScribblePrompt](https:\u002F\u002Fgithub.com\u002Faleemsidra\u002FSaLIP)) | - | 使用 SaLIP 的测试时适应：SAM 和 CLIP 的级联，用于零样本医学图像分割。 |\n#### 生物图像分析\n| 标题 | 演示 | 论文页 | 项目页 | 代码库 | 所属机构 | 描述 |\n|:---:|:---:|:---:|:---:| :---:| :---:|:---:|\n| 显微镜下的 Segment Anything | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_f80eda0c25c7.png) | [bioRxiv](https:\u002F\u002Fdoi.org\u002F10.1101\u002F2023.08.21.554208) | [演示](https:\u002F\u002Fcomputational-cell-analytics.github.io\u002Fmicro-sam\u002Fmicro_sam.html#installation) | [代码](https:\u002F\u002Fgithub.com\u002Fcomputational-cell-analytics\u002Fmicro-sam) | 德国哥廷根大学 | 显微镜下的 Segment Anything 实现了显微镜数据的自动和交互式标注。它基于 Segment Anything 构建，并专门针对显微镜和其他生物成像数据进行了优化。其核心组件包括：\u003Cul>\u003Cli>`micro_sam` 工具，用于通过 napari 进行交互式数据标注。\u003C\u002Fli>\u003Cli>`micro_sam` 库，可将 Segment Anything 应用于 2D 和 3D 数据，或根据您的数据进行微调。\u003C\u002Fli>\u003Cli>`micro_sam` 模型，这些模型已在公开可用的显微镜数据上进行了微调。\u003C\u002Fli> 我们的目标是为显微镜数据构建快速且交互式的标注工具 |\n\n#### 图像修复\n| 标题 | 演示 | 论文页 | 项目页 | 代码库 | 所属机构 | 描述 |\n|:---:|:---:|:---:|:---:| :---:| :---:|:---:|\n| 修复任何内容 | ![img1](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_707dae910137.png)|[arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.06790)| - |[代码](https:\u002F\u002Fgithub.com\u002Fgeekyutao\u002FInpaint-Anything) 
| 中国科学技术大学 & EIT | SAM + 图像修复，能够平滑地移除物体。|\n| SAM + Stable Diffusion 用于文本到图像的图像修复 | ![img1](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_ff8861173ce3.png) | - | [项目](https:\u002F\u002Fwww.comet.com\u002Fexamples\u002Fdemo-text-to-inpainting-sam-stablediffusion\u002Fview\u002FbRnI022tXQUdKGsVCFmjFRRtT\u002F) | [代码](https:\u002F\u002Fcolab.research.google.com\u002Fdrive\u002F1B7L4cork9UFTtIB02EntjiZRLYuqJS2b#scrollTo=LtZghyHoJabf) | comet | Grounding DINO + SAM + Stable Diffusion |\n\n#### 伪装目标检测\n| 标题 | 演示 | 论文页 | 项目页 | 代码库 | 所属机构 | 描述 |\n|:---:|:---:|:---:|:---:| :---:| :---:|:---:|\n| SAMCOD | - | [arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.04709) | - | [代码](https:\u002F\u002Fgithub.com\u002Fluckybird1994\u002FSAMCOD) | - | SAM + 伪装目标检测 (COD) 任务。|\n\n#### 视频帧插值\n| 标题 | 演示 | 论文页 | 项目页 | 代码库 | 所属机构 | 描述 |\n|:---:|:---:|:---:|:---:| :---:| :---:|:---:|\n| 更清晰的帧，随时可用：解决视频帧插值中的速度模糊问题 | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_10660d03788b.gif) | [arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.08007) | [项目页](https:\u002F\u002Fzzh-tech.github.io\u002FInterpAny-Clearer\u002F) & [交互式演示](http:\u002F\u002Fai4sports.opengvlab.com\u002Finterpany-clearer\u002F) | [代码](https:\u002F\u002Fgithub.com\u002Fzzh-tech\u002FInterpAny-Clearer) | 上海人工智能实验室 & Snap Inc. 
| 使用 SAM 的可编辑视频帧插值。 |\n\n#### 低层视觉\n| 标题 | 演示 | 论文页 | 项目页 | 代码库 | 所属机构 | 描述 |\n|:---:|:---:|:---:|:---:| :---:| :---:|:---:|\n| 视频超分辨率中的 Segment Anything | ![img1](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_57d361496527.png)|[arXiv](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2305.06524.pdf)| - | - | - | 将 SAM 用于低层视觉的第一步。|\n| SAM-IQA | ![img1](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_6691507f1056.png)|[arXiv](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2307.04455.pdf)| - | [代码](https:\u002F\u002Fgithub.com\u002FHedlen\u002FSAM-IQA) | Megvii | 首次将 SAM 引入 IQA 领域，并展示了其在该领域的强大泛化能力。|\n\n#### 图像抠图\n| 标题 | 演示 | 论文页面 | 项目页面 | 代码库 | 所属机构 | 描述 |\n|:---:|:---:|:---:|:---:| :---:| :---:|:---:|\n|Matte Anything|![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_b20d92614357.png)![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_2db493dbb787.gif)|[arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.04121)| - | [代码](https:\u002F\u002Fgithub.com\u002Fhustvl\u002FMatte-Anything)| 华中科技大学视觉实验室| 一个交互式的自然图像抠图系统，对不透明和透明物体均表现出色 |\n| Matting Anything | ![img1](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_4443fdff6ac5.png) | [arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.05399) | [项目页面](https:\u002F\u002Fhuggingface.co\u002Fpapers\u002F2306.05399) | [代码](https:\u002F\u002Fgithub.com\u002FSHI-Labs\u002FMatting-Anything) | SHI Labs | 利用SAM的特征图，并采用Mask-to-Matte模块来预测alpha抠图。|\n\n#### 机器人\n| 标题 | 演示 | 论文页面 | 项目页面 | 代码库 | 所属机构 | 描述 |\n|:---:|:---:|:---:|:---:| :---:| :---:|:---:|\n| Instruct2Act | ![img1](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_3110daa6f48d.png)|[arXiv](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2305.11176.pdf)| - | 
[代码](https:\u002F\u002Fgithub.com\u002FOpenGVLab\u002FInstruct2Act) | OpenGVLab | SAM在机器人领域的应用。|\n\n#### 生物信息学\n| 标题 | 演示 | 论文页面 | 项目页面 | 代码库 | 所属机构 | 描述 |\n|:---:|:---:|:---:|:---:| :---:| :---:|:---:|\n| IAMSAM | ![img1](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_acc02e84f41d.png)|[bioRxiv](https:\u002F\u002Fwww.biorxiv.org\u002Fcontent\u002F10.1101\u002F2023.05.25.542052v1)| - | [代码](https:\u002F\u002Fgithub.com\u002Fportrai-io\u002FIAMSAM) | Portrai Inc. | 一种用于空间转录组学分析的SAM应用。|\n  \n#### 3D\n| 标题 | 演示 | 论文页面 | 项目页面 | 代码库 | 所属机构 | 描述 |\n|:---:|:---:|:---:|:---:| :---:| :---:|:---:|\n| Point-SAM| ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_a35bf7bf169d.gif)|[arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.17741)| [页面](https:\u002F\u002Fpoint-sam.github.io) | [代码](https:\u002F\u002Fgithub.com\u002Fzyc00\u002FPoint-SAM) | UCSD | 一种开放世界、原生可提示的3D点云分割方法。|\n| SAMPro3D | ![img2](https:\u002F\u002Fgithub.com\u002FGAP-LAB-CUHK-SZ\u002FSAMPro3D\u002Fblob\u002Fmain\u002Ffigures\u002Fteaser_ori.jpg)|[arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.17707)| [页面](https:\u002F\u002Fmutianxu.github.io\u002Fsampro3d\u002F) | [代码](https:\u002F\u002Fgithub.com\u002FGAP-LAB-CUHK-SZ\u002FSAMPro3D) | CUHKSZ, MSRA | 一种新颖的方法，通过将SAM应用于2D帧来分割任何3D室内场景，无需任何训练、调优、蒸馏或3D预训练网络。|\n| Seal | ![img1](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_2714a19e265f.gif)|[arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.09347)| [页面](https:\u002F\u002Fldkong.com\u002FSeal) | [代码](https:\u002F\u002Fgithub.com\u002Fyouquanl\u002FSegment-Any-Point-Cloud) | - | 一个能够利用2D视觉基础模型进行大规模3D点云自监督学习的框架。|\n| TomoSAM | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_7082f470a63c.png) | [arXiv](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.08609.pdf) | 
[视频教程](https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=4nXCYrvBSjk) | [代码](https:\u002F\u002Fgithub.com\u002Ffsemerar\u002FSlicerTomoSAM) | - | 一个基于SAM扩展的3D Slicer插件，用于辅助分割来自断层扫描或其他成像技术的3D数据。|\n| SegmentAnythingin3D | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_3764dc6e98d6.png) | [arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.12308.pdf) | [项目](https:\u002F\u002Fjumpat.github.io\u002FSA3D\u002F) | [代码](hhttps:\u002F\u002Fgithub.com\u002FJumpat\u002FSegmentAnythingin3D) | - | 一个名为SA3D的新颖3D“万物分割”框架。|\n\n#### 遥感\n| 标题 | 演示 | 论文页面 | 项目页面 | 代码库 | 所属机构 | 描述 |\n|:---:|:---:|:---:|:---:| :---:| :---:|:---:|\n| RSPrompter | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_3deb40921df2.jpg) | [arXiv](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.16269.pdf) | [项目页面](https:\u002F\u002Fkyanchen.github.io\u002FRSPrompter\u002F) | [代码](https:\u002F\u002Fgithub.com\u002FKyanChen\u002FRSPrompter) | 北京航空航天大学 | 一种基于SAM的遥感图像自动实例分割方法。|\n| SAM-CD | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_befef9435725.png) | [arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.01429) | - | [代码](https:\u002F\u002Fgithub.com\u002FggsDing\u002FSAM-CD) | 中国人民解放军信息工程大学 | 一个样本高效的变化检测框架，采用SAM作为视觉编码器。|\n| SAM-Road: 用于道路网络图提取的Segment Anything Model | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_29921ad44f39.png) | [arXiv](http:\u002F\u002Farxiv.org\u002Fabs\u002F2403.16051) | - | [代码](https:\u002F\u002Fgithub.com\u002Fhtcr\u002Fsam_road) | 卡内基梅隆大学 | 一种简单快速的应用SAM进行大规模道路网络矢量化提取的方法。该方法在达到最先进精度的同时，速度提升了40倍。|\n\n#### 跟踪\n| 标题 | 演示 | 论文页面 | 项目页面 | 代码库 | 所属机构 | 描述 |\n|:---:|:---:|:---:|:---:| :---:| :---:|:---:|\n| Follow Anything | ![img1](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_b7a7a3f33c79.png) | 
[arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.05737) | [页面](https:\u002F\u002Fhuggingface.co\u002Fpapers\u002F2308.05737) | [代码](https:\u002F\u002Fgithub.com\u002Falaamaalouf\u002FFollowAnything) | MIT、哈佛大学 | 一个开放词汇、多模态的模型，可实时检测、跟踪并跟随任何物体。|\n| Track-Anything | [视频](https:\u002F\u002Fgithub.com\u002Fgaomingqi\u002FTrack-Anything\u002Fraw\u002Fmaster\u002Fassets\u002Favengers.gif) | [arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.11968) | - | [代码](https:\u002F\u002Fgithub.com\u002Fgaomingqi\u002FTrack-Anything) | MIT、哈佛大学 | 一个开放词汇、多模态的模型，可实时检测、跟踪并跟随任何物体。|\n| SAM-Track | [视频](https:\u002F\u002Fcamo.githubusercontent.com\u002F149f974fc6e13f3764e30d843880fa1e15e0fbecf607f905805d84290ec87155\u002F68747470733a2f2f7265732e636c6f7564696e6172792e636f6d2f6d6172636f6d6f6e74616c62616e6f2f696d6167652f75706l6f61642f76316838317331333039352f766964656f5f746f5f6d61726b646f776e2f696d616765732f796f75747562652d2d555068747066316b3648412d63303562353861633665623463343730303833316232623330373063643430332e6jpng) | [arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.06558) | - | [代码](https:\u002F\u002Fgithub.com\u002Fz-x-yang\u002FSegment-and-Track-Anything) | MIT、哈佛大学 | 一个名为“分割与跟踪万物”（SAMTrack）的框架，允许用户精确有效地分割和跟踪视频中的任何物体。|\n\n#### 视听定位与分割\n| 标题 | 演示 | 论文页 | 项目页 | 代码库 | 所属机构 | 描述 |\n|:---:|:---:|:---:|:---:| :---:| :---:|:---:|\n| AV-SAM | ![img1](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_cad01fec11f2.png) | [arXiv](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2305.01836.pdf) | - | [代码](https:\u002F\u002Fgithub.com\u002Falaamaalouf\u002FFollowAnything) | 卡内基梅隆大学 | 一个基于SAM的简单而高效的视听定位与分割框架。|\n\n#### 对抗攻击\n| 标题 | 演示 | 论文页 | 项目页 | 代码库 | 所属机构 |                                               描述                                               |\n|:---:|:---:|:---:|:---:| :---:| :---:|:-------------------------------------------------------------------------------------------------------:|\n| Attack-SAM | - | 
[arXiv](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2305.00866.pdf) | - | - | 韩国科学技术院 | 首次全面研究如何利用对抗样本攻击SAM的工作。 |\n\n#### 多媒体取证\n| 标题 | 演示 | 论文页 | 项目页 | 代码库 | 所属机构 |                                               描述                                               |\n|:---:|:---:|:---:|:---:| :---:| :---:|:-------------------------------------------------------------------------------------------------------:|\n| SAFIRE：分割任何伪造图像区域 | ![safire_image](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_8e6a1d636ba5.png) | [arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.08197) | - | [代码](https:\u002F\u002Fgithub.com\u002Fmjkwon2021\u002FSAFIRE) | 韩国科学技术院 | 将SAM的点提示能力扩展到图像取证领域，实现精确的源信息感知分割，用于伪造内容的定位。 |\n\n### 衍生项目\n#### 图像分割任务\n| 标题 | 演示 | 项目页面 | 代码库 | 所属机构 | 描述 |\n|:---:|:---:|:---:|:---:| :---:| :---:|\n| Grounded Segment Anything | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_3447d7544f77.png)|[Colab](https:\u002F\u002Fgithub.com\u002Fcamenduru\u002Fgrounded-segment-anything-colab) & [Huggingface](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002Fyizhangliu\u002FGrounded-Segment-Anything) | [代码](https:\u002F\u002Fgithub.com\u002FIDEA-Research\u002FGrounded-Segment-Anything) | - | 结合 Grounding DINO 和 Segment Anything| - | \n| GroundedSAM 异常检测 | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_6d76861fa86f.png) | - | [代码](https:\u002F\u002Fgithub.com\u002Fcaoyunkang\u002FGroundedSAM-zero-shot-anomaly-detection)| - | 使用 Grounding DINO + SAM 来分割任何异常。 |\n| 语义 Segment Anything | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_3b0b9346cb00.png) |- | [代码](https:\u002F\u002Fgithub.com\u002Ffudan-zvg\u002FSemantic-Segment-Anything) | 复旦大学 | 一个密集类别标注引擎。 |\n| Magic Copy | 
![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_2927be26e40b.png) | - |[代码](https:\u002F\u002Fgithub.com\u002Fkevmo314\u002Fmagic-copy) | - | Magic Copy 是一款使用 SAM 的 Chrome 扩展程序。 |\n| YOLO-World + EfficientViT SAM | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_bdec792d3ab9.png) | 🤗 [HuggingFace Space](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002Fcurt-park\u002Fyolo-world-with-efficientvit-sam) | [代码](https:\u002F\u002Fgithub.com\u002FCurt-Park\u002Fyolo-world-with-efficientvit-sam) | - | 使用 YOLO-World + EfficientViT SAM 实现高效的开放词汇目标检测和分割 |\n| Clip 辅助的 Segment Anything | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_0111c2fb057b.png) | 🤗 [HuggingFace Space](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002Fcurt-park\u002Fsegment-anything-with-clip) |[代码](https:\u002F\u002Fgithub.com\u002FCurt-Park\u002Fsegment-anything-with-clip) | -  | SAM + CLIP| \n| SAM-Clip | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_b2ae2caeb471.png) | - |[代码](https:\u002F\u002Fgithub.com\u002Fmaxi-w\u002FCLIP-SAM) | - | SAM + CLIP。|\n| Prompt Segment Anything | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_f60e78129ebd.jpg)| - | [代码](https:\u002F\u002Fgithub.com\u002FRockeyCoss\u002FPrompt-Segment-Anything)| - | SAM + 零样本实例分割。|\n| RefSAM | - | - |[代码](https:\u002F\u002Fgithub.com\u002Fhelblazer811\u002FRefSAM) | - | 在引用式图像分割任务上评估 SAM 的基本性能。|\n| SAM-RBox | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_34dfe912a346.png) | - |[代码](https:\u002F\u002Fgithub.com\u002FLi-Qingyun\u002Fsam-mmrotate) | - | SAM 在 MMRotate 中用于生成旋转边界框的实现。|\n| 开放词汇 Segment Anything | 
![img1](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_d7cf9404daf7.png)| - |[代码](https:\u002F\u002Fgithub.com\u002Fngthanhtin\u002Fowlvit_segment_anything) | - | 通过结合 Google 的 OWL-ViT 和 SAM 实现的一个有趣演示。|\n| SegDrawer |![img1](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_82a6f07b21c1.gif)![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_231cc16b056e.gif) | - |[代码](https:\u002F\u002Fgithub.com\u002Flujiazho\u002FSegDrawer) | - | 一个简单的基于 Web 的静态掩码绘制工具，支持使用 SAM 进行语义绘图。|\n| AnyLabeling |![](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_be08dd2a82ab.png) | [YoutubeDemo](https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=5qVJiYNX5Kk) |[代码](https:\u002F\u002Fgithub.com\u002Fvietanhdev\u002Fanylabeling) | - | SAM + Labelme + LabelImg + 自动标注。|\n| ISAT 结合 segment anything |![](https:\u002F\u002Fgithub.com\u002FyatengLG\u002FISAT_with_segment_anything\u002Fblob\u002Fmaster\u002Fdisplay\u002F%E6%A0%88%E6%B3%A8.gif) | [YoutubeDemo](https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=yLdZCPmX-Bc) [BiliBili Demo](https:\u002F\u002Fwww.bilibili.com\u002Fvideo\u002FBV1or4y1R7EJ\u002F) | [代码](https:\u002F\u002Fgithub.com\u002FyatengLG\u002FISAT_with_segment_anything) | - | 基于 SAM（segment anything 模型）的标注工具，支持 SAM、sam-hq、MobileSAM、EdgeSAM 等。|\n| Annotation Anything Pipeline |![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_51f6c904020d.png) | - |[代码](https:\u002F\u002Fgithub.com\u002FYuqifan1117\u002FAnnotation-anything-pipeline) | - | GPT + SAM。|\n| Roboflow Annotate |![roboflow-sam-optimized-faster](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_e50256f01305.gif) | [App](https:\u002F\u002Fapp.roboflow.com) |[博客](https:\u002F\u002Fblog.roboflow.com\u002Flabel-data-segment-anything-model-sam\u002F) | Roboflow | 
基于 SAM 的辅助标注，用于训练计算机视觉模型。|\n| SALT |![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_731124b0c7c4.gif) | - |[代码](https:\u002F\u002Fgithub.com\u002Fanuragxel\u002Fsalt) | - | 一个为图像标注添加基础界面的工具，并将生成的掩码保存为 COCO 格式。|\n| SAM U Specify |![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_e48f9ecef610.png) | - |[代码](https:\u002F\u002Fgithub.com\u002FMaybeShewill-CV\u002Fsegment-anything-u-specify) | - | 使用 SAM 和 CLIP 模型来分割您想要的独特实例。|\n| SAM web UI |![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_c8c392f2390f.gif) | [App](https:\u002F\u002Fsegment-anything-webui.vercel.app\u002F) |[代码](https:\u002F\u002Fgithub.com\u002FKingfish404\u002Fsegment-anything-webui\u002F) | - | 这是 SAM 的全新 Web 界面。|\n| Finetune Anything | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_e3121beec052.png) | - | [代码](https:\u002F\u002Fgithub.com\u002Fziqi-jin\u002Ffinetune-anything) | - | 一种基于 SAM 的、具有类别感知的一阶段微调模型训练工具。|\n| NanoSAM | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_b1e545ea7545.jpg) | - | [代码](https:\u002F\u002Fgithub.com\u002FNVIDIA-AI-IOT\u002Fnanosam) | NVIDIA | 一种经过蒸馏的 Segment Anything (SAM) 模型，能够在 NVIDIA TensorRT 上实现实时运行。|\n| Segment-Anything-UI |  ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_7cb9749e8509.png) | - | [代码](https:\u002F\u002Fgithub.com\u002Fbranislavhesko\u002Fsegment-anything-ui) | - | 一款基于 PySide6 的 Segment Anything 注释工具。|\n| Segment-Anything-2-UI | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_76b71579fce5.png) | - | [代码](https:\u002F\u002Fgithub.com\u002Fbranislavhesko\u002Fsegment-anything-2-ui) | - | 一款基于 PySide6 的 Segment Anything 2 注释工具。支持多目标视频跟踪。 |\n\n#### 视频分割任务\n| 标题 | 演示 | 项目页面 | 代码库 | 所属机构 | 描述 
|\n|:---:|:---:|:---:|:---:| :---:| :---:| \n| MetaSeg | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_a393590cefb3.gif) |[HuggingFace](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002FArtGAN\u002FSegment-Anything-Video) |[代码](https:\u002F\u002Fgithub.com\u002Fkadirnar\u002Fsegment-anything-video) | - | SAM + 视频。 |\n| SAM-Track | [视频](https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=UPhtpf1k6HA&feature=youtu.be&themeRefresh=1) |[YoutubeDemo](https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=Xyd54AngvV8) |[代码](https:\u002F\u002Fgithub.com\u002Fz-x-yang\u002FSegment-and-Track-Anything) | 浙江大学 | 该项目基于SAM和DeAOT，专注于视频中目标的分割与跟踪。 |\n\n#### 医学图像分割任务\n| 标题 | 演示 | 项目页面 | 代码库 | 所属机构 | 描述 |\n|:---:|:---:|:---:|:---:| :---:| :---:| \n| SAM in Napari |[视频](https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=OPE1Xnw487E)|- |[代码](https:\u002F\u002Fgithub.com\u002FMIC-DKFZ\u002Fnapari-sam) | - | 结合Napari的SAM进行任意物体分割。|\n| SAM Medical Imaging |![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_ba9a89a17f70.png)|- |[代码](https:\u002F\u002Fgithub.com\u002Famine0110\u002FSAM-Medical-Imaging) | - | 用于医学影像的SAM。|\n\n#### 图像修复任务\n| 标题 | 演示 | 项目页面 | 代码库 | 所属机构 | 描述 |\n|:---:|:---:|:---:|:---:| :---:| :---:| \n| SegAnythingPro | ![img](https:\u002F\u002Fcamo.githubusercontent.com\u002F7d5fb67ffcd6c209cf22ffe302d95b3b46d36b92116fe216022bf2a359c4b588\u002F68747470733a2f2f6a6968756c61622e636f6d2f676f646c792f666765722f2d2f7261772f6d61696e2f696d616765732f323032332f30342f31315f31325f345f34325f32303233303431313132303433392e706e67)|- |[代码](https:\u002F\u002Fgithub.com\u002Fjinfagang\u002FDisappear) | - | SAM + 图像修复\u002F替换。|\n\n\n#### 3D任务\n| 标题 | 演示 | 项目页面 | 代码库 | 所属机构 | 描述 |\n|:---:|:---:|:---:|:---:| :---:| :---:| \n| 3D-Box | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_a7ee4c79894b.png)|- 
|[代码](https:\u002F\u002Fgithub.com\u002Fdvlab-research\u002F3D-Box-Segment-Anything) | - | 通过将SAM与VoxelNeXt结合，将其扩展到3D感知领域。|\n| Anything 3DNovel View | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_14d431a6b777.jpeg)|- |[代码](https:\u002F\u002Fgithub.com\u002FAnything-of-anything\u002FAnything-3D) | - | SAM + [Zero 1-to-3](https:\u002F\u002Fgithub.com\u002Fcvlab-columbia\u002Fzero123)。|\n| Any 3DFace | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_c9e341ea5a14.jpg)![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_bbad962d158a.gif)|- |[代码](https:\u002F\u002Fgithub.com\u002FAnything-of-anything\u002FAnything-3D) | - | SAM + [HRN](https:\u002F\u002Fyounglbw.github.io\u002FHRN-homepage\u002F)。|\n| Segment Anything 3D | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_eb708b851b38.png) | - | [代码](https:\u002F\u002Fgithub.com\u002FPointcept\u002FSegmentAnything3D) | Pointcept | 通过将2D图像的分割信息迁移到3D空间，将Segment Anything扩展到3D感知领域。|\n\n#### 图像生成任务\n| 标题 | 演示 | 项目页面 | 代码库 | 所属机构 | 描述 |\n|:---:|:---:|:---:|:---:| :---:| :---:| \n| Edit Anything | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_c0e0cab7f21d.jpg) | - |[代码](https:\u002F\u002Fgithub.com\u002Fsail-sg\u002FEditAnything) | - | 对图像中的任何内容进行编辑和生成。|\n| Image Edit Anything |![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_976b735b0350.png)| - |[代码](https:\u002F\u002Fgithub.com\u002Ffeizc\u002FIEA) | - | Stable Diffusion + SAM。|\n| SAM for Stable Diffusion Webui |![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_7e4f2ffe8d54.png)| - |[代码](https:\u002F\u002Fgithub.com\u002Fcontinue-revolution\u002Fsd-webui-segment-anything) | - | Stable Diffusion + SAM。|\n\n#### 遥感任务\n| 标题 
| 演示 | 项目页面 | 代码库 | 所属机构 | 描述 |\n|:---:|:---:|:---:|:---:| :---:| :---:| \n| Earth Observation Tools | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_dbf62cc39f6f.png) |[Colab](https:\u002F\u002Fcolab.research.google.com\u002Fdrive\u002F1RC1V68tD1O-YissBq9nOvS2PHEjAsFkA?usp=share_link) |[代码](https:\u002F\u002Fgithub.com\u002Faliaksandr960\u002Fsegment-anything-eo) | - | SAM + 遥感。 |\n\n#### 运动目标检测任务\n| 标题 | 演示 | 项目页面 | 代码库 | 所属机构 | 描述 |\n|:---:|:---:|:---:|:---:| :---:| :---:| \n| Moving Object Detection | ![img](https:\u002F\u002Fcamo.githubusercontent.com\u002Fcd073471951017a15cd445062d196242a446eb20acd90b2afa1728f239465fc7\u002F687474703a2f2f7777772e616368616c646176652e636f6d2f70726f6a656374732f616e797468696e672d746861742d6d6f7665732f766964656f732f5Z584N364A2d747261636b65642d776974682d6f626a6563746e6573732d7472696d6d65642e676966) | - |[代码](https:\u002F\u002Fgithub.com\u002Fachalddave\u002Fsegment-any-moving) | - | SAM + 运动目标检测。 |\n\n\n#### OCR任务\n| 标题 | 演示 | 项目页面 | 代码库 | 所属机构 | 描述 |\n|:---:|:---:|:---:|:---:| :---:| :---:| \n| OCR-SAM | ![img](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_3f22cf5d87c6.png) | [博客](https:\u002F\u002Fwww.zhihu.com\u002Fquestion\u002F593914819\u002Fanswer\u002F2976012032)|[代码](https:\u002F\u002Fgithub.com\u002Fyeungchenwa\u002FOCR-SAM) | - | 基于SAM的光学字符识别。 |\n\n\n\n### 前端框架\n\n#### SAMJS\n| 标题 | 演示 | 项目页面 | 代码库 | 所属机构 | 描述 |\n|:---:|:---:|:---:|:---:| :---:| :---:| \n| SAMJS | ![samjs](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_readme_36fd19ec1a80.png) | [demo](http:\u002F\u002Fsamjs.antv.vision\u002Fdemos)|[代码](https:\u002F\u002Fgithub.com\u002Fantvis\u002FSAMJS) | - | SAM的JS SDK，支持遥感数据的分割和矢量化|\n\n## 致谢\n本仓库中的部分演示内容借用了原作者的作品，我们对此深表感谢。\n\n## 许可证\n本项目采用MIT许可证发布。更多信息请参阅[LICENSE](LICENSE)文件。","# Awesome Segment Anything 快速上手指南\n\n`awesome-segment-anything` 是一个汇总了 Segment Anything (SAM) 及其衍生模型（如 
SAM2, MobileSAM, MedSAM 等）在计算机视觉各领域应用的研究仓库。本指南将帮助你快速搭建基础环境并运行核心的 SAM 模型。\n\n## 环境准备\n\n在开始之前，请确保你的开发环境满足以下要求：\n\n*   **操作系统**: Linux (推荐 Ubuntu 18.04+) 或 macOS。Windows 用户建议使用 WSL2。\n*   **Python 版本**: Python 3.8 - 3.10\n*   **硬件要求**: \n    *   推荐使用 NVIDIA GPU (显存 >= 8GB) 以获得最佳推理速度。\n    *   仅使用 CPU 也可运行，但速度较慢。\n*   **前置依赖**:\n    *   `git`\n    *   `conda` (推荐用于管理虚拟环境) 或 `pip`\n    *   `PyTorch` (需匹配你的 CUDA 版本)\n\n## 安装步骤\n\n### 1. 创建并激活虚拟环境\n建议使用 Conda 创建独立的虚拟环境以避免依赖冲突。\n\n```bash\nconda create -n sam-env python=3.9 -y\nconda activate sam-env\n```\n\n### 2. 安装 PyTorch\n请访问 [PyTorch 官网](https:\u002F\u002Fpytorch.org\u002Fget-started\u002Flocally\u002F) 获取适合你硬件的命令。以下是基于 CUDA 11.8 的示例（国内用户可使用清华源加速）：\n\n```bash\n# 使用清华源安装 PyTorch (CUDA 11.8 版本示例)\npip install torch torchvision torchaudio --index-url https:\u002F\u002Fpypi.tuna.tsinghua.edu.cn\u002Fsimple\n```\n\n### 3. 克隆仓库并安装核心依赖\n克隆官方 `segment-anything` 代码库（这是本列表中大多数衍生项目的基础），并安装所需依赖。\n\n```bash\n# 克隆 Meta 官方 SAM 仓库\ngit clone https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fsegment-anything.git\ncd segment-anything\n\n# 安装依赖\npip install -e .\n```\n\n> **提示**：如果你需要运行特定的衍生模型（如 `MobileSAM` 或 `MedSAM`），请前往 [awesome-segment-anything](https:\u002F\u002Fgithub.com\u002Fxmed-lab\u002Fawesome-segment-anything) 仓库对应的 \"Code base\" 链接克隆具体项目，安装步骤通常类似。\n\n### 4. 下载预训练模型\n你需要下载预训练的 Checkpoint 文件。可以从官方链接或国内镜像下载。\n\n```bash\n# 下载 ViT-H 模型 (高性能版)\nwget https:\u002F\u002Fdl.fbaipublicfiles.com\u002Fsegment_anything\u002Fsam_vit_h_4b8939.pth\n\n# 或者下载 ViT-L 模型 (平衡版)\n# wget https:\u002F\u002Fdl.fbaipublicfiles.com\u002Fsegment_anything\u002Fsam_vit_l_0b3195.pth\n\n# 或者下载 ViT-B 模型 (轻量版)\n# wget https:\u002F\u002Fdl.fbaipublicfiles.com\u002Fsegment_anything\u002Fsam_vit_b_01ec64.pth\n```\n\n## 基本使用\n\n以下是一个最简单的 Python 脚本示例，展示如何加载模型并对图像进行自动掩码生成。\n\n1.  准备一张测试图片（例如 `test.jpg`）。\n2.  
创建文件 `demo.py` 并填入以下代码：\n\n```python\nimport cv2\nimport torch\nfrom segment_anything import SamPredictor, sam_model_registry\n\n# 配置参数\nmodel_type = \"vit_h\"  # 根据下载的模型选择: vit_h, vit_l, 或 vit_b\ncheckpoint = \"sam_vit_h_4b8939.pth\"\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n# 初始化模型\nsam = sam_model_registry[model_type](checkpoint=checkpoint)\nsam.to(device=device)\npredictor = SamPredictor(sam)\n\n# 读取图像\nimage = cv2.imread('test.jpg')\nimage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n# 设置图像\npredictor.set_image(image)\n\n# 执行预测 (不使用任何 prompt，尝试生成分割掩码)\n# 注意：SAM 通常需要点、框或文本作为 prompt。\n# 这里演示自动模式下的全图分割逻辑通常需要结合其他工具，\n# 标准用法是提供一个中心点作为示例：\ninput_point = [[500, 375]]\ninput_label = [1]\n\nmasks, scores, logits = predictor.predict(\n    point_coords=input_point,\n    point_labels=input_label,\n    multimask_output=True,\n)\n\n# 选择得分最高的掩码\nmask = masks[scores.argmax()]\n\n# 可视化结果\nimport matplotlib.pyplot as plt\n\nplt.figure(figsize=(10, 5))\nplt.subplot(1, 2, 1)\nplt.imshow(image)\nplt.title(\"Original Image\")\nplt.axis('off')\n\nplt.subplot(1, 2, 2)\nplt.imshow(image)\nplt.imshow(mask, cmap='gray', alpha=0.5)\nplt.title(f\"Segmented Mask (Score: {scores.max():.2f})\")\nplt.axis('off')\n\nplt.tight_layout()\nplt.show()\n```\n\n3.  
运行脚本：\n\n```bash\npython demo.py\n```\n\n运行成功后，你将看到原图与分割结果的对比图。对于更多特定任务（如医疗影像、视频分割、无提示分割等），请参考 `awesome-segment-anything` 列表中对应项目的具体文档和代码实现。","某医疗 AI 初创团队正致力于开发一款辅助医生快速标注肺部 CT 影像中病灶区域的系统，急需集成最新的分割算法以提升标注效率。\n\n### 没有 awesome-segment-anything 时\n- **信息搜集耗时巨大**：研究人员需手动在 arXiv、GitHub 等多个平台分散搜索\"Segment Anything\"相关论文，每天耗费数小时筛选无效信息。\n- **技术选型盲目低效**：面对海量衍生项目，难以区分哪些适用于医学图像（如处理模糊边界），哪些仅针对自然图像，导致多次试错成本高昂。\n- **代码复现门槛高**：找不到经过验证的代码库或官方项目链接，常因依赖缺失或文档不全导致环境配置失败，拖延研发进度。\n- **前沿动态滞后**：无法及时知晓如 SAM-Track 或医学专用微调模型等最新突破，致使技术方案在立项初期就已落后于社区水平。\n\n### 使用 awesome-segment-anything 后\n- **一站式资源聚合**：直接查阅按“医学图像分割”分类的清单，瞬间锁定数十篇高相关性论文与对应项目，搜集时间从数天缩短至几分钟。\n- **精准场景匹配**：通过清晰的分类标签（如 Medical Image Segmentation），快速识别出专门针对病灶边缘优化的衍生模型，避免通用模型的水土不服。\n- **开箱即用体验**：每个条目均附带经过验证的代码库链接和论文页，团队成员能迅速拉取代码并跑通 Demo，大幅降低复现难度。\n- **同步最新进展**：借助持续的更新日志（如新增 SaLIP 或机器人应用文章），团队能即时将业界最先进的方法论融入产品迭代，保持技术领先性。\n\nawesome-segment-anything 将原本碎片化、高成本的科研调研工作转化为标准化的流水线作业，让开发者能专注于核心业务逻辑而非重复造轮子。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHedlen_awesome-segment-anything_ccd7bd4c.gif","Hedlen","Dylan","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002FHedlen_50bdf008.png",null,"MEGVII","China, UESTC","tjhedlen@gmail.com","https:\u002F\u002Fhedlen.github.io","https:\u002F\u002Fgithub.com\u002FHedlen",1691,131,"2026-04-13T11:49:33","MIT","","未说明",{"notes":88,"python":86,"dependencies":89},"该仓库（awesome-segment-anything）是一个资源汇总列表，用于追踪和总结 Segment Anything 相关的论文和项目，本身不是一个可直接运行的软件工具，因此 README 中未提供具体的运行环境需求、依赖库或安装说明。用户需根据列表中链接的具体子项目（如 MobileSAM, FastSAM, MedSAM 等）查阅其各自的代码仓库以获取详细的运行环境要求。",[],[15,35],[92,93,94,95,96,97,98,99,100,101],"awesome-list","image-generation","inpainting","object-detection","segmentation","suvey","application","chatgpt","segment-anything","stable-diffusion","2026-03-27T02:49:30.150509","2026-04-19T09:37:50.581568",[105,110,115,120,125,130,135,140,145],{"id":106,"question_zh":107,"answer_zh":108,"source_url":109},42128,"如何在我的自定义数据集上微调 SAM 模型？","您可以参考 SAM 的原始论文，并根据您的自定义数据集进行修改。目前已有许多论文对 SAM 
进行了微调调整，使其更适合特定领域的应用。维护者在其精选列表（awesome list）中也收录了一些相关文章供参考。","https:\u002F\u002Fgithub.com\u002FHedlen\u002Fawesome-segment-anything\u002Fissues\u002F39",{"id":111,"question_zh":112,"answer_zh":113,"source_url":114},42129,"我想将自己的 SAM 相关项目或论文添加到该列表中，应该如何操作？","您可以将您的工作添加到仓库中并提交一个 Pull Request（合并请求）。维护者会在审查后将其合并到主分支中。这是添加新项目或论文的标准流程。","https:\u002F\u002Fgithub.com\u002FHedlen\u002Fawesome-segment-anything\u002Fissues\u002F51",{"id":116,"question_zh":117,"answer_zh":118,"source_url":119},42130,"是否有适用于移动设备的轻量级 SAM 版本？","是的，有一个名为 MobileSAM 的项目旨在使 SAM 轻量化以适用于移动应用。该项目已被维护者合并到精选列表中，您可以查看相关仓库获取更多信息。","https:\u002F\u002Fgithub.com\u002FHedlen\u002Fawesome-segment-anything\u002Fissues\u002F22",{"id":121,"question_zh":122,"answer_zh":123,"source_url":124},42131,"有没有用于缺陷检测（Defect Detection）的 SAM 变体？","有的，DefectSAM 是专门用于缺陷检测的 SAM 变体。其论文链接为 https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.10245，代码仓库位于 https:\u002F\u002Fgithub.com\u002Fbozhenhhu\u002FDefectSAM，该项目已被收录。","https:\u002F\u002Fgithub.com\u002FHedlen\u002Fawesome-segment-anything\u002Fissues\u002F34",{"id":126,"question_zh":127,"answer_zh":128,"source_url":129},42132,"是否有 NanoSAM 的 C++ 实现以支持实时应用？","是的，有一个在 Windows 上使用 C++ 实现的 NanoSAM 项目，专为实时分割应用设计。代码仓库地址为：https:\u002F\u002Fgithub.com\u002Fspacewalk01\u002Fnanosam-cpp，已被维护者合并收录。","https:\u002F\u002Fgithub.com\u002FHedlen\u002Fawesome-segment-anything\u002Fissues\u002F42",{"id":131,"question_zh":132,"answer_zh":133,"source_url":134},42133,"是否有无需训练即可将 SAM 提升到 3D 室内场景分割的方法？","有的，SAMPro3D 可以将 SAM 提升至 3D 室内场景分割，且不需要任何训练、微调或蒸馏过程，能取得优异的分割结果。项目主页为：https:\u002F\u002Fmutianxu.github.io\u002Fsampro3d\u002F，已收录于 3D 类别中。","https:\u002F\u002Fgithub.com\u002FHedlen\u002Fawesome-segment-anything\u002Fissues\u002F37",{"id":136,"question_zh":137,"answer_zh":138,"source_url":139},42134,"是否有针对医学图像分割的 SAM 适配器或零样本方法？","有的。例如 'Medical SAM Adapter' 提供了医学领域的适配代码（https:\u002F\u002Fgithub.com\u002FKidsWithTokens\u002FMedical-SAM-Adapter）；另外 'SaLIP' 提出了结合 SAM 和 CLIP 
的级联方法，用于医学图像的零样本分割（代码：https:\u002F\u002Fgithub.com\u002Faleemsidra\u002FSaLIP）。这些项目均已被收录。","https:\u002F\u002Fgithub.com\u002FHedlen\u002Fawesome-segment-anything\u002Fissues\u002F47",{"id":141,"question_zh":142,"answer_zh":143,"source_url":144},42135,"是否有集成到 3D Slicer 中的 SAM 扩展工具？","有的，TomoSAM 是 3D Slicer 的一个扩展插件，支持断层扫描图像的分割。相关论文见 https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.08609，视频教程见 https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=4nXCYrvBSjk，代码库为 https:\u002F\u002Fgithub.com\u002Ffsemerar\u002FSlicerTomoSAM。","https:\u002F\u002Fgithub.com\u002FHedlen\u002Fawesome-segment-anything\u002Fissues\u002F25",{"id":146,"question_zh":147,"answer_zh":148,"source_url":149},42136,"是否有适用于遥感领域的 SAM 项目？","有的，SAM-CD 是一个面向遥感变化检测的项目。论文链接为 http:\u002F\u002Farxiv.org\u002Fabs\u002F2309.01429，代码仓库为 https:\u002F\u002Fgithub.com\u002FggsDing\u002FSAM-CD，已被维护者合并收录。","https:\u002F\u002Fgithub.com\u002FHedlen\u002Fawesome-segment-anything\u002Fissues\u002F32",[]]