[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-augmentedstartups--AS-One":3,"tool-augmentedstartups--AS-One":61},[4,18,26,36,44,53],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":17},4358,"openclaw","openclaw\u002Fopenclaw","OpenClaw 是一款专为个人打造的本地化 AI 助手，旨在让你在自己的设备上拥有完全可控的智能伙伴。它打破了传统 AI 助手局限于特定网页或应用的束缚，能够直接接入你日常使用的各类通讯渠道，包括微信、WhatsApp、Telegram、Discord、iMessage 等数十种平台。无论你在哪个聊天软件中发送消息，OpenClaw 都能即时响应，甚至支持在 macOS、iOS 和 Android 设备上进行语音交互，并提供实时的画布渲染功能供你操控。\n\n这款工具主要解决了用户对数据隐私、响应速度以及“始终在线”体验的需求。通过将 AI 部署在本地，用户无需依赖云端服务即可享受快速、私密的智能辅助，真正实现了“你的数据，你做主”。其独特的技术亮点在于强大的网关架构，将控制平面与核心助手分离，确保跨平台通信的流畅性与扩展性。\n\nOpenClaw 非常适合希望构建个性化工作流的技术爱好者、开发者，以及注重隐私保护且不愿被单一生态绑定的普通用户。只要具备基础的终端操作能力（支持 macOS、Linux 及 Windows WSL2），即可通过简单的命令行引导完成部署。如果你渴望拥有一个懂你",349277,3,"2026-04-06T06:32:30",[13,14,15,16],"Agent","开发框架","图像","数据工具","ready",{"id":19,"name":20,"github_repo":21,"description_zh":22,"stars":23,"difficulty_score":10,"last_commit_at":24,"category_tags":25,"status":17},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,"2026-04-05T11:01:52",[14,15,13],{"id":27,"name":28,"github_repo":29,"description_zh":30,"stars":31,"difficulty_score":32,"last_commit_at":33,"category_tags":34,"status":17},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",154349,2,"2026-04-13T23:32:16",[14,13,35],"语言模型",{"id":37,"name":38,"github_repo":39,"description_zh":40,"stars":41,"difficulty_score":32,"last_commit_at":42,"category_tags":43,"status":17},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",108322,"2026-04-10T11:39:34",[14,15,13],{"id":45,"name":46,"github_repo":47,"description_zh":48,"stars":49,"difficulty_score":32,"last_commit_at":50,"category_tags":51,"status":17},6121,"gemini-cli","google-gemini\u002Fgemini-cli","gemini-cli 是一款由谷歌推出的开源 AI 命令行工具，它将强大的 Gemini 大模型能力直接集成到用户的终端环境中。对于习惯在命令行工作的开发者而言，它提供了一条从输入提示词到获取模型响应的最短路径，无需切换窗口即可享受智能辅助。\n\n这款工具主要解决了开发过程中频繁上下文切换的痛点，让用户能在熟悉的终端界面内直接完成代码理解、生成、调试以及自动化运维任务。无论是查询大型代码库、根据草图生成应用，还是执行复杂的 Git 操作，gemini-cli 都能通过自然语言指令高效处理。\n\n它特别适合广大软件工程师、DevOps 
### microsoft/markitdown — ★ 93,400 · difficulty 2 · tags: Plugin, Dev Framework

MarkItDown is a lightweight Python tool from Microsoft's AutoGen team, built to convert files of many kinds into Markdown efficiently. It can parse PDF, Word, Excel, PowerPoint, images (with OCR), audio (with speech transcription), HTML, and even YouTube links, accurately extracting key structure such as headings, lists, tables, and links.

As AI applications spread, large language models handle text well but cannot directly read complex binary office documents. MarkItDown closes exactly that gap: it turns unstructured or semi-structured files into Markdown that models understand "natively" and that is highly token-efficient, making it an ideal bridge between local files and AI analysis pipelines. It also provides an MCP (Model Context Protocol) server that integrates seamlessly with LLM applications such as Claude Desktop.

It is particularly suited to developers, data scientists, and AI researchers — especially anyone building retrieval-augmented generation (RAG) systems, running batch text analysis, or letting an AI assistant "read" local files. The output is reasonably human-readable too, but its core strength is serving machine consumption.

## augmentedstartups/AS-One — ★ 612 · 106 forks · GPL-3.0 · difficulty 4 · tags: Image, Dev Framework

> Easy & Modular Computer Vision Detectors, Trackers & SAM - Run YOLOv9,v8,v7,v6,v5,R,X in under 10 lines of code.

AS-One is a modular Python library for computer vision that makes object detection, image segmentation, pose estimation, and multi-object tracking remarkably simple. Like a unified toolbox, it seamlessly integrates the YOLO family (v5 through the latest v9, plus the R and X variants) with strong tracking algorithms (ByteTrack, DeepSORT, NorFair) and SAM (Segment Anything Model).

In the past, developers often had to write complex adapter code for each model version, or spend large amounts of time on format conversion. AS-One removes that pain: with fewer than 10 lines of code you can load models in ONNX, PyTorch, or CoreML formats and deploy a full detection-to-tracking pipeline. Its highly modular design lets you mix and match detectors and trackers like building blocks, greatly improving development speed and experimental flexibility.

It suits computer-vision developers, algorithm researchers, and engineers who need to validate prototypes quickly. Whether you are building a real-time monitoring system or running academic algorithm comparisons, AS-One offers stable, efficient support. By hiding the tedious low-level configuration, it lets you focus on core business logic and innovation — a capable companion for exploring the YOLO ecosystem and cutting-edge vision technology.

# AS-One v2: A Modular Library for YOLO Object Detection, Segmentation, Tracking & Pose

<div align="center">
  <p>
    <img width="100%" src="https://oss.gittoolsai.com/images/augmentedstartups_AS-One_readme_a9bdf8435592.jpg">
    <a href="https://www.youtube.com/watch?v=K-VcpPwcM8k" target="_blank">Watch Video</a>
  </p>

[![PyPI version](https://badge.fury.io/py/asone.svg)](https://badge.fury.io/py/asone)
[![python-version](https://img.shields.io/pypi/pyversions/supervision)](https://badge.fury.io/py/asone)
[![colab](https://colab.research.google.com/assets/colab-badge.svg)](https://drive.google.com/file/d/1xy5P9WGI19-PzRH3ceOmoCgp63K6J_Ls/view?usp=sharing)
[![version](https://img.shields.io/badge/version-2.0.0-green)](https://github.com/augmentedstartups/AS-One)
[![GPLv3 License](https://img.shields.io/badge/License-GPL%20v3-yellow.svg)](https://opensource.org/licenses/)

</div>
target=\"_blank\">Watch Video\u003C\u002Fa>\n\n    \n  \u003C\u002Fp>\n\n  \u003Cbr>\n\n  \u003Cbr>\n\n[![PyPI version](https:\u002F\u002Fbadge.fury.io\u002Fpy\u002Fasone.svg)](https:\u002F\u002Fbadge.fury.io\u002Fpy\u002Fasone)\n[![python-version](https:\u002F\u002Fimg.shields.io\u002Fpypi\u002Fpyversions\u002Fsupervision)](https:\u002F\u002Fbadge.fury.io\u002Fpy\u002Fasone)\n[![colab](https:\u002F\u002Fcolab.research.google.com\u002Fassets\u002Fcolab-badge.svg)](https:\u002F\u002Fdrive.google.com\u002Ffile\u002Fd\u002F1xy5P9WGI19-PzRH3ceOmoCgp63K6J_Ls\u002Fview?usp=sharing)\n[![start with why](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Fversion-2.0.0-green)](https:\u002F\u002Fgithub.com\u002Faugmentedstartups\u002FAS-One)\n[![GPLv3 License](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FLicense-GPL%20v3-yellow.svg)](https:\u002F\u002Fopensource.org\u002Flicenses\u002F)\n\n\u003C\u002Fdiv>\n\n## 👋 Hello\n\n==UPDATE: ASOne v2 is now out! We've updated with YOLOV9 and SAM==\n\nAS-One is a python wrapper for multiple detection and tracking algorithms all at one place. Different trackers such as `ByteTrack`, `DeepSORT` or `NorFair` can be integrated with different versions of `YOLO` with minimum lines of code.\nThis python wrapper provides YOLO models in `ONNX`, `PyTorch` & `CoreML` flavors. We plan to offer support for future versions of YOLO when they get released.\n\nThis is One Library for most of your computer vision needs.\n\nIf you would like to dive deeper into YOLO Object Detection and Tracking, then check out our [courses](https:\u002F\u002Fwww.augmentedstartups.com\u002Fstore) and [projects](https:\u002F\u002Fstore.augmentedstartups.com)\n\n[\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Faugmentedstartups_AS-One_readme_38ebab5c272f.jpg\" width=\"50%\">](https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=K-VcpPwcM8k)\n\nWatch the step-by-step tutorial 🤝\n\n\n\n## 💻 Install\n\u003Cdetails>\u003Csummary> 🔥 Prerequisites\u003C\u002Fsummary>\n\n- Make sure to install `GPU` drivers in your system if you want to use `GPU` . Follow [driver installation](asone\u002Flinux\u002FInstructions\u002FDriver-Installations.md) for further instructions.\n- Make sure you have [MS Build tools](https:\u002F\u002Faka.ms\u002Fvs\u002F17\u002Frelease\u002Fvs_BuildTools.exe) installed in system if using windows.\n- [Download git for windows](https:\u002F\u002Fgit-scm.com\u002Fdownload\u002Fwin) if not installed.\n\u003C\u002Fdetails>\n\n```bash\npip install asone\n```\n\nFor windows machine, you will need to install from source to run `asone` library. 
see the `👉 Install from Source` section below for Windows instructions.
<details>
<summary> 👉 Install from Source</summary>

### 💾 Clone the Repository

Navigate to an empty folder of your choice.

`git clone https://github.com/augmentedstartups/AS-One.git`

Change directory to AS-One:

`cd AS-One`

<details open>
<summary> 👉 For Linux</summary>

```shell
python3 -m venv .env
source .env/bin/activate

pip install -r requirements.txt

# for CPU
pip install torch torchvision
# for GPU (CUDA 11.3)
pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/cu113
```

</details>

<details>
<summary> 👉 For Windows 10/11</summary>

```shell
python -m venv .env
.env\Scripts\activate
pip install numpy Cython
pip install lap
pip install -e git+https://github.com/samson-wang/cython_bbox.git#egg=cython-bbox

pip install asone onnxruntime-gpu==1.12.1
pip install typing_extensions==4.7.1
pip install super-gradients==3.1.3
# for CPU
pip install torch torchvision

# for GPU (CUDA 11.3)
pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/cu113
# or pin exact versions:
pip install torch==1.10.1+cu113 torchvision==0.11.2+cu113 torchaudio==0.10.1+cu113 -f https://download.pytorch.org/whl/cu113/torch_stable.html
```

</details>
<details>
<summary> 👉 For macOS</summary>

```shell
python3 -m venv .env
source .env/bin/activate

pip install -r requirements.txt

# for CPU
pip install torch torchvision
```

</details>
</details>
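Before moving on to video, a quick way to confirm the install works is a CPU-only instantiation. This is a minimal sketch using only constructor flags shown in this README; no CUDA is required, though model weights may be downloaded on first use:

```python
import asone
from asone import ASOne

# CPU-only instantiation: exercises package import and model setup without CUDA.
model = ASOne(detector=asone.YOLOV9_C, use_cuda=False)
print("AS-One detector initialised:", type(model).__name__)
```

If this runs without an import or build error, the compiled dependencies (`lap`, `cython-bbox`, ONNX runtime) are in place.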
## Quick Start 🏃‍♂️

Use the tracker on a sample video.

```python
import asone
from asone import ASOne

model = ASOne(tracker=asone.BYTETRACK, detector=asone.YOLOV9_C, use_cuda=True)
tracks = model.video_tracker('data/sample_videos/test.mp4', filter_classes=['car'])

for model_output in tracks:
    annotations = ASOne.draw(model_output, display=False)
```

### Run in `Google Colab` 💻

<a href="https://drive.google.com/file/d/1xy5P9WGI19-PzRH3ceOmoCgp63K6J_Ls/view?usp=sharing"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"></a>
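If you want to keep the annotated frames instead of displaying them, one option is to write them out with OpenCV. This is a sketch, not part of the AS-One API: it assumes `ASOne.draw(..., display=False)` returns the annotated frame as a BGR NumPy array, and the output filename and FPS are placeholders:

```python
import cv2
import asone
from asone import ASOne

model = ASOne(tracker=asone.BYTETRACK, detector=asone.YOLOV9_C, use_cuda=True)
tracks = model.video_tracker('data/sample_videos/test.mp4', filter_classes=['car'])

writer = None
for model_output in tracks:
    # Assumption: draw() returns the annotated BGR frame when display=False.
    frame = ASOne.draw(model_output, display=False)
    if writer is None:
        # Create the writer lazily, once the first frame reveals the resolution.
        h, w = frame.shape[:2]
        writer = cv2.VideoWriter('tracked.mp4', cv2.VideoWriter_fourcc(*'mp4v'), 30, (w, h))
    writer.write(frame)

if writer is not None:
    writer.release()
```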
## Sample Code Snippets 📃

<details>
<summary>6.1 👉 Object Detection</summary>

```python
import asone
from asone import ASOne

model = ASOne(detector=asone.YOLOV9_C, use_cuda=True) # set use_cuda to False for CPU
vid = model.read_video('data/sample_videos/test.mp4')

for img in vid:
    detection = model.detecter(img)
    annotations = ASOne.draw(detection, img=img, display=True)
```

Run `asone/demo_detector.py` to test the detector.

```shell
# run on gpu
python -m asone.demo_detector data/sample_videos/test.mp4

# run on cpu
python -m asone.demo_detector data/sample_videos/test.mp4 --cpu
```

<details>
<summary>6.1.1 👉 Use Custom Trained Weights for Detector</summary>

Use custom weights from a detector model trained on your own data by simply providing the path to the weights file.

```python
import asone
from asone import ASOne

model = ASOne(detector=asone.YOLOV9_C, weights='data/custom_weights/yolov7_custom.pt', use_cuda=True) # set use_cuda to False for CPU
vid = model.read_video('data/sample_videos/license_video.mp4')

for img in vid:
    detection = model.detecter(img)
    annotations = ASOne.draw(detection, img=img, display=True, class_names=['license_plate'])
```

</details>

<details>
<summary>6.1.2 👉 Changing Detector Models</summary>

Change the detector by simply changing the detector flag. The flags are listed in the [benchmark](asone/linux/Instructions/Benchmarking.md) tables.

- The library now supports YOLOv5, YOLOv7, and YOLOv8 on macOS.

```python
# Change detector
model = ASOne(detector=asone.YOLOX_S_PYTORCH, use_cuda=True)

# For macOS
# YOLOv5
model = ASOne(detector=asone.YOLOV5X_MLMODEL)
# YOLOv7
model = ASOne(detector=asone.YOLOV7_MLMODEL)
# YOLOv8
model = ASOne(detector=asone.YOLOV8L_MLMODEL)
```

</details>

</details>
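Because switching detectors is just a flag change, a quick way to compare models is to time the same clip under each flag. A sketch using only flags and calls already shown above; timings are wall-clock and depend entirely on your hardware:

```python
import time
import asone
from asone import ASOne

# Compare two documented detector flags on the same sample video.
for flag, name in [(asone.YOLOV9_C, 'YOLOv9-C'), (asone.YOLOX_S_PYTORCH, 'YOLOX-S')]:
    model = ASOne(detector=flag, use_cuda=True)  # set use_cuda=False on CPU
    vid = model.read_video('data/sample_videos/test.mp4')

    frames, start = 0, time.perf_counter()
    for img in vid:
        model.detecter(img)
        frames += 1
    elapsed = time.perf_counter() - start
    print(f"{name}: {frames / elapsed:.1f} FPS over {frames} frames")
```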
<details>
<summary>6.2 👉 Object Tracking</summary>

Use the tracker on a sample video.

```python
import asone
from asone import ASOne

# Instantiate ASOne object
model = ASOne(tracker=asone.BYTETRACK, detector=asone.YOLOV9_C, use_cuda=True) # set use_cuda=False to use cpu
tracks = model.video_tracker('data/sample_videos/test.mp4', filter_classes=['car'])

# Loop over tracks to retrieve the output of each frame
for model_output in tracks:
    annotations = ASOne.draw(model_output, display=True)
    # Do anything with bboxes here
```

[Note] You can use custom weights for the detector model by simply providing the path to the weights file in the `ASOne` class.

<details>
<summary>6.2.1 👉 Changing Detector and Tracking Models</summary>

Change the tracker by simply changing the tracker flag. The flags are listed in the [benchmark](asone/linux/Instructions/Benchmarking.md) tables.

```python
model = ASOne(tracker=asone.BYTETRACK, detector=asone.YOLOV9_C, use_cuda=True)
# Change tracker
model = ASOne(tracker=asone.DEEPSORT, detector=asone.YOLOV9_C, use_cuda=True)
```

```python
# Change detector
model = ASOne(tracker=asone.DEEPSORT, detector=asone.YOLOX_S_PYTORCH, use_cuda=True)
```

</details>

Run `asone/demo_tracker.py` to test the tracker.

```shell
# run on gpu
python -m asone.demo_tracker data/sample_videos/test.mp4

# run on cpu
python -m asone.demo_tracker data/sample_videos/test.mp4 --cpu
```

</details>
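The note above says custom weights also work with tracking. Combining the documented `weights` parameter from 6.1.1 with a tracker flag would look like the following sketch (the weights path and video mirror the earlier custom-weights example):

```python
import asone
from asone import ASOne

# Custom detector weights + ByteTrack, per the [Note] above.
model = ASOne(tracker=asone.BYTETRACK,
              detector=asone.YOLOV9_C,
              weights='data/custom_weights/yolov7_custom.pt',
              use_cuda=True)
tracks = model.video_tracker('data/sample_videos/license_video.mp4')

for model_output in tracks:
    annotations = ASOne.draw(model_output, display=True)
```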
<details>
<summary>6.3 👉 Segmentation</summary>

```python
import asone
from asone import ASOne

model = ASOne(detector=asone.YOLOV9_C, segmentor=asone.SAM, use_cuda=True) # set use_cuda=False to use cpu
tracks = model.video_detecter('data/sample_videos/test.mp4', filter_classes=['car'])

for model_output in tracks:
    annotations = ASOne.draw_masks(model_output, display=True) # Draw masks
```

</details>

<details>
<summary>6.4 👉 Text Detection</summary>

Sample code to detect text in an image:

```python
# Detect and recognize text
import asone
from asone import ASOne, utils
import cv2

model = ASOne(detector=asone.CRAFT, recognizer=asone.EASYOCR, use_cuda=True) # set use_cuda to False for cpu
img = cv2.imread('data/sample_imgs/sample_text.jpeg')
results = model.detect_text(img)
annotations = utils.draw_text(img, results, display=True)
```

Use the tracker on text:

```python
import asone
from asone import ASOne

# Instantiate ASOne object
model = ASOne(tracker=asone.DEEPSORT, detector=asone.CRAFT, recognizer=asone.EASYOCR, use_cuda=True) # set use_cuda=False to use cpu
tracks = model.video_tracker('data/sample_videos/GTA_5-Unique_License_Plate.mp4')

# Loop over tracks to retrieve the output of each frame
for model_output in tracks:
    annotations = ASOne.draw(model_output, display=True)

    # Do anything with bboxes here
```

Run `asone/demo_ocr.py` to test OCR.

```shell
# run on gpu
python -m asone.demo_ocr data/sample_videos/GTA_5-Unique_License_Plate.mp4

# run on cpu
python -m asone.demo_ocr data/sample_videos/GTA_5-Unique_License_Plate.mp4 --cpu
```

</details>
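To run the text detector over a folder of images rather than one file, the single-image snippet above can be wrapped in a loop. A sketch: the input folder and output naming are illustrative, and it assumes `utils.draw_text` returns the annotated image when `display=False`:

```python
from pathlib import Path
import cv2
import asone
from asone import ASOne, utils

model = ASOne(detector=asone.CRAFT, recognizer=asone.EASYOCR, use_cuda=True)

for path in sorted(Path('data/sample_imgs').glob('*.jpeg')):
    img = cv2.imread(str(path))
    results = model.detect_text(img)
    # Assumption: draw_text returns the annotated image when display=False.
    annotated = utils.draw_text(img, results, display=False)
    cv2.imwrite(f'ocr_{path.stem}.jpg', annotated)
```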
<details>
<summary>6.5 👉 Pose Estimation</summary>

Sample code to estimate pose in an image:

```python
# Pose Estimation
import asone
from asone import PoseEstimator, utils
import cv2

model = PoseEstimator(estimator_flag=asone.YOLOV8M_POSE, use_cuda=True) # set use_cuda=False to use cpu
img = cv2.imread('data/sample_imgs/test2.jpg')
kpts = model.estimate_image(img)
annotations = utils.draw_kpts(kpts, image=img, display=True)
```

- You can now use YOLOv8 and YOLOv7-w6 for pose estimation. The flags are listed in the [benchmark](asone/linux/Instructions/Benchmarking.md) tables.

```python
# Pose estimation on video
import asone
from asone import PoseEstimator, utils

model = PoseEstimator(estimator_flag=asone.YOLOV7_W6_POSE, use_cuda=True) # set use_cuda=False to use cpu
estimator = model.video_estimator('data/sample_videos/football1.mp4')
for model_output in estimator:
    annotations = utils.draw_kpts(model_output)
    # Do anything with kpts here
```

Run `asone/demo_pose_estimator.py` to test pose estimation.

```shell
# run on gpu
python -m asone.demo_pose_estimator data/sample_videos/football1.mp4

# run on cpu
python -m asone.demo_pose_estimator data/sample_videos/football1.mp4 --cpu
```

</details>
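Since both documented pose flags share the same `PoseEstimator` interface, they can sit behind a small name-to-flag mapping, which keeps scripts configurable. A sketch using only the two flags shown above; the mapping and helper name are illustrative:

```python
import asone
from asone import PoseEstimator

# Map human-readable names to the two estimator flags documented above.
POSE_FLAGS = {
    'yolov8m': asone.YOLOV8M_POSE,
    'yolov7-w6': asone.YOLOV7_W6_POSE,
}

def make_estimator(name: str, use_cuda: bool = True) -> PoseEstimator:
    """Illustrative helper: pick a pose model by name."""
    return PoseEstimator(estimator_flag=POSE_FLAGS[name], use_cuda=use_cuda)

model = make_estimator('yolov7-w6')
for model_output in model.video_estimator('data/sample_videos/football1.mp4'):
    pass  # per-frame keypoints; draw with utils.draw_kpts(model_output)
```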
To set up ASOne using Docker, follow the instructions in [Docker Setup](asone/linux/Instructions/Docker-Setup.md) 🐳

### ToDo 📝

- [x] First Release
- [x] Import trained models
- [x] Simplify code even further
- [x] Updated for YOLOv8
- [x] OCR and Counting
- [x] OCSORT, StrongSORT, MoTPy
- [x] M1/M2 Apple Silicon Compatibility
- [x] Pose Estimation YOLOv7/v8
- [x] YOLO-NAS
- [x] Updated for YOLOv8.1
- [x] YOLOv9
- [x] SAM Integration

| Offered By 💼 : | Maintained By 👨‍💻 : |
| --- | --- |
| [![AugmentedStartups](https://oss.gittoolsai.com/images/augmentedstartups_AS-One_readme_7eae217fd3bf.png)](https://augmentedstartups.com) | [![AxcelerateAI](https://oss.gittoolsai.com/images/augmentedstartups_AS-One_readme_ea0e3ba83b4e.png)](https://axcelerate.ai/) |

# AS-One v2 Quickstart Guide

AS-One is a modular Python library that bundles YOLO-family object detection, segmentation, tracking, and pose estimation. It lets you combine multiple trackers (e.g. ByteTrack, DeepSORT) seamlessly with different YOLO models (including YOLOv9 and SAM) behind a minimal code interface.

## Environment

Before starting, make sure your system meets the following requirements:

*   **Operating system**: Linux, Windows 10/11, macOS (M1/M2 Apple Silicon supported).
*   **Python version**: 3.8+ recommended.
*   **GPU support (optional)**:
    *   Install the matching NVIDIA driver beforehand if you want GPU acceleration.
    *   Windows users need [MS Build Tools](https://aka.ms/vs/17/release/vs_BuildTools.exe).
    *   Git is required on all platforms.

## Installation

### Option 1: Install from PyPI (recommended for Linux/macOS)

Most Linux and macOS users can install directly via pip:

```bash
pip install asone
```

### Option 2: Install from source (required reading for Windows users)

Windows users should install from source so that dependencies such as `lap` and `cython_bbox` compile correctly.

1.  **Clone the repository**
    ```bash
    git clone https://github.com/augmentedstartups/AS-One.git
    cd AS-One
    ```

2.  **Create and activate a virtual environment**
    ```shell
    # Windows
    python -m venv .env
    .env\Scripts\activate
    ```

3.  **Install dependencies**
    Run the following commands in order to install the pinned dependencies and the main library:
    ```shell
    pip install numpy Cython
    pip install lap
    pip install -e git+https://github.com/samson-wang/cython_bbox.git#egg=cython-bbox

    pip install asone onnxruntime-gpu==1.12.1
    pip install typing_extensions==4.7.1
    pip install super-gradients==3.1.3
    ```

4.  **Install PyTorch**
    Choose the CPU or GPU build for your hardware:
    ```shell
    # CPU only
    pip install torch torchvision

    # GPU (CUDA 11.3 example)
    pip install torch torchvision --extra-index-url https://download.pytorch.org/whl/cu113
    ```

> **Tip**: If downloads are slow from mainland China, append `-i https://pypi.tuna.tsinghua.edu.cn/simple` to `pip install` commands to use the Tsinghua mirror. For PyTorch, use the official index or wheels from a domestic mirror.

## Basic Usage

AS-One's core strength is its unified interface. The simplest **detection and tracking** examples follow.

### 1. Object Tracking on Video

Combine the YOLOv9 detector with the ByteTrack tracker to process a video:

```python
import asone
from asone import ASOne

# Initialise: pick tracker + detector, enable CUDA (set False without a GPU)
model = ASOne(tracker=asone.BYTETRACK, detector=asone.YOLOV9_C, use_cuda=True)

# Track through a video, optionally filtering classes (e.g. only 'car')
tracks = model.video_tracker('data/sample_videos/test.mp4', filter_classes=['car'])

# Iterate over per-frame results and draw them
for model_output in tracks:
    annotations = ASOne.draw(model_output, display=True)
    # post-process bboxes here
```

### 2. Detection Only

For single-frame detection without tracking:

```python
import asone
from asone import ASOne

# Initialise the detection model
model = ASOne(detector=asone.YOLOV9_C, use_cuda=True)

# Read the video stream
vid = model.read_video('data/sample_videos/test.mp4')

for img in vid:
    detection = model.detecter(img)
    # display=True pops up a window; False returns the image array
    annotations = ASOne.draw(detection, img=img, display=True)
```

### 3. Using Custom Weights

If you trained your own model, simply pass the weights path at initialisation:

```python
model = ASOne(
    detector=asone.YOLOV9_C,
    weights='path/to/your/custom_weights.pt',
    use_cuda=True
)
```

### Other Features at a Glance

*   **Instance segmentation**: set `segmentor=asone.SAM` and draw masks with `ASOne.draw_masks()`.
*   **Text detection (OCR)**: set `detector=asone.CRAFT` and `recognizer=asone.EASYOCR`, then call `model.detect_text()`.
*   **Pose estimation**: use the `PoseEstimator` class with `estimator_flag=asone.YOLOV8M_POSE`.
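The demo scripts shipped with the library expose a `--cpu` switch; the same pattern is easy to replicate in your own entry point. A sketch — the script name and defaults are illustrative:

```python
# track.py — minimal CLI wrapper mirroring the demo scripts' --cpu switch (illustrative).
import argparse
import asone
from asone import ASOne

parser = argparse.ArgumentParser(description='Track objects in a video with AS-One')
parser.add_argument('video', help='path to the input video')
parser.add_argument('--cpu', action='store_true', help='run on CPU instead of CUDA')
args = parser.parse_args()

model = ASOne(tracker=asone.BYTETRACK, detector=asone.YOLOV9_C, use_cuda=not args.cpu)
for model_output in model.video_tracker(args.video):
    ASOne.draw(model_output, display=True)
```

Usage: `python track.py data/sample_videos/test.mp4 --cpu`.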
# Use Case: Automated Parcel Sorting

A smart-logistics team is building an automated sorting system that must identify parcels on a conveyor belt in real time and track their trajectories, so that a robotic arm can grasp them precisely.

### Without AS-One

- **Heavy code duplication**: developers must clone separate official repos for YOLOv5/v8 and other versions, writing independent loading and inference scripts per model and cluttering the project structure.
- **Difficult algorithm integration**: wiring a detection model to a tracker such as ByteTrack or DeepSORT means manually aligning data formats and debugging interfaces, often hundreds of lines of glue code.
- **Tedious environment setup**: different models depend on specific PyTorch versions or ONNX runtimes; switching environments invites dependency conflicts, and configuring GPU acceleration eats further time.
- **Slow iteration**: trying a new model (say, the latest YOLOv9) or a segmentation task (SAM) means days of pipeline rebuilding, badly stalling prototype validation.

### With AS-One

- **Unified interface**: fewer than 10 lines of code switch flexibly between models from YOLOv5 through v9, with no concern for underlying implementation differences and a clean project structure.
- **Plug-and-play modularity**: built-in combinations of trackers and detectors enable "detect + track" or "segment + pose" pipelines in one step, slashing integration effort.
- **Efficient across platforms**: ONNX, PyTorch, and CoreML backends are adapted automatically, hiding driver and environment details so the team can focus on business logic instead of environment debugging.
- **Fast adoption of new techniques**: the newly released YOLOv9 and SAM models are supported out of the box, so the team can benchmark and compare new algorithms within hours, markedly accelerating product iteration.

Through its highly modular design, AS-One reduces a complex computer-vision engineering chain to simple API calls, freeing developers from tedious low-level adaptation to focus on creating core business value.
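As a sketch of the sorting scenario above: the class name `'box'`, the input path, and the `handle_parcel` callback are all hypothetical placeholders, and the structure of `model_output` is not specified by this page, so the per-frame hook simply receives the raw output:

```python
import asone
from asone import ASOne

def handle_parcel(model_output):
    """Hypothetical hook: forward per-frame track output to the sorting controller."""
    pass

# Detection + tracking pipeline for the conveyor camera (paths/classes illustrative).
model = ASOne(tracker=asone.BYTETRACK, detector=asone.YOLOV9_C, use_cuda=True)
tracks = model.video_tracker('conveyor_feed.mp4', filter_classes=['box'])

for model_output in tracks:
    ASOne.draw(model_output, display=False)
    handle_parcel(model_output)
```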
You","AugmentedStartups.com",null,"http:\u002F\u002Fwww.augmentedstartups.com\u002F","https:\u002F\u002Fgithub.com\u002Faugmentedstartups",[82,86,90,94,97,101,104,107],{"name":83,"color":84,"percentage":85},"Python","#3572A5",99.4,{"name":87,"color":88,"percentage":89},"Cython","#fedf5b",0.3,{"name":91,"color":92,"percentage":93},"Shell","#89e051",0.1,{"name":95,"color":96,"percentage":93},"Cuda","#3A4E3A",{"name":98,"color":99,"percentage":100},"C++","#f34b7d",0,{"name":102,"color":103,"percentage":100},"Batchfile","#C1F12E",{"name":105,"color":106,"percentage":100},"Dockerfile","#384d54",{"name":108,"color":109,"percentage":100},"Makefile","#427819",612,106,"2026-03-09T07:19:50","GPL-3.0",4,"Linux, macOS, Windows","非必需（支持 CPU 模式）。若使用 GPU，需安装 NVIDIA 驱动，文档示例中指定了 CUDA 11.3 (cu113) 环境。","未说明",{"notes":119,"python":120,"dependencies":121},"Windows 用户必须从源代码安装（需先安装 MS Build Tools 和 Git）；Linux\u002FmacOS 可直接通过 pip 安装。支持 YOLOv5\u002Fv7\u002Fv8\u002Fv9、SAM、ByteTrack、DeepSORT 等模型。MacOS 支持 Apple Silicon (M1\u002FM2)。若使用 GPU，Linux\u002FWindows 需手动指定 cu113 版本的 PyTorch。","未说明 (根据 PyTorch 和依赖库推断建议 Python 3.8+)",[122,123,124,125,126,127,87,128,129],"torch","torchvision","onnxruntime-gpu==1.12.1","typing_extensions==4.7.1","super-gradients==3.1.3","numpy","lap","cython-bbox",[15,14],[132,133,134,135,136,137,138,139,140,141,142,143,144,145],"computer-vision","opencv","yolor","yolov5","yolov7","yolox","deep-learning","object-detection","pytorch","tracking","ultralytics","yolov8","sam","yolov9","2026-03-27T02:49:30.150509","2026-04-14T12:35:40.787550",[],[150],{"id":151,"version":152,"summary_zh":153,"released_at":154},253210,"AS-Onev1","-  首次发布\n-  导入训练好的模型\n-  进一步简化代码\n-  针对 YOLOv8 进行更新\n-  OCR 与计数\n-  OCSORT、StrongSORT、MoTPy\n-  兼容 M1\u002FM2 苹果芯片\n-  姿态估计：YOLOv7\u002Fv8\n-  YOLO-NAS","2023-06-20T08:32:06"]