[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-videoflow--videoflow":3,"tool-videoflow--videoflow":62},[4,18,26,35,44,53],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":17},4358,"openclaw","openclaw\u002Fopenclaw","OpenClaw 是一款专为个人打造的本地化 AI 助手，旨在让你在自己的设备上拥有完全可控的智能伙伴。它打破了传统 AI 助手局限于特定网页或应用的束缚，能够直接接入你日常使用的各类通讯渠道，包括微信、WhatsApp、Telegram、Discord、iMessage 等数十种平台。无论你在哪个聊天软件中发送消息，OpenClaw 都能即时响应，甚至支持在 macOS、iOS 和 Android 设备上进行语音交互，并提供实时的画布渲染功能供你操控。\n\n这款工具主要解决了用户对数据隐私、响应速度以及“始终在线”体验的需求。通过将 AI 部署在本地，用户无需依赖云端服务即可享受快速、私密的智能辅助，真正实现了“你的数据，你做主”。其独特的技术亮点在于强大的网关架构，将控制平面与核心助手分离，确保跨平台通信的流畅性与扩展性。\n\nOpenClaw 非常适合希望构建个性化工作流的技术爱好者、开发者，以及注重隐私保护且不愿被单一生态绑定的普通用户。只要具备基础的终端操作能力（支持 macOS、Linux 及 Windows WSL2），即可通过简单的命令行引导完成部署。如果你渴望拥有一个懂你",349277,3,"2026-04-06T06:32:30",[13,14,15,16],"Agent","开发框架","图像","数据工具","ready",{"id":19,"name":20,"github_repo":21,"description_zh":22,"stars":23,"difficulty_score":10,"last_commit_at":24,"category_tags":25,"status":17},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,"2026-04-05T11:01:52",[14,15,13],{"id":27,"name":28,"github_repo":29,"description_zh":30,"stars":31,"difficulty_score":32,"last_commit_at":33,"category_tags":34,"status":17},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",108322,2,"2026-04-10T11:39:34",[14,15,13],{"id":36,"name":37,"github_repo":38,"description_zh":39,"stars":40,"difficulty_score":32,"last_commit_at":41,"category_tags":42,"status":17},6121,"gemini-cli","google-gemini\u002Fgemini-cli","gemini-cli 是一款由谷歌推出的开源 AI 命令行工具，它将强大的 Gemini 大模型能力直接集成到用户的终端环境中。对于习惯在命令行工作的开发者而言，它提供了一条从输入提示词到获取模型响应的最短路径，无需切换窗口即可享受智能辅助。\n\n这款工具主要解决了开发过程中频繁上下文切换的痛点，让用户能在熟悉的终端界面内直接完成代码理解、生成、调试以及自动化运维任务。无论是查询大型代码库、根据草图生成应用，还是执行复杂的 Git 操作，gemini-cli 都能通过自然语言指令高效处理。\n\n它特别适合广大软件工程师、DevOps 人员及技术研究人员使用。其核心亮点包括支持高达 100 万 token 的超长上下文窗口，具备出色的逻辑推理能力；内置 Google 搜索、文件操作及 Shell 命令执行等实用工具；更独特的是，它支持 MCP（模型上下文协议），允许用户灵活扩展自定义集成，连接如图像生成等外部能力。此外，个人谷歌账号即可享受免费的额度支持，且项目基于 Apache 2.0 协议完全开源，是提升终端工作效率的理想助手。",100752,"2026-04-10T01:20:03",[43,13,15,14],"插件",{"id":45,"name":46,"github_repo":47,"description_zh":48,"stars":49,"difficulty_score":10,"last_commit_at":50,"category_tags":51,"status":17},4487,"LLMs-from-scratch","rasbt\u002FLLMs-from-scratch","LLMs-from-scratch 是一个基于 PyTorch 的开源教育项目，旨在引导用户从零开始一步步构建一个类似 ChatGPT 的大型语言模型（LLM）。它不仅是同名技术著作的官方代码库，更提供了一套完整的实践方案，涵盖模型开发、预训练及微调的全过程。\n\n该项目主要解决了大模型领域“黑盒化”的学习痛点。许多开发者虽能调用现成模型，却难以深入理解其内部架构与训练机制。通过亲手编写每一行核心代码，用户能够透彻掌握 Transformer 架构、注意力机制等关键原理，从而真正理解大模型是如何“思考”的。此外，项目还包含了加载大型预训练权重进行微调的代码，帮助用户将理论知识延伸至实际应用。\n\nLLMs-from-scratch 
It especially suits AI developers, researchers, and computer-science students who want to go below the API surface and into the details of model construction. Its distinctive strength is the step-by-step teaching design: a complex systems project broken into clear stages, with detailed diagrams and examples, so that a small but fully functional LLM is within reach. Whether you want to consolidate theory or prepare to build larger models…

## hacksider/Deep-Live-Cam — ★ 88,924
*Tags: Dev Framework, Image, Agent, Video · last commit 2026-04-06*

Deep-Live-Cam is an open-source tool focused on real-time face swapping and video generation: from a single static photo, one click swaps the face on a live camera feed or produces a deepfake video. It removes the usual obstacles of face-swap pipelines (tedious workflows, steep hardware requirements, no live preview), putting high-quality digital content creation within reach.

The tool suits developers and researchers exploring the limits of the algorithms, but its minimal three-step flow (pick a face, pick a camera, start) also makes it broadly usable by everyday users, content creators, designers, and streamers, whether for character animation, outfit-model replacement, fun short videos, or live-stream interaction.

Its core technical highlights are strong real-time processing, a Mouth Mask option that preserves the user's original mouth movements for natural, precise expressions, and face mapping that applies different faces to multiple subjects in one frame. The project also ships a strict content-safety filter that automatically blocks nudity, violence, and other inappropriate material, and asks users to obtain consent and label output clearly, balancing technical progress with ethical responsibility.

# Videoflow

> Python framework that facilitates the quick development of complex video analysis applications, and other series-processing applications, in a multiprocessing environment.

Videoflow is a Python framework built for video stream processing, designed to help developers assemble complex video analysis applications quickly in a multiprocessing environment. It tackles the usual pain points of video pipelines (boilerplate code, fiddly parallelization, awkward deep-learning integration) so that an efficient computer-vision pipeline takes only a few lines of code.

The tool fits developers, algorithm engineers, and researchers with some programming background, especially anyone prototyping or deploying systems with object detection, object tracking, or human pose estimation; for non-technical users, the code-first workflow makes the barrier to entry comparatively high.

Videoflow's core strength is its modular architecture: it ships off-the-shelf reference components (such as TensorFlow-based detectors) and makes custom modules easy to add. Production, processing, and consumption are cleanly decoupled, and multiprocessing parallelism is native, which significantly raises throughput. The core library keeps its dependencies minimal; heavier extensions live in the separate contrib project and load on demand, keeping the core lean and stable while the ecosystem stays flexible. With solid documentation and Docker support, Videoflow makes building high-performance video analytics simple and elegant.

![Videoflow](https://oss.gittoolsai.com/images/videoflow_videoflow_readme_542cb660e063.png)

[![Build Status](https://travis-ci.org/videoflow/videoflow.svg?branch=master)](https://travis-ci.org/videoflow/videoflow)
[![license](https://img.shields.io/github/license/mashape/apistatus.svg?maxAge=2592000)](https://github.com/videoflow/videoflow/blob/master/LICENSE)

**Videoflow** is a Python framework for video stream processing. The library is designed to make defining computer vision stream-processing pipelines easy and quick. It lets developers build applications and systems with self-contained deep learning and computer vision capabilities in just a few lines of code. It contains off-the-shelf reference components for object detection, object tracking, human pose estimation, etc., and it is easy to extend with your own.
The complete documentation for the project is located at [**docs.videoflow.dev**](https://docs.videoflow.dev).

## Installing the framework
### Requirements
Before installing, be sure that you have `cv2` (OpenCV) already installed. Python 2 is **NOT SUPPORTED**; Python 3.6+ is required. There are also some [known issues](https://github.com/videoflow/videoflow/issues/56) running it on Windows.

### Installation
You can install directly with **pip**: `pip3 install videoflow`

Alternatively, you can install from source:

1. Clone this repository.
2. Inside the repository folder, execute `pip3 install . --user`.

**Usage with Docker**
```bash
# build the image from the repository root
docker build -t repo/videoflow:latest .
# runs examples/object_detector.py by default
docker run -u $(id -u):$(id -g) -v $(pwd):/usr/src/app repo/videoflow
# or mount your code directory to /usr/src/app and run your own script
docker run -u $(id -u):$(id -g) -v $(pwd):/usr/src/app repo/videoflow python /usr/src/app/yourown.py
```
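Once the install finishes, a quick sanity check confirms that both OpenCV and Videoflow import cleanly. This is a minimal sketch; Videoflow may not export `__version__` on every release, hence the `getattr` fallback.

```python
# verify_install.py: post-install sanity check
import cv2
import videoflow

print("cv2 version:", cv2.__version__)
# fall back gracefully if the package does not define __version__
print("videoflow version:", getattr(videoflow, "__version__", "unknown"))
print("loaded from:", videoflow.__file__)
```

If both imports succeed, the framework is ready to use.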
## Contributing
A tentative [roadmap](ROADMAP.md) of where we are headed.

[Contribution rules](CONTRIBUTING.md).

If you have new processors, producers, or consumers that you want to create, check the [videoflow-contrib](https://github.com/videoflow/videoflow-contrib) project. We want to keep videoflow succinct, clean, and simple, with as few third-party dependencies as necessary. [videoflow-contrib](https://github.com/videoflow/videoflow-contrib) is better suited for new components that require additional library dependencies.

## Sample Videoflow application
Below is a sample videoflow application that detects automobiles at an intersection. For more examples, see the [examples](examples/) folder. It uses a detection model published by [tensorflow/models](https://github.com/tensorflow/models/tree/master/research/object_detection).

[![Sample intersection video](https://oss.gittoolsai.com/images/videoflow_videoflow_readme_1ddbb3198cc1.jpg)](https://www.youtube.com/watch?v=TYGMllb7fHM)

```python
import videoflow
import videoflow.core.flow as flow
from videoflow.core.constants import BATCH
from videoflow.consumers import VideofileWriter
from videoflow.producers import VideofileReader
from videoflow_contrib.detector_tf import TensorflowObjectDetector
from videoflow.processors.vision.annotators import BoundingBoxAnnotator
from videoflow.utils.downloader import get_file

URL_VIDEO = "https://github.com/videoflow/videoflow/releases/download/examples/intersection.mp4"

class FrameIndexSplitter(videoflow.core.node.ProcessorNode):
    # VideofileReader emits (index, frame) tuples since v0.2.0;
    # this node drops the index and passes the raw frame downstream.
    def __init__(self):
        super(FrameIndexSplitter, self).__init__()

    def process(self, data):
        index, frame = data
        return frame

input_file = get_file("intersection.mp4", URL_VIDEO)
output_file = "output.avi"
reader = VideofileReader(input_file)
frame = FrameIndexSplitter()(reader)
detector = TensorflowObjectDetector()(frame)
annotator = BoundingBoxAnnotator()(frame, detector)
writer = VideofileWriter(output_file, fps=30)(annotator)
fl = flow.Flow([reader], [writer], flow_type=BATCH)
fl.run()
fl.join()
```

The output of the application is an annotated video.

## The structure of a flow application

A flow application usually consists of three parts:

1. In the first part of the application you define a directed acyclic graph of computation nodes. There are three kinds of nodes: producers, processors, and consumers. Producer nodes create data (commonly pulling it from a source external to the flow). Processors receive data as input and produce data as output. Consumers read data and produce no output; you usually use a consumer when you want to write results to a log file or push them to an external destination (a REST API, an S3 bucket, etc.).

2. To create a flow object, you pass it your list of producers and your list of consumers. Once a flow is defined you can start it. Starting the flow means that producers begin putting data into the flow while processors and consumers begin receiving it; it also allocates resources for all producers, processors, and consumers. For simplicity, for now we can say that each producer, processor, and consumer runs in its own process space.

3. Once the flow starts, you can also stop it. Stopping happens organically: producers stop producing data, and the remaining nodes keep running until the pipes run dry. The resources used by the flow are deallocated progressively, as each node stops producing/processing/consuming data. A minimal sketch with one node of each kind follows this list.
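To make the three node roles concrete, here is a minimal end-to-end sketch with one producer, one processor, and one consumer. The `ProcessorNode` base class and the call-to-wire pattern come from the sample application above; the `ProducerNode` and `ConsumerNode` base classes with `next()` and `consume()` hooks are assumptions about the node API, so check docs.videoflow.dev for the exact names.

```python
import videoflow.core.flow as flow
from videoflow.core.constants import BATCH
# ProcessorNode is used by the sample above; ProducerNode and
# ConsumerNode are assumed to live in the same module.
from videoflow.core.node import ProducerNode, ProcessorNode, ConsumerNode

class IntProducer(ProducerNode):
    """Producer: creates data. Assumed hook: next() is called
    repeatedly and raises StopIteration when the source is exhausted."""
    def __init__(self, limit=10):
        self._values = iter(range(limit))
        super(IntProducer, self).__init__()

    def next(self):
        return next(self._values)

class Square(ProcessorNode):
    """Processor: receives data and returns transformed data."""
    def process(self, data):
        return data * data

class Printer(ConsumerNode):
    """Consumer: reads data, produces no output. Assumed hook: consume()."""
    def consume(self, item):
        print(item)

producer = IntProducer(10)
squared = Square()(producer)   # wire nodes by calling them
printer = Printer()(squared)

fl = flow.Flow([producer], [printer], flow_type=BATCH)
fl.run()   # one process per node starts up
fl.join()  # block until the pipes run dry
```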
## Citing Videoflow
If you use Videoflow in your research, please use the following BibTeX entry.

```
@misc{deArmas2019videoflow,
  author =       {Jadiel de Armas},
  title =        {Videoflow},
  howpublished = {\url{https://github.com/videoflow/videoflow}},
  year =         {2019}
}
```
# Videoflow Quickstart Guide

Videoflow is a Python framework for video stream processing that helps developers build pipelines with deep learning and computer vision capabilities in very little code. It ships common components for object detection, object tracking, and human pose estimation, and custom modules are easy to add.

## Environment setup

Before you begin, make sure your system meets the following requirements:

- **Operating system**: Linux or macOS recommended (Windows has known compatibility issues)
- **Python**: version 3.6 or later (Python 2 is **not supported**)
- **Prerequisite**: `opencv-python` (i.e. `cv2`) must be installed first

It is worth installing the base dependency up front:

```bash
pip3 install opencv-python
```

## Installation

You can install the stable release directly with pip, or install from source for the latest features.

### Option 1: install with pip (recommended)

```bash
pip3 install videoflow
```

*Note: if downloads are slow from mainland China, try the Tsinghua mirror:*
```bash
pip3 install videoflow -i https://pypi.tuna.tsinghua.edu.cn/simple
```

### Option 2: install from source

1. Clone the repository:
   ```bash
   git clone https://github.com/videoflow/videoflow.git
   cd videoflow
   ```

2. Run the install:
   ```bash
   pip3 install . --user
   ```
### Docker (optional)

If you prefer a containerized environment, build and run with:

```bash
# build the image
docker build -t repo/videoflow:latest .

# run the default example
docker run -u $(id -u):$(id -g) -v $(pwd):/usr/src/app repo/videoflow

# run your own script
docker run -u $(id -u):$(id -g) -v $(pwd):/usr/src/app repo/videoflow python /usr/src/app/yourown.py
```

## Basic usage

Below is the simplest possible Videoflow application: read frames from a video file, detect automobiles with a TensorFlow model, draw bounding boxes, and save the result as a new video.

The example downloads a test video automatically and relies on the detector component from the `videoflow-contrib` package.

```python
import videoflow
import videoflow.core.flow as flow
from videoflow.core.constants import BATCH
from videoflow.consumers import VideofileWriter
from videoflow.producers import VideofileReader
from videoflow_contrib.detector_tf import TensorflowObjectDetector
from videoflow.processors.vision.annotators import BoundingBoxAnnotator
from videoflow.utils.downloader import get_file

URL_VIDEO = "https://github.com/videoflow/videoflow/releases/download/examples/intersection.mp4"

class FrameIndexSplitter(videoflow.core.node.ProcessorNode):
    def __init__(self):
        super(FrameIndexSplitter, self).__init__()

    def process(self, data):
        index, frame = data
        return frame

# prepare the input and output files
input_file = get_file("intersection.mp4", URL_VIDEO)
output_file = "output.avi"

# define the nodes
reader = VideofileReader(input_file)
frame = FrameIndexSplitter()(reader)
detector = TensorflowObjectDetector()(frame)
annotator = BoundingBoxAnnotator()(frame, detector)
writer = VideofileWriter(output_file, fps=30)(annotator)

# build and run the flow
fl = flow.Flow([reader], [writer], flow_type=BATCH)
fl.run()
fl.join()
```

**How the code works:**
1. **Producer (`VideofileReader`)**: reads the video data.
2. **Processors (`FrameIndexSplitter`, `TensorflowObjectDetector`, `BoundingBoxAnnotator`)**: process the data (strip the frame index, detect objects, draw boxes).
3. **Consumer (`VideofileWriter`)**: writes the result to a new video file.
4. **Flow**: connects all the nodes and starts the multiprocess execution.

When the run completes, an `output.avi` with detection boxes appears in the current directory. For more examples, see the project's `examples/` folder.
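A natural variation on this quickstart is reading from a webcam instead of a file. The v0.2.2 release notes below mention `VideoUrlReader` and `VideoDeviceReader` producers; this sketch assumes `VideoDeviceReader` takes a device id and emits the same `(index, frame)` tuples as `VideofileReader`, and that a `REALTIME` flow type complements `BATCH`, so verify both against the producer docs before relying on it.

```python
# Hypothetical webcam variant; VideoDeviceReader's constructor signature
# and the REALTIME constant are assumptions, not confirmed API.
import videoflow
import videoflow.core.flow as flow
from videoflow.core.constants import REALTIME
from videoflow.consumers import VideofileWriter
from videoflow.producers import VideoDeviceReader
from videoflow_contrib.detector_tf import TensorflowObjectDetector
from videoflow.processors.vision.annotators import BoundingBoxAnnotator

class FrameIndexSplitter(videoflow.core.node.ProcessorNode):
    # drop the (index, frame) index, exactly as in the file-based example
    def process(self, data):
        index, frame = data
        return frame

reader = VideoDeviceReader(0)  # device 0: the default webcam (assumed)
frame = FrameIndexSplitter()(reader)
detector = TensorflowObjectDetector()(frame)
annotator = BoundingBoxAnnotator()(frame, detector)
writer = VideofileWriter("webcam_out.avi", fps=30)(annotator)

fl = flow.Flow([reader], [writer], flow_type=REALTIME)
fl.run()
fl.join()
```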
# Use case: intersection traffic analysis

A smart-traffic team needs a real-time system that detects vehicles in intersection surveillance footage and annotates their trajectories, in order to analyze throughput during rush hour.

### Without videoflow
- **Multiprocessing is hard to manage**: developers hand-write fiddly Python multiprocessing code for parallel frame reading, inference, and writing, and deadlocks and race conditions are easy to hit.
- **Pipeline assembly is slow**: chaining video reading, detection-model loading, box drawing, and result saving requires piles of glue code, stretching development out for weeks.
- **Components are hard to reuse**: every model swap or output-format change forces a rework of the whole data path, making it hard to iterate quickly across algorithms.
- **Performance tuning is painful**: with no framework balancing the stages, a single blocked stage drags down the overall frame rate and real-time requirements slip.

### With videoflow
- **Parallelism is automatic**: videoflow's built-in multiprocessing environment manages the data flow; developers only define node logic and get high-throughput parallel processing.
- **Declarative, fast assembly**: a few lines of code chain off-the-shelf components such as `VideofileReader`, `TensorflowObjectDetector`, and `BoundingBoxAnnotator` into a complete pipeline.
- **Modular and extensible**: rich built-in computer-vision components plus custom nodes mean swapping a model or adding analysis logic touches no core architecture, sharply reducing trial-and-error time (a sketch of such a node follows this section).
- **Efficient, stable runs**: the framework manages how data moves between producers, processors, and consumers, keeping video processing smooth enough to hold real-time frame rates.

videoflow reduces complex video-stream engineering to connecting components, letting developers focus on core algorithms instead of plumbing, and significantly speeding up delivery of intelligent video applications.
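To illustrate the custom-node point, here is a minimal sketch of a per-frame vehicle counter that could sit between the detector and the annotator in the quickstart pipeline. `VehicleCounter` is a hypothetical name, and the assumption that the detector emits one entry per detected object is illustrative; adapt the `process` body to whatever `TensorflowObjectDetector` actually outputs.

```python
import videoflow

class VehicleCounter(videoflow.core.node.ProcessorNode):
    """Hypothetical processor: counts detections per frame and keeps a
    running total inside this node's process."""
    def __init__(self):
        self._total = 0
        super(VehicleCounter, self).__init__()

    def process(self, detections):
        # assumption: one entry per detected object
        self._total += len(detections)
        print("frame count:", len(detections), "running total:", self._total)
        # pass detections through unchanged so the annotator still works
        return detections

# Wiring, given the reader/frame/detector nodes from the quickstart:
#   counter = VehicleCounter()(detector)
#   annotator = BoundingBoxAnnotator()(frame, counter)
```

Because each node runs in its own process, the running total lives inside the counter's process; pushing it somewhere durable would be a job for a consumer node.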
# Project metadata

- **Repository**: videoflow/videoflow · ★ 1,028 · 88 forks · MIT license · last commit 2026-04-05
- **Documentation**: https://docs.videoflow.dev
- **Languages**: Python 99%, Dockerfile 0.9%, Shell 0.1%
- **Supported OS**: Linux, macOS (Windows has known issues and is not recommended); GPU and RAM requirements not specified
- **Python**: 3.6+
- **Dependencies**: cv2 (OpenCV); tensorflow (for the object-detection examples)
- **Notes**: the core project stays deliberately lean; extra components belong in videoflow-contrib
- **GitHub topics**: video-processing, video-analytics, video-pipeline, object-detection, object-tracking
- **Tags**: Data Tools, Video, Image

# Releases

- **v0.2.10** (2020-03-19)
  - Bug fixes: fixed a bug that could keep multiprocessing tasks from completing in certain edge cases; fixed a bug where some flows allocated GPU memory even when the CPU was selected.
  - New: added a simple Webhook consumer.
- **v0.2.8** (2020-01-22)
  - Bug fixes: fixed a bug where flows using multi-task nodes could fail to terminate properly.
  - Improvements: fixed documentation errors; removed the scikit-learn dependency.
- **v0.2.5** (2019-07-31): fixed a bug in multiprocessing with processors allocated to the GPU.
- **v0.2.4** (2019-07-22): added bottleneck detection and improved bottleneck logging.
- **classification** (2019-06-20): classification models.
- **pose** (2019-06-20): pose and multi-pose models.
- **segmentation** (2019-06-05): released models: maskrcnn-resnet101_coco (470 ms per image, 33 COCO mAP) and maskrcnn-inceptionv2_coco (79 ms per image, 25 COCO mAP).
- **v0.2.2** (2019-05-29)
  - Bug fixes: added files missing from the `detection` release and updated the source accordingly.
  - New: added two producers, `VideoUrlReader` and `VideoDeviceReader`.
- **v0.2.0** (2019-05-22)
  - New: a node can now be allocated to multiple processes; flows now stop in an organic way.
  - Breaking change: `videoflow.producers.VideofileReader` now returns an `(idx, frame)` tuple; previously it returned only the frame (a migration sketch follows this list).
- **v0.1.7** (2019-05-17): changed the default `device_type` in TensorflowObjectDetector from 'cpu' to 'gpu'.
- **v0.1.6** (2019-05-17): detection models now download from GitHub releases by default; simple bounding-box tracking added; examples download sample videos from a GitHub release.
- **examples** (2019-05-17): binary release; added an example video file to run the detector and tracker on.
- **detection** (2019-05-17): binary release; model files and label files for detection tasks.
- **v0.1.5** (2019-05-15): improved documentation; fixed object detector and annotator issues; fixed the STOP_SIGNAL warning noise.
- **v0.1.3** (2019-05-13): basic videoflow library with some plugins for reading and writing video and doing object detection.
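For anyone upgrading across the v0.2.0 breaking change above, the fix is a one-node shim, the same `FrameIndexSplitter` pattern the README example already uses:

```python
import videoflow

# Before v0.2.0, VideofileReader emitted bare frames and processors could
# consume them directly. Since v0.2.0 it emits (idx, frame) tuples, so
# unwrap the tuple before any frame-consuming processor:
class FrameIndexSplitter(videoflow.core.node.ProcessorNode):
    def process(self, data):
        index, frame = data  # discard the index, keep the frame
        return frame

# reader = VideofileReader(input_file)
# frame = FrameIndexSplitter()(reader)   # wire processors to `frame`
```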