[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-NVIDIA--gpu-operator":3,"tool-NVIDIA--gpu-operator":61},[4,18,28,37,45,53],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":17},4358,"openclaw","openclaw\u002Fopenclaw","OpenClaw 是一款专为个人打造的本地化 AI 助手，旨在让你在自己的设备上拥有完全可控的智能伙伴。它打破了传统 AI 助手局限于特定网页或应用的束缚，能够直接接入你日常使用的各类通讯渠道，包括微信、WhatsApp、Telegram、Discord、iMessage 等数十种平台。无论你在哪个聊天软件中发送消息，OpenClaw 都能即时响应，甚至支持在 macOS、iOS 和 Android 设备上进行语音交互，并提供实时的画布渲染功能供你操控。\n\n这款工具主要解决了用户对数据隐私、响应速度以及“始终在线”体验的需求。通过将 AI 部署在本地，用户无需依赖云端服务即可享受快速、私密的智能辅助，真正实现了“你的数据，你做主”。其独特的技术亮点在于强大的网关架构，将控制平面与核心助手分离，确保跨平台通信的流畅性与扩展性。\n\nOpenClaw 非常适合希望构建个性化工作流的技术爱好者、开发者，以及注重隐私保护且不愿被单一生态绑定的普通用户。只要具备基础的终端操作能力（支持 macOS、Linux 及 Windows WSL2），即可通过简单的命令行引导完成部署。如果你渴望拥有一个懂你",349277,3,"2026-04-06T06:32:30",[13,14,15,16],"Agent","开发框架","图像","数据工具","ready",{"id":19,"name":20,"github_repo":21,"description_zh":22,"stars":23,"difficulty_score":24,"last_commit_at":25,"category_tags":26,"status":17},9989,"n8n","n8n-io\u002Fn8n","n8n 是一款面向技术团队的公平代码（fair-code）工作流自动化平台，旨在让用户在享受低代码快速构建便利的同时，保留编写自定义代码的灵活性。它主要解决了传统自动化工具要么过于封闭难以扩展、要么完全依赖手写代码效率低下的痛点，帮助用户轻松连接 400 多种应用与服务，实现复杂业务流程的自动化。\n\nn8n 特别适合开发者、工程师以及具备一定技术背景的业务人员使用。其核心亮点在于“按需编码”：既可以通过直观的可视化界面拖拽节点搭建流程，也能随时插入 JavaScript 或 Python 代码、调用 npm 包来处理复杂逻辑。此外，n8n 原生集成了基于 LangChain 的 AI 能力，支持用户利用自有数据和模型构建智能体工作流。在部署方面，n8n 提供极高的自由度，支持完全自托管以保障数据隐私和控制权，也提供云端服务选项。凭借活跃的社区生态和数百个现成模板，n8n 让构建强大且可控的自动化系统变得简单高效。",184740,2,"2026-04-19T23:22:26",[16,14,13,15,27],"插件",{"id":29,"name":30,"github_repo":31,"description_zh":32,"stars":33,"difficulty_score":10,"last_commit_at":34,"category_tags":35,"status":17},10095,"AutoGPT","Significant-Gravitas\u002FAutoGPT","AutoGPT 是一个旨在让每个人都能轻松使用和构建 AI 的强大平台，核心功能是帮助用户创建、部署和管理能够自动执行复杂任务的连续型 AI 智能体。它解决了传统 AI 应用中需要频繁人工干预、难以自动化长流程工作的痛点，让用户只需设定目标，AI 即可自主规划步骤、调用工具并持续运行直至完成任务。\n\n无论是开发者、研究人员，还是希望提升工作效率的普通用户，都能从 AutoGPT 中受益。开发者可利用其低代码界面快速定制专属智能体；研究人员能基于开源架构探索多智能体协作机制；而非技术背景用户也可直接选用预置的智能体模板，立即投入实际工作场景。\n\nAutoGPT 的技术亮点在于其模块化“积木式”工作流设计——用户通过连接功能块即可构建复杂逻辑，每个块负责单一动作，灵活且易于调试。同时，平台支持本地自托管与云端部署两种模式，兼顾数据隐私与使用便捷性。配合完善的文档和一键安装脚本，即使是初次接触的用户也能在几分钟内启动自己的第一个 AI 智能体。AutoGPT 正致力于降低 AI 应用门槛，让人人都能成为 AI 的创造者与受益者。",183572,"2026-04-20T04:47:55",[13,36,27,14,15],"语言模型",{"id":38,"name":39,"github_repo":40,"description_zh":41,"stars":42,"difficulty_score":10,"last_commit_at":43,"category_tags":44,"status":17},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,"2026-04-05T11:01:52",[14,15,13],{"id":46,"name":47,"github_repo":48,"description_zh":49,"stars":50,"difficulty_score":24,"last_commit_at":51,"category_tags":52,"status":17},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",161147,"2026-04-19T23:31:47",[14,13,36],{"id":54,"name":55,"github_repo":56,"description_zh":57,"stars":58,"difficulty_score":24,"last_commit_at":59,"category_tags":60,"status":17},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",109154,"2026-04-18T11:18:24",[14,15,13],{"id":62,"github_repo":63,"name":64,"description_en":65,"description_zh":66,"ai_summary_zh":66,"readme_en":67,"readme_zh":68,"quickstart_zh":69,"use_case_zh":70,"hero_image_url":71,"owner_login":72,"owner_name":73,"owner_avatar_url":74,"owner_bio":75,"owner_company":76,"owner_location":76,"owner_email":76,"owner_twitter":76,"owner_website":77,"owner_url":78,"languages":79,"stars":104,"forks":105,"last_commit_at":106,"license":107,"difficulty_score":108,"env_os":109,"env_gpu":110,"env_ram":111,"env_deps":112,"category_tags":121,"github_topics":122,"view_count":24,"oss_zip_url":76,"oss_zip_packed_at":76,"status":17,"created_at":127,"updated_at":128,"faqs":129,"releases":145},8093,"NVIDIA\u002Fgpu-operator","gpu-operator","NVIDIA GPU Operator creates, configures, and manages GPUs in Kubernetes","gpu-operator 是 NVIDIA 推出的一款自动化工具，旨在简化 Kubernetes 集群中 GPU 资源的部署与管理。在传统模式下，要在 K8s 节点上使用 GPU，管理员往往需要手动配置驱动程序、容器运行时及监控组件等复杂软件栈，过程繁琐且容易出错。gpu-operator 通过引入运营商（Operator）模式，将上述所有必要组件封装为容器，实现了从驱动安装到设备插件配置的全流程自动化。\n\n这一工具让管理员能够像管理普通 CPU 节点一样轻松调度 GPU 节点，无需再为不同硬件准备特殊的操作系统镜像。它特别适合负责维护大规模 AI 训练集群或高性能计算平台的运维工程师与系统架构师，尤其是在需要快速弹性伸缩云资源或混合云环境的场景中表现卓越。其独特的技术亮点在于“一切皆容器”的设计理念，不仅支持动态替换驱动版本，还集成了基于 DCGM 的深度监控与自动节点标记功能，极大降低了底层基础设施的维护门槛，让用户能更专注于上层应用的开发与迭代。","[![license](https:\u002F\u002Fimg.shields.io\u002Fgithub\u002Flicense\u002FNVIDIA\u002Fgpu-operator?style=flat-square)](https:\u002F\u002Fraw.githubusercontent.com\u002FNVIDIA\u002Fgpu-operator\u002Fmaster\u002FLICENSE)\n[![pipeline status](https:\u002F\u002Fgitlab.com\u002Fnvidia\u002Fkubernetes\u002Fgpu-operator\u002Fbadges\u002Fmaster\u002Fpipeline.svg)](https:\u002F\u002Fgitlab.com\u002Fnvidia\u002Fkubernetes\u002Fgpu-operator\u002F-\u002Fpipelines)\n[![coverage report](https:\u002F\u002Fgitlab.com\u002Fnvidia\u002Fkubernetes\u002Fgpu-operator\u002Fbadges\u002Fmaster\u002Fcoverage.svg)](https:\u002F\u002Fgitlab.com\u002Fnvidia\u002Fkubernetes\u002Fgpu-operator\u002F-\u002Fpipelines)\n\n# NVIDIA GPU Operator\n\n![nvidia-gpu-operator](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FNVIDIA_gpu-operator_readme_1a75074e601b.jpg)\n\nKubernetes provides access to special hardware resources such as NVIDIA GPUs, NICs, Infiniband adapters and other devices through the [device plugin framework](https:\u002F\u002Fkubernetes.io\u002Fdocs\u002Fconcepts\u002Fextend-kubernetes\u002Fcompute-storage-net\u002Fdevice-plugins\u002F). However, configuring and managing nodes with these hardware resources requires configuration of multiple software components such as drivers, container runtimes or other libraries which  are difficult and prone to errors.\nThe NVIDIA GPU Operator uses the [operator framework](https:\u002F\u002Fcloud.redhat.com\u002Fblog\u002Fintroducing-the-operator-framework) within Kubernetes to automate the management of all NVIDIA software components needed to provision GPU. These components include the NVIDIA drivers (to enable CUDA), Kubernetes device plugin for GPUs, the NVIDIA Container Runtime, automatic node labelling, [DCGM](https:\u002F\u002Fdeveloper.nvidia.com\u002Fdcgm) based monitoring and others.\n\n## Audience and Use-Cases\nThe GPU Operator allows administrators of Kubernetes clusters to manage GPU nodes just like CPU nodes in the cluster. Instead of provisioning a special OS image for GPU nodes, administrators can rely on a standard OS image for both CPU and GPU nodes and then rely on the GPU Operator to provision the required software components for GPUs.\n\nNote that the GPU Operator is specifically useful for scenarios where the Kubernetes cluster needs to scale quickly - for example provisioning additional GPU nodes on the cloud or on-prem and managing the lifecycle of the underlying software components. Since the GPU Operator runs everything as containers including NVIDIA drivers, the administrators can easily swap various components - simply by starting or stopping containers.\n\n\n## Quick Start\n\nThis section provides a quick guide for deploying the GPU Operator with the data center driver.  \n\nMake sure your Kubernetes cluster meets the [prerequisites](https:\u002F\u002Fdocs.nvidia.com\u002Fdatacenter\u002Fcloud-native\u002Fgpu-operator\u002Flatest\u002Fgetting-started.html#prerequisites) and is listed on the [platform support page](https:\u002F\u002Fdocs.nvidia.com\u002Fdatacenter\u002Fcloud-native\u002Fgpu-operator\u002Flatest\u002Fplatform-support.html#supported-operating-systems-and-kubernetes-platforms).\n\n\n**Step 1: Add the NVIDIA Helm repository**\n\n```bash\nhelm repo add nvidia https:\u002F\u002Fhelm.ngc.nvidia.com\u002Fnvidia \\\n    && helm repo update\n```\n\n**Step 2: Deploy GPU Operator**\n\n```bash\nhelm install --wait --generate-name \\\n    -n gpu-operator --create-namespace \\\n    nvidia\u002Fgpu-operator\n```\n\nAfter installation, the GPU Operator and its operands should be up and running.\n\nNote:\nTo deploy the GPU Operator on OpenShift, follow the instructions in the [official documentation](https:\u002F\u002Fdocs.nvidia.com\u002Fdatacenter\u002Fcloud-native\u002Fopenshift\u002Flatest\u002Fsteps-overview.html).\n\n\n## Product Documentation\nFor information on platform support and getting started, visit the official documentation [repository](https:\u002F\u002Fdocs.nvidia.com\u002Fdatacenter\u002Fcloud-native\u002Fgpu-operator\u002Foverview.html).\n\n\n## Roadmap\n\n- Support the latest NVIDIA Data Center GPUs, systems, and drivers.\n- Support RHEL 10.\n- Support KubeVirt with Ubuntu 24.04.\n- Promote the [NVIDIADriver](https:\u002F\u002Fdocs.nvidia.com\u002Fdatacenter\u002Fcloud-native\u002Fgpu-operator\u002Flatest\u002Fgpu-driver-configuration.html) CRD to General Availability (GA).\n- Integrate [NVIDIA’s DRA Driver for GPUs](https:\u002F\u002Fgithub.com\u002FNVIDIA\u002Fk8s-dra-driver-gpu) as a managed component of the GPU Operator.\n\n## Webinar\n[How to easily use GPUs on Kubernetes](https:\u002F\u002Finfo.nvidia.com\u002Fhow-to-use-gpus-on-kubernetes-webinar.html)\n\n## Contributions\n[Read the document on contributions](https:\u002F\u002Fgithub.com\u002FNVIDIA\u002Fgpu-operator\u002Fblob\u002Fmaster\u002FCONTRIBUTING.md). You can contribute by opening a [pull request](https:\u002F\u002Fhelp.github.com\u002Fen\u002Farticles\u002Fabout-pull-requests).\n\n## Support and Getting Help\nPlease open [an issue on the GitHub project](https:\u002F\u002Fgithub.com\u002FNVIDIA\u002Fgpu-operator\u002Fissues\u002Fnew) for any questions. Your feedback is appreciated.\n","[![license](https:\u002F\u002Fimg.shields.io\u002Fgithub\u002Flicense\u002FNVIDIA\u002Fgpu-operator?style=flat-square)](https:\u002F\u002Fraw.githubusercontent.com\u002FNVIDIA\u002Fgpu-operator\u002Fmaster\u002FLICENSE)\n[![pipeline status](https:\u002F\u002Fgitlab.com\u002Fnvidia\u002Fkubernetes\u002Fgpu-operator\u002Fbadges\u002Fmaster\u002Fpipeline.svg)](https:\u002F\u002Fgitlab.com\u002Fnvidia\u002Fkubernetes\u002Fgpu-operator\u002F-\u002Fpipelines)\n[![coverage report](https:\u002F\u002Fgitlab.com\u002Fnvidia\u002Fkubernetes\u002Fgpu-operator\u002Fbadges\u002Fmaster\u002Fcoverage.svg)](https:\u002F\u002Fgitlab.com\u002Fnvidia\u002Fkubernetes\u002Fgpu-operator\u002F-\u002Fpipelines)\n\n# NVIDIA GPU Operator\n\n![nvidia-gpu-operator](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FNVIDIA_gpu-operator_readme_1a75074e601b.jpg)\n\nKubernetes 通过 [设备插件框架](https:\u002F\u002Fkubernetes.io\u002Fdocs\u002Fconcepts\u002Fextend-kubernetes\u002Fcompute-storage-net\u002Fdevice-plugins\u002F) 提供对 NVIDIA GPU、网卡、Infiniband 适配器等特殊硬件资源的访问。然而，配置和管理包含这些硬件资源的节点需要设置多个软件组件，如驱动程序、容器运行时或其他库，这一过程复杂且容易出错。\nNVIDIA GPU Operator 利用 Kubernetes 中的 [Operator 框架](https:\u002F\u002Fcloud.redhat.com\u002Fblog\u002Fintroducing-the-operator-framework) 自动化管理部署 GPU 所需的所有 NVIDIA 软件组件。这些组件包括 NVIDIA 驱动程序（以启用 CUDA）、用于 GPU 的 Kubernetes 设备插件、NVIDIA 容器运行时、自动节点标签、基于 [DCGM](https:\u002F\u002Fdeveloper.nvidia.com\u002Fdcgm) 的监控等。\n\n## 目标用户与使用场景\nGPU Operator 使 Kubernetes 集群管理员能够像管理 CPU 节点一样管理 GPU 节点。管理员无需为 GPU 节点准备特殊的操作系统镜像，而是可以使用适用于 CPU 和 GPU 节点的标准操作系统镜像，然后依靠 GPU Operator 来部署 GPU 所需的软件组件。\n\n需要注意的是，GPU Operator 特别适用于 Kubernetes 集群需要快速扩展的场景——例如在云上或本地环境中新增 GPU 节点，并管理底层软件组件的生命周期。由于 GPU Operator 将包括 NVIDIA 驱动程序在内的所有内容都以容器形式运行，管理员只需启动或停止相应的容器，即可轻松替换各种组件。\n\n## 快速入门\n本节提供使用数据中心驱动程序部署 GPU Operator 的快速指南。\n\n请确保您的 Kubernetes 集群满足 [先决条件](https:\u002F\u002Fdocs.nvidia.com\u002Fdatacenter\u002Fcloud-native\u002Fgpu-operator\u002Flatest\u002Fgetting-started.html#prerequisites)，并且列于 [平台支持页面](https:\u002F\u002Fdocs.nvidia.com\u002Fdatacenter\u002Fcloud-native\u002Fgpu-operator\u002Flatest\u002Fplatform-support.html#supported-operating-systems-and-kubernetes-platforms)。\n\n**步骤 1：添加 NVIDIA Helm 仓库**\n\n```bash\nhelm repo add nvidia https:\u002F\u002Fhelm.ngc.nvidia.com\u002Fnvidia \\\n    && helm repo update\n```\n\n**步骤 2：部署 GPU Operator**\n\n```bash\nhelm install --wait --generate-name \\\n    -n gpu-operator --create-namespace \\\n    nvidia\u002Fgpu-operator\n```\n\n安装完成后，GPU Operator 及其相关组件应已正常运行。\n\n注意：\n若要在 OpenShift 上部署 GPU Operator，请遵循 [官方文档](https:\u002F\u002Fdocs.nvidia.com\u002Fdatacenter\u002Fcloud-native\u002Fopenshift\u002Flatest\u002Fsteps-overview.html) 中的说明。\n\n## 产品文档\n有关平台支持和入门信息，请访问官方文档 [仓库](https:\u002F\u002Fdocs.nvidia.com\u002Fdatacenter\u002Fcloud-native\u002Fgpu-operator\u002Foverview.html)。\n\n## 路线图\n- 支持最新的 NVIDIA 数据中心 GPU、系统和驱动程序。\n- 支持 RHEL 10。\n- 支持 KubeVirt 与 Ubuntu 24.04。\n- 将 [NVIDIADriver](https:\u002F\u002Fdocs.nvidia.com\u002Fdatacenter\u002Fcloud-native\u002Fgpu-operator\u002Flatest\u002Fgpu-driver-configuration.html) CRD 推广至正式发布 (GA)。\n- 将 [NVIDIA 的 DRA GPU 驱动程序](https:\u002F\u002Fgithub.com\u002FNVIDIA\u002Fk8s-dra-driver-gpu) 集成为 GPU Operator 的托管组件。\n\n## 网络研讨会\n[如何在 Kubernetes 上轻松使用 GPU](https:\u002F\u002Finfo.nvidia.com\u002Fhow-to-use-gpus-on-kubernetes-webinar.html)\n\n## 贡献\n请阅读 [贡献文档](https:\u002F\u002Fgithub.com\u002FNVIDIA\u002Fgpu-operator\u002Fblob\u002Fmaster\u002FCONTRIBUTING.md)。您可以通过提交 [拉取请求](https:\u002F\u002Fhelp.github.com\u002Fen\u002Farticles\u002Fabout-pull-requests) 进行贡献。\n\n## 支持与帮助\n如有任何问题，请在 [GitHub 项目](https:\u002F\u002Fgithub.com\u002FNVIDIA\u002Fgpu-operator\u002Fissues\u002Fnew) 上提交问题。我们非常感谢您的反馈。","# NVIDIA GPU Operator 快速上手指南\n\nNVIDIA GPU Operator 是一个基于 Kubernetes Operator Framework 的自动化工具，旨在简化集群中 GPU 节点的管理。它自动部署并管理 NVIDIA 驱动程序、CUDA 工具包、设备插件、容器运行时及监控组件，让管理员能够像管理 CPU 节点一样轻松管理 GPU 节点。\n\n## 环境准备\n\n在部署之前，请确保您的 Kubernetes 集群满足以下要求：\n\n*   **Kubernetes 版本**：需符合 [官方支持的平台列表](https:\u002F\u002Fdocs.nvidia.com\u002Fdatacenter\u002Fcloud-native\u002Fgpu-operator\u002Flatest\u002Fplatform-support.html#supported-operating-systems-and-kubernetes-platforms)。\n*   **硬件资源**：节点需配备支持的 NVIDIA Data Center GPU（如 Tesla, A100, H100 等）。\n*   **前置依赖**：\n    *   已安装 `helm` (v3+) 命令行工具。\n    *   节点操作系统内核头文件已安装（用于驱动编译）。\n    *   若使用云厂商托管服务，请确认是否已预装基础驱动或需要 Operator 全量管理。\n*   **网络访问**：确保集群节点可以访问 NVIDIA Helm 仓库及相关的容器镜像仓库。\n    *   *注：国内用户若访问官方源较慢，建议配置本地镜像加速器或使用私有镜像仓库同步相关镜像。*\n\n## 安装步骤\n\n以下是使用 Helm 快速部署 GPU Operator 的标准流程（适用于大多数标准 Kubernetes 集群）：\n\n**第一步：添加 NVIDIA Helm 仓库**\n\n```bash\nhelm repo add nvidia https:\u002F\u002Fhelm.ngc.nvidia.com\u002Fnvidia \\\n    && helm repo update\n```\n\n**第二步：部署 GPU Operator**\n\n执行以下命令将在名为 `gpu-operator` 的命名空间中安装 Operator 及其所有组件：\n\n```bash\nhelm install --wait --generate-name \\\n    -n gpu-operator --create-namespace \\\n    nvidia\u002Fgpu-operator\n```\n\n> **注意**：如果您使用的是 **OpenShift** 平台，请勿直接使用上述命令，务必参考 [OpenShift 专用部署文档](https:\u002F\u002Fdocs.nvidia.com\u002Fdatacenter\u002Fcloud-native\u002Fopenshift\u002Flatest\u002Fsteps-overview.html) 进行操作。\n\n安装完成后，Operator 将自动检测集群中的 GPU 节点，并开始拉取和部署所需的驱动程序及插件容器。\n\n## 基本使用\n\n部署成功后，您无需手动干预即可在 Pod 中请求 GPU 资源。GPU Operator 会自动为具备 GPU 的节点添加标签（默认为 `nvidia.com\u002Fgpu.present=true`）。\n\n**验证部署状态**\n\n检查 `gpu-operator` 命名空间下的 Pod 状态，确保所有组件（如 `driver`, `device-plugin`, `toolkit` 等）均为 `Running` 状态：\n\n```bash\nkubectl get pods -n gpu-operator\n```\n\n**运行测试任务**\n\n创建一个简单的 YAML 文件（例如 `gpu-test.yaml`），请求一个 GPU 并运行 `nvidia-smi` 来验证功能：\n\n```yaml\napiVersion: v1\nkind: Pod\nmetadata:\n  name: gpu-test\nspec:\n  restartPolicy: OnFailure\n  containers:\n  - name: cuda-container\n    image: nvcr.io\u002Fnvidia\u002Fk8s\u002Fcuda-sample:vectoradd-cuda10.2\n    resources:\n      limits:\n        nvidia.com\u002Fgpu: 1 # 请求 1 个 GPU\n```\n\n应用该配置并查看日志：\n\n```bash\nkubectl apply -f gpu-test.yaml\nkubectl logs gpu-test\n```\n\n如果输出中包含向量加法计算结果或 `nvidia-smi` 显示的 GPU 信息，即表示 GPU Operator 已成功接管并调度 GPU 资源。","某云原生 AI 团队需要在 Kubernetes 集群中快速扩容数十个 GPU 节点，以支撑大模型训练任务的突发流量。\n\n### 没有 gpu-operator 时\n- **镜像维护沉重**：必须为 GPU 节点单独制作和维护包含特定版本驱动、CUDA 工具包及容器运行时的定制 OS 镜像，每次驱动升级都需重新构建并重启所有节点。\n- **部署极易出错**：人工在每台新节点上手动安装驱动和配置 Device Plugin，步骤繁琐且容易因环境差异导致配置失败，排查问题耗时极长。\n- **弹性扩容缓慢**：面对突发算力需求，自动伸缩组启动新节点后需花费大量时间执行初始化脚本，无法实现秒级就绪，导致训练任务排队积压。\n- **组件耦合紧密**：驱动程序直接安装在宿主机系统层，难以独立升级或回滚，一旦驱动崩溃可能影响整个节点的稳定性。\n\n### 使用 gpu-operator 后\n- **统一镜像管理**：无需定制专用镜像，CPU 和 GPU 节点共用标准 OS 镜像，gpu-operator 自动以容器化方式按需加载驱动和相关组件。\n- **自动化全栈配置**：只需一条 Helm 命令，gpu-operator 即可自动完成驱动安装、Device Plugin 注册、标签注入及监控组件部署，消除人为配置错误。\n- **即时弹性就绪**：新节点加入集群后，gpu-operator 分钟内自动完成软件栈 provisioning，使 GPU 资源迅速可用，完美匹配业务波峰。\n- **生命周期解耦**：所有 NVIDIA 软件组件均运行在容器中，管理员可独立更新或回滚驱动版本而无需重装系统，极大提升了运维灵活性。\n\ngpu-operator 将复杂的 GPU 节点管理转化为标准的 Kubernetes 声明式操作，让算力基础设施像 CPU 一样弹性易用。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FNVIDIA_gpu-operator_2e92a37a.png","NVIDIA","NVIDIA Corporation","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002FNVIDIA_7dcf6000.png","",null,"https:\u002F\u002Fnvidia.com","https:\u002F\u002Fgithub.com\u002FNVIDIA",[80,84,88,92,96,100],{"name":81,"color":82,"percentage":83},"Go","#00ADD8",92.3,{"name":85,"color":86,"percentage":87},"Shell","#89e051",5.5,{"name":89,"color":90,"percentage":91},"Makefile","#427819",1.3,{"name":93,"color":94,"percentage":95},"Mustache","#724b3b",0.6,{"name":97,"color":98,"percentage":99},"Dockerfile","#384d54",0.4,{"name":101,"color":102,"percentage":103},"HCL","#844FBA",0,2646,487,"2026-04-16T03:45:21","Apache-2.0",4,"Linux","必需 NVIDIA Data Center GPU（具体型号需参考官方平台支持页面），需安装 NVIDIA 驱动程序以启用 CUDA，具体 CUDA 版本取决于驱动版本","未说明",{"notes":113,"python":111,"dependencies":114},"该工具是用于 Kubernetes 集群的运算符（Operator），而非本地运行的 Python 脚本。它要求集群节点运行受支持的 Linux 操作系统（具体列表需查阅官方文档链接）。管理员无需为 GPU 节点准备特殊 OS 镜像，可使用标准镜像由 Operator 自动部署驱动和运行时组件。支持在云端或本地快速扩展 GPU 节点。OpenShift 用户需遵循专门的部署文档。",[115,116,117,118,119,120],"Kubernetes","Helm","NVIDIA Drivers","NVIDIA Container Runtime","Kubernetes Device Plugin","DCGM (Data Center GPU Manager)",[14],[123,124,125,126],"gpu","kubernetes","cuda","nvidia","2026-03-27T02:49:30.150509","2026-04-20T16:18:43.421527",[130,135,140],{"id":131,"question_zh":132,"answer_zh":133,"source_url":134},36221,"NVIDIA GPU Operator 在 OpenShift 4.3 或 4.4 上无法正常工作怎么办？","该问题在 OpenShift 4.5\u002F4.6 版本中已解决。如果仍在使用旧版本遇到问题，建议升级集群。此外，部分用户通过以下两步解决了相关问题：1. 将内存映射 I\u002FO 基址（Memory Mapped I\u002FO Base）设置为 12TB 以启用正确的 GPU 内存放置；2. 将启动\u002F固件模式从 BIOS 更改为 EFI。完成这些更改后，相关 Pod（如 gpu-feature-discovery）即可正常运行。","https:\u002F\u002Fgithub.com\u002FNVIDIA\u002Fgpu-operator\u002Fissues\u002F64",{"id":136,"question_zh":137,"answer_zh":138,"source_url":139},36222,"遇到错误 'failed to get sandbox runtime: no runtime for nvidia is configured' 如何解决？","此错误通常表示 containerd 未正确配置 NVIDIA 运行时。请确保在安装 GPU Operator 时正确设置了 Helm 参数，特别是针对 containerd 的配置。示例配置如下：\n--set operator.defaultRuntime=\"containerd\" \\\n--set toolkit.enabled=true \\\n--set toolkit.env[0].name=CONTAINERD_CONFIG \\\n--set toolkit.env[0].value=\u002Fetc\u002Fcontainerd\u002Fconfig.toml \\\n--set toolkit.env[1].name=CONTAINERD_SOCKET \\\n--set toolkit.env[1].value=\u002Frun\u002Fk3s\u002Fcontainerd\u002Fcontainerd.sock \\\n--set toolkit.env[2].name=CONTAINERD_RUNTIME_CLASS \\\n--set toolkit.env[2].value=nvidia \\\n--set-string toolkit.env[3].name=CONTAINERD_SET_AS_DEFAULT \\\n--set-string toolkit.env[3].value=\"true\"\n注意：根据实际环境，CONTAINERD_CONFIG 的路径可能是 \u002Fetc\u002Fcontainerd\u002Fconfig.toml 或 \u002Fvar\u002Flib\u002Francher\u002Frke2\u002Fagent\u002Fetc\u002Fcontainerd\u002Fconfig.toml.tmpl。如果问题依然存在，建议升级到最新版本的 gpu-operator 重试。","https:\u002F\u002Fgithub.com\u002FNVIDIA\u002Fgpu-operator\u002Fissues\u002F432",{"id":141,"question_zh":142,"answer_zh":143,"source_url":144},36223,"在 OpenShift 升级后重新部署 GPU Operator 失败，Pod 卡在 Init 状态或 CrashLoopBackOff，可能是什么原因？","这通常是由于代理（Proxy）配置问题导致的。Pod 尝试连接 Kubernetes API 服务器（如 172.30.0.1:443）时，如果 NO_PROXY 环境变量未包含服务 CIDR（Service CIDR），连接会失败。解决方法是手动编辑相关的 DaemonSet 配置，在容器规格的环境变量中添加 API 服务器地址到 NO_PROXY 中（例如添加 172.30.0.1）。此外，需检查 worker 节点上的 crio 服务配置文件（\u002Fetc\u002Fsystemd\u002Fsystem\u002Fcrio.service.d\u002F*.conf），确认 HTTP_PROXY、HTTPS_PROXY 和 NO_PROXY 环境变量是否正确设置。","https:\u002F\u002Fgithub.com\u002FNVIDIA\u002Fgpu-operator\u002Fissues\u002F199",[146,151,156,161,166,171,176,181,186,191,196,201,206,211,216,221,226,231,236,241],{"id":147,"version":148,"summary_zh":149,"released_at":150},289014,"v26.3.0","https:\u002F\u002Fdocs.nvidia.com\u002Fdatacenter\u002Fcloud-native\u002Fgpu-operator\u002Flatest\u002Frelease-notes.html#v26-3-0","2026-03-20T17:07:29",{"id":152,"version":153,"summary_zh":154,"released_at":155},289015,"v25.10.1","https:\u002F\u002Fdocs.nvidia.com\u002Fdatacenter\u002Fcloud-native\u002Fgpu-operator\u002Flatest\u002Frelease-notes.html#v25-10-1","2025-12-04T20:56:28",{"id":157,"version":158,"summary_zh":159,"released_at":160},289016,"v25.10.0","https:\u002F\u002Fdocs.nvidia.com\u002Fdatacenter\u002Fcloud-native\u002Fgpu-operator\u002F25.10\u002Frelease-notes.html","2025-10-28T01:11:25",{"id":162,"version":163,"summary_zh":164,"released_at":165},289017,"v25.3.4","https:\u002F\u002Fdocs.nvidia.com\u002Fdatacenter\u002Fcloud-native\u002Fgpu-operator\u002F25.3.4\u002Frelease-notes.html","2025-09-19T18:20:19",{"id":167,"version":168,"summary_zh":169,"released_at":170},289018,"v25.3.3","https:\u002F\u002Fdocs.nvidia.com\u002Fdatacenter\u002Fcloud-native\u002Fgpu-operator\u002F25.3.3\u002Frelease-notes.html","2025-09-10T22:41:34",{"id":172,"version":173,"summary_zh":174,"released_at":175},289019,"v25.3.2","https:\u002F\u002Fdocs.nvidia.com\u002Fdatacenter\u002Fcloud-native\u002Fgpu-operator\u002F25.3.2\u002Frelease-notes.html","2025-07-25T23:46:12",{"id":177,"version":178,"summary_zh":179,"released_at":180},289020,"v25.3.1","https:\u002F\u002Fdocs.nvidia.com\u002Fdatacenter\u002Fcloud-native\u002Fgpu-operator\u002F25.3.1\u002Frelease-notes.html","2025-06-12T22:34:03",{"id":182,"version":183,"summary_zh":184,"released_at":185},289021,"v25.3.0","https:\u002F\u002Fdocs.nvidia.com\u002Fdatacenter\u002Fcloud-native\u002Fgpu-operator\u002F25.3.0\u002Frelease-notes.html","2025-03-26T20:41:20",{"id":187,"version":188,"summary_zh":189,"released_at":190},289022,"v24.9.2","https:\u002F\u002Fdocs.nvidia.com\u002Fdatacenter\u002Fcloud-native\u002Fgpu-operator\u002F24.9.2\u002Frelease-notes.html","2025-01-28T20:54:24",{"id":192,"version":193,"summary_zh":194,"released_at":195},289023,"v24.9.1","https:\u002F\u002Fdocs.nvidia.com\u002Fdatacenter\u002Fcloud-native\u002Fgpu-operator\u002F24.9.1\u002Frelease-notes.html","2024-12-05T19:56:31",{"id":197,"version":198,"summary_zh":199,"released_at":200},289024,"v24.9.0","https:\u002F\u002Fdocs.nvidia.com\u002Fdatacenter\u002Fcloud-native\u002Fgpu-operator\u002F24.9.0\u002Frelease-notes.html","2024-10-31T20:24:34",{"id":202,"version":203,"summary_zh":204,"released_at":205},289025,"v24.6.2","https:\u002F\u002Fdocs.nvidia.com\u002Fdatacenter\u002Fcloud-native\u002Fgpu-operator\u002F24.6.2\u002Frelease-notes.html","2024-09-25T17:47:20",{"id":207,"version":208,"summary_zh":209,"released_at":210},289026,"v24.6.1","https:\u002F\u002Fdocs.nvidia.com\u002Fdatacenter\u002Fcloud-native\u002Fgpu-operator\u002F24.6.1\u002Findex.html","2024-08-12T21:24:17",{"id":212,"version":213,"summary_zh":214,"released_at":215},289027,"v24.6.0","https:\u002F\u002Fdocs.nvidia.com\u002Fdatacenter\u002Fcloud-native\u002Fgpu-operator\u002F24.6.0\u002Frelease-notes.html\r\n\r\n","2024-07-31T16:00:01",{"id":217,"version":218,"summary_zh":219,"released_at":220},289028,"v24.3.0","https:\u002F\u002Fdocs.nvidia.com\u002Fdatacenter\u002Fcloud-native\u002Fgpu-operator\u002F24.3.0\u002Frelease-notes.html","2024-05-02T19:59:14",{"id":222,"version":223,"summary_zh":224,"released_at":225},289029,"v23.9.2","https:\u002F\u002Fdocs.nvidia.com\u002Fdatacenter\u002Fcloud-native\u002Fgpu-operator\u002F23.9.2\u002Frelease-notes.html","2024-03-07T23:00:27",{"id":227,"version":228,"summary_zh":229,"released_at":230},289030,"v23.6.2","https:\u002F\u002Fdocs.nvidia.com\u002Fdatacenter\u002Fcloud-native\u002Fgpu-operator\u002F23.6.2\u002Frelease-notes.html","2023-12-14T16:55:33",{"id":232,"version":233,"summary_zh":234,"released_at":235},289031,"v23.9.1","https:\u002F\u002Fdocs.nvidia.com\u002Fdatacenter\u002Fcloud-native\u002Fgpu-operator\u002Flatest\u002Frelease-notes.html","2023-12-08T15:59:18",{"id":237,"version":238,"summary_zh":239,"released_at":240},289032,"v23.9.0","https:\u002F\u002Fdocs.nvidia.com\u002Fdatacenter\u002Fcloud-native\u002Fgpu-operator\u002Fv23.9.0\u002Frelease-notes.html","2023-10-23T15:08:51",{"id":242,"version":243,"summary_zh":244,"released_at":245},289033,"v23.6.1","https:\u002F\u002Fdocs.nvidia.com\u002Fdatacenter\u002Fcloud-native\u002Fgpu-operator\u002F23.6.1\u002Frelease-notes.html","2023-09-01T18:09:42"]