[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-triton-inference-server--model_analyzer":3,"tool-triton-inference-server--model_analyzer":64},[4,17,27,35,43,56],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":16},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,3,"2026-04-05T11:01:52",[13,14,15],"开发框架","图像","Agent","ready",{"id":18,"name":19,"github_repo":20,"description_zh":21,"stars":22,"difficulty_score":23,"last_commit_at":24,"category_tags":25,"status":16},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",138956,2,"2026-04-05T11:33:21",[13,15,26],"语言模型",{"id":28,"name":29,"github_repo":30,"description_zh":31,"stars":32,"difficulty_score":23,"last_commit_at":33,"category_tags":34,"status":16},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",107662,"2026-04-03T11:11:01",[13,14,15],{"id":36,"name":37,"github_repo":38,"description_zh":39,"stars":40,"difficulty_score":23,"last_commit_at":41,"category_tags":42,"status":16},3704,"NextChat","ChatGPTNextWeb\u002FNextChat","NextChat 是一款轻量且极速的 AI 助手，旨在为用户提供流畅、跨平台的大模型交互体验。它完美解决了用户在多设备间切换时难以保持对话连续性，以及面对众多 AI 模型不知如何统一管理的痛点。无论是日常办公、学习辅助还是创意激发，NextChat 都能让用户随时随地通过网页、iOS、Android、Windows、MacOS 或 Linux 端无缝接入智能服务。\n\n这款工具非常适合普通用户、学生、职场人士以及需要私有化部署的企业团队使用。对于开发者而言，它也提供了便捷的自托管方案，支持一键部署到 Vercel 或 Zeabur 等平台。\n\nNextChat 的核心亮点在于其广泛的模型兼容性，原生支持 Claude、DeepSeek、GPT-4 及 Gemini Pro 等主流大模型，让用户在一个界面即可自由切换不同 AI 能力。此外，它还率先支持 MCP（Model Context Protocol）协议，增强了上下文处理能力。针对企业用户，NextChat 提供专业版解决方案，具备品牌定制、细粒度权限控制、内部知识库整合及安全审计等功能，满足公司对数据隐私和个性化管理的高标准要求。",87618,"2026-04-05T07:20:52",[13,26],{"id":44,"name":45,"github_repo":46,"description_zh":47,"stars":48,"difficulty_score":23,"last_commit_at":49,"category_tags":50,"status":16},2268,"ML-For-Beginners","microsoft\u002FML-For-Beginners","ML-For-Beginners 是由微软推出的一套系统化机器学习入门课程，旨在帮助零基础用户轻松掌握经典机器学习知识。这套课程将学习路径规划为 12 周，包含 26 节精炼课程和 52 道配套测验，内容涵盖从基础概念到实际应用的完整流程，有效解决了初学者面对庞大知识体系时无从下手、缺乏结构化指导的痛点。\n\n无论是希望转型的开发者、需要补充算法背景的研究人员，还是对人工智能充满好奇的普通爱好者，都能从中受益。课程不仅提供了清晰的理论讲解，还强调动手实践，让用户在循序渐进中建立扎实的技能基础。其独特的亮点在于强大的多语言支持，通过自动化机制提供了包括简体中文在内的 50 多种语言版本，极大地降低了全球不同背景用户的学习门槛。此外，项目采用开源协作模式，社区活跃且内容持续更新，确保学习者能获取前沿且准确的技术资讯。如果你正寻找一条清晰、友好且专业的机器学习入门之路，ML-For-Beginners 将是理想的起点。",84991,"2026-04-05T10:45:23",[14,51,52,53,15,54,26,13,55],"数据工具","视频","插件","其他","音频",{"id":57,"name":58,"github_repo":59,"description_zh":60,"stars":61,"difficulty_score":10,"last_commit_at":62,"category_tags":63,"status":16},3128,"ragflow","infiniflow\u002Fragflow","RAGFlow 是一款领先的开源检索增强生成（RAG）引擎，旨在为大语言模型构建更精准、可靠的上下文层。它巧妙地将前沿的 RAG 技术与智能体（Agent）能力相结合，不仅支持从各类文档中高效提取知识，还能让模型基于这些知识进行逻辑推理和任务执行。\n\n在大模型应用中，幻觉问题和知识滞后是常见痛点。RAGFlow 通过深度解析复杂文档结构（如表格、图表及混合排版），显著提升了信息检索的准确度，从而有效减少模型“胡编乱造”的现象，确保回答既有据可依又具备时效性。其内置的智能体机制更进一步，使系统不仅能回答问题，还能自主规划步骤解决复杂问题。\n\n这款工具特别适合开发者、企业技术团队以及 AI 研究人员使用。无论是希望快速搭建私有知识库问答系统，还是致力于探索大模型在垂直领域落地的创新者，都能从中受益。RAGFlow 提供了可视化的工作流编排界面和灵活的 API 接口，既降低了非算法背景用户的上手门槛，也满足了专业开发者对系统深度定制的需求。作为基于 Apache 2.0 协议开源的项目，它正成为连接通用大模型与行业专有知识之间的重要桥梁。",77062,"2026-04-04T04:44:48",[15,14,13,26,54],{"id":65,"github_repo":66,"name":67,"description_en":68,"description_zh":69,"ai_summary_zh":69,"readme_en":70,"readme_zh":71,"quickstart_zh":72,"use_case_zh":73,"hero_image_url":74,"owner_login":75,"owner_name":76,"owner_avatar_url":77,"owner_bio":78,"owner_company":79,"owner_location":79,"owner_email":79,"owner_twitter":79,"owner_website":80,"owner_url":81,"languages":82,"stars":98,"forks":99,"last_commit_at":100,"license":101,"difficulty_score":10,"env_os":78,"env_gpu":102,"env_ram":102,"env_deps":103,"category_tags":105,"github_topics":106,"view_count":10,"oss_zip_url":79,"oss_zip_packed_at":79,"status":16,"created_at":111,"updated_at":112,"faqs":113,"releases":142},1143,"triton-inference-server\u002Fmodel_analyzer","model_analyzer","Triton Model Analyzer is a CLI tool to help with better understanding of the compute and memory requirements of the Triton Inference Server models.","Triton Model Analyzer是一个命令行工具，帮助开发者优化Triton Inference Server上的模型配置。通过自动化搜索不同参数组合，如批处理大小、动态批处理和实例组，它能快速找到性能与资源消耗的最佳平衡。适用于需要提升模型推理效率的开发者和研究人员，特别是处理复杂模型（如Ensemble、BLS、多模型或大语言模型）的场景。支持多种搜索策略，包括智能优化和全面扫描，并生成详细报告，便于分析不同配置的优劣。同时允许设置延迟等QoS约束，确保结果符合实际需求。","\u003C!--\nSPDX-FileCopyrightText: Copyright (c) 2020-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved.\nSPDX-License-Identifier: Apache-2.0\n-->\n\n[![License](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FLicense-Apache_2.0-lightgrey.svg)](https:\u002F\u002Fopensource.org\u002Flicenses\u002FApache-2.0)\n\n# Triton Model Analyzer\n\n\nTriton Model Analyzer is a CLI tool which can help you find a more optimal configuration, on a given piece of hardware, for single, multiple, ensemble, or BLS models running on a [Triton Inference Server](https:\u002F\u002Fgithub.com\u002Ftriton-inference-server\u002Fserver\u002F). Model Analyzer will also generate reports to help you better understand the trade-offs of the different configurations along with their compute and memory requirements.\n\u003Cbr>\u003Cbr>\n\n# Features\n\n### Search Modes\n\n- [Optuna Search](docs\u002Fconfig_search.md#optuna-search-mode) **_-ALPHA RELEASE-_** allows you to search for every parameter that can be specified in the model configuration, using a hyperparameter optimization framework. Please see the [Optuna](https:\u002F\u002Foptuna.org\u002F) website if you are interested in specific details on how the algorithm functions.\n\n- [Quick Search](docs\u002Fconfig_search.md#quick-search-mode) will **sparsely** search the [Max Batch Size](https:\u002F\u002Fgithub.com\u002Ftriton-inference-server\u002Fserver\u002Fblob\u002Fr26.03\u002Fdocs\u002Fuser_guide\u002Fmodel_configuration.md#maximum-batch-size),\n  [Dynamic Batching](https:\u002F\u002Fgithub.com\u002Ftriton-inference-server\u002Fserver\u002Fblob\u002Fr26.03\u002Fdocs\u002Fuser_guide\u002Fbatcher.md#dynamic-batcher), and\n  [Instance Group](https:\u002F\u002Fgithub.com\u002Ftriton-inference-server\u002Fserver\u002Fblob\u002Fr26.03\u002Fdocs\u002Fuser_guide\u002Fmodel_configuration.md#instance-groups) spaces by utilizing a heuristic hill-climbing algorithm to help you quickly find a more optimal configuration\n\n- [Automatic Brute Search](docs\u002Fconfig_search.md#automatic-brute-search) will **exhaustively** search the\n  [Max Batch Size](https:\u002F\u002Fgithub.com\u002Ftriton-inference-server\u002Fserver\u002Fblob\u002Fr26.03\u002Fdocs\u002Fuser_guide\u002Fmodel_configuration.md#maximum-batch-size),\n  [Dynamic Batching](https:\u002F\u002Fgithub.com\u002Ftriton-inference-server\u002Fserver\u002Fblob\u002Fr26.03\u002Fdocs\u002Fuser_guide\u002Fbatcher.md#dynamic-batcher), and\n  [Instance Group](https:\u002F\u002Fgithub.com\u002Ftriton-inference-server\u002Fserver\u002Fblob\u002Fr26.03\u002Fdocs\u002Fuser_guide\u002Fmodel_configuration.md#instance-groups)\n  parameters of your model configuration\n\n- [Manual Brute Search](docs\u002Fconfig_search.md#manual-brute-search) allows you to create manual sweeps for every parameter that can be specified in the model configuration\n\n### Model Types\n\n- [Ensemble](docs\u002Fmodel_types.md#ensemble): Model Analyzer can help you find the optimal\n  settings when profiling an ensemble model\n\n- [BLS](docs\u002Fmodel_types.md#bls): Model Analyzer can help you find the optimal\n  settings when profiling a BLS model\n\n- [Multi-Model](docs\u002Fmodel_types.md#multi-model): Model Analyzer can help you\n  find the optimal settings when profiling multiple concurrent models\n\n- [LLM](docs\u002Fmodel_types.md#llm): Model Analyzer can help you\n  find the optimal settings when profiling Large Language Models\n\n### Other Features\n\n- [Detailed and summary reports](docs\u002Freport.md): Model Analyzer is able to generate\n  summarized and detailed reports that can help you better understand the trade-offs\n  between different model configurations that can be used for your model.\n\n- [QoS Constraints](docs\u002Fconfig.md#constraint): Constraints can help you\n  filter out the Model Analyzer results based on your QoS requirements. For\n  example, you can specify a latency budget to filter out model configurations\n  that do not satisfy the specified latency threshold.\n  \u003Cbr>\u003Cbr>\n\n# Examples and Tutorials\n\n### **Single Model**\n\nSee the [Single Model Quick Start](docs\u002Fquick_start.md) for a guide on how to use Model Analyzer to profile, analyze and report on a simple PyTorch model.\n\n### **Multi Model**\n\nSee the [Multi-model Quick Start](docs\u002Fmm_quick_start.md) for a guide on how to use Model Analyzer to profile, analyze and report on two models running concurrently on the same GPU.\n\n### **Ensemble Model**\n\nSee the [Ensemble Model Quick Start](docs\u002Fensemble_quick_start.md) for a guide on how to use Model Analyzer to profile, analyze and report on a simple Ensemble model.\n\n### **BLS Model**\n\nSee the [BLS Model Quick Start](docs\u002Fbls_quick_start.md) for a guide on how to use Model Analyzer to profile, analyze and report on a simple BLS model.\n\u003Cbr>\u003Cbr>\n\n# Documentation\n\n- [Installation](docs\u002Finstall.md)\n- [Model Analyzer CLI](docs\u002Fcli.md)\n- [Launch Modes](docs\u002Flaunch_modes.md)\n- [Configuring Model Analyzer](docs\u002Fconfig.md)\n- [Model Analyzer Metrics](docs\u002Fmetrics.md)\n- [Model Config Search](docs\u002Fconfig_search.md)\n- [Model Types](docs\u002Fmodel_types.md)\n- [Checkpointing](docs\u002Fcheckpoints.md)\n- [Model Analyzer Reports](docs\u002Freport.md)\n- [Deployment with Kubernetes](docs\u002Fkubernetes_deploy.md)\n  \u003Cbr>\u003Cbr>\n\n# Terminology\n\nBelow are definitions of some commonly used terms in Model Analyzer:\n\n- **Model Type** - Category of model being profiled. Examples of this include single, multi, ensemble, BLS, etc..\n- **Search Mode** - How Model Analyzer explores the possible configuration space when profiling. This is either exhaustive (brute) or heuristic (quick\u002Foptuna).\n- **Model Config Search** - The cross product of model type and search mode.\n- **Launch Mode** - How the Triton Server is deployed and used by Model Analyzer.\n\n# Reporting problems, asking questions\n\nWe appreciate any feedback, questions or bug reporting regarding this\nproject. When help with code is needed, follow the process outlined in\nthe Stack Overflow (https:\u002F\u002Fstackoverflow.com\u002Fhelp\u002Fmcve)\ndocument. Ensure posted examples are:\n\n- minimal – use as little code as possible that still produces the\n  same problem\n\n- complete – provide all parts needed to reproduce the problem. Check\n  if you can strip external dependency and still show the problem. The\n  less time we spend on reproducing problems the more time we have to\n  fix it\n\n- verifiable – test the code you're about to provide to make sure it\n  reproduces the problem. Remove all other problems that are not\n  related to your request\u002Fquestion.\n","\u003C!--\nSPDX-FileCopyrightText: Copyright (c) 2020-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved.\nSPDX-License-Identifier: Apache-2.0\n-->\n\n[![License](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FLicense-Apache_2.0-lightgrey.svg)](https:\u002F\u002Fopensource.org\u002Flicenses\u002FApache-2.0)\n\n# Triton 模型分析器\n\n\nTriton 模型分析器是一款命令行工具，可在给定硬件上帮助您为在 [Triton 推理服务器](https:\u002F\u002Fgithub.com\u002Ftriton-inference-server\u002Fserver\u002F) 上运行的单模型、多模型、集成模型或 BLS 模型找到更优的配置。模型分析器还会生成报告，帮助您更好地理解不同配置之间的权衡及其计算和内存需求。\n\u003Cbr>\u003Cbr>\n\n# 功能\n\n### 搜索模式\n\n- [Optuna 搜索](docs\u002Fconfig_search.md#optuna-search-mode) **_-阿尔法版本-_** 允许您使用超参数优化框架搜索模型配置中可指定的每一个参数。如果您想了解该算法的具体工作原理，请参阅 [Optuna](https:\u002F\u002Foptuna.org\u002F) 官网。\n\n- [快速搜索](docs\u002Fconfig_search.md#quick-search-mode) 将以**稀疏**方式搜索 [最大批大小](https:\u002F\u002Fgithub.com\u002Ftriton-inference-server\u002Fserver\u002Fblob\u002Fr26.03\u002Fdocs\u002Fuser_guide\u002Fmodel_configuration.md#maximum-batch-size)、\n  [动态批处理](https:\u002F\u002Fgithub.com\u002Ftriton-inference-server\u002Fserver\u002Fblob\u002Fr26.03\u002Fdocs\u002Fuser_guide\u002Fbatcher.md#dynamic-batcher) 和\n  [实例组](https:\u002F\u002Fgithub.com\u002Ftriton-inference-server\u002Fserver\u002Fblob\u002Fr26.03\u002Fdocs\u002Fuser_guide\u002Fmodel_configuration.md#instance-groups) 参数空间，利用启发式爬山算法帮助您快速找到更优配置。\n\n- [自动暴力搜索](docs\u002Fconfig_search.md#automatic-brute-search) 将**穷举式**搜索您的模型配置中的\n  [最大批大小](https:\u002F\u002Fgithub.com\u002Ftriton-inference-server\u002Fserver\u002Fblob\u002Fr26.03\u002Fdocs\u002Fuser_guide\u002Fmodel_configuration.md#maximum-batch-size)、\n  [动态批处理](https:\u002F\u002Fgithub.com\u002Ftriton-inference-server\u002Fserver\u002Fblob\u002Fr26.03\u002Fdocs\u002Fuser_guide\u002Fbatcher.md#dynamic-batcher) 和\n  [实例组](https:\u002F\u002Fgithub.com\u002Ftriton-inference-server\u002Fserver\u002Fblob\u002Fr26.03\u002Fdocs\u002Fuser_guide\u002Fmodel_configuration.md#instance-groups) 参数。\n\n- [手动暴力搜索](docs\u002Fconfig_search.md#manual-brute-search) 允许您为模型配置中可指定的每个参数创建手动扫描范围。\n\n### 模型类型\n\n- [集成模型](docs\u002Fmodel_types.md#ensemble): 模型分析器可以帮助您在剖析集成模型时找到最优设置。\n\n- [BLS 模型](docs\u002Fmodel_types.md#bls): 模型分析器可以帮助您在剖析 BLS 模型时找到最优设置。\n\n- [多模型](docs\u002Fmodel_types.md#multi-model): 模型分析器可以帮助您在剖析多个并发模型时找到最优设置。\n\n- [LLM 模型](docs\u002Fmodel_types.md#llm): 模型分析器可以帮助您在剖析大型语言模型时找到最优设置。\n\n### 其他功能\n\n- [详细与摘要报告](docs\u002Freport.md): 模型分析器能够生成汇总和详细报告，帮助您更好地理解可用于您模型的不同配置之间的权衡。\n\n- [QoS 约束](docs\u002Fconfig.md#constraint): 约束可以根据您的 QoS 要求筛选模型分析器的结果。例如，您可以指定延迟预算，以过滤掉不符合指定延迟阈值的模型配置。\n\u003Cbr>\u003Cbr>\n\n# 示例与教程\n\n### **单模型**\n\n请参阅 [单模型快速入门](docs\u002Fquick_start.md)，了解如何使用模型分析器对一个简单的 PyTorch 模型进行性能剖析、分析并生成报告。\n\n### **多模型**\n\n请参阅 [多模型快速入门](docs\u002Fmm_quick_start.md)，了解如何使用模型分析器对在同一 GPU 上并发运行的两个模型进行性能剖析、分析并生成报告。\n\n### **集成模型**\n\n请参阅 [集成模型快速入门](docs\u002Fensemble_quick_start.md)，了解如何使用模型分析器对一个简单的集成模型进行性能剖析、分析并生成报告。\n\n### **BLS 模型**\n\n请参阅 [BLS 模型快速入门](docs\u002Fbls_quick_start.md)，了解如何使用模型分析器对一个简单的 BLS 模型进行性能剖析、分析并生成报告。\n\u003Cbr>\u003Cbr>\n\n# 文档\n\n- [安装](docs\u002Finstall.md)\n- [模型分析器 CLI](docs\u002Fcli.md)\n- [启动模式](docs\u002Flaunch_modes.md)\n- [配置模型分析器](docs\u002Fconfig.md)\n- [模型分析器指标](docs\u002Fmetrics.md)\n- [模型配置搜索](docs\u002Fconfig_search.md)\n- [模型类型](docs\u002Fmodel_types.md)\n- [检查点](docs\u002Fcheckpoints.md)\n- [模型分析器报告](docs\u002Freport.md)\n- [Kubernetes 部署](docs\u002Fkubernetes_deploy.md)\n\u003Cbr>\u003Cbr>\n\n# 术语\n\n以下是模型分析器中一些常用术语的定义：\n\n- **模型类型** - 正在剖析的模型类别。例如，单模型、多模型、集成模型、BLS 等。\n- **搜索模式** - 模型分析器在剖析时探索可能配置空间的方式。可以是穷举式（暴力）或启发式（快速\u002FOptuna）。\n- **模型配置搜索** - 模型类型与搜索模式的交叉组合。\n- **启动模式** - Triton 服务器由模型分析器部署和使用的模式。\n\n# 报告问题、提问\n\n我们非常感谢您对该项目的任何反馈、问题或错误报告。当需要代码方面的帮助时，请遵循 Stack Overflow (https:\u002F\u002Fstackoverflow.com\u002Fhelp\u002Fmcve) 文档中概述的流程。请确保发布的示例：\n\n- 最小化——尽可能使用最少的代码，但仍能重现相同的问题。\n- 完整性——提供所有用于重现问题的部分。检查是否可以在去除外部依赖的情况下仍然展示问题。我们花在复现问题上的时间越少，就越有更多时间来修复它。\n- 可验证性——在提供代码之前先测试一下，确保它确实能重现问题。移除所有与您的请求或问题无关的其他问题。","# Triton Model Analyzer 快速上手指南\n\n## 环境准备\n- **系统要求**：Linux (Ubuntu 20.04+)\n- **前置依赖**：\n  - NVIDIA GPU 和 CUDA Toolkit 11.8+\n  - Python 3.8+\n  - `pip` 工具\n\n## 安装步骤\n```bash\npip install triton-model-analyzer -i https:\u002F\u002Fpypi.tuna.tsinghua.edu.cn\u002Fsimple\n```\n\n## 基本使用\n分析单个模型仓库的最简示例：\n```bash\nmodel_analyzer --model-repository \u002Fpath\u002Fto\u002Fmodel_repository\n```\n将 `\u002Fpath\u002Fto\u002Fmodel_repository` 替换为实际模型仓库路径（需提前配置好 Triton Inference Server）。","电商推荐系统团队在部署实时用户行为分析模型时，频繁遭遇推理延迟高、GPU资源利用率低的问题，需快速优化Triton Inference Server配置以提升用户体验。\n\n### 没有 model_analyzer 时\n- 手动调整最大批大小、动态批处理和实例组参数，需反复测试数天，效率低下且易出错。\n- 无法量化比较不同配置的吞吐量与内存占用，优化决策依赖经验，导致高峰期延迟常超100ms。\n- GPU内存浪费严重，利用率仅60%，资源闲置导致成本增加。\n- 无法快速验证配置是否满足延迟预算（如80ms内），影响上线节奏。\n\n### 使用 model_analyzer 后\n- 通过Quick Search模式，1小时内自动找到最优配置，将优化周期从3天压缩至1小时。\n- 生成详细报告直观展示吞吐量、延迟和内存需求的权衡，精准定位性能瓶颈。\n- 设置QoS约束（延迟≤80ms），自动过滤不达标配置，确保上线即满足SLA。\n- 优化后吞吐量提升40%，内存使用减少25%，GPU利用率达90%，用户推荐响应速度提升50%。\n\nmodel_analyzer将模型配置优化从经验试错转变为数据驱动的精准决策，显著提升系统性能与资源效率。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Ftriton-inference-server_model_analyzer_068e0fc5.png","triton-inference-server","Triton Inference Server","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002Ftriton-inference-server_a54d6a79.jpg","",null,"https:\u002F\u002Fdeveloper.nvidia.com\u002Fnvidia-triton-inference-server","https:\u002F\u002Fgithub.com\u002Ftriton-inference-server",[83,87,91,95],{"name":84,"color":85,"percentage":86},"Python","#3572A5",92.6,{"name":88,"color":89,"percentage":90},"Shell","#89e051",7.2,{"name":92,"color":93,"percentage":94},"Go Template","#00ADD8",0.1,{"name":96,"color":97,"percentage":94},"Dockerfile","#384d54",508,84,"2026-03-30T22:43:46","Apache-2.0","未说明",{"notes":102,"python":102,"dependencies":104},[],[13],[107,108,109,110],"deep-learning","inference","gpu","performance-analysis","2026-03-27T02:49:30.150509","2026-04-06T05:36:55.112954",[114,119,124,129,134,138],{"id":115,"question_zh":116,"answer_zh":117,"source_url":118},5167,"快速入门命令运行失败，如何解决？","请使用更新后的命令：`model-analyzer profile -m \u002Fquick_start_repository --profile-models add_sub` 和 `model-analyzer analyze --analysis-models add_sub -e analysis_results`。文档已更新，但旧指南可能仍存在，建议使用新命令避免错误。","https:\u002F\u002Fgithub.com\u002Ftriton-inference-server\u002Fmodel_analyzer\u002Fissues\u002F130",{"id":120,"question_zh":121,"answer_zh":122,"source_url":123},5168,"在 Kubernetes 中运行 model-analyzer 时出现 DCGM 初始化错误，如何解决？","设置环境变量 `__DCGM_DBG_FILE=dcgm.log` 和 `__DCMG_DBG_LVL=WARNING` 以生成调试日志（日志将输出到 `\u002Fvar\u002Flog\u002Fdcgm.log`）。或升级到新版本的 Model Analyzer（已重写为 Python，20.12 Triton SDK 发布）。","https:\u002F\u002Fgithub.com\u002Ftriton-inference-server\u002Fmodel_analyzer\u002Fissues\u002F16",{"id":125,"question_zh":126,"answer_zh":127,"source_url":128},5169,"分析集成模型时出现 Protobuf 错误 'inference.ModelConfig' should not have multiple 'scheduling_choice' oneof fields，如何解决？","检查模型配置文件（ModelConfig）中是否有多余的 `scheduling_choice` 字段。确保配置文件符合 Triton 规范，移除重复字段或使用最新版本的 Model Analyzer（已修复此问题）。","https:\u002F\u002Fgithub.com\u002Ftriton-inference-server\u002Fmodel_analyzer\u002Fissues\u002F162",{"id":130,"question_zh":131,"answer_zh":132,"source_url":133},5170,"使用 model-analyzer 分析 PyTorch 模型时出现 'ModuleNotFoundError: no module named 'torch''，如何解决？","在 Docker 镜像中安装 PyTorch 依赖。自定义 Dockerfile 添加 `RUN pip install torch`，或使用包含 PyTorch 的 Triton 镜像（如 `tritonserver:23.05-py3`）运行 model-analyzer。","https:\u002F\u002Fgithub.com\u002Ftriton-inference-server\u002Fmodel_analyzer\u002Fissues\u002F699",{"id":135,"question_zh":136,"answer_zh":137,"source_url":118},5171,"model-analyzer 分析时模型加载失败，提示 'explicit model load \u002F unload is not allowed if polling is enabled'，如何解决？","在 Triton 配置中禁用轮询模式。启动 Triton 时添加参数 `--model-control-mode=explicit` 或在配置文件中设置 `model_control_mode: EXPLICIT`，避免与 model-analyzer 冲突。",{"id":139,"question_zh":140,"answer_zh":141,"source_url":118},5172,"model-analyzer 分析结果缺失，提示 'KeyError: 'add_sub''，如何解决？","确保模型名称与配置一致。检查 `model_repository` 中的模型目录名是否为 `add_sub`（而非 `add_sub_i34`），并使用 `--profile-models add_sub` 指定正确模型名。",[143,148,152,156,160,164,168,173,177,181,185,190,194,199,204,208,212,217,222,226],{"id":144,"version":145,"summary_zh":146,"released_at":147},104672,"v1.52.0","- Switched to latest NGC container image.\r\n- README and quick-start guides (single model, multi-model, ensemble, BLS) updated for **26.03** \u002F **1.52.0**\r\n- `docs\u002Fconfig.md`, `docs\u002Fkubernetes_deploy.md`, Helm chart `values.yaml`, and `Dockerfile` references updated accordingly","2026-03-27T16:38:16",{"id":149,"version":150,"summary_zh":79,"released_at":151},104673,"v1.51.0","2026-03-02T18:05:51",{"id":153,"version":154,"summary_zh":79,"released_at":155},104674,"v1.50.0","2026-02-17T16:21:20",{"id":157,"version":158,"summary_zh":79,"released_at":159},104675,"v1.49.0","2026-01-28T18:07:31",{"id":161,"version":162,"summary_zh":79,"released_at":163},104676,"v1.48.0","2026-01-28T18:06:35",{"id":165,"version":166,"summary_zh":79,"released_at":167},104677,"v1.47.0","2024-12-23T19:18:36",{"id":169,"version":170,"summary_zh":171,"released_at":172},104678,"v1.46.0","---","2024-11-26T17:55:31",{"id":174,"version":175,"summary_zh":79,"released_at":176},104679,"v1.45.0","2024-10-29T15:38:32",{"id":178,"version":179,"summary_zh":79,"released_at":180},104680,"v1.44.0","2024-09-27T16:51:26",{"id":182,"version":183,"summary_zh":79,"released_at":184},104681,"v1.43.0","2024-08-27T18:03:43",{"id":186,"version":187,"summary_zh":188,"released_at":189},104682,"v1.42.0","## New Features and Improvements\r\n\r\n- Optuna search mode\r\n   - Allows you to search any parameter that can be specified in the model configuration, using a hyperparameter optimization framework","2024-07-24T19:33:36",{"id":191,"version":192,"summary_zh":79,"released_at":193},104683,"v1.41.0","2024-06-28T00:32:32",{"id":195,"version":196,"summary_zh":197,"released_at":198},104684,"v1.40.0","\r\n","2024-05-25T02:04:57",{"id":200,"version":201,"summary_zh":202,"released_at":203},104685,"v1.39.0","## New Features and Improvements\r\n\r\n- Model Analyzer now supports profiling Large Language Models (LLMs) using GenAI-Perf\r\n","2024-04-30T17:46:58",{"id":205,"version":206,"summary_zh":79,"released_at":207},104686,"v1.38.0","2024-03-27T17:01:19",{"id":209,"version":210,"summary_zh":79,"released_at":211},104687,"v1.37.0","2024-03-01T01:12:52",{"id":213,"version":214,"summary_zh":215,"released_at":216},104688,"v1.36.0","## New Features and Improvements\r\n* Model Analyzer now correctly loads and optimizes ensemble models\r\n* Model Analyzer now correctly works with SSL via gRPC\r\n* Model Analyzer now handles the case of optimizing a model on a remote Triton server without requiring a local GPU","2024-01-30T01:17:31",{"id":218,"version":219,"summary_zh":220,"released_at":221},104689,"v1.35.0","##  Known issues\r\n* Model Analyzer is not able to analyze and optimize ensemble model configs due to a bug in the way composing models are loaded.\r\n* Model Analyzer does not work with SSL via gRPC\r\n","2023-12-20T01:03:04",{"id":223,"version":224,"summary_zh":79,"released_at":225},104690,"v1.34.0","2023-12-01T23:31:59",{"id":227,"version":228,"summary_zh":79,"released_at":229},104691,"v1.33.0","2023-10-27T01:24:00"]