[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-Trusted-AI--adversarial-robustness-toolbox":3,"tool-Trusted-AI--adversarial-robustness-toolbox":61},[4,18,26,36,44,53],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":17},4358,"openclaw","openclaw\u002Fopenclaw","OpenClaw 是一款专为个人打造的本地化 AI 助手，旨在让你在自己的设备上拥有完全可控的智能伙伴。它打破了传统 AI 助手局限于特定网页或应用的束缚，能够直接接入你日常使用的各类通讯渠道，包括微信、WhatsApp、Telegram、Discord、iMessage 等数十种平台。无论你在哪个聊天软件中发送消息，OpenClaw 都能即时响应，甚至支持在 macOS、iOS 和 Android 设备上进行语音交互，并提供实时的画布渲染功能供你操控。\n\n这款工具主要解决了用户对数据隐私、响应速度以及“始终在线”体验的需求。通过将 AI 部署在本地，用户无需依赖云端服务即可享受快速、私密的智能辅助，真正实现了“你的数据，你做主”。其独特的技术亮点在于强大的网关架构，将控制平面与核心助手分离，确保跨平台通信的流畅性与扩展性。\n\nOpenClaw 非常适合希望构建个性化工作流的技术爱好者、开发者，以及注重隐私保护且不愿被单一生态绑定的普通用户。只要具备基础的终端操作能力（支持 macOS、Linux 及 Windows WSL2），即可通过简单的命令行引导完成部署。如果你渴望拥有一个懂你",349277,3,"2026-04-06T06:32:30",[13,14,15,16],"Agent","开发框架","图像","数据工具","ready",{"id":19,"name":20,"github_repo":21,"description_zh":22,"stars":23,"difficulty_score":10,"last_commit_at":24,"category_tags":25,"status":17},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,"2026-04-05T11:01:52",[14,15,13],{"id":27,"name":28,"github_repo":29,"description_zh":30,"stars":31,"difficulty_score":32,"last_commit_at":33,"category_tags":34,"status":17},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 
代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",150037,2,"2026-04-10T23:33:47",[14,13,35],"语言模型",{"id":37,"name":38,"github_repo":39,"description_zh":40,"stars":41,"difficulty_score":32,"last_commit_at":42,"category_tags":43,"status":17},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",108322,"2026-04-10T11:39:34",[14,15,13],{"id":45,"name":46,"github_repo":47,"description_zh":48,"stars":49,"difficulty_score":32,"last_commit_at":50,"category_tags":51,"status":17},6121,"gemini-cli","google-gemini\u002Fgemini-cli","gemini-cli 是一款由谷歌推出的开源 AI 命令行工具，它将强大的 Gemini 大模型能力直接集成到用户的终端环境中。对于习惯在命令行工作的开发者而言，它提供了一条从输入提示词到获取模型响应的最短路径，无需切换窗口即可享受智能辅助。\n\n这款工具主要解决了开发过程中频繁上下文切换的痛点，让用户能在熟悉的终端界面内直接完成代码理解、生成、调试以及自动化运维任务。无论是查询大型代码库、根据草图生成应用，还是执行复杂的 Git 操作，gemini-cli 都能通过自然语言指令高效处理。\n\n它特别适合广大软件工程师、DevOps 人员及技术研究人员使用。其核心亮点包括支持高达 100 万 token 的超长上下文窗口，具备出色的逻辑推理能力；内置 Google 搜索、文件操作及 Shell 命令执行等实用工具；更独特的是，它支持 MCP（模型上下文协议），允许用户灵活扩展自定义集成，连接如图像生成等外部能力。此外，个人谷歌账号即可享受免费的额度支持，且项目基于 Apache 2.0 
协议完全开源，是提升终端工作效率的理想助手。",100752,"2026-04-10T01:20:03",[52,13,15,14],"插件",{"id":54,"name":55,"github_repo":56,"description_zh":57,"stars":58,"difficulty_score":32,"last_commit_at":59,"category_tags":60,"status":17},4721,"markitdown","microsoft\u002Fmarkitdown","MarkItDown 是一款由微软 AutoGen 团队打造的轻量级 Python 工具，专为将各类文件高效转换为 Markdown 格式而设计。它支持 PDF、Word、Excel、PPT、图片（含 OCR）、音频（含语音转录）、HTML 乃至 YouTube 链接等多种格式的解析，能够精准提取文档中的标题、列表、表格和链接等关键结构信息。\n\n在人工智能应用日益普及的今天，大语言模型（LLM）虽擅长处理文本，却难以直接读取复杂的二进制办公文档。MarkItDown 恰好解决了这一痛点，它将非结构化或半结构化的文件转化为模型“原生理解”且 Token 效率极高的 Markdown 格式，成为连接本地文件与 AI 分析 pipeline 的理想桥梁。此外，它还提供了 MCP（模型上下文协议）服务器，可无缝集成到 Claude Desktop 等 LLM 应用中。\n\n这款工具特别适合开发者、数据科学家及 AI 研究人员使用，尤其是那些需要构建文档检索增强生成（RAG）系统、进行批量文本分析或希望让 AI 助手直接“阅读”本地文件的用户。虽然生成的内容也具备一定可读性，但其核心优势在于为机器",93400,"2026-04-06T19:52:38",[52,14],{"id":62,"github_repo":63,"name":64,"description_en":65,"description_zh":66,"ai_summary_zh":67,"readme_en":68,"readme_zh":69,"quickstart_zh":70,"use_case_zh":71,"hero_image_url":72,"owner_login":73,"owner_name":73,"owner_avatar_url":74,"owner_bio":75,"owner_company":76,"owner_location":76,"owner_email":77,"owner_twitter":78,"owner_website":76,"owner_url":79,"languages":80,"stars":96,"forks":97,"last_commit_at":98,"license":99,"difficulty_score":32,"env_os":100,"env_gpu":100,"env_ram":100,"env_deps":101,"category_tags":113,"github_topics":114,"view_count":32,"oss_zip_url":76,"oss_zip_packed_at":76,"status":17,"created_at":132,"updated_at":133,"faqs":134,"releases":163},6514,"Trusted-AI\u002Fadversarial-robustness-toolbox","adversarial-robustness-toolbox","Adversarial Robustness Toolbox (ART) - Python Library for Machine Learning Security - Evasion, Poisoning, Extraction, Inference - Red and Blue Teams","Adversarial Robustness Toolbox（简称 ART）是一款专为机器学习安全设计的 Python 开源库，由 Linux Foundation AI & Data 基金会托管。它的核心使命是帮助开发者和研究人员抵御针对人工智能模型的各类对抗性攻击，确保模型在复杂环境下的可靠性与安全性。\n\n在现实应用中，AI 模型常面临四大威胁： evasion（通过微小扰动欺骗模型）、poisoning（污染训练数据）、extraction（窃取模型参数）以及 inference（推断敏感训练数据）。ART 
提供了一套完整的工具集，让用户能够轻松模拟这些攻击场景以评估模型弱点（红队视角），并提供相应的防御策略来加固模型（蓝队视角）。\n\n这款工具特别适合从事 AI 安全研究的研究人员、需要部署高可靠模型的工程师，以及关注数据隐私的企业技术团队。其独特亮点在于极强的兼容性：不仅支持 TensorFlow、PyTorch、scikit-learn 等主流深度学习框架，还涵盖图像、音频、表格等多种数据类型，适用于分类、目标检测、语音识别等各类任务。无论您是想测试模型的鲁棒性，还是构建更安","Adversarial Robustness Toolbox（简称 ART）是一款专为机器学习安全设计的 Python 开源库，由 Linux Foundation AI & Data 基金会托管。它的核心使命是帮助开发者和研究人员抵御针对人工智能模型的各类对抗性攻击，确保模型在复杂环境下的可靠性与安全性。\n\n在现实应用中，AI 模型常面临四大威胁： evasion（通过微小扰动欺骗模型）、poisoning（污染训练数据）、extraction（窃取模型参数）以及 inference（推断敏感训练数据）。ART 提供了一套完整的工具集，让用户能够轻松模拟这些攻击场景以评估模型弱点（红队视角），并提供相应的防御策略来加固模型（蓝队视角）。\n\n这款工具特别适合从事 AI 安全研究的研究人员、需要部署高可靠模型的工程师，以及关注数据隐私的企业技术团队。其独特亮点在于极强的兼容性：不仅支持 TensorFlow、PyTorch、scikit-learn 等主流深度学习框架，还涵盖图像、音频、表格等多种数据类型，适用于分类、目标检测、语音识别等各类任务。无论您是想测试模型的鲁棒性，还是构建更安全的 AI 应用，ART 都能提供专业且易用的技术支持，让机器学习变得更加可信。","# Adversarial Robustness Toolbox (ART) v1.20\n\u003Cp align=\"center\">\n  \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FTrusted-AI_adversarial-robustness-toolbox_readme_590b613806bb.png\" width=\"467\" title=\"ART logo\">\n\u003C\u002Fp>\n\u003Cbr \u002F>\n\n![CodeQL](https:\u002F\u002Fgithub.com\u002FTrusted-AI\u002Fadversarial-robustness-toolbox\u002Fworkflows\u002FCodeQL\u002Fbadge.svg)\n[![Documentation Status](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FTrusted-AI_adversarial-robustness-toolbox_readme_13d664e1afd7.png)](http:\u002F\u002Fadversarial-robustness-toolbox.readthedocs.io\u002Fen\u002Flatest\u002F?badge=latest)\n[![PyPI](https:\u002F\u002Fbadge.fury.io\u002Fpy\u002Fadversarial-robustness-toolbox.svg)](https:\u002F\u002Fbadge.fury.io\u002Fpy\u002Fadversarial-robustness-toolbox)\n[![codecov](https:\u002F\u002Fcodecov.io\u002Fgh\u002FTrusted-AI\u002Fadversarial-robustness-toolbox\u002Fbranch\u002Fmain\u002Fgraph\u002Fbadge.svg)](https:\u002F\u002Fcodecov.io\u002Fgh\u002FTrusted-AI\u002Fadversarial-robustness-toolbox)\n[![Code style: 
black](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Fcode%20style-black-000000.svg)](https:\u002F\u002Fgithub.com\u002Fpsf\u002Fblack)\n[![License: MIT](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FLicense-MIT-yellow.svg)](https:\u002F\u002Fopensource.org\u002Flicenses\u002FMIT)\n[![PyPI - Python Version](https:\u002F\u002Fimg.shields.io\u002Fpypi\u002Fpyversions\u002Fadversarial-robustness-toolbox)](https:\u002F\u002Fpypi.org\u002Fproject\u002Fadversarial-robustness-toolbox\u002F)\n[![slack-img](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Fchat-on%20slack-yellow.svg)](https:\u002F\u002Fibm-art.slack.com\u002F)\n[![Downloads](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FTrusted-AI_adversarial-robustness-toolbox_readme_34c50e933400.png)](https:\u002F\u002Fpepy.tech\u002Fproject\u002Fadversarial-robustness-toolbox)\n[![Downloads](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FTrusted-AI_adversarial-robustness-toolbox_readme_34c50e933400.png\u002Fmonth)](https:\u002F\u002Fpepy.tech\u002Fproject\u002Fadversarial-robustness-toolbox)\n[![CII Best Practices](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FTrusted-AI_adversarial-robustness-toolbox_readme_43b29938ccd7.png)](https:\u002F\u002Fbestpractices.coreinfrastructure.org\u002Fprojects\u002F5090)\n\n[中文README请按此处](README-cn.md)\n\n \u003Cdiv align=\"center\">\n  \u003Cpicture>\n    \u003Csource media=\"(prefers-color-scheme: dark)\" srcset=\"docs\u002Fimages\u002Flfaidata-project-badge-graduate-color_dark.png\" width=\"400\" title=\"LF AI & Data\">\n    \u003Csource media=\"(prefers-color-scheme: light)\" srcset=\"docs\u002Fimages\u002Flfaidata-project-badge-graduate-color.png\" width=\"400\" title=\"LF AI & Data\">\n    \u003Cimg alt=\"Fallback image description\" src=\"default-image.png\" width=\"400\">\n  \u003C\u002Fpicture>\n\u003C\u002Fdiv>\n\u003Cbr \u002F>\n\nAdversarial Robustness Toolbox (ART) is a Python library for Machine Learning Security. 
ART is hosted by the \n[Linux Foundation AI & Data Foundation](https:\u002F\u002Flfaidata.foundation) (LF AI & Data). ART provides tools that enable\ndevelopers and researchers to defend and evaluate Machine Learning models and applications against the\nadversarial threats of Evasion, Poisoning, Extraction, and Inference. ART supports all popular machine learning frameworks\n(TensorFlow, Keras, PyTorch, scikit-learn, XGBoost, LightGBM, CatBoost, GPy, etc.), all data types\n(images, tables, audio, video, etc.) and machine learning tasks (classification, object detection, speech recognition,\ngeneration, certification, etc.).\n\n## Adversarial Threats\n\n \u003Cdiv align=\"center\">\n  \u003Cpicture>\n    \u003Csource media=\"(prefers-color-scheme: dark)\" srcset=\"docs\u002Fimages\u002Fadversarial_threats_attacker_dark.png\" width=\"400 title=\"ART Threats\">\n    \u003Csource media=\"(prefers-color-scheme: light)\" srcset=\"docs\u002Fimages\u002Fadversarial_threats_attacker.png\" width=\"400 title=\"ART Threats\">\n    \u003Cimg alt=\"Fallback image description\" src=\"default-image.png\" width=\"400\">\n  \u003C\u002Fpicture>\n\u003C\u002Fdiv>\n\n\u003Cp align=\"center\">\n  \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FTrusted-AI_adversarial-robustness-toolbox_readme_68acf6924457.png\" width=\"400\" title=\"ART Matrix\">\n\u003C\u002Fp>\n\u003Cbr \u002F>\n\n## ART for Red and Blue Teams (selection)\n\n \u003Cdiv align=\"center\">\n  \u003Cpicture>\n    \u003Csource media=\"(prefers-color-scheme: dark)\" srcset=\"docs\u002Fimages\u002Fwhite_hat_blue_red_dark.png\" width=\"800 title=\"ART Red and Blue Teams\">\n    \u003Csource media=\"(prefers-color-scheme: light)\" srcset=\"docs\u002Fimages\u002Fwhite_hat_blue_red.png\" width=\"800 title=\"ART Red and Blue Teams\">\n    \u003Cimg alt=\"Fallback image description\" src=\"default-image.png\" width=\"800\">\n  \u003C\u002Fpicture>\n\u003C\u002Fdiv>\n\u003Cbr \u002F>\n\n## Learn more\n\n| 
**[Get Started][get-started]**     | **[Documentation][documentation]**     | **[Contributing][contributing]**           |\n|-------------------------------------|-------------------------------|-----------------------------------|\n| - [Installation][installation]\u003Cbr>- [Examples](examples\u002FREADME.md)\u003Cbr>- [Notebooks](notebooks\u002FREADME.md) | - [Attacks][attacks]\u003Cbr>- [Defences][defences]\u003Cbr>- [Estimators][estimators]\u003Cbr>- [Metrics][metrics]\u003Cbr>- [Technical Documentation](https:\u002F\u002Fadversarial-robustness-toolbox.readthedocs.io) | - [Slack](https:\u002F\u002Fibm-art.slack.com), [Invitation](https:\u002F\u002Fjoin.slack.com\u002Ft\u002Fibm-art\u002Fshared_invite\u002FenQtMzkyOTkyODE4NzM4LTA4NGQ1OTMxMzFmY2Q1MzE1NWI2MmEzN2FjNGNjOGVlODVkZDE0MjA1NTA4OGVkMjVkNmQ4MTY1NmMyOGM5YTg)\u003Cbr>- [Contributing](CONTRIBUTING.md)\u003Cbr>- [Roadmap][roadmap]\u003Cbr>- [Citing][citing] |\n\n[get-started]: https:\u002F\u002Fgithub.com\u002FTrusted-AI\u002Fadversarial-robustness-toolbox\u002Fwiki\u002FGet-Started\n[attacks]: https:\u002F\u002Fgithub.com\u002FTrusted-AI\u002Fadversarial-robustness-toolbox\u002Fwiki\u002FART-Attacks\n[defences]: https:\u002F\u002Fgithub.com\u002FTrusted-AI\u002Fadversarial-robustness-toolbox\u002Fwiki\u002FART-Defences\n[estimators]: https:\u002F\u002Fgithub.com\u002FTrusted-AI\u002Fadversarial-robustness-toolbox\u002Fwiki\u002FART-Estimators\n[metrics]: https:\u002F\u002Fgithub.com\u002FTrusted-AI\u002Fadversarial-robustness-toolbox\u002Fwiki\u002FART-Metrics\n[contributing]: https:\u002F\u002Fgithub.com\u002FTrusted-AI\u002Fadversarial-robustness-toolbox\u002Fwiki\u002FContributing\n[documentation]: https:\u002F\u002Fgithub.com\u002FTrusted-AI\u002Fadversarial-robustness-toolbox\u002Fwiki\u002FDocumentation\n[installation]: https:\u002F\u002Fgithub.com\u002FTrusted-AI\u002Fadversarial-robustness-toolbox\u002Fwiki\u002FGet-Started#setup\n[roadmap]: 
https:\u002F\u002Fgithub.com\u002FTrusted-AI\u002Fadversarial-robustness-toolbox\u002Fwiki\u002FRoadmap\n[citing]: https:\u002F\u002Fgithub.com\u002FTrusted-AI\u002Fadversarial-robustness-toolbox\u002Fwiki\u002FContributing#citing-art\n\nThe library is under continuous development. Feedback, bug reports and contributions are very welcome!\n\n# Acknowledgment\nThis material is partially based upon work supported by the Defense Advanced Research Projects Agency (DARPA) under\nContract No. HR001120C0013. Any opinions, findings and conclusions or recommendations expressed in this material are\nthose of the author(s) and do not necessarily reflect the views of the Defense Advanced Research Projects Agency (DARPA).\n","# 对抗鲁棒性工具箱 (ART) v1.20\n\u003Cp align=\"center\">\n  \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FTrusted-AI_adversarial-robustness-toolbox_readme_590b613806bb.png\" width=\"467\" title=\"ART logo\">\n\u003C\u002Fp>\n\u003Cbr \u002F>\n\n![CodeQL](https:\u002F\u002Fgithub.com\u002FTrusted-AI\u002Fadversarial-robustness-toolbox\u002Fworkflows\u002FCodeQL\u002Fbadge.svg)\n[![文档状态](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FTrusted-AI_adversarial-robustness-toolbox_readme_13d664e1afd7.png)](http:\u002F\u002Fadversarial-robustness-toolbox.readthedocs.io\u002Fen\u002Flatest\u002F?badge=latest)\n[![PyPI](https:\u002F\u002Fbadge.fury.io\u002Fpy\u002Fadversarial-robustness-toolbox.svg)](https:\u002F\u002Fbadge.fury.io\u002Fpy\u002Fadversarial-robustness-toolbox)\n[![codecov](https:\u002F\u002Fcodecov.io\u002Fgh\u002FTrusted-AI\u002Fadversarial-robustness-toolbox\u002Fbranch\u002Fmain\u002Fgraph\u002Fbadge.svg)](https:\u002F\u002Fcodecov.io\u002Fgh\u002FTrusted-AI\u002Fadversarial-robustness-toolbox)\n[![代码风格：black](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Fcode%20style-black-000000.svg)](https:\u002F\u002Fgithub.com\u002Fpsf\u002Fblack)\n[![许可证：MIT](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FLicense-MIT-yellow.svg)](h
ttps:\u002F\u002Fopensource.org\u002Flicenses\u002FMIT)\n[![PyPI - Python 版本](https:\u002F\u002Fimg.shields.io\u002Fpypi\u002Fpyversions\u002Fadversarial-robustness-toolbox)](https:\u002F\u002Fpypi.org\u002Fproject\u002Fadversarial-robustness-toolbox\u002F)\n[![Slack 图标](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Fchat-on%20slack-yellow.svg)](https:\u002F\u002Fibm-art.slack.com\u002F)\n[![下载量](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FTrusted-AI_adversarial-robustness-toolbox_readme_34c50e933400.png)](https:\u002F\u002Fpepy.tech\u002Fproject\u002Fadversarial-robustness-toolbox)\n[![月度下载量](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FTrusted-AI_adversarial-robustness-toolbox_readme_34c50e933400.png\u002Fmonth)](https:\u002F\u002Fpepy.tech\u002Fproject\u002Fadversarial-robustness-toolbox)\n[![CII 最佳实践](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FTrusted-AI_adversarial-robustness-toolbox_readme_43b29938ccd7.png)](https:\u002F\u002Fbestpractices.coreinfrastructure.org\u002Fprojects\u002F5090)\n\n[中文README请按此处](README-cn.md)\n\n \u003Cdiv align=\"center\">\n  \u003Cpicture>\n    \u003Csource media=\"(prefers-color-scheme: dark)\" srcset=\"docs\u002Fimages\u002Flfaidata-project-badge-graduate-color_dark.png\" width=\"400\" title=\"LF AI & Data\">\n    \u003Csource media=\"(prefers-color-scheme: light)\" srcset=\"docs\u002Fimages\u002Flfaidata-project-badge-graduate-color.png\" width=\"400\" title=\"LF AI & Data\">\n    \u003Cimg alt=\"Fallback image description\" src=\"default-image.png\" width=\"400\">\n  \u003C\u002Fpicture>\n\u003C\u002Fdiv>\n\u003Cbr \u002F>\n\n对抗鲁棒性工具箱 (ART) 是一个用于机器学习安全的 Python 库。ART 由 \n[Linux 基金会 AI & 数据基金会](https:\u002F\u002Flfaidata.foundation)（LF AI & Data）托管。ART 提供了一系列工具，使开发者和研究人员能够防御并评估机器学习模型和应用免受逃避攻击、投毒攻击、提取攻击和推理攻击等对抗性威胁。ART 支持所有主流的机器学习框架（TensorFlow、Keras、PyTorch、scikit-learn、XGBoost、LightGBM、CatBoost、GPy 等），所有数据类型（图像、表格、音频、视频等）以及各种机器学习任务（分类、目标检测、语音识别、生成、认证等）。\n\n## 对抗性威胁\n\n \u003Cdiv align=\"center\">\n  
\u003Cpicture>\n    \u003Csource media=\"(prefers-color-scheme: dark)\" srcset=\"docs\u002Fimages\u002Fadversarial_threats_attacker_dark.png\" width=\"400 title=\"ART Threats\">\n    \u003Csource media=\"(prefers-color-scheme: light)\" srcset=\"docs\u002Fimages\u002Fadversarial_threats_attacker.png\" width=\"400 title=\"ART Threats\">\n    \u003Cimg alt=\"Fallback image description\" src=\"default-image.png\" width=\"400\">\n  \u003C\u002Fpicture>\n\u003C\u002Fdiv>\n\n\u003Cp align=\"center\">\n  \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FTrusted-AI_adversarial-robustness-toolbox_readme_68acf6924457.png\" width=\"400\" title=\"ART Matrix\">\n\u003C\u002Fp>\n\u003Cbr \u002F>\n\n## ART 适用于红队与蓝队（精选）\n\n \u003Cdiv align=\"center\">\n  \u003Cpicture>\n    \u003Csource media=\"(prefers-color-scheme: dark)\" srcset=\"docs\u002Fimages\u002Fwhite_hat_blue_red_dark.png\" width=\"800 title=\"ART Red and Blue Teams\">\n    \u003Csource media=\"(prefers-color-scheme: light)\" srcset=\"docs\u002Fimages\u002Fwhite_hat_blue_red.png\" width=\"800 title=\"ART Red and Blue Teams\">\n    \u003Cimg alt=\"Fallback image description\" src=\"default-image.png\" width=\"800\">\n  \u003C\u002Fpicture>\n\u003C\u002Fdiv>\n\u003Cbr \u002F>\n\n## 了解更多\n\n| **[开始使用][get-started]**     | **[文档][documentation]**     | **[贡献][contributing]**           |\n|-------------------------------------|-------------------------------|-----------------------------------|\n| - [安装][installation]\u003Cbr>- [示例](examples\u002FREADME.md)\u003Cbr>- [笔记本](notebooks\u002FREADME.md) | - [攻击][attacks]\u003Cbr>- [防御][defences]\u003Cbr>- [估计器][estimators]\u003Cbr>- [指标][metrics]\u003Cbr>- [技术文档](https:\u002F\u002Fadversarial-robustness-toolbox.readthedocs.io) | - [Slack](https:\u002F\u002Fibm-art.slack.com), 
[邀请](https:\u002F\u002Fjoin.slack.com\u002Ft\u002Fibm-art\u002Fshared_invite\u002FenQtMzkyOTkyODE4NzM4LTA4NGQ1OTMxMzFmY2Q1MzE1NWI2MmEzN2FjNGNjOGVlODVkZDE0MjA1NTA4OGVkMjVkNmQ4MTY1NmMyOGM5YTg)\u003Cbr>- [贡献](CONTRIBUTING.md)\u003Cbr>- [路线图][roadmap]\u003Cbr>- [引用][citing] |\n\n[get-started]: https:\u002F\u002Fgithub.com\u002FTrusted-AI\u002Fadversarial-robustness-toolbox\u002Fwiki\u002FGet-Started\n[attacks]: https:\u002F\u002Fgithub.com\u002FTrusted-AI\u002Fadversarial-robustness-toolbox\u002Fwiki\u002FART-Attacks\n[defences]: https:\u002F\u002Fgithub.com\u002FTrusted-AI\u002Fadversarial-robustness-toolbox\u002Fwiki\u002FART-Defences\n[estimators]: https:\u002F\u002Fgithub.com\u002FTrusted-AI\u002Fadversarial-robustness-toolbox\u002Fwiki\u002FART-Estimators\n[metrics]: https:\u002F\u002Fgithub.com\u002FTrusted-AI\u002Fadversarial-robustness-toolbox\u002Fwiki\u002FART-Metrics\n[contributing]: https:\u002F\u002Fgithub.com\u002FTrusted-AI\u002Fadversarial-robustness-toolbox\u002Fwiki\u002FContributing\n[documentation]: https:\u002F\u002Fgithub.com\u002FTrusted-AI\u002Fadversarial-robustness-toolbox\u002Fwiki\u002FDocumentation\n[installation]: https:\u002F\u002Fgithub.com\u002FTrusted-AI\u002Fadversarial-robustness-toolbox\u002Fwiki\u002FGet-Started#setup\n[roadmap]: https:\u002F\u002Fgithub.com\u002FTrusted-AI\u002Fadversarial-robustness-toolbox\u002Fwiki\u002FRoadmap\n[citing]: https:\u002F\u002Fgithub.com\u002FTrusted-AI\u002Fadversarial-robustness-toolbox\u002Fwiki\u002FContributing#citing-art\n\n该库仍在持续开发中。欢迎提供反馈、报告 bug 和参与贡献！\n\n# 致谢\n本材料部分内容基于美国国防高级研究计划局（DARPA）合同 No. 
HR001120C0013 的支持。本材料中表达的所有观点、发现、结论或建议均属作者个人观点，不一定反映美国国防高级研究计划局（DARPA）的观点。","# Adversarial Robustness Toolbox (ART) 快速上手指南\n\nAdversarial Robustness Toolbox (ART) 是一个用于机器学习安全的 Python 库，旨在帮助开发者和研究人员防御及评估模型免受对抗性威胁（如逃避攻击、投毒攻击、模型提取和推理攻击）。它支持 TensorFlow、PyTorch、Keras、scikit-learn 等主流框架，涵盖图像、表格、音频等多种数据类型。\n\n## 环境准备\n\n在开始之前，请确保您的开发环境满足以下要求：\n\n*   **操作系统**：Linux, macOS, 或 Windows\n*   **Python 版本**：3.8 - 3.11 (推荐最新稳定版)\n*   **前置依赖**：\n    *   已安装至少一种主流的机器学习框架（如 `tensorflow`, `pytorch`, `scikit-learn` 等），ART 将自动检测并使用已安装的框架。\n    *   建议安装 `matplotlib` 用于可视化结果（可选）。\n\n## 安装步骤\n\n您可以使用 `pip` 直接安装 ART。为了获得更快的下载速度，推荐使用国内镜像源（如清华大学开源软件镜像站）。\n\n### 方式一：使用 PyPI 官方源\n```bash\npip install adversarial-robustness-toolbox\n```\n\n### 方式二：使用国内镜像源（推荐）\n```bash\npip install adversarial-robustness-toolbox -i https:\u002F\u002Fpypi.tuna.tsinghua.edu.cn\u002Fsimple\n```\n\n### 验证安装\n安装完成后，可在 Python 中运行以下代码验证是否成功：\n```python\nimport art\nprint(art.__version__)\n```\n\n## 基本使用\n\n以下是一个最简单的示例，展示如何使用 ART 对基于 scikit-learn 的分类器生成对抗样本（FGSM 攻击）。\n\n### 1. 导入依赖并构建模型\n首先创建一个简单的机器学习模型并进行训练。\n\n```python\nimport numpy as np\nfrom sklearn.linear_model import LogisticRegression\nfrom art.estimators.classification import SklearnClassifier\n\n# 构造简单数据\nx_train = np.random.rand(100, 20)\ny_train = np.random.randint(0, 2, 100)\nx_test = np.random.rand(10, 20)\ny_test = np.random.randint(0, 2, 10)\n\n# 初始化并训练 scikit-learn 模型\nmodel = LogisticRegression()\nmodel.fit(x_train, y_train)\n\n# 将模型包装为 ART 的 Estimator\nclassifier = SklearnClassifier(model=model, clip_values=(0, 1))\n```\n\n### 2. 
定义攻击方法并生成对抗样本\n使用快速梯度符号法（FGSM）生成对抗样本，并观察模型预测的变化。\n\n```python\nfrom art.attacks.evasion import FGSM\n\n# 初始化攻击方法，设定扰动步长 epsilon\nattack = FGSM(classifier, eps=0.1)\n\n# 生成对抗样本\nx_adv = attack.generate(x=x_test)\n\n# 对比原始预测与对抗样本预测\nprint(\"原始标签:\", y_test)\nprint(\"原始预测:\", np.argmax(classifier.predict(x_test), axis=1))\nprint(\"对抗预测:\", np.argmax(classifier.predict(x_adv), axis=1))\n```\n\n通过以上步骤，您已成功使用 ART 完成了基础的对抗样本生成。更多高级功能（如防御策略、不同框架支持、指标评估等）请参考官方文档。","某金融科技公司正在部署一个人脸识别支付系统，安全团队急需验证模型在面对恶意攻击时的可靠性。\n\n### 没有 adversarial-robustness-toolbox 时\n- **防御手段盲目**：团队仅凭理论猜测可能的攻击方式，缺乏系统化的对抗样本生成工具来模拟真实的“逃逸攻击”，导致防御策略如同纸上谈兵。\n- **评估标准缺失**：无法量化模型在遭受“数据投毒”或“模型提取”攻击时的具体风险值，只能依赖模糊的直觉判断安全性。\n- **框架适配困难**：由于系统混合使用了 TensorFlow 和 PyTorch，手动编写针对不同框架的攻击与防御代码耗时巨大且极易出错。\n- **响应滞后**：等到生产环境出现异常（如用户被误识或模型被窃取）后才被动修补，此时业务声誉已遭受不可逆的损失。\n\n### 使用 adversarial-robustness-toolbox 后\n- **主动模拟攻击**：利用其内置的多种攻击算法，红队能精准生成针对人脸图像的微小扰动样本，提前暴露模型在“逃逸攻击”下的脆弱点。\n- **量化安全指标**：通过标准化的评估流程，蓝队获得了具体的鲁棒性评分，清晰掌握模型在抵抗“推理攻击”和“数据投毒”时的真实水位。\n- **统一技术栈支持**：借助其对主流框架的无缝兼容，团队无需重复造轮子，迅速在同一套代码库中完成了跨框架的安全测试流水线。\n- **前置风险拦截**：在模型上线前即完成多轮攻防演练并加固，将潜在的安全漏洞消灭在萌芽状态，确保支付环节万无一失。\n\nadversarial-robustness-toolbox 将机器学习安全从“被动救火”转变为“主动免疫”，为 AI 系统的可信落地构建了坚实的防线。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FTrusted-AI_adversarial-robustness-toolbox_590b6138.png","Trusted-AI","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002FTrusted-AI_53a5ad53.png","This GitHub org hosts LF AI Foundation projects in the category of Trusted and Responsible AI.",null,"info@lfai.foundation","LFAI_Foundation","https:\u002F\u002Fgithub.com\u002FTrusted-AI",[81,85,89,93],{"name":82,"color":83,"percentage":84},"Python","#3572A5",99.7,{"name":86,"color":87,"percentage":88},"Shell","#89e051",0.2,{"name":90,"color":91,"percentage":92},"Dockerfile","#384d54",0,{"name":94,"color":95,"percentage":92},"Makefile","#427819",5922,1308,"2026-04-10T00:35:37","MIT","未说明",{"notes":102,"python":103,"dependencies":104},"该工具库支持多种主流机器学习框架（如 TensorFlow, PyTorch, 
scikit-learn 等）及多种数据类型（图像、表格、音频、视频等）。具体运行环境需求取决于用户所选用的后端深度学习框架及任务类型，README 中未列出统一的硬件最低配置。","3.8+",[105,106,107,108,109,110,111,112],"TensorFlow","Keras","PyTorch","scikit-learn","XGBoost","LightGBM","CatBoost","GPy",[13,14,15],[115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131],"python","attack","adversarial-machine-learning","poisoning","trusted-ai","artificial-intelligence","extraction","adversarial-attacks","adversarial-examples","evasion","inference","privacy","ai","trustworthy-ai","red-team","blue-team","machine-learning","2026-03-27T02:49:30.150509","2026-04-11T10:02:43.890381",[135,140,145,150,154,159],{"id":136,"question_zh":137,"answer_zh":138,"source_url":139},29471,"使用 PyTorch 进行白盒攻击（如 PGD）时遇到 \"RuntimeError: element 0 of tensors does not require grad\" 错误怎么办？","这是因为白盒攻击（如 `ProjectedGradientDescent`）需要计算损失或类别梯度，因此不能在 `with torch.no_grad():` 代码块中运行。请检查您的代码，确保在生成对抗样本时移除了 `torch.no_grad()` 上下文管理器。注意：虽然黑盒攻击（如 `HopSkipJump`）不需要框架计算梯度，理论上可以在该块中运行，但自 ART 1.3 版本起，`PytorchClassifier.predict` 内部已默认使用 `torch.no_grad()` 以加速评估，通常无需手动包裹。","https:\u002F\u002Fgithub.com\u002FTrusted-AI\u002Fadversarial-robustness-toolbox\u002Fissues\u002F498",{"id":141,"question_zh":142,"answer_zh":143,"source_url":144},29472,"SimBA 和 Square Attack 黑盒攻击对模型输出格式（Logits 还是概率）有什么具体要求？","不同的黑盒攻击算法对输出格式要求不同：\n1. **SimBA**：要求模型输出为**概率值**（probabilities）。如果您的模型输出是 logits，需要在攻击前通过 Softmax 函数将其转换为概率。\n2. **Square Attack**：要求模型输出为 **logits**。\n请根据您使用的具体攻击算法，调整模型预测函数的返回值类型，否则可能导致攻击失败或效果不佳。","https:\u002F\u002Fgithub.com\u002FTrusted-AI\u002Fadversarial-robustness-toolbox\u002Fissues\u002F924",{"id":146,"question_zh":147,"answer_zh":148,"source_url":149},29473,"在使用 HopSkipJump (HSJ) 攻击时遇到 \"invalid value encountered in true_divide\" 警告或产生 NaN 结果是什么原因？","这通常是由输入预处理中的随机性（例如随机 Dropout）引起的。HSJ 攻击假设误分类样本在二分搜索后仍保持误分类状态，但随机预处理会导致每次查询模型的预测结果发生剧烈变化，从而破坏这一假设并导致梯度计算出现 NaN。\n解决方案建议：\n1. 在攻击阶段增加检查机制，确保生成的对抗样本不包含 NaN 值，如果发现 NaN 则提前退出或重试。\n2. 
尽量固定模型推理时的随机种子或关闭推理阶段的随机 Dropout，以保证预测的一致性。","https:\u002F\u002Fgithub.com\u002FTrusted-AI\u002Fadversarial-robustness-toolbox\u002Fissues\u002F307",{"id":151,"question_zh":152,"answer_zh":153,"source_url":144},29474,"SimBA 攻击需要多少次迭代才能找到对抗样本？如果图像归一化到 [0, 1] 范围，epsilon 该如何设置？","对于典型的模型和图像数据，SimBA 通常需要 **300 到 500 次迭代** (`max_iter`) 才能成功找到对抗样本。如果您的计算资源有限，可以尝试减少迭代次数，但这可能会降低攻击成功率。\n关于 `epsilon` 设置：如果您的输入图像已经归一化到 **[0, 1]** 范围，那么设置 `epsilon=0.1` 通常是合适的，无需再进行额外的缩放处理。",{"id":155,"question_zh":156,"answer_zh":157,"source_url":158},29475,"当 Keras 模型包含 Embedding 层时，初始化 `KerasClassifier` 报错 \"Can not convert a NoneType into a Tensor\" 如何解决？","这是一个已知问题，通常与 Embedding 层的梯度处理或数值范围有关。虽然具体的代码修复可能需要更新 ART 版本或应用社区提供的 PR，但在配置 `clip_values`（扰动裁剪值）时需要注意：\n如果 Embedding 层后面紧跟着 `ReLU` 激活函数，嵌入空间的值通常是非负的。在这种情况下，建议将 `clip_values` 的下界设置为 **0**，以防止扰动引入负值，这有助于避免类型转换错误或数值不稳定。","https:\u002F\u002Fgithub.com\u002FTrusted-AI\u002Fadversarial-robustness-toolbox\u002Fissues\u002F33",{"id":160,"question_zh":161,"answer_zh":162,"source_url":139},29476,"为什么在 `torch.no_grad()` 块中运行某些 ART 攻击会失败，而另一些则可以？","这取决于攻击类型：\n- **白盒攻击**（如 `ProjectedGradientDescent`, `AutoProjectedGradientDescent`）：必须计算梯度来更新输入，因此**不能**在 `with torch.no_grad():` 块中运行，否则会抛出 \"does not require grad\" 错误。\n- **黑盒攻击**（如 `HopSkipJump`）：主要通过查询模型输出来估计梯度或直接搜索，不依赖框架的自动微分，理论上可以在 `no_grad` 块中运行。但需注意，ART 的 `predict` 方法内部可能已经优化使用了 `no_grad`，用户无需额外包裹。",[164,169,174,179,184,189,194,199,204,209,214,219,224,229,234,239,244,249,254,259],{"id":165,"version":166,"summary_zh":167,"released_at":168},198322,"1.15.0","This release of ART 1.15.0 introduces a default training loop for TensorFlowV2Classifier, the TRADES adversarial training protocol, an estimator for DEtection TRansformer (DETR) object detection models, and more.\r\n\r\n# Added\r\n\r\n- Added default training function to `TensorFlowV2Classifier` (#2124)\r\n- Added TRADES adversarial training protocol in PyTorch (#2131)\r\n- Added preprocessors for images supporting padding and resizing in 
PyTorch, TensorFlow and framework-independent (#2138)\r\n- Added support for arbitrarily sized images in `BadDet` poisoning attacks (#2189)\r\n- Added estimator for DEtection TRansformer (DETR) object detection models based on transformer architectures (#2192)\r\n\r\n# Changed\r\n\r\n- Changed PyTorch estimators to use PyTorch datasets and dataloaders to optimize the `fit` and `predict` methods for `PyTorchClassifier`, `PyTorchRegressor`, `PyTorchRandomizedSmoothing`, `PyTorchObjectDetector`, and `PyTorchYolo`  and optimized the `predict` method of `TensorFlowV2Classifier` by using a TensorFlow dataset and applying @tf.function decorator (#2180)\r\n- Changed `PyTorchObjectDetector` to apply `channels_first` argument  and improved performance by applying batch processing provided by newer PyTorch versions. (#2180)\r\n\r\n# Removed\r\n\r\n[None] \r\n\r\n# Fixed\r\n\r\n- Fixed unnecessary duplicate prediction calls to estimator in `SignOPTAttack` (#2129)\r\n- Fixed missing transfer of tensor to device in `ProjectedGradientDescentPyTorch` (#2135)\r\n- Fixed trigger placement for image poisoning perturbations by correctly accessing height and width of the trigger image instead of swapping both (#2143)\r\n- Fixed key error in loss gradients of `PyTorchYolo` estimator and updated format of targets passed to the estimator in `AdversarialPatchPyTorch` to reflect updates to `PyTorchYolo`(#2169)\r\n- Fixed Visible Deprecation Warning in `analyze_by_distance` and `analyze_by_size` of `ClusteringAnalyzer` (#2195)\r\n","2023-06-30T22:18:12",{"id":170,"version":171,"summary_zh":172,"released_at":173},198323,"1.14.1","This release of ART 1.14.1 provides updates to ART 1.14\r\n\r\n# Added\r\n\r\n[None]\r\n\r\n# Changed\r\n\r\n[None]\r\n\r\n# Removed\r\n\r\n[None]\r\n\r\n# Fixed\r\n\r\n- Fixed bug in `PytorchYolo` object detection estimator to correctly normalize the bounding boxes (#2091)\r\n- Fixed missing `adversarial_accuracy` metric in `__init__.py` (#2093 )\r\n- Fixed bug of 
default value for a loss weighting parameter being used rather than user supplied inputs in `AdversarialTrainerCertifiedIBPPyTorch` (#2102)\r\n- Fixed Regional Misclassification Attack (RMA) to be able to poison all bounding boxes regardless of the class type (#2110 )\r\n- Fixed wrong order of predictions and targets arguments in `AutoProjectedGradientDescent`'s new cross entropy loss class introduced in ART 1.14.0 and ensured correct attributes in `PyTorchClassifier` (#2117)\r\n","2023-04-21T19:47:55",{"id":175,"version":176,"summary_zh":177,"released_at":178},198324,"1.14.0","This release of ART 1.14.0 introduces poisoning attacks on object detection models, privacy risk metrics, new white-box evasion attack based on conjugate gradients, and more.\r\n\r\n# Added\r\n\r\n- Added implementation of SHAPr membership privacy risk metric (#1978)\r\n- Added support for categorical non-numeric as well as continuous features in attribute inference attacks and improvements in shadow model tools (#2006)\r\n- Added implementation of Auto Conjugate Gradient Attack for white-box evasion (#2028)\r\n- Added implementation of adversarial training with interval bound propagation (#2044)\r\n- Added implementation of method `fit` to object detection estimators `PyTorchFasterRCNN`, `PyTorchObjectDetector`, and `PyTorchYolo` (#2067)\r\n- Added BadDet object detection poisoning attacks (RMA, GMA, OGA, ODA) (#2054, #2069)\r\n\r\n# Changed\r\n\r\n- Changed evasion detectors module by refactoring the entire module and introducing common API with the `EvasionDetector` base class (#1993)\r\n- Changed loading of audio triggers with `audio_perturbations` to cache trigger to accelerate loading (#2053)\r\n- Changed tested and officially supported Python versions to 3.9, 3.10, 3.11 (#2063)\r\n- Changed checks and internal improvements to `AdversarialTrainerCertifiedPytorch` (#2070)\r\n\r\n# Removed\r\n\r\n[None] \r\n\r\n# Fixed\r\n\r\n- Fixed bug in `add_single_bd` and `add_pattern_bd` to avoid 
confusing height and width of the trigger image and transposing the trigger (#2046)\r\n","2023-03-17T15:19:03",{"id":180,"version":181,"summary_zh":182,"released_at":183},198325,"1.13.1","This release of ART 1.13.1 provides updates to ART 1.13\r\n\r\n# Added\r\n\r\n[None]\r\n\r\n# Changed\r\n\r\n- Changed PDTP privacy metric to support two comparison modes: ratio (default) and new difference mode (#1984)\r\n- Changed default parameters for `apply_fit` and `apply_predict` for the Data Augmentation defenses `CutMix*`, `CutOut*`, and `MixUp*` (#1987)\r\n\r\n# Removed\r\n\r\n[None]\r\n\r\n# Fixed\r\n\r\n- Fixed bug in `PixelThreshold` attack to support batches of a single sample (#1982)\r\n- Fixed type error in `DPInstaHideTrainer` for `PyTorchClassifier` by casting random noise to correct type (#1987)\r\n- Added missing classes to union types `OBJECT_DETECTOR_TYPE`, `PYTORCH_ESTIMATOR_TYPE`, and `TENSORFLOWV2_ESTIMATOR_TYPE` (#1999)\r\n- Fixed audio perturbations going out of clip values in `insert_tone_trigger` and `insert_audio_trigger` (#2016)\r\n- Fixed missing transfer to device in `FeatureAdversariesPyTorch` to enable running on GPUs (#2021)\r\n- Fixed missing conversion to float to support floor() on GPUs in `PyTorchClassifier` (#2022)\r\n- Fixed incorrect integer return type in `check_and_transform_label_format` (#2025)\r\n","2023-02-16T13:07:41",{"id":185,"version":186,"summary_zh":187,"released_at":188},198309,"1.20.1","ART 1.20.1 版本对 ART 1.20 进行了更新。\n\n# 新增\n\n[无]\n\n# 变更\n\n[无]\n\n# 移除\n\n[无]\n\n# 修复\n\n- 修复了一个导致 `PyTorchYolo` 目标检测估算器中早于 v5 版本的 YOLO 目标检测模型无法向后兼容的 bug。（#2686）","2025-07-07T19:58:07",{"id":190,"version":191,"summary_zh":192,"released_at":193},198310,"1.20.0","ART 1.20.0 版本新增了对 YOLO 对象检测模型 v8 及更高版本的支持，以及用于衡量生成式 AI 鲁棒性的全新 GREAT 指标。\n\n# 新增\n\n- 新增 GREAT 指标：基于生成模型的对抗扰动全局鲁棒性评估 (#2527)\n- 增加对 YOLO 对象检测模型 v8 及更高版本的支持 (#2675)\n\n# 变更\n\n[无]\n\n# 移除\n\n- 移除了对 TensorFlow v1 和 MXNet 的支持，包括仅针对这些框架实现的相关工具和攻击方法 (#2621)\n\n# 
修复\n\n[无]\n","2025-06-30T11:58:47",{"id":195,"version":196,"summary_zh":197,"released_at":198},198311,"1.19.2","ART 1.19.2 版本对 ART 1.19 进行了更新。\n\n# 新增\n\n[无]\n\n# 变更\n\n- 将 APGL 许可证下的 `art\u002Fattacks\u002Fevasion\u002Foverload\u002Fbox_iou.py` 替换为采用 MIT 许可证发布的代码 (#2615)\n\n# 移除\n\n- 移除了 tests 模块意外安装到 site-packages 中的问题 (#2616)\n\n# 修复\n\n- 修复了 `PyTorchObjectDetector._get_losses` 中的一个 bug，该 bug 导致目标检测模型的批归一化层未被冻结。因此，`PyTorchObjectDetector` 中所有计算损失或损失梯度的方法都会修改模型的批归一化参数，从而导致损失梯度不准确，并使模型的批归一化层参数逐渐发生变化。(#2663)\n","2025-06-18T11:37:26",{"id":200,"version":201,"summary_zh":202,"released_at":203},198312,"1.19.1","ART 1.19.1 版本对 ART 1.19 进行了更新。\n\n# 新增\n\n[无]\n\n# 变更\n\n[无]\n\n# 移除\n\n[无]\n\n# 修复\n\n- 修复了返回的 AutoAttack 元数据中字符串字面量未正确使用 f-string 的问题 (#2550)\n- 修复了 `AdversarialPatchPyTorch` 攻击在目标检测数据上的类型注解和代码逻辑问题 (#2557)\n- 修复了 `ProjectedGradientDescentPyTorch` 攻击中缺失的 PyTorch 张量设备转移问题 (#2558)","2025-01-22T10:09:43",{"id":205,"version":206,"summary_zh":207,"released_at":208},198313,"1.19.0","ART 1.19.0 版本引入了“先窃取、后攻击”和重缩放自动共轭梯度攻击，以及用于对抗样本的“做你自己的邻居检测器”（BEYOND）。\n\n# 新增\n\n- 新增“先窃取、后攻击”（SNAL）逃避攻击 (#2440)\n- 新增重缩放自动共轭梯度（ReACG）下降逃避攻击 (#2460)\n- 新增适用于 PyTorch 的对抗样本“做你自己的邻居检测器”（BEYOND）(#2489)\n- 增加对具有多输出的 scikit-learn 模型的支持 (#2505)\n\n# 变更\n\n- 修改 AutoAttack，允许定义并行处理中使用的进程数 (#2529)\n\n# 移除\n\n[无]\n\n# 修复\n\n- 修复了 `scipy` 中已弃用函数 `binom_test` 的使用 (#2517)\n- 修复了 PyTorch 中对抗补丁攻击掩码中补丁位置随机采样中的 bug (#2539)","2024-12-20T00:51:46",{"id":210,"version":211,"summary_zh":212,"released_at":213},198314,"1.18.2","ART 1.18.2 版本更新提供了对 ART 1.18 的改进。\n\n# 新增\n\n[无]\n\n# 变更\n\n- 修改了对需要检查是否使用标准库函数的导入库的版本检查逻辑 (#2500)\n\n# 移除\n\n[无]\n\n# 修复\n\n[无]\n","2024-10-02T21:30:21",{"id":215,"version":216,"summary_zh":217,"released_at":218},198315,"1.18.1","ART 1.18.1 版本对 ART 1.18 进行了更新。\n\n# 新增\n\n[无]\n\n# 变更\n\n[无]\n\n# 移除\n\n[无]\n\n# 修复\n\n- 修复了 `ProjectedGradientDescentPyTorch` 中缺失的设备\u002FGPU 转移问题（#2455）\n","2024-07-03T17:29:44",{"id":220,"version":221,"summary_zh":222,"released_at":223},198316,"1.18.0","ART 
1.18.0 版本引入了针对目标检测模型的过载攻击，并在投影梯度下降攻击中为所有范数提供了快速且准确的损失梯度。\n\n# 新增\n\n- 为目标检测模型新增了过载攻击 (#2337)\n- 在投影梯度下降攻击中增加了对所有范数的支持 (#2382)\n- 在推理攻击中增加了对特征缩放的支持 (#2384)\n\n# 变更\n\n- 将 YOLO 和 Faster R-CNN 的模型特定估计器替换为适用于 PyTorch 中所有目标检测模型的统一估计器 (#2321)\n\n# 移除\n\n[无]\n\n# 修复\n\n- 修复了投影梯度下降攻击中非 L[2, infinity] 范数梯度的缩放问题 (#2382)","2024-06-16T22:19:55",{"id":225,"version":226,"summary_zh":227,"released_at":228},198317,"1.17.1","ART 1.17.1 版本对 ART 1.17 进行了更新。\n\n# 新增\n\n[无]\n\n# 变更\n\n[无]\n\n# 移除\n\n- 移除了 `scikit-learn` 的版本上限，以减少依赖冲突并便于与其他库的集成。\n\n# 修复\n\n[无]\n","2024-02-17T23:58:41",{"id":230,"version":231,"summary_zh":232,"released_at":233},198318,"1.17.0","ART 1.17.0 版本引入了新的对抗训练协议、成员推理攻击、用于逃避检测的组合式对抗攻击等新功能。\n\n# 新增\n\n- 在 PyTorch 中新增了作为逃避攻击的组合式对抗攻击 (#2287)\n- 增加了对无需真实标签的黑盒成员推理攻击的支持 (#2293)\n- 为所有分类估计器的 `fit` 和 `predict` 方法中的进度条添加了详细模式选项 (#2334)\n- 在 PyTorch 中新增了 Oracle 对齐对抗训练 (OAAT) (#2348)\n\n# 变更\n\n[无]\n\n# 移除\n\n[无]\n\n# 修复\n\n- 通过在调用 `get_activations()` 时对输出进行展平，修复了 `ActivateDefense` 和 `SpectralSignatures` 污染防御中的 bug (#2327)\n- 修复了 Hugging Face 分类估计器中的一个 bug，使其能够在提供的模型已位于 GPU 上时正确推断设备 (#2300)","2023-12-27T22:17:11",{"id":235,"version":236,"summary_zh":237,"released_at":238},198319,"1.16.0","This release of ART 1.16.0 introduces multiple estimators for certified robustness and Hugging Face models, adversarial training with Adversarial Weight Perturbation, improvements for inference attacks, and more.\r\n\r\n# Added\r\n\r\n- Added estimator for smoothed vision transformers as defence against evasion with adversarial patches (#2171)\r\n- Added estimators for variations of randomised smoothing including MACER, SmoothAdv, and SmoothMix for PyTorch and TensorFlow (#2218)\r\n- Added adversarial training with Adversarial Weight Perturbation protocol in PyTorch (#2224)\r\n- Added estimator for Hugging Face models with PyTorch backend (#2245)\r\n- Added ObjectSeeker certifiably robust defence for object detectors against poisoning and adversarial patches (#2246)\r\n- Added representation 
string `__repr__` to all attacks (#2274) \r\n\r\n# Changed\r\n\r\n- Changed inference attacks to support additional attack model types (e.g., KNN, LR, etc.) and replaced scikit-learn's MLPClassifier with a PyTorch neural network model (#2253)\r\n- Changed attack's method `set_params` to raise `ValueError` if a not previously defined attribute is set (#2257)\r\n- Changed AutoAttack to support multiprocessing and support running attacks in parallel (#2258)\r\n\r\n# Removed\r\n\r\n[None] \r\n\r\n# Fixed\r\n\r\n- Fixed docstring of `TargetedUniversalPerturbation` (#2212)\r\n- Fixed bug of unsupported operands because of dependency updates in `AdversarialPatchTensorFlowV2` (#2276)\r\n- Fixed bug in `AutoAttack` to avoid that attacks which do not support targeted mode are skipped (#2257)\r\n","2023-09-22T14:42:30",{"id":240,"version":241,"summary_zh":242,"released_at":243},198320,"1.15.2","This release of ART 1.15.2 provides updates to ART 1.15\r\n\r\n# Added\r\n\r\n[None]\r\n\r\n# Changed\r\n\r\n[None]\r\n\r\n# Removed\r\n\r\n[None]\r\n\r\n# Fixed\r\n\r\n- Fixed bug where `PyTorchYolo` and `PyTorchObjectDetector` object detection estimators modified the original input Numpy array (#2263)\r\n- Fixed bug where `channels_first` argument of `PyTorchObjectDetector` and `PyTorchFasterRCNN` received the wrong default value of `False` instead of `True` (#2264)\r\n","2023-09-12T20:55:47",{"id":245,"version":246,"summary_zh":247,"released_at":248},198321,"1.15.1","This release of ART 1.15.1 provides updates to ART 1.15\r\n\r\n# Added\r\n\r\n[None]\r\n\r\n# Changed\r\n\r\n[None]\r\n\r\n# Removed\r\n\r\n[None]\r\n\r\n# Fixed\r\n\r\n- Fixed deprecation warning by replacing the import statement `from scipy.ndimage.filters import median_filter` with `from scipy.ndimage import median_filter` (#2211)\r\n- Fixed bug limiting input shapes in `AutoProjectedGradientDescent` and `AutoConjugateGradient` attacks to be images to support any input shapes (#2214)\r\n- Fixed missing support for 
index-labels in `AdversarialTrainerTRADESPyTorch` (#2231)\r\n- Fix bug in `PyTorchObjectDetector` and `PyTorchYolo` estimators to support non-leaf tensors to retain gradient properties if moved to another device (#2238, #2249)\r\n- Fixed unintended required dependency `Pillow` to be optional again (#2240)\r\n- Fixed circular dependencies in `art.estimators.certification` (#2241)\r\n","2023-08-20T22:36:39",{"id":250,"version":251,"summary_zh":252,"released_at":253},198326,"1.13.0","This release of ART 1.13.0 introduces black-box regression estimator, DP-InstaHide, object detection estimator for TensorFlow v2, and more.\r\n\r\n# Added\r\n\r\n- Added `CutOut` data augmentation as preprocessor in Numpy, TensorFlow and PyTorch (#1850)\r\n- Added `MixUp` data augmentation as preprocessor in Numpy, TensorFlow and PyTorch (#1885)\r\n- Added `CutMix` data augmentation as preprocessor in Numpy, TensorFlow and PyTorch (#1910)\r\n- Added regression estimator for black-box scenario (#1930)\r\n- Added additional model support for shadow models (#1930)\r\n- Added Numpy-based data generator to support very large datasets (#1934\r\n- Added object detection estimator for Faster-RCNN in TensorFlow v2 (#1951)\r\n- Added DP-InstaHide training for classification with differentially private data augmentations (#1956)\r\n- Added Interval Bound Propagation for certified classification in PyTorch (#1965)\r\n\r\n# Changed\r\n\r\n[None] \r\n\r\n# Removed\r\n\r\n[None] \r\n\r\n# Fixed\r\n\r\n- Fixed unexpected shape in `art.utils.load_cifar10` for loading raw dataset (#1962)\r\n- Fixed bug to return correct best poisoning indices in `SleeperAgentAttack` (#1955)\r\n","2022-12-18T22:51:31",{"id":255,"version":256,"summary_zh":257,"released_at":258},198327,"1.12.2","This release of ART 1.12.2 provides updates to ART 1.12.\r\n\r\n# Added\r\n\r\n- Added `drop_last` option to method `fit` of `PyTorchClassifier` (#1883)\r\n\r\n# Changed\r\n\r\n- Changed documentation of 
`art.metrics.verification_decisions_trees.RobustnessVerificationTreeModelsCliqueMethod` to provide additional information (#1897)\r\n- Changed Numba to be an optional dependency (#1884)\r\n- Changed `BoundaryAttack` to enable binary classification by removing unnecessary input check (#1890)\r\n\r\n# Removed\r\n\r\n[None]\r\n\r\n# Fixed\r\n\r\n- Fixed issue caused by missing variable initialization in `SleeperAgentAttack` (#1892)\r\n- Fixed bug in `projection_l1_1` and `projection_l1_2` where in rare cases they returned the input point rather than its projection (#1870)\r\n","2022-11-15T15:30:50",{"id":260,"version":261,"summary_zh":262,"released_at":263},198328,"1.12.1","This release of ART 1.12.1 provides updates to ART 1.12.\r\n\r\n# Added\r\n\r\n[None]\r\n\r\n# Changed\r\n\r\n[None]\r\n\r\n# Removed\r\n\r\n[None] \r\n\r\n# Fixed\r\n\r\n- Fixed object detection estimator `PyTorchYolo` to not modify tracked statistics of batch-norm layers of the YOLO model during loss and loss gradient calculations (#1860)\r\n","2022-09-28T13:31:24"]