[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-jiep--offensive-ai-compilation":3,"tool-jiep--offensive-ai-compilation":61},[4,18,26,36,44,53],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":17},4358,"openclaw","openclaw\u002Fopenclaw","OpenClaw 是一款专为个人打造的本地化 AI 助手，旨在让你在自己的设备上拥有完全可控的智能伙伴。它打破了传统 AI 助手局限于特定网页或应用的束缚，能够直接接入你日常使用的各类通讯渠道，包括微信、WhatsApp、Telegram、Discord、iMessage 等数十种平台。无论你在哪个聊天软件中发送消息，OpenClaw 都能即时响应，甚至支持在 macOS、iOS 和 Android 设备上进行语音交互，并提供实时的画布渲染功能供你操控。\n\n这款工具主要解决了用户对数据隐私、响应速度以及“始终在线”体验的需求。通过将 AI 部署在本地，用户无需依赖云端服务即可享受快速、私密的智能辅助，真正实现了“你的数据，你做主”。其独特的技术亮点在于强大的网关架构，将控制平面与核心助手分离，确保跨平台通信的流畅性与扩展性。\n\nOpenClaw 非常适合希望构建个性化工作流的技术爱好者、开发者，以及注重隐私保护且不愿被单一生态绑定的普通用户。只要具备基础的终端操作能力（支持 macOS、Linux 及 Windows WSL2），即可通过简单的命令行引导完成部署。如果你渴望拥有一个懂你",349277,3,"2026-04-06T06:32:30",[13,14,15,16],"Agent","开发框架","图像","数据工具","ready",{"id":19,"name":20,"github_repo":21,"description_zh":22,"stars":23,"difficulty_score":10,"last_commit_at":24,"category_tags":25,"status":17},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,"2026-04-05T11:01:52",[14,15,13],{"id":27,"name":28,"github_repo":29,"description_zh":30,"stars":31,"difficulty_score":32,"last_commit_at":33,"category_tags":34,"status":17},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 
代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",160784,2,"2026-04-19T11:32:54",[14,13,35],"语言模型",{"id":37,"name":38,"github_repo":39,"description_zh":40,"stars":41,"difficulty_score":32,"last_commit_at":42,"category_tags":43,"status":17},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",109154,"2026-04-18T11:18:24",[14,15,13],{"id":45,"name":46,"github_repo":47,"description_zh":48,"stars":49,"difficulty_score":32,"last_commit_at":50,"category_tags":51,"status":17},6121,"gemini-cli","google-gemini\u002Fgemini-cli","gemini-cli 是一款由谷歌推出的开源 AI 命令行工具，它将强大的 Gemini 大模型能力直接集成到用户的终端环境中。对于习惯在命令行工作的开发者而言，它提供了一条从输入提示词到获取模型响应的最短路径，无需切换窗口即可享受智能辅助。\n\n这款工具主要解决了开发过程中频繁上下文切换的痛点，让用户能在熟悉的终端界面内直接完成代码理解、生成、调试以及自动化运维任务。无论是查询大型代码库、根据草图生成应用，还是执行复杂的 Git 操作，gemini-cli 都能通过自然语言指令高效处理。\n\n它特别适合广大软件工程师、DevOps 人员及技术研究人员使用。其核心亮点包括支持高达 100 万 token 的超长上下文窗口，具备出色的逻辑推理能力；内置 Google 搜索、文件操作及 Shell 命令执行等实用工具；更独特的是，它支持 MCP（模型上下文协议），允许用户灵活扩展自定义集成，连接如图像生成等外部能力。此外，个人谷歌账号即可享受免费的额度支持，且项目基于 Apache 2.0 
协议完全开源，是提升终端工作效率的理想助手。",100752,"2026-04-10T01:20:03",[52,13,15,14],"插件",{"id":54,"name":55,"github_repo":56,"description_zh":57,"stars":58,"difficulty_score":32,"last_commit_at":59,"category_tags":60,"status":17},4721,"markitdown","microsoft\u002Fmarkitdown","MarkItDown 是一款由微软 AutoGen 团队打造的轻量级 Python 工具，专为将各类文件高效转换为 Markdown 格式而设计。它支持 PDF、Word、Excel、PPT、图片（含 OCR）、音频（含语音转录）、HTML 乃至 YouTube 链接等多种格式的解析，能够精准提取文档中的标题、列表、表格和链接等关键结构信息。\n\n在人工智能应用日益普及的今天，大语言模型（LLM）虽擅长处理文本，却难以直接读取复杂的二进制办公文档。MarkItDown 恰好解决了这一痛点，它将非结构化或半结构化的文件转化为模型“原生理解”且 Token 效率极高的 Markdown 格式，成为连接本地文件与 AI 分析 pipeline 的理想桥梁。此外，它还提供了 MCP（模型上下文协议）服务器，可无缝集成到 Claude Desktop 等 LLM 应用中。\n\n这款工具特别适合开发者、数据科学家及 AI 研究人员使用，尤其是那些需要构建文档检索增强生成（RAG）系统、进行批量文本分析或希望让 AI 助手直接“阅读”本地文件的用户。虽然生成的内容也具备一定可读性，但其核心优势在于为机器",93400,"2026-04-06T19:52:38",[52,14],{"id":62,"github_repo":63,"name":64,"description_en":65,"description_zh":66,"ai_summary_zh":67,"readme_en":68,"readme_zh":69,"quickstart_zh":70,"use_case_zh":71,"hero_image_url":72,"owner_login":73,"owner_name":74,"owner_avatar_url":75,"owner_bio":76,"owner_company":76,"owner_location":77,"owner_email":76,"owner_twitter":76,"owner_website":76,"owner_url":78,"languages":79,"stars":88,"forks":89,"last_commit_at":90,"license":91,"difficulty_score":92,"env_os":93,"env_gpu":94,"env_ram":94,"env_deps":95,"category_tags":98,"github_topics":99,"view_count":32,"oss_zip_url":76,"oss_zip_packed_at":76,"status":17,"created_at":105,"updated_at":106,"faqs":107,"releases":108},9600,"jiep\u002Foffensive-ai-compilation","offensive-ai-compilation","A curated list of useful resources that cover Offensive AI.","offensive-ai-compilation 是一份精心整理的开源资源清单，专注于“进攻性人工智能”（Offensive AI）领域。它旨在帮助安全从业者系统性地理解人工智能模型面临的潜在威胁与攻击手段，从而更好地构建防御体系。\n\n该资源库解决了 AI 安全学习中资料分散、分类不清的痛点。它将复杂的对抗性机器学习攻击梳理为四大核心类型：模型提取（窃取参数）、反演攻击（推断数据）、投毒攻击（污染训练数据）以及 evasion 攻击（绕过检测）。此外，它还涵盖了生成式 AI 在音频、图像、视频及文本领域的滥用风险，并提供了相应的检测方法与防御策略。\n\noffensive-ai-compilation 特别适合网络安全研究人员、AI 开发者、渗透测试工程师以及对 AI 伦理与安全感兴趣的学生使用。其独特亮点在于不仅罗列了 Cleverhans、ART 
等专业攻击工具，还针对每种攻击方式提供了具体的防御动作建议和学术文献链接，实现了从“攻击原理”到“防御实践”的闭环。无论是希望评估模型鲁棒性的开发者，还是致力于研究对抗样本的学者，都能从中获得极具价值的参考指引，共同提升人工智能系统的安全性","offensive-ai-compilation 是一份精心整理的开源资源清单，专注于“进攻性人工智能”（Offensive AI）领域。它旨在帮助安全从业者系统性地理解人工智能模型面临的潜在威胁与攻击手段，从而更好地构建防御体系。\n\n该资源库解决了 AI 安全学习中资料分散、分类不清的痛点。它将复杂的对抗性机器学习攻击梳理为四大核心类型：模型提取（窃取参数）、反演攻击（推断数据）、投毒攻击（污染训练数据）以及 evasion 攻击（绕过检测）。此外，它还涵盖了生成式 AI 在音频、图像、视频及文本领域的滥用风险，并提供了相应的检测方法与防御策略。\n\noffensive-ai-compilation 特别适合网络安全研究人员、AI 开发者、渗透测试工程师以及对 AI 伦理与安全感兴趣的学生使用。其独特亮点在于不仅罗列了 Cleverhans、ART 等专业攻击工具，还针对每种攻击方式提供了具体的防御动作建议和学术文献链接，实现了从“攻击原理”到“防御实践”的闭环。无论是希望评估模型鲁棒性的开发者，还是致力于研究对抗样本的学者，都能从中获得极具价值的参考指引，共同提升人工智能系统的安全性。","# Offensive AI Compilation\n\nA curated list of useful resources that cover Offensive AI.\n\n## 📁 Contents 📁\n- [🚫 Abuse 🚫](#-abuse-)\n  - [🧠 Adversarial Machine Learning 🧠](#-adversarial-machine-learning-)\n    - [⚡ Attacks ⚡](#-attacks-)\n      - [🔒 Extraction 🔒](#-extraction-)\n        - [⚠️ Limitations ⚠️](#️-limitations-️)\n        - [🛡️ Defensive actions 🛡️](#️-defensive-actions-️)\n        - [🔗 Useful links 🔗](#-useful-links-)\n      - [⬅️ Inversion (or inference) ⬅️](#️-inversion-or-inference-️)\n        - [🛡️ Defensive actions 🛡️](#️-defensive-actions-️-1)\n        - [🔗 Useful links 🔗](#-useful-links--1)\n      - [💉 Poisoning 💉](#-poisoning-)\n        - [🔓 Backdoors 🔓](#-backdoors-)\n        - [🛡️ Defensive actions 🛡️](#️-defensive-actions-️-2)\n        - [🔗 Useful links 🔗](#-useful-links--2)\n      - [🏃‍♂️ Evasion 🏃‍♂️](#️-evasion-️)\n        - [🛡️ Defensive actions 🛡️](#️-defensive-actions-️-3)\n        - [🔗 Useful links 🔗](#-useful-links--3)\n    - [🛠️ Tools 🛠️](#️-tools-️)\n        - [ART](#art)\n        - [Cleverhans](#cleverhans)\n- [🔧 Use 🔧](#-use-)\n  - [🕵️‍♂️ Pentesting 🕵️‍♂️](#️️-pentesting-️️)\n  - [🦠 Malware 🦠](#-malware-)\n  - [🗺️ OSINT 🗺️](#️osint-️)\n  - [📧 Phishing 📧](#phishing-)\n  - [👨‍🎤 Generative AI 👨‍🎤](#-generative-ai-)\n    - [🔊 Audio 🔊](#-audio-)\n      - [🛠️ Tools 🛠️](#️-tools-️-1)\n      - [💡 Applications 
💡](#-applications-)\n      - [🔎 Detection 🔎](#-detection-)\n    - [📷 Image 📷](#-image-)\n      - [🛠️ Tools 🛠️](#️-tools-️-2)\n      - [💡 Applications 💡](#-applications--1)\n      - [🔎 Detection 🔎](#-detection--1)\n    - [🎥 Video 🎥](#-video-)\n      - [🛠️ Tools 🛠️](#️-tools-️-3)\n      - [💡 Applications 💡](#-applications--2)\n      - [🔎 Detection 🔎](#-detection--2)\n    - [📄 Text 📄](#-text-)\n      - [🛠️ Tools 🛠️](#️-tools-️-4)\n      - [🔎 Detection 🔎](#-detection--3)\n      - [💡 Applications 💡](#-applications--3)\n  - [📚 Misc 📚](#-misc-)\n- [📊 Surveys 📊](#-surveys-)\n- [🗣 Contributors 🗣](#-contributors-)\n- [©️ License ©️](#️-license-️)\n\n## 🚫 Abuse 🚫\n\nExploiting the vulnerabilities of AI models.\n\n### 🧠 Adversarial Machine Learning 🧠\n\nAdversarial Machine Learning is responsible for assessing their weaknesses and providing countermeasures.\n\n#### ⚡ Attacks ⚡\n\nIt is organized into four types of attacks: extraction, inversion, poisoning and evasion.\n\n![Adversarial Machine Learning attacks](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_bf8200d7c81e.png)\n\n##### 🔒 Extraction 🔒\n\nIt tries to steal the parameters and hyperparameters of a model by making requests that maximize the extraction of information.\n\n![Extraction attack](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_6c6e8e2c650c.png)\n\nDepending on the knowledge of the adversary's model, white-box and black-box attacks can be performed.\n\nIn the simplest white-box case (when the adversary has full knowledge of the model, e.g., a sigmoid function), one can create a system of linear equations that can be easily solved.\n\nIn the generic case, where there is insufficient knowledge of the model, the substitute model is used. 
This model is trained with the requests made to the original model in order to imitate the same functionality as the original one.\n\n![White-box and black-box extraction attacks](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_8f306e35fc3d.png)\n\n###### ⚠️ Limitations ⚠️\n\n  * Training a substitute model is equivalent (in many cases) to training a model from scratch.\n\n  * Very computationally intensive.\n\n  * The adversary has limitations on the number of requests before being detected.\n\n###### 🛡️ Defensive actions 🛡️\n\n  * Rounding of output values.\n\n  * Use of [differential privacy](https:\u002F\u002Fen.wikipedia.org\u002Fwiki\u002FDifferential_privacy).\n\n  * Use of [ensembles](https:\u002F\u002Fen.wikipedia.org\u002Fwiki\u002FEnsemble_learning).\n\n  * Use of specific defenses\n    * [Specific architectures](https:\u002F\u002Farxiv.org\u002Fabs\u002F1711.07221)\n    * [PRADA](https:\u002F\u002Farxiv.org\u002Fabs\u002F1805.02628)\n    * [Adaptive Misinformation](https:\u002F\u002Farxiv.org\u002Fabs\u002F1911.07100)\n    * ...\n\n###### 🔗 Useful links 🔗\n\n  * [Stealing Machine Learning Models via Prediction APIs](https:\u002F\u002Farxiv.org\u002Fabs\u002F1609.02943)\n  * [Stealing Hyperparameters in Machine Learning](https:\u002F\u002Farxiv.org\u002Fabs\u002F1802.05351)\n  * [Knockoff Nets: Stealing Functionality of Black-Box Models](https:\u002F\u002Farxiv.org\u002Fabs\u002F1812.02766)\n  * [Model Extraction Warning in MLaaS Paradigm](https:\u002F\u002Farxiv.org\u002Fabs\u002F1711.07221)\n  * [Copycat CNN: Stealing Knowledge by Persuading Confession with Random Non-Labeled Data](https:\u002F\u002Farxiv.org\u002Fabs\u002F1806.05476)\n  * [Prediction Poisoning: Towards Defenses Against DNN Model Stealing Attacks](https:\u002F\u002Farxiv.org\u002Fabs\u002F1906.10908)\n  * [Stealing Neural Networks via Timing Side Channels](https:\u002F\u002Farxiv.org\u002Fabs\u002F1812.11720)\n  * [Model Stealing Attacks 
Against Inductive Graph Neural Networks](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.08331)\n  * [High Accuracy and High Fidelity Extraction of Neural Networks](https:\u002F\u002Farxiv.org\u002Fabs\u002F1909.01838)\n  * [Poisoning Web-Scale Training Datasets is Practical](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.10149)\n  * [Polynomial Time Cryptanalytic Extraction of Neural Network Models](https:\u002F\u002Feprint.iacr.org\u002F2023\u002F1526)\n  * [Prompt-Specific Poisoning Attacks on Text-to-Image Generative Models](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.13828)\n  * [Awesome Data Poisoning And Backdoor Attacks](https:\u002F\u002Fgithub.com\u002Fpenghui-yang\u002Fawesome-data-poisoning-and-backdoor-attacks): A curated list of papers & resources linked to data poisoning, backdoor attacks and defenses against them. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_d7feb193321b.png)](https:\u002F\u002Fgithub.com\u002Fpenghui-yang\u002Fawesome-data-poisoning-and-backdoor-attacks)\n  * [BackdoorBox](https:\u002F\u002Fgithub.com\u002FTHUYimingLi\u002FBackdoorBox): An Open-sourced Python Toolbox for Backdoor Attacks and Defenses. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_adf559535dad.png)](https:\u002F\u002Fgithub.com\u002FTHUYimingLi\u002FBackdoorBox)\n  * [Stealing Part of a Production Language Model](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.06634)\n  * [Hard-Label Cryptanalytic Extraction of Neural Network Models](https:\u002F\u002Feprint.iacr.org\u002F2024\u002F1403)\n  * [https:\u002F\u002Fwww.anthropic.com\u002Fnews\u002Fdetecting-and-preventing-distillation-attacks](https:\u002F\u002Fwww.anthropic.com\u002Fnews\u002Fdetecting-and-preventing-distillation-attacks)\n\n##### ⬅️ Inversion (or inference) ⬅️\n\nThey are intended to reverse the information flow of a machine-learning model.\n\n![Inference attack](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_6362764efa6e.png)\n\nThey enable an adversary to know the model that was not explicitly intended to be shared.\n\nThey allow us to know the training data or information as statistical properties of the model.\n\nThree types are possible:\n\n  * **Membership Inference Attack (MIA)**: An adversary attempts to determine whether a sample was employed as part of the training.\n\n  * **Property Inference Attack (PIA)**: An adversary aims to extract statistical properties that were not explicitly encoded as features during the training phase.\n\n  * **Reconstruction**: An adversary tries to reconstruct one or more samples from the training set and\u002For their corresponding labels. Also called inversion.\n\n\n###### 🛡️ Defensive actions 🛡️\n\n  * Use of advanced cryptography. 
Countermeasures include [differential privacy](https:\u002F\u002Fen.wikipedia.org\u002Fwiki\u002FDifferential_privacy), [homomorphic cryptography](https:\u002F\u002Fen.wikipedia.org\u002Fwiki\u002FHomomorphic_encryption) and [secure multiparty computation](https:\u002F\u002Fen.wikipedia.org\u002Fwiki\u002FSecure_multi-party_computation).\n\n  * Use of regularization techniques such as [Dropout](https:\u002F\u002Fen.wikipedia.org\u002Fwiki\u002FDilution_(neural_networks)) due to the relationship between overtraining and privacy.\n\n  * [Model compression](https:\u002F\u002Fmedium.com\u002Fgsi-technology\u002Fan-overview-of-model-compression-techniques-for-deep-learning-in-space-3fd8d4ce84e5) has been proposed as a defense against reconstruction attacks.\n\n###### 🔗 Useful links 🔗\n\n  * [Membership Inference Attacks Against Machine Learning Models](https:\u002F\u002Farxiv.org\u002Fabs\u002F1610.05820)\n  * [Model Inversion Attacks that Exploit Confidence Information and Basic Countermeasures](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F2810103.2813677)\n  * [Machine Learning Models that Remember Too Much](https:\u002F\u002Farxiv.org\u002Fabs\u002F1709.07886)\n  * [ML-Leaks: Model and Data Independent Membership Inference Attacks and Defenses on Machine Learning Models](https:\u002F\u002Farxiv.org\u002Fabs\u002F1806.01246)\n  * [Deep Models Under the GAN: Information Leakage from Collaborative Deep Learning](https:\u002F\u002Farxiv.org\u002Fabs\u002F1702.07464)\n  * [LOGAN: Membership Inference Attacks Against Generative Models](https:\u002F\u002Fpetsymposium.org\u002Fpopets\u002F2019\u002Fpopets-2019-0008.php)\n  * [Overfitting, robustness, and malicious algorithms: A study of potential causes of privacy risk in machine learning](https:\u002F\u002Fcontent.iospress.com\u002Farticles\u002Fjournal-of-computer-security\u002Fjcs191362)\n  * [Comprehensive Privacy Analysis of Deep Learning: Stand-alone and Federated Learning under Passive and Active White-box 
Inference Attacks](https:\u002F\u002Farxiv.org\u002Fabs\u002F1812.00910)\n  * [Inference Attacks Against Collaborative Learning](https:\u002F\u002Farxiv.org\u002Fabs\u002F1805.04049)\n  * [The Secret Sharer: Evaluating and Testing Unintended Memorization in Neural Networks](https:\u002F\u002Farxiv.org\u002Fabs\u002F1802.08232)\n  * [Towards the Science of Security and Privacy in Machine Learning](https:\u002F\u002Farxiv.org\u002Fabs\u002F1611.03814)\n  * [MemGuard: Defending against Black-Box Membership Inference Attacks via Adversarial Examples](https:\u002F\u002Farxiv.org\u002Fabs\u002F1909.10594)\n  * [Extracting Training Data from Large Language Models](https:\u002F\u002Farxiv.org\u002Fabs\u002F2012.07805)\n  * [Property Inference Attacks on Fully Connected Neural Networks using Permutation Invariant Representations](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3243734.3243834)\n  * [Extracting Training Data from Diffusion Models](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.13188)\n  * [High-resolution image reconstruction with latent diffusion models from human brain activity](https:\u002F\u002Fwww.biorxiv.org\u002Fcontent\u002F10.1101\u002F2022.11.18.517004v1)\n  * [Stealing and evading malware classifiers and antivirus at low false positive conditions](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fabs\u002Fpii\u002FS0167404823001025)\n  * [Realistic fingerprint presentation attacks based on an adversarial approach](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10295513)\n  * [Active Adversarial Tests](https:\u002F\u002Fgithub.com\u002Fgoogle-research\u002Factive-adversarial-tests): Increasing Confidence in Adversarial Robustness Evaluations. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_da359b286997.png)](https:\u002F\u002Fgithub.com\u002Fgoogle-research\u002Factive-adversarial-tests)\n  * [GPT Jailbreak Status](https:\u002F\u002Fgithub.com\u002Ftg12\u002Fgpt_jailbreak_status): Updates on the status of jailbreaking the OpenAI GPT language model. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_053b74118c66.png)](https:\u002F\u002Fgithub.com\u002Ftg12\u002Fgpt_jailbreak_status)\n  * [Order of Magnitude Speedups for LLM Membership Inference](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.14513)\n  * [What GPT-oss Leaks About OpenAI's Training Data](https:\u002F\u002Ffi-le.net\u002Foss\u002F)\n  * [On the Detectability of Active Gradient Inversion Attacks in Federated Learning](https:\u002F\u002Farxiv.org\u002Fabs\u002F2511.10502)\n\n##### 💉 Poisoning 💉\n\nThey aim to corrupt the training set by causing a machine-learning model to reduce its accuracy.\n\n![Poisoning attack](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_87a289069531.png)\n\nThis attack is difficult to detect when performed on the training data since the attack can propagate among different models using the same training data.\n\nThe adversary seeks to destroy the availability of the model by modifying the decision boundary and, as a result, producing incorrect predictions or, create a backdoor in a model. In the latter, the model behaves correctly (returning the desired predictions) in most cases, except for certain inputs specially created by the adversary that produce undesired results. The adversary can manipulate the results of the predictions and launch future attacks.\n\n##### 🔓 Backdoors 🔓\n\n[BadNets](https:\u002F\u002Farxiv.org\u002Fabs\u002F1708.06733) are the simplest type of backdoor in a machine learning model. 
Moreover, BadNets are able to be preserved in a model, even if they are retrained again for a different task than the original model (transfer learning).\n\nIt is important to note that **public pre-trained models may contain backdoors**.\n\n###### 🛡️ Defensive actions 🛡️\n\n  * Detection of poisoned data, along with the use of data sanitization.\n\n  * Robust training methods.\n\n  * Specific defenses.\n    * [Neural Cleanse: Identifying and Mitigating Backdoor Attacks in Neural Networks](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F8835365)\n    * [STRIP: A Defence Against Trojan Attacks on Deep Neural Networks](https:\u002F\u002Farxiv.org\u002Fabs\u002F1902.06531)\n    * [Detecting Backdoor Attacks on Deep Neural Networks by Activation Clustering](https:\u002F\u002Farxiv.org\u002Fabs\u002F1811.03728)\n    * [ABS: Scanning Neural Networks for Back-doors by Artificial Brain Stimulation](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3319535.3363216)\n    * [DeepInspect: A Black-box Trojan Detection and Mitigation Framework for Deep Neural Networks](https:\u002F\u002Fwww.ijcai.org\u002Fproceedings\u002F2019\u002F647)\n    * [Defending Neural Backdoors via Generative Distribution Modeling](https:\u002F\u002Farxiv.org\u002Fabs\u002F1910.04749)\n    * [A Comprehensive Survey on Backdoor Attacks and Their Defenses in Face Recognition Systems](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F10480615)\n    * [DataElixir: Purifying Poisoned Dataset to Mitigate Backdoor Attacks via Diffusion Models](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.11057)\n\n###### 🔗 Useful links 🔗\n\n  * [Poisoning Attacks against Support Vector Machines](https:\u002F\u002Farxiv.org\u002Fabs\u002F1206.6389)\n  * [Targeted Backdoor Attacks on Deep Learning Systems Using Data Poisoning](https:\u002F\u002Farxiv.org\u002Fabs\u002F1712.05526)\n  * [Trojaning Attack on Neural 
Networks](https:\u002F\u002Fwww.semanticscholar.org\u002Fpaper\u002FTrojaning-Attack-on-Neural-Networks-Liu-Ma\u002F08f7ac64b420210aa46fcbbdb0f206215f2e0644)\n  * [Fine-Pruning: Defending Against Backdooring Attacks on Deep Neural Networks](https:\u002F\u002Farxiv.org\u002Fabs\u002F1805.12185)\n  * [Poison Frogs! Targeted Clean-Label Poisoning Attacks on Neural Networks](https:\u002F\u002Farxiv.org\u002Fabs\u002F1804.00792)\n  * [Spectral Signatures in Backdoor Attacks](https:\u002F\u002Farxiv.org\u002Fabs\u002F1811.00636)\n  * [Latent Backdoor Attacks on Deep Neural Networks](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3319535.3354209)\n  * [Regula Sub-rosa: Latent Backdoor Attacks on Deep Neural Networks](https:\u002F\u002Farxiv.org\u002Fabs\u002F1905.10447)\n  * [Hidden Trigger Backdoor Attacks](https:\u002F\u002Farxiv.org\u002Fabs\u002F1910.00033)\n  * [Transferable Clean-Label Poisoning Attacks on Deep Neural Nets](https:\u002F\u002Farxiv.org\u002Fabs\u002F1905.05897)\n  * [TABOR: A Highly Accurate Approach to Inspecting and Restoring Trojan Backdoors in AI Systems](https:\u002F\u002Farxiv.org\u002Fabs\u002F1908.01763)\n  * [Towards Poisoning of Deep Learning Algorithms with Back-gradient Optimization](https:\u002F\u002Farxiv.org\u002Fabs\u002F1708.08689)\n  * [When Does Machine Learning FAIL? 
Generalized Transferability for Evasion and Poisoning Attacks](https:\u002F\u002Farxiv.org\u002Fabs\u002F1803.06975)\n  * [Certified Defenses for Data Poisoning Attacks](https:\u002F\u002Farxiv.org\u002Fabs\u002F1706.03691)\n  * [Input-Aware Dynamic Backdoor Attack](https:\u002F\u002Farxiv.org\u002Fabs\u002F2010.08138)\n  * [How To Backdoor Federated Learning](https:\u002F\u002Farxiv.org\u002Fabs\u002F1807.00459)\n  * [Planting Undetectable Backdoors in Machine Learning Models](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.06974)\n  * [Fool the AI!](https:\u002F\u002Ffooltheai.mybluemix.net\u002F): Hackers can use backdoors to poison training data and cause an AI model to misclassify images. Learn how IBM researchers can tell when data has been poisoned, and then guess what backdoors have been hidden in these datasets. Can you guess the backdoor?\n  * [Backdoor Toolbox](https:\u002F\u002Fgithub.com\u002Fvtu81\u002Fbackdoor-toolbox): A compact toolbox for backdoor attacks and defenses. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_c487a033e1b5.png)](https:\u002F\u002Fgithub.com\u002Fvtu81\u002Fbackdoor-toolbox)\n  * [LaserGuider: A Laser Based Physical Backdoor Attack against Deep Neural Networks](https:\u002F\u002Fwww.arxiv.org\u002Fabs\u002F2412.03993)\n  * [Energy-latency attacks via sponge poisoning](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fabs\u002Fpii\u002FS0020025525000374)\n  * [ShadowCoT: Cognitive Hijacking for Stealthy Reasoning Backdoors in LLMs](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.05605)\n  * [A small number of samples can poison LLMs of any size](https:\u002F\u002Fwww.anthropic.com\u002Fresearch\u002Fsmall-samples-poison)\n\n##### 🏃‍♂️ Evasion 🏃‍♂️\n\nAn adversary adds a small perturbation (in the form of noise) to the input of a machine learning model to make it classify incorrectly (example adversary).\n\n![Evasion 
attack](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_59b021b36c02.png)\n\nThey are similar to poisoning attacks, but their main difference is that evasion attacks try to exploit the weaknesses of the model in the inference phase.\n\nThe goal of the adversary is for adversarial examples to be imperceptible to a human.\n\nTwo types of attack can be performed depending on the output desired by the opponent:\n\n  * **Targeted**: the adversary aims to obtain a prediction of his choice.\n\n    ![Targeted attack](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_c21a80e25365.png)\n\n  * **Untargeted**: the adversary intends to achieve a misclassification.\n\n    ![Untargeted attack](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_34478e8e2f5a.png)\n\nThe most common attacks are **white-box attacks**:\n\n  * [L-BFGS](https:\u002F\u002Farxiv.org\u002Fabs\u002F1312.6199)\n  * [FGSM](https:\u002F\u002Farxiv.org\u002Fabs\u002F1412.6572)\n  * [BIM](https:\u002F\u002Farxiv.org\u002Fabs\u002F1607.02533)\n  * [JSMA](https:\u002F\u002Farxiv.org\u002Fabs\u002F1511.07528)\n  * [Carlini & Wagner (C&W)](https:\u002F\u002Farxiv.org\u002Fabs\u002F1608.04644)\n  * [NewtonFool](https:\u002F\u002Fandrewxiwu.github.io\u002Fpublic\u002Fpapers\u002F2017\u002FJWJ17-objective-metrics-and-gradient-descent-based-algorithms-for-adversarial-examples-in-machine-learning.pdf)\n  * [EAD](https:\u002F\u002Farxiv.org\u002Fabs\u002F1709.04114)\n  * [UAP](https:\u002F\u002Farxiv.org\u002Fabs\u002F1610.08401)\n\n###### 🛡️ Defensive actions 🛡️\n\n  * Adversarial training, which consists of crafting adversarial examples during training to allow the model to learn features of the adversarial examples, making the model more robust to this type of attack.\n\n  * Transformations on inputs.\n\n  * Gradient masking\u002Fregularization. 
[Not very effective](https:\u002F\u002Farxiv.org\u002Fabs\u002F1802.00420).\n\n  * Weak defenses.\n\n  * [Prompt Injection Defenses](https:\u002F\u002Fgithub.com\u002Ftldrsec\u002Fprompt-injection-defenses): Every practical and proposed defense against prompt injection. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_b4a656d05236.png)](https:\u002F\u002Fgithub.com\u002Ftldrsec\u002Fprompt-injection-defenses)\n\n  * [Lakera PINT Benchmark](https:\u002F\u002Fgithub.com\u002Flakeraai\u002Fpint-benchmark): The Prompt Injection Test (PINT) Benchmark provides a neutral way to evaluate the performance of a prompt injection detection system, like Lakera Guard, without relying on known public datasets that these tools can use to optimize for evaluation performance. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_7ee39731865b.png)](https:\u002F\u002Fgithub.com\u002Flakeraai\u002Fpint-benchmark)\n\n  * [Devil's Inference](https:\u002F\u002Fgithub.com\u002FAI-Voodoo\u002FDevil_Inference): A method to adversarially assess the Phi-3 Instruct model by observing the attention distribution across its heads when exposed to specific inputs. This approach prompts the model to adopt the 'devil's mindset’, enabling it to generate outputs of a violent nature. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_97fd5d70dcd0.png)](https:\u002F\u002Fgithub.com\u002FAI-Voodoo\u002FDevil_Inference)\n\n  * [Over-the-Air Adversarial Attack Detection: from Datasets to Defenses](https:\u002F\u002Farxiv.org\u002Fabs\u002F2509.09296)\n\n  * [Harnessing Hyperbolic Geometry for Harmful Prompt Detection and Sanitization](https:\u002F\u002Fopenreview.net\u002Fforum?id=G8HnUTlMpt)\n\n###### 🔗 Useful links 🔗\n\n  * [Practical Black-Box Attacks against Machine Learning](https:\u002F\u002Farxiv.org\u002Fabs\u002F1602.02697)\n  * [The Limitations of Deep Learning in Adversarial Settings](https:\u002F\u002Farxiv.org\u002Fabs\u002F1511.07528)\n  * [Towards Evaluating the Robustness of Neural Networks](https:\u002F\u002Farxiv.org\u002Fabs\u002F1608.04644)\n  * [Distillation as a Defense to Adversarial Perturbations Against Deep Neural Networks](https:\u002F\u002Farxiv.org\u002Fabs\u002F1511.04508)\n  * [Adversarial examples in the physical world](https:\u002F\u002Farxiv.org\u002Fabs\u002F1607.02533)\n  * [Ensemble Adversarial Training: Attacks and Defenses](https:\u002F\u002Farxiv.org\u002Fabs\u002F1705.07204)\n  * [Towards Deep Learning Models Resistant to Adversarial Attacks](https:\u002F\u002Farxiv.org\u002Fabs\u002F1706.06083)\n  * [Intriguing properties of neural networks](https:\u002F\u002Farxiv.org\u002Fabs\u002F1312.6199)\n  * [Explaining and Harnessing Adversarial Examples](https:\u002F\u002Farxiv.org\u002Fabs\u002F1412.6572)\n  * [Delving into Transferable Adversarial Examples and Black-box Attacks](https:\u002F\u002Farxiv.org\u002Fabs\u002F1611.02770)\n  * [Adversarial machine learning at scale](https:\u002F\u002Farxiv.org\u002Fabs\u002F1611.01236)\n  * [Black-box Adversarial Attacks with Limited Queries and Information](https:\u002F\u002Farxiv.org\u002Fabs\u002F1804.08598)\n  * [Feature Squeezing: Detecting Adversarial Examples in Deep Neural 
Networks](https:\u002F\u002Farxiv.org\u002Fabs\u002F1704.01155)\n  * [Decision-Based Adversarial Attacks: Reliable Attacks Against Black-Box Machine Learning Models](https:\u002F\u002Farxiv.org\u002Fabs\u002F1712.04248)\n  * [Boosting Adversarial Attacks with Momentum](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fpapers\u002FDong_Boosting_Adversarial_Attacks_CVPR_2018_paper.pdf)\n  * [The Space of Transferable Adversarial Examples](https:\u002F\u002Farxiv.org\u002Fabs\u002F1704.03453)\n  * [Countering Adversarial Images using Input Transformations](https:\u002F\u002Farxiv.org\u002Fabs\u002F1711.00117)\n  * [Defense-GAN: Protecting Classifiers Against Adversarial Attacks Using Generative Models](https:\u002F\u002Farxiv.org\u002Fabs\u002F1805.06605)\n  * [Synthesizing Robust Adversarial Examples](https:\u002F\u002Farxiv.org\u002Fabs\u002F1707.07397)\n  * [Mitigating adversarial effects through randomization](https:\u002F\u002Farxiv.org\u002Fabs\u002F1711.01991)\n  * [On Detecting Adversarial Perturbations](https:\u002F\u002Farxiv.org\u002Fabs\u002F1702.04267)\n  * [Adversarial Patch](https:\u002F\u002Farxiv.org\u002Fabs\u002F1712.09665)\n  * [PixelDefend: Leveraging Generative Models to Understand and Defend against Adversarial Examples](https:\u002F\u002Farxiv.org\u002Fabs\u002F1710.10766)\n  * [One Pixel Attack for Fooling Deep Neural Networks](https:\u002F\u002Farxiv.org\u002Fabs\u002F1710.08864)\n  * [Efficient Defenses Against Adversarial Attacks](https:\u002F\u002Farxiv.org\u002Fabs\u002F1707.06728)\n  * [Robust Physical-World Attacks on Deep Learning Visual Classification](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F8578273)\n  * [Adversarial Perturbations Against Deep Neural Networks for Malware Classification](https:\u002F\u002Farxiv.org\u002Fabs\u002F1606.04435)\n  * [3D Adversarial Attacks Beyond Point Cloud](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.12146)\n  * [Adversarial Perturbations Fool Deepfake 
Detectors](https:\u002F\u002Farxiv.org\u002Fabs\u002F2003.10596)\n  * [Adversarial Deepfakes: Evaluating Vulnerability of Deepfake Detectors to Adversarial Examples](https:\u002F\u002Farxiv.org\u002Fabs\u002F2002.12749)\n  * [An Overview of Vulnerabilities of Voice Controlled Systems](https:\u002F\u002Farxiv.org\u002Fabs\u002F1803.09156)\n  * [FastWordBug: A Fast Method To Generate Adversarial Text Against NLP Applications](https:\u002F\u002Farxiv.org\u002Fabs\u002F2002.00760)\n  * [Phantom of the ADAS: Securing Advanced Driver Assistance Systems from Split-Second Phantom Attacks](https:\u002F\u002Fwww.nassiben.com\u002Fphantoms)\n  * [llm-attacks](https:\u002F\u002Fgithub.com\u002Fllm-attacks\u002Fllm-attacks): Universal and Transferable Attacks on Aligned Language Models. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_5f5110ec16a7.png)](https:\u002F\u002Fgithub.com\u002Fllm-attacks\u002Fllm-attacks)\n  * [Attacks on AI Models: Prompt Injection vs supply chain poisoning](https:\u002F\u002Fblog.mithrilsecurity.io\u002Fattacks-on-ai-models-prompt-injection-vs-supply-chain-poisoning\u002F)\n  * [Prompt Injection attack against LLM-integrated Applications](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.05499.pdf)\n  * [garak](https:\u002F\u002Fgithub.com\u002FNVIDIA\u002Fgarak): LLM vulnerability scanner. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_a9151d361735.png)](https:\u002F\u002Fgithub.com\u002FNVIDIA\u002Fgarak)\n  * [promptfoo](https:\u002F\u002Fgithub.com\u002Fpromptfoo\u002Fpromptfoo): Open-source LLM red teaming with 100+ attack types. AI Red teaming, pentesting, and vulnerability scanning for LLMs. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_299c9048c490.png)](https:\u002F\u002Fgithub.com\u002Fpromptfoo\u002Fpromptfoo)\n  * [Simple Adversarial Transformations in PyTorch](https:\u002F\u002Fdavidstutz.de\u002Fsimple-adversarial-transformations-in-pytorch\u002F)\n  * [ChatGPT Plugins: Data Exfiltration via Images & Cross Plugin Request Forgery](https:\u002F\u002Fembracethered.com\u002Fblog\u002Fposts\u002F2023\u002Fchatgpt-webpilot-data-exfil-via-markdown-injection\u002F)\n  * [Image Hijacks: Adversarial Images can Control Generative Models at Runtime](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.00236)\n  * [Multi-attacks: Many images + the same adversarial attack → many target labels](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.03792)\n  * [ACTIVE: Towards Highly Transferable 3D Physical Camouflage for Universal and Robust Vehicle Evasion](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.07009)\n  * [LLM Red Teaming GPTS's: Prompt Leaking, API Leaking, Documents Leaking](https:\u002F\u002Fadversa.ai\u002Fblog\u002Fllm-red-teaming-gpts-prompt-leaking-api-leaking-documents-leaking\u002F)\n  * [Human-Producible Adversarial Examples](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.00438)\n  * [Multilingual Jailbreak Challenges in Large Language Models](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.06474)\n  * [Misusing Tools in Large Language Models With Visual Adversarial Examples](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.03185)\n  * [AutoDAN: Interpretable Gradient-Based Adversarial Attacks on Large Language Models](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.15140)\n  * [Multimodal Injection](https:\u002F\u002Fgithub.com\u002Febagdasa\u002Fmultimodal_injection): (Ab)using Images and Sounds for Indirect Instruction Injection in Multi-Modal LLMs. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_0e1982488540.png)](https:\u002F\u002Fgithub.com\u002Febagdasa\u002Fmultimodal_injection)\n  * [JailbreakingLLMs](https:\u002F\u002Fgithub.com\u002Fpatrickrchao\u002FJailbreakingLLMs): Jailbreaking Black Box Large Language Models in Twenty Queries. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_47e8a4d45833.png)](https:\u002F\u002Fgithub.com\u002Fpatrickrchao\u002FJailbreakingLLMs)\n  * [Tree of Attacks: Jailbreaking Black-Box LLMs Automatically](https:\u002F\u002Fassets-global.website-files.com\u002F62a8db3f7f80ab5d3420c03a\u002F656eaaed8e762c7543693902_Robust_Intelligence_Blackbox_Attacks_on_LLMs.pdf)\n  * [GPTs](https:\u002F\u002Fgithub.com\u002Flinexjlin\u002FGPTs): Leaked prompts of GPTs. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_ad1d1e50932c.png)](https:\u002F\u002Fgithub.com\u002Flinexjlin\u002FGPTs)\n  * [AI Exploits](https:\u002F\u002Fgithub.com\u002Fprotectai\u002Fai-exploits): A collection of real world AI\u002FML exploits for responsibly disclosed vulnerabilities. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_f40eb8872c05.png)](https:\u002F\u002Fgithub.com\u002Fprotectai\u002Fai-exploits)\n  * [LLM Agents can Autonomously Hack Websites](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.06664v1)\n  * [Cloudflare announces Firewall for AI](https:\u002F\u002Fblog.cloudflare.com\u002Ffirewall-for-ai)\n  * [PromptInject](https:\u002F\u002Fgithub.com\u002Fagencyenterprise\u002FPromptInject): Framework that assembles prompts in a modular fashion to provide a quantitative analysis of the robustness of LLMs to adversarial prompt attacks. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_84a99ac59563.png)](https:\u002F\u002Fgithub.com\u002Fagencyenterprise\u002FPromptInject)\n  * [LLM Red Teaming: Adversarial, Programming, and Linguistic approaches VS ChatGPT, Claude, Mistral, Grok, LLAMA, and Gemini](https:\u002F\u002Fadversa.ai\u002Fblog\u002Fllm-red-teaming-vs-grok-chatgpt-claude-gemini-bing-mistral-llama\u002F)\n  * [The Instruction Hierarchy: Training LLMs to Prioritize Privileged Instructions](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.13208)\n  * [Prompt Injection \u002F JailBreaking a Banking LLM Agent (GPT-4, Langchain)](https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=5rXVg8cxne4)\n  * [GitHub Copilot Chat: From Prompt Injection to Data Exfiltration](https:\u002F\u002Fembracethered.com\u002Fblog\u002Fposts\u002F2024\u002Fgithub-copilot-chat-prompt-injection-data-exfiltration\u002F?s=35)\n  * [Adversarial Examples are Misaligned in Diffusion Model Manifolds](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.06637)\n  * [Image-to-Text Logic Jailbreak: Your Imagination Can Help You Do Anything](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.02534)\n  * [Mitigating Skeleton Key, a new type of generative AI jailbreak technique](https:\u002F\u002Fwww.microsoft.com\u002Fen-us\u002Fsecurity\u002Fblog\u002F2024\u002F06\u002F26\u002Fmitigating-skeleton-key-a-new-type-of-generative-ai-jailbreak-technique\u002F)\n  * [Image Obfuscation Benchmark](https:\u002F\u002Fgithub.com\u002Fgoogle-deepmind\u002Fimage_obfuscation_benchmark): This repository contains the code to evaluate models on the image obfuscation benchmark, first presented in [Benchmarking Robustness to Adversarial Image Obfuscations](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.12993). 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_e73077a5a2bf.png)](https:\u002F\u002Fgithub.com\u002Fgoogle-deepmind\u002Fimage_obfuscation_benchmark)\n  * [Jailbreaking Large Language Models with Symbolic Mathematics](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.11445)\n  * [Adversarial Reasoning at Jailbreaking Time](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.01633)\n  * [How we estimate the risk from prompt injection attacks on AI systems](https:\u002F\u002Fsecurity.googleblog.com\u002F2025\u002F01\u002Fhow-we-estimate-risk-from-prompt.html)\n  * [Adversarial Misuse of Generative AI](https:\u002F\u002Fcloud.google.com\u002Fblog\u002Ftopics\u002Fthreat-intelligence\u002Fadversarial-misuse-generative-ai)\n  * [Defeating Prompt Injections by Design](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.18813)\n  * [Mitigating prompt injection attacks with a layered defense strategy](https:\u002F\u002Fsecurity.googleblog.com\u002F2025\u002F06\u002Fmitigating-prompt-injection-attacks.html)\n  * [Logic layer Prompt Control Injection (LPCI): A Novel Security Vulnerability Class in Agentic Systems](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.10457)\n  * [Prompt injection engineering for attackers: Exploiting GitHub Copilot](https:\u002F\u002Fblog.trailofbits.com\u002F2025\u002F08\u002F06\u002Fprompt-injection-engineering-for-attackers-exploiting-github-copilot\u002F)\n  * [The State of Adversarial Prompts](https:\u002F\u002Fblog.securitybreak.io\u002Fthe-state-of-adversarial-prompts-84c364b5d860)\n  * [TransferBench: Benchmarking Ensemble-based Black-box Transfer Attacks](https:\u002F\u002Ftransferbench.github.io\u002F)\n  * [Attention Tracker: Detecting Prompt Injection Attacks in LLMs](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.00348v2)\n  * [The Attacker Moves Second: Stronger Adaptive Attacks Bypass Defenses Against Llm Jailbreaks and Prompt Injections](https:\u002F\u002Farxiv.org\u002Fabs\u002F2510.09023)\n 
 * [Weaponizing Calendar Invites: A Semantic Attack on Google Gemini](https:\u002F\u002Fwww.miggo.io\u002Fpost\u002Fweaponizing-calendar-invites-a-semantic-attack-on-google-gemini)\n  * [GTIG AI Threat Tracker: Distillation, Experimentation, and (Continued) Integration of AI for Adversarial Use](https:\u002F\u002Fcloud.google.com\u002Fblog\u002Ftopics\u002Fthreat-intelligence\u002Fdistillation-experimentation-integration-ai-adversarial-use)\n  * [How Cline Was Compromised: Prompt Injection and Dangling Commits in the Cline Supply Chain Attack](https:\u002F\u002Fmurraycole.com\u002Fposts\u002Fcline-compromise-prompt-injection-supply-chain-attack)\n  * [Aguara: Security scanner for AI agent skills & MCP servers](https:\u002F\u002Faguarascan.com\u002F)\n\n#### 🛠️ Tools 🛠️\n\n| Name | Type | Supported algorithms | Supported attack types | Attack\u002FDefence | Supported frameworks | Popularity |\n| ---------- | :----------: | :----------: | :----------: | :----------: | :----------: | :----------: |\n| [Cleverhans](https:\u002F\u002Fgithub.com\u002Fcleverhans-lab\u002Fcleverhans) | Image | [Deep Learning](https:\u002F\u002Fen.wikipedia.org\u002Fwiki\u002FDeep_learning) | Evasion | Attack | [Tensorflow](https:\u002F\u002Fwww.tensorflow.org), [Keras](https:\u002F\u002Fkeras.io), [JAX](https:\u002F\u002Fgithub.com\u002Fgoogle\u002Fjax) | [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_cead982b157d.png)](https:\u002F\u002Fgithub.com\u002Fcleverhans-lab\u002Fcleverhans)|\n| [Foolbox](https:\u002F\u002Fgithub.com\u002Fbethgelab\u002Ffoolbox) | Image | Deep Learning | Evasion | Attack | Tensorflow, [PyTorch](https:\u002F\u002Fpytorch.org), JAX | [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_d126634879ca.png)](https:\u002F\u002Fgithub.com\u002Fbethgelab\u002Ffoolbox)|\n| [ART](https:\u002F\u002Fgithub.com\u002FTrusted-AI\u002Fadversarial-robustness-toolbox) | Any type 
(image, tabular data, audio,...) | Deep Learning, [SVM](https:\u002F\u002Fen.wikipedia.org\u002Fwiki\u002FSupport_vector_machine), [LR](https:\u002F\u002Fen.wikipedia.org\u002Fwiki\u002FLogistic_regression), etc. | Any (extraction, inference, poisoning, evasion) | Both | Tensorflow, Keras, Pytorch, [Scikit Learn](https:\u002F\u002Fscikit-learn.org) | [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_a7ecbf6cf806.png)](https:\u002F\u002Fgithub.com\u002FTrusted-AI\u002Fadversarial-robustness-toolbox)|\n| [TextAttack](https:\u002F\u002Fgithub.com\u002FQData\u002FTextAttack) | Text | Deep Learning | Evasion | Attack | Keras, [HuggingFace](https:\u002F\u002Fhuggingface.co\u002F) | [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_dc1cff1a5a3f.png)](https:\u002F\u002Fgithub.com\u002FQData\u002FTextAttack)|\n| [Advertorch](https:\u002F\u002Fgithub.com\u002FBorealisAI\u002Fadvertorch) | Image | Deep Learning | Evasion | Both | --- | [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_c6092ce01adf.png)](https:\u002F\u002Fgithub.com\u002FBorealisAI\u002Fadvertorch)|\n| [AdvBox](https:\u002F\u002Fgithub.com\u002Fadvboxes\u002FAdvBox) | Image | Deep Learning | Evasion | Both | PyTorch, Tensorflow, [MxNet](https:\u002F\u002Fmxnet.apache.org) | [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_bf0cb81f51eb.png)](https:\u002F\u002Fgithub.com\u002Fadvboxes\u002FAdvBox)|\n| [DeepRobust](https:\u002F\u002Fgithub.com\u002FDSE-MSU\u002FDeepRobust) | Image, graph | Deep Learning | Evasion | Both | PyTorch | [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_ffb018312546.png)](https:\u002F\u002Fgithub.com\u002FDSE-MSU\u002FDeepRobust)|\n| [Counterfit](https:\u002F\u002Fgithub.com\u002FAzure\u002Fcounterfit) | Any | Any | Evasion | Attack | 
--- | [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_c8973bb11171.png)](https:\u002F\u002Fgithub.com\u002FAzure\u002Fcounterfit)|\n| [Adversarial Audio Examples](https:\u002F\u002Fgithub.com\u002Fcarlini\u002Faudio_adversarial_examples) | Audio | [DeepSpeech](https:\u002F\u002Fgithub.com\u002Fmozilla\u002FDeepSpeech) | Evasion | Attack | --- | [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_b19a32a9ba4e.png)](https:\u002F\u002Fgithub.com\u002Fcarlini\u002Faudio_adversarial_examples)|\n\n###### ART\n\n[Adversarial Robustness Toolbox](https:\u002F\u002Fgithub.com\u002FTrusted-AI\u002Fadversarial-robustness-toolbox), abbreviated as ART, is an open-source Adversarial Machine Learning library for testing the robustness of machine learning models.\n\n![ART logo](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_590b613806bb.png)\n\nIt is developed in Python and implements extraction, inversion, poisoning and evasion attacks and defenses.\n\nART supports the most popular frameworks: Tensorflow, Keras, PyTorch, MxNet, and ScikitLearn among many others.\n\nIt is not limited to the use of models that use images as input but also supports other types of data, such as audio, video, tabular data, etc.\n\n> [Workshop to learn Adversarial Machine Learning with ART 🇪🇸](https:\u002F\u002Fgithub.com\u002Fjiep\u002Fadversarial-machine-learning)\n\n###### Cleverhans\n\n[Cleverhans](https:\u002F\u002Fgithub.com\u002Fcleverhans-lab\u002Fcleverhans) is a library for performing evasion attacks and testing the robustness of a deep learning model on image models.\n\n![Cleverhans logo](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_ff3e571006f8.png)\n\nIt is developed in Python and integrates with the Tensorflow, Torch and JAX frameworks.\n\nIt implements numerous attacks such as L-BFGS, FGSM, JSMA, 
C&W, among others.\n\n## 🔧 Use 🔧\n\nAI is used to accomplish malicious tasks and boost classic attacks.\n\n### 🕵️‍♂️ Pentesting 🕵️‍♂️\n\n  * [GyoiThon](https:\u002F\u002Fgithub.com\u002Fgyoisamurai\u002FGyoiThon): Next generation penetration test tool, intelligence gathering tool for web server. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_b028ccdff5e8.png)](https:\u002F\u002Fgithub.com\u002Fgyoisamurai\u002FGyoiThon)\n  * [Cochise](https:\u002F\u002Fgithub.com\u002Fandreashappe\u002Fcochise\u002F): LLM-agent performing autonomous penetration test against Microsoft Windows Active Directory (using [GOAD](https:\u002F\u002Fgithub.com\u002FOrange-Cyberdefense\u002FGOAD) as testbed).\n  * [HackingBuddyGPT](https:\u002F\u002Fgithub.com\u002Fipa-lab\u002FhackingBuddyGPT): LLMs x PenTesting. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_e6038f5060b7.png)](https:\u002F\u002Fgithub.com\u002Fipa-lab\u002FhackingBuddyGPT)\n  * [Deep Exploit](https:\u002F\u002Fgithub.com\u002F13o-bbr-bbq\u002Fmachine_learning_security\u002Ftree\u002Fmaster\u002FDeepExploit): Fully automatic penetration test tool using Deep Reinforcement Learning. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_17c673ee0ff0.png)](https:\u002F\u002Fgithub.com\u002F13o-bbr-bbq\u002Fmachine_learning_security)\n  * [AutoPentest-DRL](https:\u002F\u002Fgithub.com\u002Fcrond-jaist\u002FAutoPentest-DRL): Automated penetration testing using deep reinforcement learning. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_118fb23a3093.png)](https:\u002F\u002Fgithub.com\u002Fcrond-jaist\u002FAutoPentest-DRL)\n  * [DeepGenerator](https:\u002F\u002Fgithub.com\u002F13o-bbr-bbq\u002Fmachine_learning_security\u002Ftree\u002Fmaster\u002FGenerator): Fully automatically generate injection codes for web application assessment using Genetic Algorithm and Generative Adversarial Networks.\n  * [Eyeballer](https:\u002F\u002Fgithub.com\u002FBishopFox\u002Feyeballer): Eyeballer is meant for large-scope network penetration tests where you need to find \"interesting\" targets from a huge set of web-based hosts. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_17d245b14954.png)](https:\u002F\u002Fgithub.com\u002FBishopFox\u002Feyeballer)\n  * [Nebula](https:\u002F\u002Fgithub.com\u002Fberylliumsec\u002Fnebula): AI-Powered Ethical Hacking Assistant. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_c8e5a0c2e847.png)](https:\u002F\u002Fgithub.com\u002Fberylliumsec\u002Fnebula)\n  * [AI-OPS](https:\u002F\u002Fgithub.com\u002FantoninoLorenzo\u002FAI-OPS): Penetration Testing AI Assistant based on open source LLMs. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_f03952292ab4.png)](https:\u002F\u002Fgithub.com\u002FantoninoLorenzo\u002FAI-OPS)\n  * [Can LLMs Hack Enterprise Networks?](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.04227): Autonomous Assumed Breach Penetration-Testing Active Directory Networks\n  * [Teams of LLM Agents can Exploit Zero-Day Vulnerabilities](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.01637)\n  * [Insights and Current Gaps in Open-Source LLM Vulnerability Scanners: A Comparative Analysis](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.16527)\n  * [Comparing AI Agents to Cybersecurity Professionals in Real-World Penetration Testing](https:\u002F\u002Farxiv.org\u002Fabs\u002F2512.09882)\n  * [CAI](https:\u002F\u002Fgithub.com\u002Faliasrobotics\u002Fcai): An Open, Bug Bounty-Ready Cybersecurity AI [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_025f282dc6f2.png)](https:\u002F\u002Fgithub.com\u002Faliasrobotics\u002Fcai)\n  * [Shannon](https:\u002F\u002Fgithub.com\u002FKeygraphHQ\u002Fshannon): Shannon Lite is an autonomous, white-box AI pentester for web applications and APIs. It analyzes your source code, identifies attack vectors, and executes real exploits to prove vulnerabilities before they reach production. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_797731749188.png)](https:\u002F\u002Fgithub.com\u002FKeygraphHQ\u002Fshannon)\n\n### 🦠 Malware 🦠\n\n  * [DeepLocker](https:\u002F\u002Fi.blackhat.com\u002Fus-18\u002FThu-August-9\u002Fus-18-Kirat-DeepLocker-Concealing-Targeted-Attacks-with-AI-Locksmithing.pdf): Concealing targeted attacks with AI locksmithing, by IBM Labs on BH.\n  * [An Overview of Artificial Intelligence Used in Malware](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-031-17030-0_4): A curated list of AI Malware resources.\n  * [DeepObfusCode](https:\u002F\u002Farxiv.org\u002Fabs\u002F1909.01837): Source code obfuscation through sequence-to-sequence networks.\n  * [AutoCAT](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.08025): Reinforcement learning for automated exploration of cache-timing attacks.\n  * [AI-BASED BOTNET](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.02223): A game-theoretic approach for AI-based botnet attack defence.\n  * [SECML_Malware](https:\u002F\u002Fgithub.com\u002Fpralab\u002Fsecml_malware): Python library for creating adversarial attacks against Windows Malware detectors. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_25ba5928a447.png)](https:\u002F\u002Fgithub.com\u002Fpralab\u002Fsecml_malware)\n  * [Transcendent-release](https:\u002F\u002Fgithub.com\u002Fs2labres\u002Ftranscendent-release): Using conformal evaluation to detect concept drift affecting malware detection. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_8f40119f1d0c.png)](https:\u002F\u002Fgithub.com\u002Fs2labres\u002Ftranscendent-release)\n\n### 🗺️ OSINT 🗺️\n\n  * [SNAP_R](https:\u002F\u002Fgithub.com\u002Fzerofox-oss\u002FSNAP_R): Generate automatically spear-phishing posts on social media. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_157a6ae970cc.png)](https:\u002F\u002Fgithub.com\u002Fzerofox-oss\u002FSNAP_R)\n  * [SpyScrap](https:\u002F\u002Fgithub.com\u002FRuthGnz\u002FSpyScrap): SpyScrap combines facial recognition methods to filter the results and uses natural language processing to obtain important entities from the websites where the user appears. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_fb89388d04d0.png)](https:\u002F\u002Fgithub.com\u002FRuthGnz\u002FSpyScrap)\n\n### 📧 Phishing 📧\n\n  * [DeepDGA](https:\u002F\u002Fgithub.com\u002Froreagan\u002FDeepDGA): Implementation of DeepDGA: Adversarially-Tuned Domain Generation and Detection. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_2df0c0617342.png)](https:\u002F\u002Fgithub.com\u002Froreagan\u002FDeepDGA)\n  * [ScamAgents: How AI Agents Can Simulate Human-Level Scam Calls](https:\u002F\u002Farxiv.org\u002Fabs\u002F2508.06457)\n\n### 🕵 Threat Intelligence 🕵\n\n  * [From Sands to Mansions: Enabling Automatic Full-Life-Cycle Cyberattack Construction with LLM](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.16928) \n\n### ⚙️ Reverse engineering ⚙️\n\n  * [We hid backdoors in ~40MB binaries and asked AI + Ghidra to find them](https:\u002F\u002Fquesma.com\u002Fblog\u002Fintroducing-binaryaudit\u002F)\n  * [Malware Reverse Engineering is no longer a human problem!](https:\u002F\u002Fblog.securitybreak.io\u002Fmalware-reverse-engineering-is-no-longer-a-human-problem-5441e4a0564f)\n  * [GhidraMCP](https:\u002F\u002Fgithub.com\u002FLaurieWired\u002FGhidraMCP): MCP Server for Ghidra. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_d608775d14e3.png)](https:\u002F\u002Fgithub.com\u002FLaurieWired\u002FGhidraMCP)\n  * [ghidra-mcp](https:\u002F\u002Fgithub.com\u002Fbethington\u002Fghidra-mcp): Production-grade Ghidra MCP Server — 179 MCP tools, 147 GUI + 172 headless endpoints, Ghidra Server integration, cross-binary documentation transfer, batch operations, AI documentation workflows, and Docker deployment for AI-powered reverse engineering. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_8cc3e5f58d31.png)](https:\u002F\u002Fgithub.com\u002Fbethington\u002Fghidra-mcp)\n\n### 🌀 Side channels 🌀\n\n  * [SCAAML](https:\u002F\u002Fgithub.com\u002Fgoogle\u002Fscaaml): Side Channel Attacks Assisted with Machine Learning. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_b065123332fd.png)](https:\u002F\u002Fgithub.com\u002Fgoogle\u002Fscaaml)\n\n### 👨‍🎤 Generative AI 👨‍🎤\n\n#### 🔊 Audio 🔊\n\n##### 🛠️ Tools 🛠️\n\n  * [deep-voice-conversion](https:\u002F\u002Fgithub.com\u002Fandabi\u002Fdeep-voice-conversion): Deep neural networks for voice conversion (voice style transfer) in Tensorflow. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_8365bdc35c92.png)](https:\u002F\u002Fgithub.com\u002Fandabi\u002Fdeep-voice-conversion)\n  * [tacotron](https:\u002F\u002Fgithub.com\u002Fkeithito\u002Ftacotron): A TensorFlow implementation of Google's Tacotron speech synthesis with pre-trained model (unofficial). [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_a7e138c937e5.png)](https:\u002F\u002Fgithub.com\u002Fkeithito\u002Ftacotron)\n  * [Real-Time-Voice-Cloning](https:\u002F\u002Fgithub.com\u002FCorentinJ\u002FReal-Time-Voice-Cloning): Clone a voice in 5 seconds to generate arbitrary speech in real-time. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_b709d230bd06.png)](https:\u002F\u002Fgithub.com\u002FCorentinJ\u002FReal-Time-Voice-Cloning)\n  * [mimic2](https:\u002F\u002Fgithub.com\u002FMycroftAI\u002Fmimic2): Text to Speech engine based on the Tacotron architecture, initially implemented by Keith Ito. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_50270b326a3a.png)](https:\u002F\u002Fgithub.com\u002FMycroftAI\u002Fmimic2)\n  * [Neural-Voice-Cloning-with-Few-Samples](https:\u002F\u002Fgithub.com\u002FSharad24\u002FNeural-Voice-Cloning-with-Few-Samples): Implementation of Neural Voice Cloning with Few Samples Research Paper by Baidu. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_215ff7e3fd90.png)](https:\u002F\u002Fgithub.com\u002FSharad24\u002FNeural-Voice-Cloning-with-Few-Samples)\n  * [Vall-E](https:\u002F\u002Fgithub.com\u002Fenhuiz\u002Fvall-e): An unofficial PyTorch implementation of the audio LM VALL-E. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_36f8152f853a.png)](https:\u002F\u002Fgithub.com\u002Fenhuiz\u002Fvall-e)\n  * [voice-changer](https:\u002F\u002Fgithub.com\u002Fw-okada\u002Fvoice-changer): Realtime Voice Changer. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_d056e5149a6e.png)](https:\u002F\u002Fgithub.com\u002Fw-okada\u002Fvoice-changer)\n  * [Retrieval-based-Voice-Conversion-WebUI](https:\u002F\u002Fgithub.com\u002FRVC-Project\u002FRetrieval-based-Voice-Conversion-WebUI): An easy-to-use Voice Conversion framework based on VITS. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_f2fd23d689a1.png)](https:\u002F\u002Fgithub.com\u002FRVC-Project\u002FRetrieval-based-Voice-Conversion-WebUI)\n  * [Audiocraft](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Faudiocraft): Audiocraft is a library for audio processing and generation with deep learning. It features the state-of-the-art EnCodec audio compressor\u002Ftokenizer, along with MusicGen, a simple and controllable music generation LM with textual and melodic conditioning. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_d5227e702e6b.png)](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Faudiocraft)\n  * [VALL-E-X](https:\u002F\u002Fgithub.com\u002FPlachtaa\u002FVALL-E-X): An open source implementation of Microsoft's VALL-E X zero-shot TTS model. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_6dc6a3829a02.png)](https:\u002F\u002Fgithub.com\u002FPlachtaa\u002FVALL-E-X)\n  * [OpenVoice](https:\u002F\u002Fgithub.com\u002Fmyshell-ai\u002FOpenVoice): Instant voice cloning by MyShell. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_607e89514c79.png)](https:\u002F\u002Fgithub.com\u002Fmyshell-ai\u002FOpenVoice)\n  * [MeloTTS](https:\u002F\u002Fgithub.com\u002Fmyshell-ai\u002FMeloTTS): High-quality multi-lingual text-to-speech library by MyShell.ai. Support English, Spanish, French, Chinese, Japanese and Korean. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_04779a223b84.png)](https:\u002F\u002Fgithub.com\u002Fmyshell-ai\u002FMeloTTS)\n  * [VoiceCraft](https:\u002F\u002Fgithub.com\u002Fjasonppy\u002FVoiceCraft): Zero-Shot Speech Editing and Text-to-Speech in the Wild. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_81ef25aa3faf.png)](https:\u002F\u002Fgithub.com\u002Fjasonppy\u002FVoiceCraft)\n  * [Parler-TTS](https:\u002F\u002Fgithub.com\u002Fhuggingface\u002Fparler-tts): Inference and training library for high-quality TTS models. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_ea5b16ff41cc.png)](https:\u002F\u002Fgithub.com\u002Fhuggingface\u002Fparler-tts)\n  * [ChatTTS](https:\u002F\u002Fgithub.com\u002F2noise\u002FChatTTS): A generative speech model for daily dialogue. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_77099da6e800.png)](https:\u002F\u002Fgithub.com\u002F2noise\u002FChatTTS)\n\n\n\n##### 💡 Applications 💡\n\n  * [Lip2Wav](https:\u002F\u002Fgithub.com\u002FRudrabha\u002FLip2Wav): Generate high quality speech from only lip movements. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_10a8ac34115d.png)](https:\u002F\u002Fgithub.com\u002FRudrabha\u002FLip2Wav)\n  * [AudioLDM: Text-to-Audio Generation with Latent Diffusion Models](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002Fhaoheliu\u002Faudioldm-text-to-audio-generation)\n  * [deepvoice3_pytorch](https:\u002F\u002Fgithub.com\u002Fr9y9\u002Fdeepvoice3_pytorch): PyTorch implementation of convolutional neural networks-based text-to-speech synthesis models. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_92d03664ac23.png)](https:\u002F\u002Fgithub.com\u002Fr9y9\u002Fdeepvoice3_pytorch)\n  * [🎸 Riffusion](https:\u002F\u002Fgithub.com\u002Friffusion\u002Friffusion): Stable diffusion for real-time music generation. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_b713c87bca37.png)](https:\u002F\u002Fgithub.com\u002Friffusion\u002Friffusion)\n  * [whisper.cpp](https:\u002F\u002Fgithub.com\u002Fggerganov\u002Fwhisper.cpp): Port of OpenAI's Whisper model in C\u002FC++. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_feafd115570e.png)](https:\u002F\u002Fgithub.com\u002Fggerganov\u002Fwhisper.cpp)\n  * [TTS](https:\u002F\u002Fgithub.com\u002Fcoqui-ai\u002FTTS): 🐸💬 - a deep learning toolkit for Text-to-Speech, battle-tested in research and production. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_6978d66a0b5f.png)](https:\u002F\u002Fgithub.com\u002Fcoqui-ai\u002FTTS)\n  * [YourTTS](https:\u002F\u002Fgithub.com\u002FEdresson\u002FYourTTS): Towards Zero-Shot Multi-Speaker TTS and Zero-Shot Voice Conversion for everyone. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_4eaff639ee28.png)](https:\u002F\u002Fgithub.com\u002FEdresson\u002FYourTTS)\n  * [TorToiSe](https:\u002F\u002Fgithub.com\u002Fneonbjb\u002Ftortoise-tts): A multi-voice TTS system trained with an emphasis on quality. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_c48e5c91a7cd.png)](https:\u002F\u002Fgithub.com\u002Fneonbjb\u002Ftortoise-tts)\n  * [DiffSinger](https:\u002F\u002Fgithub.com\u002FMoonInTheRiver\u002FDiffSinger): Singing Voice Synthesis via Shallow Diffusion Mechanism (SVS & TTS). 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_ffb61e2ff633.png)](https:\u002F\u002Fgithub.com\u002FMoonInTheRiver\u002FDiffSinger)\n  * [WaveNet vocoder](https:\u002F\u002Fgithub.com\u002Fr9y9\u002Fwavenet_vocoder): Implementation of the WaveNet vocoder, which can generate high-quality raw speech samples conditioned on linguistic or acoustic features. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_2077d7ac2875.png)](https:\u002F\u002Fgithub.com\u002Fr9y9\u002Fwavenet_vocoder)\n  * [Deepvoice3_pytorch](https:\u002F\u002Fgithub.com\u002Fr9y9\u002Fdeepvoice3_pytorch): PyTorch implementation of convolutional neural networks-based text-to-speech synthesis models. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_92d03664ac23.png)](https:\u002F\u002Fgithub.com\u002Fr9y9\u002Fdeepvoice3_pytorch)\n  * [eSpeak NG Text-to-Speech](https:\u002F\u002Fgithub.com\u002Fespeak-ng\u002Fespeak-ng): eSpeak NG is an open source speech synthesizer that supports more than hundred languages and accents. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_951d08509296.png)](https:\u002F\u002Fgithub.com\u002Fespeak-ng\u002Fespeak-ng)\n  * [RealChar](https:\u002F\u002Fgithub.com\u002FShaunwei\u002FRealChar): Create, Customize and Talk to your AI Character\u002FCompanion in Realtime (All in One Codebase!). Have a natural seamless conversation with AI everywhere (mobile, web and terminal) using LLM OpenAI GPT3.5\u002F4, Anthropic Claude2, Chroma Vector DB, Whisper Speech2Text, ElevenLabs Text2Speech. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_2586763c892b.png)](https:\u002F\u002Fgithub.com\u002FShaunwei\u002FRealChar)\n  * [Neural Voice Cloning with a Few Samples](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2018\u002Fhash\u002F4559912e7a94a9c32b09d894f2bc3c82-Abstract.html)\n  * [NAUTILUS: A Versatile Voice Cloning System](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F9246264)\n  * [Learning to Speak Fluently in a Foreign Language: Multilingual Speech Synthesis and Cross-Language Voice Cloning](https:\u002F\u002Farxiv.org\u002Fabs\u002F1907.04448)\n  * [When Good Becomes Evil: Keystroke Inference with Smartwatch](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F2810103.2813668)\n  * [KeyListener: Inferring Keystrokes on QWERTY Keyboard of Touch Screen through Acoustic Signals](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F8737591)\n  * [This Voice Does Not Exist: On Voice Synthesis, Audio Deepfakes and Their Detection](https:\u002F\u002Fthis-voice-does-not-exist.com)\n  * [AudioSep](https:\u002F\u002Fgithub.com\u002FAudio-AGI\u002FAudioSep): Separate Anything You Describe. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_21f3532adb92.png)](https:\u002F\u002Fgithub.com\u002FAudio-AGI\u002FAudioSep)\n  * [stable-audio-tools](https:\u002F\u002Fgithub.com\u002FStability-AI\u002Fstable-audio-tools): Generative models for conditional audio generation. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_31beb227de90.png)](https:\u002F\u002Fgithub.com\u002FStability-AI\u002Fstable-audio-tools)\n  * [GPT-SoVITS-WebUI](https:\u002F\u002Fgithub.com\u002FRVC-Boss\u002FGPT-SoVITS): 1 min voice data can also be used to train a good TTS model! (few shot voice cloning). 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_d53e2061e932.png)](https:\u002F\u002Fgithub.com\u002FRVC-Boss\u002FGPT-SoVITS)\n  * [Hybrid-Net](https:\u002F\u002Fgithub.com\u002FDoMusic\u002FHybrid-Net): Real-time audio source separation, generate lyrics, chords, beat. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_3645f3924f4d.png)](https:\u002F\u002Fgithub.com\u002FDoMusic\u002FHybrid-Net)\n  * [CosyVoice](https:\u002F\u002Fgithub.com\u002FFunAudioLLM\u002FCosyVoice): Multi-lingual large voice generation model, providing inference, training and deployment full-stack ability. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_d5cc7d678579.png)](https:\u002F\u002Fgithub.com\u002FFunAudioLLM\u002FCosyVoice)\n  * [EasyVolcap](https:\u002F\u002Fgithub.com\u002Fzju3dv\u002FEasyVolcap): Accelerating Neural Volumetric Video Research. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_e77fc9f6d2e7.png)](https:\u002F\u002Fgithub.com\u002Fzju3dv\u002FEasyVolcap)\n\n\n\n##### 🔎 Detection 🔎\n  * [fake-voice-detection](https:\u002F\u002Fgithub.com\u002Fdessa-oss\u002Ffake-voice-detection): Using temporal convolution to detect Audio Deepfakes. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_2af37eee0e0e.png)](https:\u002F\u002Fgithub.com\u002Fdessa-oss\u002Ffake-voice-detection)\n  * [A robust voice spoofing detection system using novel CLS-LBP features and LSTM](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS1319157822000684)\n  * [Voice spoofing detector: A unified anti-spoofing framework](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fabs\u002Fpii\u002FS0957417422002330)\n  * [Securing Voice-Driven Interfaces Against Fake (Cloned) Audio Attacks](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F8695320)\n  * [DeepSonar: Towards Effective and Robust Detection of AI-Synthesized Fake Voices](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3394171.3413716)\n  * [Fighting AI with AI: Fake Speech Detection Using Deep Learning](https:\u002F\u002Fwww.aes.org\u002Fe-lib\u002Fonline\u002Fbrowse.cfm?elib=20479)\n  * [A Review of Modern Audio Deepfake Detection Methods: Challenges and Future Directions](https:\u002F\u002Fwww.mdpi.com\u002F1999-4893\u002F15\u002F5\u002F155)\n\n#### 📷 Image 📷\n\n##### 🛠️ Tools 🛠️\n\n  * [StyleGAN](https:\u002F\u002Fgithub.com\u002FNVlabs\u002Fstylegan): StyleGAN - Official TensorFlow Implementation. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_2837b6642473.png)](https:\u002F\u002Fgithub.com\u002FNVlabs\u002Fstylegan)\n  * [StyleGAN2](https:\u002F\u002Fgithub.com\u002FNVlabs\u002Fstylegan2): StyleGAN2 - Official TensorFlow Implementation. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_2837b6642473.png)](https:\u002F\u002Fgithub.com\u002FNVlabs\u002Fstylegan2)\n  * [stylegan2-ada-pytorch](https:\u002F\u002Fgithub.com\u002FNVlabs\u002Fstylegan2-ada-pytorch): StyleGAN2-ADA - Official PyTorch implementation. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_2837b6642473.png)](https:\u002F\u002Fgithub.com\u002FNVlabs\u002Fstylegan2-ada-pytorch)\n  * [StyleGAN-nada](https:\u002F\u002Fgithub.com\u002Frinongal\u002FStyleGAN-nada): CLIP-Guided Domain Adaptation of Image Generators. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_bb974aaf9a63.png)](https:\u002F\u002Fgithub.com\u002Frinongal\u002FStyleGAN-nada)\n  * [StyleGAN3](https:\u002F\u002Fgithub.com\u002FNVlabs\u002Fstylegan3): Official PyTorch implementation of StyleGAN3. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_2837b6642473.png)](https:\u002F\u002Fgithub.com\u002FNVlabs\u002Fstylegan3)\n  * [Imaginaire](https:\u002F\u002Fgithub.com\u002FNVlabs\u002Fimaginaire): Imaginaire is a pytorch library that contains the optimized implementation of several image and video synthesis methods developed at NVIDIA. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_c0e656282edb.png)](https:\u002F\u002Fgithub.com\u002FNVlabs\u002Fimaginaire)\n  * [ffhq-dataset](https:\u002F\u002Fgithub.com\u002FNVlabs\u002Fffhq-dataset): Flickr-Faces-HQ Dataset (FFHQ). [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_d18a6e6d2ce7.png)](https:\u002F\u002Fgithub.com\u002FNVlabs\u002Fffhq-dataset)\n  * [DALLE2-pytorch](https:\u002F\u002Fgithub.com\u002Flucidrains\u002FDALLE2-pytorch): Implementation of DALL-E 2, OpenAI's updated text-to-image synthesis neural network, in Pytorch. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_fef5371e23be.png)](https:\u002F\u002Fgithub.com\u002Flucidrains\u002FDALLE2-pytorch)\n  * [ImaginAIry](https:\u002F\u002Fgithub.com\u002Fbrycedrennan\u002FimaginAIry): AI imagined images. 
Pythonic generation of stable diffusion images. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_7afbfe55ddd9.png)](https:\u002F\u002Fgithub.com\u002Fbrycedrennan\u002FimaginAIry)\n  * [Lama Cleaner](https:\u002F\u002Fgithub.com\u002FSanster\u002Flama-cleaner): Image inpainting tool powered by SOTA AI Model. Remove any unwanted object, defect, or people from your pictures or erase and replace(powered by stable diffusion) anything on your pictures. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_ef14c31dd23c.png)](https:\u002F\u002Fgithub.com\u002FSanster\u002Flama-cleaner)\n  * [Invertible-Image-Rescaling](https:\u002F\u002Fgithub.com\u002Fpkuxmq\u002FInvertible-Image-Rescaling): This is the PyTorch implementation of paper: Invertible Image Rescaling. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_68392506d4ad.png)](https:\u002F\u002Fgithub.com\u002Fpkuxmq\u002FInvertible-Image-Rescaling)\n  * [DifFace](https:\u002F\u002Fgithub.com\u002FzsyOAOA\u002FDifFace): Blind Face Restoration with Diffused Error Contraction (PyTorch). [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_f5d8c34f6969.png)](https:\u002F\u002Fgithub.com\u002FzsyOAOA\u002FDifFace)\n  * [CodeFormer](https:\u002F\u002Fgithub.com\u002Fsczhou\u002FCodeFormer): Towards Robust Blind Face Restoration with Codebook Lookup Transformer. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_6990fdd47a1f.png)](https:\u002F\u002Fgithub.com\u002Fsczhou\u002FCodeFormer)\n  * [Custom Diffusion](https:\u002F\u002Fgithub.com\u002Fadobe-research\u002Fcustom-diffusion): Multi-Concept Customization of Text-to-Image Diffusion. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_04220cde65c4.png)](https:\u002F\u002Fgithub.com\u002Fadobe-research\u002Fcustom-diffusion)\n  * [Diffusers](https:\u002F\u002Fgithub.com\u002Fhuggingface\u002Fdiffusers): 🤗 Diffusers: State-of-the-art diffusion models for image and audio generation in PyTorch. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_b60a9ebbb187.png)](https:\u002F\u002Fgithub.com\u002Fhuggingface\u002Fdiffusers)\n  * [Stable Diffusion](https:\u002F\u002Fgithub.com\u002FStability-AI\u002Fstablediffusion): High-Resolution Image Synthesis with Latent Diffusion Models. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_39a38a99e1b8.png)](https:\u002F\u002Fgithub.com\u002FStability-AI\u002Fstablediffusion)\n  * [InvokeAI](https:\u002F\u002Fgithub.com\u002Finvoke-ai\u002FInvokeAI): InvokeAI is a leading creative engine for Stable Diffusion models, empowering professionals, artists, and enthusiasts to generate and create visual media using the latest AI-driven technologies. The solution offers an industry-leading WebUI, supports terminal use through a CLI, and serves as the foundation for multiple commercial products. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_a6845f85c5ac.png)](https:\u002F\u002Fgithub.com\u002Finvoke-ai\u002FInvokeAI)\n  * [Stable Diffusion web UI](https:\u002F\u002Fgithub.com\u002FAUTOMATIC1111\u002Fstable-diffusion-webui): Stable Diffusion web UI. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_d8fb7745e064.png)](https:\u002F\u002Fgithub.com\u002FAUTOMATIC1111\u002Fstable-diffusion-webui)\n  * [Stable Diffusion Infinity](https:\u002F\u002Fgithub.com\u002Flkwq007\u002Fstablediffusion-infinity): Outpainting with Stable Diffusion on an infinite canvas. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_0f221aa918ed.png)](https:\u002F\u002Fgithub.com\u002Flkwq007\u002Fstablediffusion-infinity)\n  * [Fast Stable Diffusion](https:\u002F\u002Fgithub.com\u002FTheLastBen\u002Ffast-stable-diffusion): fast-stable-diffusion + DreamBooth. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_5da23b21a34c.png)](https:\u002F\u002Fgithub.com\u002FTheLastBen\u002Ffast-stable-diffusion)\n  * [GET3D](https:\u002F\u002Fgithub.com\u002Fnv-tlabs\u002FGET3D): A Generative Model of High Quality 3D Textured Shapes Learned from Images. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_6e2c448b23f6.png)](https:\u002F\u002Fgithub.com\u002Fnv-tlabs\u002FGET3D)\n  * [Awesome AI Art Image Synthesis](https:\u002F\u002Fgithub.com\u002Faltryne\u002Fawesome-ai-art-image-synthesis): A list of awesome tools, ideas, prompt engineering tools, collabs, models, and helpers for the prompt designer playing with aiArt and image synthesis. Covers Dalle2, MidJourney, StableDiffusion, and open source tools. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_c80931e26cbe.png)](https:\u002F\u002Fgithub.com\u002Faltryne\u002Fawesome-ai-art-image-synthesis)\n  * [Stable Diffusion](https:\u002F\u002Fgithub.com\u002FCompVis\u002Fstable-diffusion): A latent text-to-image diffusion model. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_b9c2448bd799.png)](https:\u002F\u002Fgithub.com\u002FCompVis\u002Fstable-diffusion)\n  * [Weather Diffusion](https:\u002F\u002Fgithub.com\u002FIGITUGraz\u002FWeatherDiffusion): Code for \"Restoring Vision in Adverse Weather Conditions with Patch-Based Denoising Diffusion Models\". 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_5d3a19f5187d.png)](https:\u002F\u002Fgithub.com\u002FIGITUGraz\u002FWeatherDiffusion)\n  * [DF-GAN](https:\u002F\u002Fgithub.com\u002Ftobran\u002FDF-GAN): A Simple and Effective Baseline for Text-to-Image Synthesis. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_e4d2b218ec7a.png)](https:\u002F\u002Fgithub.com\u002Ftobran\u002FDF-GAN)\n  * [Dall-E Playground](https:\u002F\u002Fgithub.com\u002Fsaharmor\u002Fdalle-playground): A playground to generate images from any text prompt using Stable Diffusion (past: using DALL-E Mini). [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_48360bd4b478.png)](https:\u002F\u002Fgithub.com\u002Fsaharmor\u002Fdalle-playground)\n  * [MM-CelebA-HQ-Dataset](https:\u002F\u002Fgithub.com\u002FIIGROUP\u002FMM-CelebA-HQ-Dataset): A large-scale face image dataset that allows text-to-image generation, text-guided image manipulation, sketch-to-image generation, GANs for face generation and editing, image caption, and VQA. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_7cb33e8fc679.png)](https:\u002F\u002Fgithub.com\u002FIIGROUP\u002FMM-CelebA-HQ-Dataset)\n  * [Deep Daze](https:\u002F\u002Fgithub.com\u002Flucidrains\u002Fdeep-daze): Simple command line tool for text-to-image generation using OpenAI's CLIP and Siren (Implicit neural representation network). [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_9a5a46d4ed2e.png)](https:\u002F\u002Fgithub.com\u002Flucidrains\u002Fdeep-daze)\n  * [StyleMapGAN](https:\u002F\u002Fgithub.com\u002Fnaver-ai\u002FStyleMapGAN): Exploiting Spatial Dimensions of Latent in GAN for Real-time Image Editing. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_92de36041298.png)](https:\u002F\u002Fgithub.com\u002Fnaver-ai\u002FStyleMapGAN)\n  * [Kandinsky-2](https:\u002F\u002Fgithub.com\u002Fai-forever\u002FKandinsky-2): Multilingual text2image latent diffusion model. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_68211636bfa6.png)](https:\u002F\u002Fgithub.com\u002Fai-forever\u002FKandinsky-2)\n  * [DragGAN](https:\u002F\u002Fgithub.com\u002FXingangPan\u002FDragGAN): Interactive Point-based Manipulation on the Generative Image Manifold. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_cbf575c28d90.png)](https:\u002F\u002Fgithub.com\u002FXingangPan\u002FDragGAN)\n  * [Segment Anything](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fsegment-anything): The repository provides code for running inference with the SegmentAnything Model (SAM), links for downloading the trained model checkpoints, and example notebooks that show how to use the model. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_d11dd8330bb4.png)](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fsegment-anything)\n  * [Segment Anything 2](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fsegment-anything-2): The repository provides code for running inference with the Meta Segment Anything Model 2 (SAM 2), links for downloading the trained model checkpoints, and example notebooks that show how to use the model. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_d11dd8330bb4.png)](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fsegment-anything-2)\n  * [MobileSAM](https:\u002F\u002Fgithub.com\u002FChaoningZhang\u002FMobileSAM): This is the official code for the MobileSAM project that makes SAM lightweight for mobile applications and beyond! [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_dc933ca96ce8.png)](https:\u002F\u002Fgithub.com\u002FChaoningZhang\u002FMobileSAM)\n  * [FastSAM](https:\u002F\u002Fgithub.com\u002FCASIA-IVA-Lab\u002FFastSAM): Fast Segment Anything\n [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_dfd2dda2287c.png)](https:\u002F\u002Fgithub.com\u002FCASIA-IVA-Lab\u002FFastSAM)\n  * [Infinigen](https:\u002F\u002Fgithub.com\u002Fprinceton-vl\u002Finfinigen): Infinite Photorealistic Worlds using Procedural Generation. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_01fc32428313.png)](https:\u002F\u002Fgithub.com\u002Fprinceton-vl\u002Finfinigen)\n  * [DALL·E 3](https:\u002F\u002Fopenai.com\u002Fdall-e-3)\n  * [StreamDiffusion](https:\u002F\u002Fgithub.com\u002Fcumulo-autumn\u002FStreamDiffusion): A Pipeline-Level Solution for Real-Time Interactive Generation. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_755ccf9c583d.png)](https:\u002F\u002Fgithub.com\u002Fcumulo-autumn\u002FStreamDiffusion)\n  * [AnyDoor](https:\u002F\u002Fgithub.com\u002Fali-vilab\u002FAnyDoor): Zero-shot Object-level Image Customization. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_755ccf9c583d.png)](https:\u002F\u002Fgithub.com\u002Fali-vilab\u002FAnyDoor)\n  * [DiT](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FDiT): Scalable Diffusion Models with Transformers. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_5b9fc9553744.png)](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FDiT)\n  * [BrushNet](https:\u002F\u002Fgithub.com\u002FTencentARC\u002FBrushNet): A Plug-and-Play Image Inpainting Model with Decomposed Dual-Branch Diffusion. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_739b7953742a.png)](https:\u002F\u002Fgithub.com\u002FTencentARC\u002FBrushNet)\n  * [OOTDiffusion](https:\u002F\u002Fgithub.com\u002Flevihsu\u002FOOTDiffusion): Outfitting Fusion based Latent Diffusion for Controllable Virtual Try-on. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_227a4447fc29.png)](https:\u002F\u002Fgithub.com\u002Flevihsu\u002FOOTDiffusion)\n  * [VAR](https:\u002F\u002Fgithub.com\u002FFoundationVision\u002FVAR): Official impl. of \"Visual Autoregressive Modeling: Scalable Image Generation via Next-Scale Prediction\". [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_0f133dc215a3.png)](https:\u002F\u002Fgithub.com\u002FFoundationVision\u002FVAR)\n  * [Imagine Flash: Accelerating Emu Diffusion Models with Backward Distillation](https:\u002F\u002Fai.meta.com\u002Fresearch\u002Fpublications\u002Fimagine-flash-accelerating-emu-diffusion-models-with-backward-distillation\u002F)\n\n##### 💡 Applications 💡\n\n  * [ArtLine](https:\u002F\u002Fgithub.com\u002Fvijishmadhavan\u002FArtLine): A Deep Learning based project for creating line art portraits. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_e0fd89ec9fec.png)](https:\u002F\u002Fgithub.com\u002Fvijishmadhavan\u002FArtLine)\n  * [Depix](https:\u002F\u002Fgithub.com\u002Fbeurtschipper\u002FDepix): Recovers passwords from pixelized screenshots. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_b830ebd72ef4.png)](https:\u002F\u002Fgithub.com\u002Fbeurtschipper\u002FDepix)\n  * [Bringing Old Photos Back to Life](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002FBringing-Old-Photos-Back-to-Life): Old Photo Restoration (Official PyTorch Implementation). [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_a0d65e4bb9ef.png)](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002FBringing-Old-Photos-Back-to-Life)\n  * [Rewriting](https:\u002F\u002Fgithub.com\u002Fdavidbau\u002Frewriting): Interactive tool to directly edit the rules of a GAN to synthesize scenes with objects added, removed, or altered. Change StyleGANv2 to make extravagant eyebrows, or horses wearing hats. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_90beb292c482.png)](https:\u002F\u002Fgithub.com\u002Fdavidbau\u002Frewriting)\n  * [Fawkes](https:\u002F\u002Fgithub.com\u002FShawn-Shan\u002Ffawkes): Privacy preserving tool against facial recognition systems. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_5e4ce70a505a.png)](https:\u002F\u002Fgithub.com\u002FShawn-Shan\u002Ffawkes)\n  * [Pulse](https:\u002F\u002Fgithub.com\u002Fadamian98\u002Fpulse): Self-Supervised Photo Upsampling via Latent Space Exploration of Generative Models. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_e677f1524657.png)](https:\u002F\u002Fgithub.com\u002Fadamian98\u002Fpulse)\n  * [HiDT](https:\u002F\u002Fgithub.com\u002Fsaic-mdal\u002FHiDT): Official repository for the paper \"High-Resolution Daytime Translation Without Domain Labels\". [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_fa1e1999455e.png)](https:\u002F\u002Fgithub.com\u002Fsaic-mdal\u002FHiDT)\n  * [3D Photo Inpainting](https:\u002F\u002Fgithub.com\u002Fvt-vl-lab\u002F3d-photo-inpainting): 3D Photography using Context-aware Layered Depth Inpainting. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_80768c91edbd.png)](https:\u002F\u002Fgithub.com\u002Fvt-vl-lab\u002F3d-photo-inpainting)\n  * [SteganoGAN](https:\u002F\u002Fgithub.com\u002FDAI-Lab\u002FSteganoGAN): SteganoGAN is a tool for creating steganographic images using adversarial training. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_c2a6f04a5db8.png)](https:\u002F\u002Fgithub.com\u002FDAI-Lab\u002FSteganoGAN)\n  * [Stylegan-T](https:\u002F\u002Fgithub.com\u002Fautonomousvision\u002Fstylegan-t): Unlocking the Power of GANs for Fast Large-Scale Text-to-Image Synthesis. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_86541602c7b8.png)](https:\u002F\u002Fgithub.com\u002Fautonomousvision\u002Fstylegan-t)\n  * [MegaPortraits](https:\u002F\u002Fgithub.com\u002FSamsungLabs\u002FMegaPortraits): One-shot Megapixel Neural Head Avatars. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_d36ddb911409.png)](https:\u002F\u002Fgithub.com\u002FSamsungLabs\u002FMegaPortraits)\n  * [eg3d](https:\u002F\u002Fgithub.com\u002FNVlabs\u002Feg3d): Efficient Geometry-aware 3D Generative Adversarial Networks. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_3339378251fe.png)](https:\u002F\u002Fgithub.com\u002FNVlabs\u002Feg3d)\n  * [TediGAN](https:\u002F\u002Fgithub.com\u002FIIGROUP\u002FTediGAN): Pytorch implementation for TediGAN: Text-Guided Diverse Face Image Generation and Manipulation. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_1a92854930e8.png)](https:\u002F\u002Fgithub.com\u002FIIGROUP\u002FTediGAN)\n  * [DALLE-pytorch](https:\u002F\u002Fgithub.com\u002Flucidrains\u002FDALLE-pytorch): Implementation \u002F replication of DALL-E, OpenAI's Text to Image Transformer, in Pytorch. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_87972aef1ad0.png)](https:\u002F\u002Fgithub.com\u002Flucidrains\u002FDALLE-pytorch)\n  * [StyleNeRF](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FStyleNeRF): This is the open source implementation of the ICLR2022 paper \"StyleNeRF: A Style-based 3D-Aware Generator for High-resolution Image Synthesis\". [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_1a6a7cedbddc.png)](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FStyleNeRF)\n  * [DeepSVG](https:\u002F\u002Fgithub.com\u002Falexandre01\u002Fdeepsvg): Official code for the paper \"DeepSVG: A Hierarchical Generative Network for Vector Graphics Animation\". Includes a PyTorch library for deep learning with SVG data. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_8867b6cb7087.png)](https:\u002F\u002Fgithub.com\u002Falexandre01\u002Fdeepsvg)\n  * [NUWA](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002FNUWA): A unified 3D Transformer Pipeline for visual synthesis. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_abaa13eabe66.png)](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002FNUWA)\n  * [Image-Super-Resolution-via-Iterative-Refinement](https:\u002F\u002Fgithub.com\u002FJanspiry\u002FImage-Super-Resolution-via-Iterative-Refinement): Unofficial implementation of Image Super-Resolution via Iterative Refinement by Pytorch. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_7f78d979a551.png)](https:\u002F\u002Fgithub.com\u002FJanspiry\u002FImage-Super-Resolution-via-Iterative-Refinement)\n  * [Lama](https:\u002F\u002Fgithub.com\u002Fsaic-mdal\u002Flama): 🦙 LaMa Image Inpainting, Resolution-robust Large Mask Inpainting with Fourier Convolutions. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_0fd3d00f2cf1.png)](https:\u002F\u002Fgithub.com\u002Fsaic-mdal\u002Flama)\n  * [Person_reID_baseline_pytorch](https:\u002F\u002Fgithub.com\u002Flayumi\u002FPerson_reID_baseline_pytorch): Pytorch ReID: A tiny, friendly, strong pytorch implement of object re-identification baseline. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_a715f49412d5.png)](https:\u002F\u002Fgithub.com\u002Flayumi\u002FPerson_reID_baseline_pytorch)\n  * [instruct-pix2pix](https:\u002F\u002Fgithub.com\u002Ftimothybrooks\u002Finstruct-pix2pix): PyTorch implementation of InstructPix2Pix, an instruction-based image editing model. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_d61768ee768f.png)](https:\u002F\u002Fgithub.com\u002Ftimothybrooks\u002Finstruct-pix2pix)\n  * [GFPGAN](https:\u002F\u002Fgithub.com\u002FTencentARC\u002FGFPGAN): GFPGAN aims at developing Practical Algorithms for Real-world Face Restoration. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_6aa40e02a124.png)](https:\u002F\u002Fgithub.com\u002FTencentARC\u002FGFPGAN)\n  * [DeepVecFont](https:\u002F\u002Fgithub.com\u002Fyizhiwang96\u002Fdeepvecfont): Synthesizing High-quality Vector Fonts via Dual-modality Learning. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_b9594c05d8a2.png)](https:\u002F\u002Fgithub.com\u002Fyizhiwang96\u002Fdeepvecfont)\n  * [Stargan v2 Tensorflow](https:\u002F\u002Fgithub.com\u002Fclovaai\u002Fstargan-v2-tensorflow): Official Tensorflow Implementation. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_251b83ad5cb4.png)](https:\u002F\u002Fgithub.com\u002Fclovaai\u002Fstargan-v2-tensorflow)\n  * [StyleGAN2 Distillation](https:\u002F\u002Fgithub.com\u002FEvgenyKashin\u002Fstylegan2-distillation): Paired image-to-image translation, trained on synthetic data generated by StyleGAN2 outperforms existing approaches in image manipulation. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_70de04dcdcaa.png)](https:\u002F\u002Fgithub.com\u002FEvgenyKashin\u002Fstylegan2-distillation)\n  * [Extracting Training Data from Diffusion Models](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.13188)\n  * [Mann-E - Mann-E (Persian: مانی) is an art generator model based on the weights of Stable Diffusion 1.5 and data gathered from artistic material available on Pinterest](https:\u002F\u002Fopencognitives.com\u002Fmann-e)\n  * [End-to-end Trained CNN Encode-Decoder Networks for Image Steganography](https:\u002F\u002Farxiv.org\u002Fabs\u002F1711.07201)\n  * [Grounded-Segment-Anything](https:\u002F\u002Fgithub.com\u002FIDEA-Research\u002FGrounded-Segment-Anything): Marrying Grounding DINO with Segment Anything & Stable Diffusion & Tag2Text & BLIP & Whisper & ChatBot - Automatically Detect , Segment and Generate Anything with Image, Text, and Audio Inputs. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_f9a3977979e4.png)](https:\u002F\u002Fgithub.com\u002FIDEA-Research\u002FGrounded-Segment-Anything)\n  * [AnimateDiff](https:\u002F\u002Fgithub.com\u002Fguoyww\u002FAnimateDiff): Animate Your Personalized Text-to-Image Diffusion Models without Specific Tuning. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_a2c309bf3f27.png)](https:\u002F\u002Fgithub.com\u002Fguoyww\u002FAnimateDiff)\n  * [BasicSR](https:\u002F\u002Fgithub.com\u002FXPixelGroup\u002FBasicSR): Open Source Image and Video Restoration Toolbox for Super-resolution, Denoise, Deblurring, etc. Currently, it includes EDSR, RCAN, SRResNet, SRGAN, ESRGAN, EDVR, BasicVSR, SwinIR, ECBSR, etc. Also support StyleGAN2 and DFDNet. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_9a32c804dab7.png)](https:\u002F\u002Fgithub.com\u002FXPixelGroup\u002FBasicSR)\n  * [Real-ESRGAN](https:\u002F\u002Fgithub.com\u002Fxinntao\u002FReal-ESRGAN): Real-ESRGAN aims at developing Practical Algorithms for General Image\u002FVideo Restoration. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_e08feb501df3.png)](https:\u002F\u002Fgithub.com\u002Fxinntao\u002FReal-ESRGAN)\n  * [ESRGAN](https:\u002F\u002Fgithub.com\u002Fxinntao\u002FESRGAN): Enhanced SRGAN. Champion PIRM Challenge on Perceptual Super-Resolution. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_f052cb7dc6de.png)](https:\u002F\u002Fgithub.com\u002Fxinntao\u002FESRGAN)\n  * [MixNMatch](https:\u002F\u002Fgithub.com\u002FYuheng-Li\u002FMixNMatch): Multifactor Disentanglement and Encoding for Conditional Image Generation. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_170285a1fb48.png)](https:\u002F\u002Fgithub.com\u002FYuheng-Li\u002FMixNMatch)\n  * [Clarity-upscaler](https:\u002F\u002Fgithub.com\u002Fphilz1337x\u002Fclarity-upscaler): Reimagined image upscaling for everyone. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_82f6875dbe87.png)](https:\u002F\u002Fgithub.com\u002Fphilz1337x\u002Fclarity-upscaler)\n  * [One-step Diffusion with Distribution Matching Distillation](https:\u002F\u002Ftianweiy.github.io\u002Fdmd\u002F)\n  * [Invisible Stitch](https:\u002F\u002Fgithub.com\u002Fpaulengstler\u002Finvisible-stitch): Generating Smooth 3D Scenes with Depth Inpainting. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_168b24e7b433.png)](https:\u002F\u002Fgithub.com\u002Fpaulengstler\u002Finvisible-stitch)\n  * [SSR](https:\u002F\u002Fgithub.com\u002FDaLi-Jack\u002FSSR-code): Single-view 3D Scene Reconstruction with High-fidelity Shape and Texture. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_4ead0277cb02.png)](https:\u002F\u002Fgithub.com\u002FDaLi-Jack\u002FSSR-code)\n  * [InvSR](https:\u002F\u002Fgithub.com\u002FzsyOAOA\u002FInvSR): Arbitrary-steps Image Super-resolution via Diffusion Inversion. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_513527908081.png)](https:\u002F\u002Fgithub.com\u002FzsyOAOA\u002FInvSR)\n  * [REPARO](https:\u002F\u002Fgithub.com\u002FVincentHancoder\u002FREPARO): Compositional 3D Assets Generation with Differentiable 3D Layout Alignment. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_c6b1627b107d.png)](https:\u002F\u002Fgithub.com\u002FVincentHancoder\u002FREPARO)\n  * [Gen3DSR](https:\u002F\u002Fgithub.com\u002FAndreeaDogaru\u002FGen3DSR): Generalizable 3D Scene Reconstruction via Divide and Conquer from a Single View. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_61494c3f06bb.png)](https:\u002F\u002Fgithub.com\u002FAndreeaDogaru\u002FGen3DSR)\n  * [ml-sharp](https:\u002F\u002Fgithub.com\u002Fapple\u002Fml-sharp): Sharp Monocular View Synthesis in Less Than a Second. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_5f6421e37b95.png)](https:\u002F\u002Fgithub.com\u002Fapple\u002Fml-sharp)\n\n\n##### 🔎 Detection 🔎\n\n  * [stylegan3-detector](https:\u002F\u002Fgithub.com\u002FNVlabs\u002Fstylegan3-detector): StyleGAN3 Synthetic Image Detection. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_2837b6642473.png)](https:\u002F\u002Fgithub.com\u002FNVlabs\u002Fstylegan3-detector)\n  * [stylegan2-projecting-images](https:\u002F\u002Fgithub.com\u002Fwoctezuma\u002Fstylegan2-projecting-images): Projecting images to latent space with StyleGAN2. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_c5a596e1b8bd.png)](https:\u002F\u002Fgithub.com\u002Fwoctezuma\u002Fstylegan2-projecting-images)\n  * [FALdetector](https:\u002F\u002Fgithub.com\u002FPeterWang512\u002FFALdetector): Detecting Photoshopped Faces by Scripting Photoshop. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_3aab12b955a2.png)](https:\u002F\u002Fgithub.com\u002FPeterWang512\u002FFALdetector)\n  * [B-Free](https:\u002F\u002Fgithub.com\u002Fgrip-unina\u002FB-Free): A Bias-Free Training Paradigm for More General AI-generated Image Detection. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_a4e5a315106b.png)](https:\u002F\u002Fgithub.com\u002Fgrip-unina\u002FB-Free)\n  * [Detection of Images Generated by Multi-Modal Models](https:\u002F\u002Fwww.bsi.bund.de\u002FSharedDocs\u002FDownloads\u002FEN\u002FBSI\u002FKI\u002FDetection_Images_Multi-Modal_Models.html)\n\n#### 🎥 Video 🎥\n\n##### 🛠️ Tools 🛠️\n\n  * [DeepFaceLab](https:\u002F\u002Fgithub.com\u002Fiperov\u002FDeepFaceLab): DeepFaceLab is the leading software for creating deepfakes. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_ffdf630caee6.png)](https:\u002F\u002Fgithub.com\u002Fiperov\u002FDeepFaceLab)\n  * [faceswap](https:\u002F\u002Fgithub.com\u002Fdeepfakes\u002Ffaceswap): Deepfakes Software For All. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_8502b17a5690.png)](https:\u002F\u002Fgithub.com\u002Fdeepfakes\u002Ffaceswap)\n  * [dot](https:\u002F\u002Fgithub.com\u002Fsensity-ai\u002Fdot): The Deepfake Offensive Toolkit. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_f092de96d911.png)](https:\u002F\u002Fgithub.com\u002Fsensity-ai\u002Fdot)\n  * [SimSwap](https:\u002F\u002Fgithub.com\u002Fneuralchen\u002FSimSwap): An arbitrary face-swapping framework on images and videos with one single trained model! [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_7d37f167cdb8.png)](https:\u002F\u002Fgithub.com\u002Fneuralchen\u002FSimSwap)\n  * [faceswap-GAN](https:\u002F\u002Fgithub.com\u002Fshaoanlu\u002Ffaceswap-GAN): A denoising autoencoder + adversarial losses and attention mechanisms for face swapping. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_f5ac3a96ed2e.png)](https:\u002F\u002Fgithub.com\u002Fshaoanlu\u002Ffaceswap-GAN)\n  * [Celeb DeepFakeForensics](https:\u002F\u002Fgithub.com\u002Fyuezunli\u002Fceleb-deepfakeforensics): A Large-scale Challenging Dataset for DeepFake Forensics. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_4ad798cadfd2.png)](https:\u002F\u002Fgithub.com\u002Fyuezunli\u002Fceleb-deepfakeforensics)\n  * [VGen](https:\u002F\u002Fgithub.com\u002Fdamo-vilab\u002Fi2vgen-xl): A holistic video generation ecosystem for video generation building on diffusion models. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_8b455332876f.png)](https:\u002F\u002Fgithub.com\u002Fdamo-vilab\u002Fi2vgen-xl)\n  * [MuseV](https:\u002F\u002Fgithub.com\u002FTMElyralab\u002FMuseV): Infinite-length and High Fidelity Virtual Human Video Generation with Visual Conditioned Parallel Denoising. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_b9a5ea7d8936.png)](https:\u002F\u002Fgithub.com\u002FTMElyralab\u002FMuseV)\n  * [GLEE](https:\u002F\u002Fgithub.com\u002FFoundationVision\u002FGLEE): General Object Foundation Model for Images and Videos at Scale. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_0abbec4a530d.png)](https:\u002F\u002Fgithub.com\u002FFoundationVision\u002FGLEE)\n  * [T-Rex](https:\u002F\u002Fgithub.com\u002FIDEA-Research\u002FT-Rex): Towards Generic Object Detection via Text-Visual Prompt Synergy. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_afd74fa03067.png)](https:\u002F\u002Fgithub.com\u002FIDEA-Research\u002FT-Rex)\n  * [DynamiCrafter](https:\u002F\u002Fgithub.com\u002FDoubiiu\u002FDynamiCrafter): Animating Open-domain Images with Video Diffusion Priors. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_a52c5c971e0e.png)](https:\u002F\u002Fgithub.com\u002FDoubiiu\u002FDynamiCrafter)\n  * [Mora](https:\u002F\u002Fgithub.com\u002Flichao-sun\u002FMora): More like Sora for Generalist Video Generation. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_743828bf09cc.png)](https:\u002F\u002Fgithub.com\u002Flichao-sun\u002FMora)\n\n##### 💡 Applications 💡\n\n  * [face2face-demo](https:\u002F\u002Fgithub.com\u002Fdatitran\u002Fface2face-demo): pix2pix demo that learns from facial landmarks and translates this into a face. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_3f1a2e218b00.png)](https:\u002F\u002Fgithub.com\u002Fdatitran\u002Fface2face-demo)\n  * [Faceswap-Deepfake-Pytorch](https:\u002F\u002Fgithub.com\u002FOldpan\u002FFaceswap-Deepfake-Pytorch): Faceswap with Pytorch or DeepFake with Pytorch. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_a19bf3c65399.png)](https:\u002F\u002Fgithub.com\u002FOldpan\u002FFaceswap-Deepfake-Pytorch)\n  * [Point-E](https:\u002F\u002Fgithub.com\u002Fopenai\u002Fpoint-e): Point cloud diffusion for 3D model synthesis. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_a759f7d92327.png)](https:\u002F\u002Fgithub.com\u002Fopenai\u002Fpoint-e)\n  * [EGVSR](https:\u002F\u002Fgithub.com\u002FThmen\u002FEGVSR): Efficient & Generic Video Super-Resolution. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_265823cc8865.png)](https:\u002F\u002Fgithub.com\u002FThmen\u002FEGVSR)\n  * [STIT](https:\u002F\u002Fgithub.com\u002Frotemtzaban\u002FSTIT): Stitch it in Time: GAN-Based Facial Editing of Real Videos. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_03945486701b.png)](https:\u002F\u002Fgithub.com\u002Frotemtzaban\u002FSTIT)\n  * [BackgroundMattingV2](https:\u002F\u002Fgithub.com\u002FPeterL1n\u002FBackgroundMattingV2): Real-Time High-Resolution Background Matting. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_8b5eb04a2ec8.png)](https:\u002F\u002Fgithub.com\u002FPeterL1n\u002FBackgroundMattingV2)\n  * [MODNet](https:\u002F\u002Fgithub.com\u002FZHKKKe\u002FMODNet): A Trimap-Free Portrait Matting Solution in Real Time. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_3614ac2ed8a8.png)](https:\u002F\u002Fgithub.com\u002FZHKKKe\u002FMODNet)\n  * [Background-Matting](https:\u002F\u002Fgithub.com\u002Fsenguptaumd\u002FBackground-Matting): Background Matting: The World is Your Green Screen. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_2e40ba2095f4.png)](https:\u002F\u002Fgithub.com\u002Fsenguptaumd\u002FBackground-Matting)\n  * [First Order Model](https:\u002F\u002Fgithub.com\u002FAliaksandrSiarohin\u002Ffirst-order-model): This repository contains the source code for the paper First Order Motion Model for Image Animation. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_636a3c95999e.png)](https:\u002F\u002Fgithub.com\u002FAliaksandrSiarohin\u002Ffirst-order-model)\n  * [Articulated Animation](https:\u002F\u002Fgithub.com\u002Fsnap-research\u002Farticulated-animation): This repository contains the source code for the CVPR'2021 paper Motion Representations for Articulated Animation. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_314ae3a610c4.png)](https:\u002F\u002Fgithub.com\u002Fsnap-research\u002Farticulated-animation)\n  * [Real Time Person Removal](https:\u002F\u002Fgithub.com\u002Fjasonmayes\u002FReal-Time-Person-Removal): Removing people from complex backgrounds in real time using TensorFlow.js in the web browser. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_427a826fa4f3.png)](https:\u002F\u002Fgithub.com\u002Fjasonmayes\u002FReal-Time-Person-Removal)\n  * [AdaIN-style](https:\u002F\u002Fgithub.com\u002Fxunhuang1995\u002FAdaIN-style): Arbitrary Style Transfer in Real-time with Adaptive Instance Normalization. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_a0da3a66896e.png)](https:\u002F\u002Fgithub.com\u002Fxunhuang1995\u002FAdaIN-style)\n  * [Frame Interpolation](https:\u002F\u002Fgithub.com\u002Fgoogle-research\u002Fframe-interpolation): Frame Interpolation for Large Motion. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_af68f33f0121.png)](https:\u002F\u002Fgithub.com\u002Fgoogle-research\u002Fframe-interpolation)\n  * [Awesome-Image-Colorization](https:\u002F\u002Fgithub.com\u002FMarkMoHR\u002FAwesome-Image-Colorization): 📚 A collection of Deep Learning based Image Colorization and Video Colorization papers. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_bddba8efb649.png)](https:\u002F\u002Fgithub.com\u002FMarkMoHR\u002FAwesome-Image-Colorization)\n  * [SadTalker](https:\u002F\u002Fgithub.com\u002FOpenTalker\u002FSadTalker): Learning Realistic 3D Motion Coefficients for Stylized Audio-Driven Single Image Talking Face Animation. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_90d38a28c9ce.png)](https:\u002F\u002Fgithub.com\u002FOpenTalker\u002FSadTalker)\n  * [roop](https:\u002F\u002Fgithub.com\u002Fs0md3v\u002Froop): One-click deepfake (face swap). [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_9ccd1142c02a.png)](https:\u002F\u002Fgithub.com\u002Fs0md3v\u002Froop)\n  * [StableVideo](https:\u002F\u002Fgithub.com\u002Frese1f\u002FStableVideo): Text-driven Consistency-aware Diffusion Video Editing. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_3bfda99e9393.png)](https:\u002F\u002Fgithub.com\u002Frese1f\u002FStableVideo)\n  * [MagicEdit](https:\u002F\u002Fgithub.com\u002Fmagic-research\u002Fmagic-edit): High-Fidelity Temporally Coherent Video Editing. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_2a5b107b0dce.png)](https:\u002F\u002Fgithub.com\u002Fmagic-research\u002Fmagic-edit)\n  * [Rerender_A_Video](https:\u002F\u002Fgithub.com\u002Fwilliamyang1991\u002FRerender_A_Video): Zero-Shot Text-Guided Video-to-Video Translation. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_d76043f3b34c.png)](https:\u002F\u002Fgithub.com\u002Fwilliamyang1991\u002FRerender_A_Video)\n  * [DreamEditor](https:\u002F\u002Fgithub.com\u002Fzjy526223908\u002FDreamEditor): Text-Driven 3D Scene Editing with Neural Fields. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_6ed1dd370e29.png)](https:\u002F\u002Fgithub.com\u002Fzjy526223908\u002FDreamEditor)\n  * [4K4D](https:\u002F\u002Fgithub.com\u002Fzju3dv\u002F4K4D): Real-Time 4D View Synthesis at 4K Resolution. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_58bc214f0b25.png)](https:\u002F\u002Fgithub.com\u002Fzju3dv\u002F4K4D)\n  * [AnimateAnyone](https:\u002F\u002Fgithub.com\u002FHumanAIGC\u002FAnimateAnyone): Consistent and Controllable Image-to-Video Synthesis for Character Animation. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_ad690dcdc4be.png)](https:\u002F\u002Fgithub.com\u002FHumanAIGC\u002FAnimateAnyone)\n  * [Moore-AnimateAnyone](https:\u002F\u002Fgithub.com\u002FMooreThreads\u002FMoore-AnimateAnyone): This repository reproduces AnimateAnyone. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_46ecf7c87453.png)](https:\u002F\u002Fgithub.com\u002FMooreThreads\u002FMoore-AnimateAnyone)\n  * [audio2photoreal](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Faudio2photoreal): From Audio to Photoreal Embodiment: Synthesizing Humans in Conversations. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_3622dac23f83.png)](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Faudio2photoreal)\n  * [MagicVideo-V2: Multi-Stage High-Aesthetic Video Generation](https:\u002F\u002Fmagicvideov2.github.io\u002F)\n  * [LWM](https:\u002F\u002Fgithub.com\u002FLargeWorldModel\u002FLWM): A general-purpose large-context multimodal autoregressive model. It is trained on a large dataset of diverse long videos and books using RingAttention and can perform language, image, and video understanding and generation. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_885e74a5722a.png)](https:\u002F\u002Fgithub.com\u002FLargeWorldModel\u002FLWM)\n  * [AniPortrait](https:\u002F\u002Fgithub.com\u002FZejun-Yang\u002FAniPortrait): Audio-Driven Synthesis of Photorealistic Portrait Animation. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_40fe1bb94134.png)](https:\u002F\u002Fgithub.com\u002FZejun-Yang\u002FAniPortrait)\n  * [Champ](https:\u002F\u002Fgithub.com\u002Ffudan-generative-vision\u002Fchamp): Controllable and Consistent Human Image Animation with 3D Parametric Guidance. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_95ca3fa3cd67.png)](https:\u002F\u002Fgithub.com\u002Ffudan-generative-vision\u002Fchamp)\n  * [Streamv2v](https:\u002F\u002Fgithub.com\u002FJeff-LiangF\u002Fstreamv2v): Streaming Video-to-Video Translation with Feature Banks. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_78d51488c23f.png)](https:\u002F\u002Fgithub.com\u002FJeff-LiangF\u002Fstreamv2v)\n  * [Deep-Live-Cam](https:\u002F\u002Fgithub.com\u002Fhacksider\u002FDeep-Live-Cam): Real time face swap and one-click video deepfake with only a single image. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_ecea61a7ee71.png)](https:\u002F\u002Fgithub.com\u002Fhacksider\u002FDeep-Live-Cam)\n  * [Sapiens](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fsapiens): Foundation for Human Vision Models. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_8acfacf134af.png)](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fsapiens)\n  * [ViVid-1-to-3](https:\u002F\u002Fgithub.com\u002Fubc-vision\u002Fvivid123): Novel View Synthesis with Video Diffusion Models. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_5e1c4eb28192.png)](https:\u002F\u002Fgithub.com\u002Fubc-vision\u002Fvivid123)\n  * [VGGT](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fvggt): Visual Geometry Grounded Transformer. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_f1630ef894ee.png)](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fvggt)\n  * [LayerPano3D](https:\u002F\u002Fgithub.com\u002F3DTopia\u002FLayerPano3D): Layered 3D Panorama for Hyper-Immersive Scene Generation. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_e39e841b89da.png)](https:\u002F\u002Fgithub.com\u002F3DTopia\u002FLayerPano3D)\n  * [RealmDreamer](https:\u002F\u002Fgithub.com\u002Fjaidevshriram\u002Frealmdreamer): Text-Driven 3D Scene Generation with Inpainting and Depth Diffusion. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_df07669ceb2a.png)](https:\u002F\u002Fgithub.com\u002Fjaidevshriram\u002Frealmdreamer)\n\n##### 🔎 Detection 🔎\n\n  * [FaceForensics++](https:\u002F\u002Fgithub.com\u002Fondyari\u002FFaceForensics): FaceForensics dataset. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_e4fcfc597027.png)](https:\u002F\u002Fgithub.com\u002Fondyari\u002FFaceForensics)\n  * [DeepFake-Detection](https:\u002F\u002Fgithub.com\u002Fdessa-oss\u002FDeepFake-Detection): Towards deepfake detection that actually works. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_11104d5e3681.png)](https:\u002F\u002Fgithub.com\u002Fdessa-oss\u002FDeepFake-Detection)\n  * [fakeVideoForensics](https:\u002F\u002Fgithub.com\u002Fjiep\u002FfakeVideoForensics): Detect deep fakes videos. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_f1f371bdbcff.png)](https:\u002F\u002Fgithub.com\u002Fjiep\u002FfakeVideoForensics)\n  * [Deepfake-Detection](https:\u002F\u002Fgithub.com\u002FHongguLiu\u002FDeepfake-Detection): The Pytorch implemention of Deepfake Detection based on Faceforensics++. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_61ffd00b26c6.png)](https:\u002F\u002Fgithub.com\u002FHongguLiu\u002FDeepfake-Detection)\n  * [SeqDeepFake](https:\u002F\u002Fgithub.com\u002Frshaojimmy\u002FSeqDeepFake): PyTorch code for SeqDeepFake: Detecting and Recovering Sequential DeepFake Manipulation. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_3f63c4f7f8ff.png)](https:\u002F\u002Fgithub.com\u002Frshaojimmy\u002FSeqDeepFake)\n  * [PCL-I2G](https:\u002F\u002Fgithub.com\u002Fjtchen0528\u002FPCL-I2G): Unofficial Implementation: Learning Self-Consistency for Deepfake Detection. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_32076212bdfb.png)](https:\u002F\u002Fgithub.com\u002Fjtchen0528\u002FPCL-I2G)\n  * [DFDC DeepFake Challenge](https:\u002F\u002Fgithub.com\u002Fselimsef\u002Fdfdc_deepfake_challenge): A prize winning solution for DFDC challenge. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_1f1a5205d864.png)](https:\u002F\u002Fgithub.com\u002Fselimsef\u002Fdfdc_deepfake_challenge)\n  * [POI-Forensics](https:\u002F\u002Fgithub.com\u002Fgrip-unina\u002Fpoi-forensics): Audio-Visual Person-of-Interest DeepFake Detection. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_d462136468ab.png)](https:\u002F\u002Fgithub.com\u002Fgrip-unina\u002Fpoi-forensics)\n  * [Standardizing Detection of Deepfakes: Why Experts Say It’s Important](https:\u002F\u002Fantispoofing.org\u002Fdeepfake-detection-standardization-origin-goals-and-implementation\u002F)\n  * [Want to spot a deepfake? Look for the stars in their eyes](https:\u002F\u002Fras.ac.uk\u002Fnews-and-press\u002Fnews\u002Fwant-spot-deepfake-look-stars-their-eyes)\n  * [Fit for Purpose? Deepfake Detection in the Real World](https:\u002F\u002Farxiv.org\u002Fabs\u002F2510.16556)\n\n#### 📄 Text 📄\n\n##### 🛠️ Tools 🛠️\n  * [GLM-130B](https:\u002F\u002Fgithub.com\u002FTHUDM\u002FGLM-130B): An Open Bilingual Pre-Trained Model. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_0c300c05b08c.png)](https:\u002F\u002Fgithub.com\u002FTHUDM\u002FGLM-130B)\n  * [LongtermChatExternalSources](https:\u002F\u002Fgithub.com\u002Fdaveshap\u002FLongtermChatExternalSources): GPT-3 chatbot with long-term memory and external sources. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_07632c7b31cf.png)](https:\u002F\u002Fgithub.com\u002Fdaveshap\u002FLongtermChatExternalSources)\n  * [sketch](https:\u002F\u002Fgithub.com\u002Fapproximatelabs\u002Fsketch): AI code-writing assistant that understands data content. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_1eb7e84649f8.png)](https:\u002F\u002Fgithub.com\u002Fapproximatelabs\u002Fsketch)\n  * [LangChain](https:\u002F\u002Fgithub.com\u002Fhwchase17\u002Flangchain): ⚡ Building applications with LLMs through composability ⚡. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_3d9f119a9861.png)](https:\u002F\u002Fgithub.com\u002Fhwchase17\u002Flangchain)\n  * [ChatGPT Wrapper](https:\u002F\u002Fgithub.com\u002Fmmabrouk\u002Fchatgpt-wrapper): API for interacting with ChatGPT using Python and from Shell. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_8fecce6b25e3.png)](https:\u002F\u002Fgithub.com\u002Fmmabrouk\u002Fchatgpt-wrapper)\n  * [openai-python](https:\u002F\u002Fgithub.com\u002Fopenai\u002Fopenai-python): The OpenAI Python library provides convenient access to the OpenAI API from applications written in the Python language. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_279b886fa5aa.png)](https:\u002F\u002Fgithub.com\u002Fopenai\u002Fopenai-python)\n  * [Beto](https:\u002F\u002Fgithub.com\u002Fdccuchile\u002Fbeto): Spanish version of the BERT model. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_51c9bfcf0700.png)](https:\u002F\u002Fgithub.com\u002Fdccuchile\u002Fbeto)\n  * [GPT-Code-Clippy](https:\u002F\u002Fgithub.com\u002FCodedotAl\u002Fgpt-code-clippy): GPT-Code-Clippy (GPT-CC) is an open source version of GitHub Copilot, a language model -- based on GPT-3, called GPT-Codex. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_45635dea4017.png)](https:\u002F\u002Fgithub.com\u002FCodedotAl\u002Fgpt-code-clippy)\n  * [GPT Neo](https:\u002F\u002Fgithub.com\u002FEleutherAI\u002Fgpt-neo): An implementation of model parallel GPT-2 and GPT-3-style models using the mesh-tensorflow library. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_e98cb9b2c238.png)](https:\u002F\u002Fgithub.com\u002FEleutherAI\u002Fgpt-neo)\n  * [ctrl](https:\u002F\u002Fgithub.com\u002Fsalesforce\u002Fctrl): Conditional Transformer Language Model for Controllable Generation. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_eb1dfa509913.png)](https:\u002F\u002Fgithub.com\u002Fsalesforce\u002Fctrl)\n  * [Llama](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fllama): Inference code for LLaMA models. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_ab83522a6fbc.png)](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fllama)\n  * [Llama2](https:\u002F\u002Fai.meta.com\u002Fllama\u002F)\n  * [Llama Guard 3](https:\u002F\u002Fllama.meta.com\u002Fdocs\u002Fmodel-cards-and-prompt-formats\u002Fllama-guard-3\u002F)\n  * [UL2 20B](https:\u002F\u002Fai.googleblog.com\u002F2022\u002F10\u002Ful2-20b-open-source-unified-language.html): An Open Source Unified Language Learner\n  * [burpgpt](https:\u002F\u002Fgithub.com\u002Faress31\u002Fburpgpt): A Burp Suite extension that integrates OpenAI's GPT to perform an additional passive scan for discovering highly bespoke vulnerabilities, and enables running traffic-based analysis of any type. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_7c940d52f583.png)](https:\u002F\u002Fgithub.com\u002Faress31\u002Fburpgpt)\n  * [Ollama](https:\u002F\u002Fgithub.com\u002Fjmorganca\u002Follama): Get up and running with Llama 2 and other large language models locally. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_8927bca96dc9.png)](https:\u002F\u002Fgithub.com\u002Fjmorganca\u002Follama)\n  * [SneakyPrompt](https:\u002F\u002Fgithub.com\u002FYuchen413\u002Ftext2image_safety): Jailbreaking Text-to-image Generative Models. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_44c796ca15eb.png)](https:\u002F\u002Fgithub.com\u002FYuchen413\u002Ftext2image_safety)\n    * [Copilot-For-Security](https:\u002F\u002Fgithub.com\u002FAzure\u002FCopilot-For-Security): A generative AI-powered security solution that helps increase the efficiency and capabilities of defenders to improve security outcomes at machine speed and scale, while remaining compliant with responsible AI principles. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_006bff948843.png)](https:\u002F\u002Fgithub.com\u002FAzure\u002FCopilot-For-Security)\n  * [LM Studio](https:\u002F\u002Flmstudio.ai\u002F): Discover, download, and run local LLMs\n  * [Bypass GPT: Convert AI Text to Human-like Content](https:\u002F\u002Fbypassgpt.ai\u002F)\n  * [MGM](https:\u002F\u002Fgithub.com\u002Fdvlab-research\u002FMGM): The framework supports a series of dense and MoE Large Language Models (LLMs) from 2B to 34B with image understanding, reasoning, and generation simultaneously. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_76c77103dfc7.png)](https:\u002F\u002Fgithub.com\u002Fdvlab-research\u002FMGM)\n  * [Secret Llama](https:\u002F\u002Fgithub.com\u002Fabi\u002Fsecret-llama): Fully private LLM chatbot that runs entirely with a browser with no server needed. Supports Mistral and LLama 3. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_d6c514ceeeeb.png)](https:\u002F\u002Fgithub.com\u002Fabi\u002Fsecret-llama)\n  * [Llama3](https:\u002F\u002Fgithub.com\u002Fmeta-llama\u002Fllama3): The official Meta Llama 3 GitHub site. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_a8cb82d0458c.png)](https:\u002F\u002Fgithub.com\u002Fmeta-llama\u002Fllama3)\n  * [Unsloth](https:\u002F\u002Fgithub.com\u002Funslothai\u002Funsloth): Finetune Llama 3.3, Mistral, Phi-4, Qwen 2.5 & Gemma 2x faster with 80% less memory! [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_78572a9ac59a.png)](https:\u002F\u002Fgithub.com\u002Funslothai\u002Funsloth)\n\n\n##### 🔎 Detection 🔎\n\n  * [Detecting Fake Text](https:\u002F\u002Fgithub.com\u002FHendrikStrobelt\u002Fdetecting-fake-text): Giant Language Model Test Room. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_1430342545c2.png)](https:\u002F\u002Fgithub.com\u002FHendrikStrobelt\u002Fdetecting-fake-text)\n  * [Grover](https:\u002F\u002Fgithub.com\u002Frowanz\u002Fgrover): Code for Defending Against Neural Fake News. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_46e6d9d0fd24.png)](https:\u002F\u002Fgithub.com\u002Frowanz\u002Fgrover)\n  * [Rebuff.ai](https:\u002F\u002Fgithub.com\u002Fprotectai\u002Frebuff):  Prompt Injection Detector. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_54507297e69a.png)](https:\u002F\u002Fgithub.com\u002Fprotectai\u002Frebuff)\n  * [New AI classifier for indicating AI-written text](https:\u002F\u002Fopenai.com\u002Fblog\u002Fnew-ai-classifier-for-indicating-ai-written-text\u002F)\n  * [Discover the 4 Magical Methods to Detect AI-Generated Text (including ChatGPT)](https:\u002F\u002Fmedium.com\u002F@itamargolan\u002Funcover-the-four-enchanted-ways-to-identify-ai-generated-text-including-chatgpts-4764847fd609)\n  * [GPTZero](https:\u002F\u002Fgptzero.me)\n  * [AI Content Detector (beta)](https:\u002F\u002Fcopyleaks.com\u002Fai-content-detector)\n  * [A Watermark for Large Language Models](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.10226)\n  * [Can AI-Generated Text be Reliably Detected?](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.11156)\n  * [GPT detectors are biased against non-native English writers](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.02819)\n  * [To ChatGPT, or not to ChatGPT: That is the question!](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.01487)\n  * [Can linguists distinguish between ChatGPT\u002FAI and human writing?: A study of research ethics and academic publishing](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fabs\u002Fpii\u002FS2772766123000289)\n  * [ChatGPT is 
bullshit](https:\u002F\u002Flink.springer.com\u002Farticle\u002F10.1007\u002Fs10676-024-09775-5)\n\n\n##### 💡 Applications 💡\n\n  * [handwrite](https:\u002F\u002Fgithub.com\u002Fbuiltree\u002Fhandwrite): Handwrite generates a custom font based on your handwriting sample. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_c69a1dabc61b.png)](https:\u002F\u002Fgithub.com\u002Fbuiltree\u002Fhandwrite)\n  * [GPT Sandbox](https:\u002F\u002Fgithub.com\u002Fshreyashankar\u002Fgpt3-sandbox): The goal of this project is to enable users to create cool web demos using the newly released OpenAI GPT-3 API with just a few lines of Python. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_8a48693a1cef.png)](https:\u002F\u002Fgithub.com\u002Fshreyashankar\u002Fgpt3-sandbox)\n  * [PassGAN](https:\u002F\u002Fgithub.com\u002Fbrannondorsey\u002FPassGAN): A Deep Learning Approach for Password Guessing. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_9ea5c0c1a8b4.png)](https:\u002F\u002Fgithub.com\u002Fbrannondorsey\u002FPassGAN)\n  * [GPT Index](https:\u002F\u002Fgithub.com\u002Fjerryjliu\u002Fgpt_index): GPT Index is a project consisting of a set of data structures designed to make it easier to use large external knowledge bases with LLMs. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_c02dfd3db5b4.png)](https:\u002F\u002Fgithub.com\u002Fjerryjliu\u002Fgpt_index)\n  * [nanoGPT](https:\u002F\u002Fgithub.com\u002Fkarpathy\u002FnanoGPT): The simplest, fastest repository for training\u002Ffinetuning medium-sized GPTs. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_9fd4a3cee0bc.png)](https:\u002F\u002Fgithub.com\u002Fkarpathy\u002FnanoGPT)\n  * [whatsapp-gpt](https:\u002F\u002Fgithub.com\u002Fdanielgross\u002Fwhatsapp-gpt) [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_1bd9dd1e0e63.png)](https:\u002F\u002Fgithub.com\u002Fdanielgross\u002Fwhatsapp-gpt)\n  * [ChatGPT Chrome Extension](https:\u002F\u002Fgithub.com\u002Fgragland\u002Fchatgpt-chrome-extension): A ChatGPT Chrome extension. Integrates ChatGPT into every text box on the internet.\n  * [Unilm](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002Funilm): Large-scale Self-supervised Pre-training Across Tasks, Languages, and Modalities. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_bef49fb02ea5.png)](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002Funilm)\n  * [minGPT](https:\u002F\u002Fgithub.com\u002Fkarpathy\u002FminGPT): A minimal PyTorch re-implementation of the OpenAI GPT (Generative Pretrained Transformer) training. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_6348bf71a4cc.png)](https:\u002F\u002Fgithub.com\u002Fkarpathy\u002FminGPT)\n  * [CodeGeeX](https:\u002F\u002Fgithub.com\u002FTHUDM\u002FCodeGeeX): An Open Multilingual Code Generation Model. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_32f566baf18e.png)](https:\u002F\u002Fgithub.com\u002FTHUDM\u002FCodeGeeX)\n  * [OpenAI Cookbook](https:\u002F\u002Fgithub.com\u002Fopenai\u002Fopenai-cookbook): Examples and guides for using the OpenAI API. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_9e09be3428f5.png)](https:\u002F\u002Fgithub.com\u002Fopenai\u002Fopenai-cookbook)\n  * [🧠 Awesome ChatGPT Prompts](https:\u002F\u002Fgithub.com\u002Ff\u002Fawesome-chatgpt-prompts): This repo includes ChatGPT prompt curation to use ChatGPT better. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_4fd1d8237a98.png)](https:\u002F\u002Fgithub.com\u002Ff\u002Fawesome-chatgpt-prompts)\n  * [Alice](https:\u002F\u002Fgithub.com\u002Fgreshake\u002FAlice): Giving ChatGPT access to a real terminal. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_6979228961d6.png)](https:\u002F\u002Fgithub.com\u002Fgreshake\u002FAlice)\n  * [Security Code Review With ChatGPT](https:\u002F\u002Fresearch.nccgroup.com\u002F2023\u002F02\u002F09\u002Fsecurity-code-review-with-chatgpt)\n  * [Do Users Write More Insecure Code with AI Assistants?](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.03622)\n  * [Bypassing Gmail's spam filters with ChatGPT](https:\u002F\u002Fneelc.org\u002Fposts\u002Fchatgpt-gmail-spam)\n  * [Recurrent GANs Password Cracker For IoT Password Security Enhancement](https:\u002F\u002Fwww.mdpi.com\u002F1999-4893\u002F15\u002F5\u002F155)\n  * [PentestGPT](https:\u002F\u002Fgithub.com\u002FGreyDGL\u002FPentestGPT): A GPT-empowered penetration testing tool. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_99fc142e3b61.png)](https:\u002F\u002Fgithub.com\u002FGreyDGL\u002FPentestGPT)\n  * [GPT Researcher](https:\u002F\u002Fgithub.com\u002Fassafelovic\u002Fgpt-researcher): GPT based autonomous agent that does online comprehensive research on any given topic. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_e015f00aab45.png)](https:\u002F\u002Fgithub.com\u002Fassafelovic\u002Fgpt-researcher)\n  * [GPT Engineer](https:\u002F\u002Fgithub.com\u002FAntonOsika\u002Fgpt-engineer): Specify what you want it to build, the AI asks for clarification, and then builds it. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_3eefa5f312c0.png)](https:\u002F\u002Fgithub.com\u002FAntonOsika\u002Fgpt-engineer)\n  * [localpilot](https:\u002F\u002Fgithub.com\u002Fdanielgross\u002Flocalpilot): Use GitHub Copilot locally on your Macbook with one-click! [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_ae7dda45280b.png)](https:\u002F\u002Fgithub.com\u002Fdanielgross\u002Flocalpilot)\n  * [WormGPT](https:\u002F\u002Fthehackernews.com\u002F2023\u002F07\u002Fwormgpt-new-ai-tool-allows.html): New AI Tool Allows Cybercriminals to Launch Sophisticated Cyber Attacks\n  * [PoisonGPT](https:\u002F\u002Fblog.mithrilsecurity.io\u002Fpoisongpt-how-we-hid-a-lobotomized-llm-on-hugging-face-to-spread-fake-news\u002F): How we hid a lobotomized LLM on Hugging Face to spread fake news\n  * [PassGPT: Password Modeling and (Guided) Generation with Large Language Models](https:\u002F\u002Fjavirandor.github.io\u002Fassets\u002Fpdf\u002Fpassgpt2023rando.pdf)\n  * [DeepPass — Finding Passwords With Deep Learning](https:\u002F\u002Fposts.specterops.io\u002Fdeeppass-finding-passwords-with-deep-learning-4d31c534cd00)\n  * [GPTFuzz](https:\u002F\u002Fgithub.com\u002Fsherdencooper\u002FGPTFuzz): Red Teaming Large Language Models with Auto-Generated Jailbreak Prompts. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_0b7733da74eb.png)](https:\u002F\u002Fgithub.com\u002Fsherdencooper\u002FGPTFuzz)\n  * [Open Interpreter](https:\u002F\u002Fgithub.com\u002FKillianLucas\u002Fopen-interpreter): OpenAI's Code Interpreter in your terminal, running locally. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_2a0215cc273f.png)](https:\u002F\u002Fgithub.com\u002FKillianLucas\u002Fopen-interpreter)\n  * [Eureka](https:\u002F\u002Fgithub.com\u002Feureka-research\u002FEureka): Human-Level Reward Design via Coding Large Language Models. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_2512629c1525.png)](https:\u002F\u002Fgithub.com\u002Feureka-research\u002FEureka)\n  * [MetaCLIP](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FMetaCLIP): Demystifying CLIP Data. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_70212166e482.png)](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FMetaCLIP)\n  * [LLM OSINT](https:\u002F\u002Fgithub.com\u002Fsshh12\u002Fllm_osint): Proof-of-concept method of using LLMs to gather information from the internet and then perform a task with this information. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_c2ea78de870d.png)](https:\u002F\u002Fgithub.com\u002Fsshh12\u002Fllm_osint)\n  * [HackingBuddyGPT](https:\u002F\u002Fgithub.com\u002Fipa-lab\u002FhackingBuddyGPT): LLMs x PenTesting. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_e6038f5060b7.png)](https:\u002F\u002Fgithub.com\u002Fipa-lab\u002FhackingBuddyGPT)\n  * [ChatGPT-Jailbreaks](https:\u002F\u002Fgithub.com\u002FGabryB03\u002FChatGPT-Jailbreaks): Official jailbreak for ChatGPT (GPT-3.5). 
Send a long message at the start of the conversation with ChatGPT to get offensive, unethical, aggressive, human-like answers in English and Italian. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_fa8f2e45a7c9.png)](https:\u002F\u002Fgithub.com\u002FGabryB03\u002FChatGPT-Jailbreaks)\n  * [Magika](https:\u002F\u002Fgithub.com\u002Fgoogle\u002Fmagika): Detect file content types with deep learning. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_d7dc0c58ba14.png)](https:\u002F\u002Fgithub.com\u002Fgoogle\u002Fmagika)\n  * [Jan](https:\u002F\u002Fgithub.com\u002Fjanhq\u002Fjan): An open source alternative to ChatGPT that runs 100% offline on your computer. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_e10fb8a1192e.png)](https:\u002F\u002Fgithub.com\u002Fjanhq\u002Fjan)\n  * [LibreChat](https:\u002F\u002Fgithub.com\u002Fdanny-avila\u002FLibreChat): Enhanced ChatGPT Clone: Features OpenAI, Assistants API, Azure, Groq, GPT-4 Vision, Mistral, Bing, Anthropic, OpenRouter, Vertex AI, Gemini, AI model switching, message search, langchain, DALL-E-3, ChatGPT Plugins, OpenAI Functions, Secure Multi-User System, Presets, completely open-source for self-hosting. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_8203bf0255bc.png)](https:\u002F\u002Fgithub.com\u002Fdanny-avila\u002FLibreChat)\n  * [Lumina-T2X](https:\u002F\u002Fgithub.com\u002FAlpha-VLLM\u002FLumina-T2X): A unified framework for Text to Any Modality Generation. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_f960a584c947.png)](https:\u002F\u002Fgithub.com\u002FAlpha-VLLM\u002FLumina-T2X)\n\n\n### 📚 Misc 📚\n\n  * [Awesome GPT + Security](https:\u002F\u002Fgithub.com\u002Fcckuailong\u002Fawesome-gpt-security): A curated list of awesome security tools, experimental case or other interesting things with LLM or GPT. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_25e605d9d2dc.png)](https:\u002F\u002Fgithub.com\u002Fcckuailong\u002Fawesome-gpt-security)\n  * [🚀 Awesome Reinforcement Learning for Cyber Security](https:\u002F\u002Fgithub.com\u002FLimmen\u002Fawesome-rl-for-cybersecurity): A curated list of resources dedicated to reinforcement learning applied to cyber security. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_d4bbed6fa74a.png)](https:\u002F\u002Fgithub.com\u002FLimmen\u002Fawesome-rl-for-cybersecurity)\n  * [Awesome Machine Learning for Cyber Security](https:\u002F\u002Fgithub.com\u002Fjivoi\u002Fawesome-ml-for-cybersecurity): A curated list of amazingly awesome tools and resources related to the use of machine learning for cyber security. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_d61ba77e081c.png)](https:\u002F\u002Fgithub.com\u002Fjivoi\u002Fawesome-ml-for-cybersecurity)\n  * [Hugging Face Diffusion Models Course](https:\u002F\u002Fgithub.com\u002Fhuggingface\u002Fdiffusion-models-class): Materials for the Hugging Face Diffusion Models Course. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_07c0b4cf5a5d.png)](https:\u002F\u002Fgithub.com\u002Fhuggingface\u002Fdiffusion-models-class)\n  * [Awesome-AI-Security](https:\u002F\u002Fgithub.com\u002FDeepSpaceHarbor\u002FAwesome-AI-Security): A curated list of AI security resources. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_9f742e059dd8.png)](https:\u002F\u002Fgithub.com\u002FDeepSpaceHarbor\u002FAwesome-AI-Security)\n  * [ML for Hackers](https:\u002F\u002Fgithub.com\u002Fjohnmyleswhite\u002FML_for_Hackers): Code accompanying the book \"Machine Learning for Hackers\". [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_505c23463561.png)](https:\u002F\u002Fgithub.com\u002Fjohnmyleswhite\u002FML_for_Hackers)\n  * [Awful AI](https:\u002F\u002Fgithub.com\u002Fdaviddao\u002Fawful-ai): Awful AI is a curated list to track current scary usages of AI - hoping to raise awareness. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_5b231eabb7b3.png)](https:\u002F\u002Fgithub.com\u002Fdaviddao\u002Fawful-ai)\n  * [NIST AI Risk Management Framework Playbook](https:\u002F\u002Fpages.nist.gov\u002FAIRMF)\n  * [SoK: Explainable Machine Learning for Computer Security Applications](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.10605)\n  * [Who Evaluates the Evaluators? 
On Automatic Metrics for Assessing AI-based Offensive Code Generators](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.06008)\n  * [Vulnerability Prioritization: An Offensive Security Approach](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.11182)\n  * [MITRE ATLAS™](https:\u002F\u002Fatlas.mitre.org) (Adversarial Threat Landscape for Artificial-Intelligence Systems)\n  * [A Survey on Reinforcement Learning Security with Application to Autonomous Driving](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.06123)\n  * [How to avoid machine learning pitfalls: a guide for academic researchers](https:\u002F\u002Farxiv.org\u002Fabs\u002F2108.02497)\n  * [A curated list of AI Security & Privacy events](https:\u002F\u002Fgithub.com\u002FZhengyuZhao\u002FAI-Security-and-Privacy-Events)\n  * [NIST AI 100-2 E2025](https:\u002F\u002Fdoi.org\u002F10.6028\u002FNIST.AI.100-2e2025): Adversarial Machine Learning. A Taxonomy and Terminology of Attacks and Mitigations.\n  * [🇪🇸 RootedCon 2023 - Inteligencia artificial ofensiva - ¿Cómo podemos estar  preparados?](\u002Fslides\u002FRootedCon_2023.pdf)\n  * [Security of AI-Systems: Fundamentals - Adversarial Deep Learning](https:\u002F\u002Fwww.bsi.bund.de\u002FSharedDocs\u002FDownloads\u002FEN\u002FBSI\u002FKI\u002FSecurity-of-AI-systems_fundamentals.pdf)\n  * [Beyond the Safeguards: Exploring the Security Risks of ChatGPT](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.08005)\n  * [The AI Attack Surface Map v1.0](https:\u002F\u002Fdanielmiessler.com\u002Fblog\u002Fthe-ai-attack-surface-map-v1-0)\n  * [On the Impossible Safety of Large AI Models](https:\u002F\u002Farxiv.org\u002Fabs\u002F2209.15259)\n  * [Frontier AI Regulation: Managing Emerging Risks to Public Safety](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.03718)\n  * [Multilayer Framework for Good Cybersecurity Practices for AI](https:\u002F\u002Fwww.enisa.europa.eu\u002Fpublications\u002Fmultilayer-framework-for-good-cybersecurity-practices-for-ai)\n  * [Introducing Google’s 
Secure AI Framework](https:\u002F\u002Fblog.google\u002Ftechnology\u002Fsafety-security\u002Fintroducing-googles-secure-ai-framework\u002F)\n  * [OWASP Top 10 for LLM](https:\u002F\u002Fowasp.org\u002Fwww-project-top-10-for-large-language-model-applications\u002Fassets\u002FPDF\u002FOWASP-Top-10-for-LLMs-2023-v1_0.pdf)\n  * [Awesome LLM Security](https:\u002F\u002Fgithub.com\u002Fcorca-ai\u002Fawesome-llm-security): A curation of awesome tools, documents and projects about LLM Security. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_ccfefed83528.png)](https:\u002F\u002Fgithub.com\u002Fcorca-ai\u002Fawesome-llm-security)\n  * A framework to securely use LLMs in companies. [Part 1: Overview of Risks](https:\u002F\u002Fboringappsec.substack.com\u002Fp\u002Fedition-21-a-framework-to-securely). [Part 2: Managing risk](https:\u002F\u002Fboringappsec.substack.com\u002Fp\u002Fedition-22-a-framework-to-securely). [Part 3: Securing ChatGPT and GitHub Copilot](https:\u002F\u002Fboringappsec.substack.com\u002Fp\u002Fedition-23-a-framework-to-securely).\n  * [A Study on Robustness and Reliability of Large Language Model Code Generation](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.10335)\n  * [Identifying AI-generated images with SynthID](https:\u002F\u002Fwww.deepmind.com\u002Fblog\u002Fidentifying-ai-generated-images-with-synthid)\n  * [Auditing Large Language Models: A Three-Layered Approach](https:\u002F\u002Fpapers.ssrn.com\u002Fsol3\u002Fpapers.cfm?abstract_id=4361607)\n  * [Resolving the battle of short‑ vs. 
long‑term AI risk](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002Fs43681-023-00336-y.pdf)\n  * [FraudGPT: The Villain Avatar of ChatGPT](https:\u002F\u002Fnetenrich.com\u002Fblog\u002Ffraudgpt-the-villain-avatar-of-chatgpt)\n  * [AI Risks - Schneier on Security](https:\u002F\u002Fwww.schneier.com\u002Fblog\u002Farchives\u002F2023\u002F10\u002Fai-risks.html)\n  * [Use of LLMs for Illicit Purposes: Threats, Prevention Measures, and Vulnerabilities](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.12833)\n  * [AI Red-Teaming Is Not a One-Stop Solution to AI Harms: Recommendations for Using Red-Teaming for AI Accountability](https:\u002F\u002Fdatasociety.net\u002Fwp-content\u002Fuploads\u002F2023\u002F10\u002FRecommendations-for-Using-Red-Teaming-for-AI-Accountability-PolicyBrief.pdf)\n  * [A Taxonomy of Trustworthiness for Artificial Intelligence](https:\u002F\u002Fcltc.berkeley.edu\u002Fwp-content\u002Fuploads\u002F2023\u002F01\u002FTaxonomy_of_AI_Trustworthiness.pdf)\n  * [Managing AI Risks in an Era of Rapid Progress](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.17688)\n  * [Google - Acting on our commitment to safe and secure AI](https:\u002F\u002Fblog.google\u002Ftechnology\u002Fsafety-security\u002Fgoogle-ai-security-expansion\u002F)\n  * [Offensive ML Playbook](https:\u002F\u002Fwiki.offsecml.com\u002FWelcome+to+the+Offensive+ML+Playbook)\n  * [Demystifying Generative AI 🤖 A Security Researcher's Notes](https:\u002F\u002Fblog.openthreatresearch.com\u002Fdemystifying-generative-ai-a-security-researchers-notes\u002F)\n  * [GenAI-Security-Adventures](https:\u002F\u002Fgithub.com\u002FOTRF\u002FGenAI-Security-Adventures): An open-source initiative to share notes, presentations, and a diverse collection of experiments presented in Jupyter Notebooks, all aimed at helping you grasp the essential concepts behind large language models and exploring the intriguing intersection of security and natural language processing. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_47631f80621c.png)](https:\u002F\u002Fgithub.com\u002FOTRF\u002FGenAI-Security-Adventures)\n  * [AI Safety Camp](https:\u002F\u002Faisafety.camp\u002F) connects you with a research lead to collaborate on a project – to see where your work could help ensure future AI is safe.\n  * [Guidelines for secure AI system development](https:\u002F\u002Fwww.ncsc.gov.uk\u002Ffiles\u002FGuidelines-for-secure-AI-system-development.pdf)\n  * [Approach to Artificial Intelligence and Cybersecurity. BEST PRACTICE REPORT](https:\u002F\u002Fwww.ccn-cert.cni.es\u002Fes\u002Finformes\u002Finformes-de-buenas-practicas-bp\u002F7192-ccn-cert-bp-30-approach-to-artificial-intelligence-and-cybersecurity\u002Ffile.html)\n  * [Stanford Safe, Secure, and Trustworthy AI EO 14110 Tracker](https:\u002F\u002Fdocs.google.com\u002Fspreadsheets\u002Fd\u002F1xOL4hkQ2pLR-IAs3awIiXjPLmhIeXyE5-giJ5nT-h1M\u002Fedit#gid=142633882)\n  * [Awesome ML Security](https:\u002F\u002Fgithub.com\u002Ftrailofbits\u002Fawesome-ml-security): A curated list of awesome machine learning security references, guidance, tools, and more. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_243af7c11d15.png)](https:\u002F\u002Fgithub.com\u002Ftrailofbits\u002Fawesome-ml-security)\n  * [AI's Predictable Path: 7 Things to Expect From AI in 2024+](https:\u002F\u002Fdanielmiessler.com\u002Fp\u002Fai-predictable-path-7-components-2024)\n  * [Artificial Intelligence and Cybersecurity](https:\u002F\u002Fwww.ismsforum.es\u002Fficheros\u002Fdescargas\u002Fisms-gt-ia-021707141605.pdf) (in Spanish :es:)\n  * [Vigil](https:\u002F\u002Fgithub.com\u002Fdeadbits\u002Fvigil-llm): Detect prompt injections, jailbreaks, and other potentially risky Large Language Model (LLM) inputs. 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_473e74477e17.png)](https:\u002F\u002Fgithub.com\u002Fdeadbits\u002Fvigil-llm)\n  * [Generative AI Models - Opportunities and Risks for Industry and Authorities](https:\u002F\u002Fwww.bsi.bund.de\u002FSharedDocs\u002FDownloads\u002FEN\u002FBSI\u002FKI\u002FGenerative_AI_Models.pdf)\n  * [Deploying AI Systems Securely. Best Practices for Deploying Secure and Resilient AI Systems](https:\u002F\u002Fmedia.defense.gov\u002F2024\u002FApr\u002F15\u002F2003439257\u002F-1\u002F-1\u002F0\u002FCSI-DEPLOYING-AI-SYSTEMS-SECURELY.PDF)\n  * [NIST AI 600-1: Artificial Intelligence Risk Management Framework: Generative Artificial Intelligence Profile](https:\u002F\u002Fairc.nist.gov\u002Fdocs\u002FNIST.AI.600-1.GenAI-Profile.ipd.pdf)\n  * [:fr: ANSSI: Recommandations De Sécurité Pour Un Système d'IA Générative (Security Recommendations for a Generative AI System)](https:\u002F\u002Fcyber.gouv.fr\u002Fsites\u002Fdefault\u002Ffiles\u002Fdocument\u002FRecommandations_de_s%C3%A9curit%C3%A9_pour_un_syst%C3%A8me_d_IA_g%C3%A9n%C3%A9rative.pdf)\n  * [PyRIT](https:\u002F\u002Fgithub.com\u002FAzure\u002FPyRIT): The Python Risk Identification Tool for generative AI (PyRIT) is an open-access automation framework to empower security professionals and machine learning engineers to proactively find risks in their generative AI systems. [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_f12c2fd8bff9.png)](https:\u002F\u002Fgithub.com\u002FAzure\u002FPyRIT)\n  * [OWASP-Agentic-AI](https:\u002F\u002Fgithub.com\u002Fprecize\u002FOWASP-Agentic-AI): Working to create the OWASP Top 10 for Agentic AI (AI Agent Security). 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_c7b36f8ad01c.png)](https:\u002F\u002Fgithub.com\u002Fprecize\u002FOWASP-Agentic-AI)\n  * [Towards Guaranteed Safe AI: A Framework for Ensuring Robust and Reliable AI Systems](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.06624)\n  * [Defining Real AI Risks](https:\u002F\u002Fjosephthacker.com\u002Fai\u002F2024\u002F05\u002F19\u002Fdefining-real-ai-risks.html)\n  * [Secure approach to generative AI](https:\u002F\u002Faws.amazon.com\u002Fes\u002Fai\u002Fgenerative-ai\u002Fsecurity\u002F)\n  * [Large Language Models in Cybersecurity](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-031-54827-7.pdf)\n  * [Hey, That's My Model! Introducing Chain & Hash, An LLM Fingerprinting Technique](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.10887)\n  * [Generative AI Misuse: A Taxonomy of Tactics and Insights from Real-World Data](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.13843)\n  * [AI Risk Repository](https:\u002F\u002Fairisk.mit.edu\u002F)\n  * [Revisiting AI Red-Teaming](https:\u002F\u002Fcset.georgetown.edu\u002Farticle\u002Frevisiting-ai-red-teaming\u002F)\n  * [German-French recommendations for the use of AI programming assistants](https:\u002F\u002Fwww.bsi.bund.de\u002FSharedDocs\u002FDownloads\u002FEN\u002FBSI\u002FKI\u002FANSSI_BSI_AI_Coding_Assistants.html)\n  * [Scalable watermarking for identifying large language model outputs](https:\u002F\u002Fwww.nature.com\u002Farticles\u002Fs41586-024-08025-4)\n  * [Lessons from red teaming 100 generative AI products](https:\u002F\u002Fairedteamwhitepapers.blob.core.windows.net\u002Flessonswhitepaper\u002FMS_AIRT_Lessons_eBook.pdf)\n  * [LLM red teaming guide](https:\u002F\u002Fwww.promptfoo.dev\u002Fdocs\u002Fred-team\u002F)\n  * [Open Challenges in Multi-Agent Security: Towards Secure Systems of Interacting AI Agents](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.02077)\n  * [LLMs 
unlock new paths to monetizing exploits](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.02077) \n  * [Spill The Beans: Exploiting CPU Cache Side-Channels to Leak Tokens from Large Language Models](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.00817)\n  * [Enhanced automated code vulnerability repair using large language models](https:\u002F\u002Fdoi.org\u002F10.1016\u002Fj.engappai.2024.109291)\n  * [Your Brain on ChatGPT: Accumulation of Cognitive Debt when Using an AI Assistant for Essay Writing Task](https:\u002F\u002Farxiv.org\u002Fabs\u002F2506.08872)\n  * [AIRTBench: Measuring Autonomous AI Red Teaming Capabilities in Language Models](https:\u002F\u002Farxiv.org\u002Fabs\u002F2506.14682)\n  * [Slopsquatting](https:\u002F\u002Fen.wikipedia.org\u002Fwiki\u002FSlopsquatting)\n  * [SP 800-53 Control Overlays for Securing AI Systems Concept Paper](https:\u002F\u002Fcsrc.nist.gov\u002Fcsrc\u002Fmedia\u002FProjects\u002Fcosais\u002Fdocuments\u002FNIST-Overlays-SecuringAI-concept-paper.pdf)\n  * [Agents Rule of Two: A Practical Approach to AI Agent Security](https:\u002F\u002Fai.meta.com\u002Fblog\u002Fpractical-ai-agent-security\u002F)\n  * [ETSI EN 304 223 V2.1.1 (2025-12)](https:\u002F\u002Fwww.etsi.org\u002Fdeliver\u002Fetsi_en\u002F304200_304299\u002F304223\u002F02.01.01_60\u002Fen_304223v020101p.pdf): Securing Artificial Intelligence (SAI); Baseline Cyber Security Requirements for AI Models and Systems \n  * [Evaluating AGENTS.md: Are Repository-Level Context Files Helpful for Coding Agents?](https:\u002F\u002Farxiv.org\u002Fabs\u002F2602.11988)\n  * [Handbook of Digital Face Manipulation and Detection](https:\u002F\u002Flink.springer.com\u002Fbook\u002F10.1007\u002F978-3-030-87664-7)\n  * [Security Considerations for Artificial Intelligence Agents](https:\u002F\u002Farxiv.org\u002Fabs\u002F2603.12230)\n\n## 📊 Surveys 📊\n\n  * [The Threat of Offensive AI to Organizations](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.15764)\n  * [Artificial Intelligence in 
the Cyber Domain: Offense and Defense](https:\u002F\u002Fwww.mdpi.com\u002F2073-8994\u002F12\u002F3\u002F410)\n  * [A survey on adversarial attacks and defenses](https:\u002F\u002Fietresearch.onlinelibrary.wiley.com\u002Fdoi\u002F10.1049\u002Fcit2.12028)\n  * [Adversarial Deep Learning: A Survey on Adversarial Attacks and Defense Mechanisms on Image Classification](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9895425)\n  * [A Survey of Privacy Attacks in Machine Learning](https:\u002F\u002Farxiv.org\u002Fabs\u002F2007.07646)\n  * [Towards Security Threats of Deep Learning Systems: A Survey](https:\u002F\u002Farxiv.org\u002Fabs\u002F1911.12562)\n  * [A Survey on Security Threats and Defensive Techniques of Machine Learning: A Data-Driven View](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F8290925)\n  * [SoK: Security and Privacy in Machine Learning](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F8406613)\n  * [Adversarial Machine Learning: The Rise in AI-Enabled Crime and its Role in Spam Filter Evasion](https:\u002F\u002Fpapers.ssrn.com\u002Fsol3\u002Fpapers.cfm?abstract_id=4155496)\n  * [Threats, Vulnerabilities, and Controls of Machine Learning Based Systems: A Survey and Taxonomy](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.07474)\n  * [Adversarial Attacks and Defences: A Survey](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1810.00069.pdf)\n  * [Security Matters: A Survey on Adversarial Machine Learning](https:\u002F\u002Farxiv.org\u002Fabs\u002F1810.07339)\n  * [A Survey on Adversarial Attacks for Malware Analysis](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2111.08223.pdf)\n  * [Adversarial Machine Learning in Image Classification: A Survey Towards the Defender’s Perspective](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2009.03728.pdf)\n  * [A Survey of Robust Adversarial Training in Pattern Recognition: Fundamental, Theory, and Methodologies](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.14046)\n  * [Privacy in Large Language 
Models: Attacks, Defenses and Future Directions](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.10383)\n\n## 🗣 Maintainers  🗣\n\n\u003Ctable>\n  \u003Ctr>\n    \u003Ctd align=\"center\">\u003Ca href=\"https:\u002F\u002Fgithub.com\u002FMiguel000\">\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_a406d74a76f6.png\" width=\"150;\" alt=\"\"\u002F>\u003Cbr \u002F>\u003Csub>\u003Cb>Miguel Hernández\u003C\u002Fb>\u003C\u002Fsub>\u003C\u002Fa>\u003C\u002Ftd>\n    \u003Ctd align=\"center\">\u003Ca href=\"https:\u002F\u002Fgithub.com\u002Fjiep\">\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_f64601ca6024.png\" width=\"150px;\" alt=\"\"\u002F>\u003Cbr \u002F>\u003Csub>\u003Cb>José Ignacio Escribano\u003C\u002Fb>\u003C\u002Fsub>\u003C\u002Fa>\u003C\u002Ftd>\n  \u003C\u002Ftr>\n\u003C\u002Ftable>\n\n## ©️ License ©️\n\n[![License: CC BY-SA 4.0](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FLicense-CC%20BY--SA%204.0-lightgrey.svg)](https:\u002F\u002Fcreativecommons.org\u002Flicenses\u002Fby-sa\u002F4.0\u002F)\n\n* [Creative Commons Attribution-Share Alike 4.0 International](LICENSE.txt)\n","# 攻击性人工智能合集\n\n一份精选的实用资源列表，涵盖攻击性人工智能领域。\n\n## 📁 目录 📁\n- [🚫 滥用 🚫](#-abuse-)\n  - [🧠 对抗机器学习 🧠](#-adversarial-machine-learning-)\n    - [⚡ 攻击 ⚡](#-attacks-)\n      - [🔒 提取 🔒](#-extraction-)\n        - [⚠️ 局限性 ⚠️](#️-limitations-️)\n        - [🛡️ 防御措施 🛡️](#️-defensive-actions-️)\n        - [🔗 有用链接 🔗](#-useful-links-)\n      - [⬅️ 还原（或推断）⬅️](#️-inversion-or-inference-️)\n        - [🛡️ 防御措施 🛡️](#️-defensive-actions-️-1)\n        - [🔗 有用链接 🔗](#-useful-links--1)\n      - [💉 毒化 💉](#-poisoning-)\n        - [🔓 后门 🔓](#-backdoors-)\n        - [🛡️ 防御措施 🛡️](#️-defensive-actions-️-2)\n        - [🔗 有用链接 🔗](#-useful-links--2)\n      - [🏃‍♂️ 规避 🏃‍♂️](#️-evasion-️)\n        - [🛡️ 防御措施 🛡️](#️-defensive-actions-️-3)\n        - [🔗 有用链接 🔗](#-useful-links--3)\n    - [🛠️ 工具 🛠️](#️-tools-️)\n        
- [ART](#art)\n        - [Cleverhans](#cleverhans)\n- [🔧 应用 🔧](#-use-)\n  - [🕵️‍♂️ 渗透测试 🕵️‍♂️](#️️-pentesting-️️)\n  - [🦠 恶意软件 🦠](#-malware-)\n  - [🗺️ OSINT 🗺️](#️osint-️)\n  - [📧 钓鱼邮件 📧](#phishing-)\n  - [👨‍🎤 生成式AI 👨‍🎤](#-generative-ai-)\n    - [🔊 音频 🔊](#-audio-)\n      - [🛠️ 工具 🛠️](#️-tools-️-1)\n      - [💡 应用场景 💡](#-applications-)\n      - [🔎 检测 🔎](#-detection-)\n    - [📷 图像 📷](#-image-)\n      - [🛠️ 工具 🛠️](#️-tools-️-2)\n      - [💡 应用场景 💡](#-applications--1)\n      - [🔎 检测 🔎](#-detection--1)\n    - [🎥 视频 🎥](#-video-)\n      - [🛠️ 工具 🛠️](#️-tools-️-3)\n      - [💡 应用场景 💡](#-applications--2)\n      - [🔎 检测 🔎](#-detection--2)\n    - [📄 文本 📄](#-text-)\n      - [🛠️ 工具 🛠️](#️-tools-️-4)\n      - [🔎 检测 🔎](#-detection--3)\n      - [💡 应用场景 💡](#-applications--3)\n  - [📚 杂项 📚](#-misc-)\n- [📊 调查报告 📊](#-surveys-)\n- [🗣 贡献者 🗣](#-contributors-)\n- [©️ 许可证 ©️](#️-license-️)\n\n## 🚫 滥用 🚫\n\n利用人工智能模型的漏洞进行攻击。\n\n### 🧠 对抗机器学习 🧠\n\n对抗机器学习旨在评估这些模型的弱点，并提供相应的防御措施。\n\n#### ⚡ 攻击 ⚡\n\n攻击主要分为四类：提取、还原、毒化和规避。\n\n![对抗机器学习攻击](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_bf8200d7c81e.png)\n\n##### 🔒 提取 🔒\n\n通过发送请求以最大化信息提取量，试图窃取模型的参数和超参数。\n\n![提取攻击](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_6c6e8e2c650c.png)\n\n根据攻击者对目标模型的了解程度，可以进行白盒攻击和黑盒攻击。\n\n在最简单的白盒情况下（当攻击者完全了解模型结构时，例如一个Sigmoid函数），可以建立一组易于求解的线性方程。\n\n而在一般情况下，即对模型了解不足时，则会使用替代模型。该模型通过对原始模型发出的请求进行训练，以模仿原始模型的功能。\n\n![白盒与黑盒提取攻击](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_8f306e35fc3d.png)\n\n###### ⚠️ 局限性 ⚠️\n\n  * 训练替代模型在很多情况下等同于从头开始训练一个新模型。\n\n  * 计算成本非常高。\n\n  * 攻击者在被检测之前，能够发出的请求数量有限。\n\n###### 🛡️ 防御措施 🛡️\n\n  * 对输出值进行四舍五入。\n\n  * 使用[差分隐私](https:\u002F\u002Fen.wikipedia.org\u002Fwiki\u002FDifferential_privacy)技术。\n\n  * 使用[集成学习](https:\u002F\u002Fen.wikipedia.org\u002Fwiki\u002FEnsemble_learning)方法。\n\n  * 采用特定的防御机制\n    * [特定架构](https:\u002F\u002Farxiv.org\u002Fabs\u002F1711.07221)\n    * 
[PRADA](https:\u002F\u002Farxiv.org\u002Fabs\u002F1805.02628)\n    * [自适应错误信息](https:\u002F\u002Farxiv.org\u002Fabs\u002F1911.07100)\n    * ...\n\n###### 🔗 有用链接 🔗\n\n  * [通过预测API窃取机器学习模型](https:\u002F\u002Farxiv.org\u002Fabs\u002F1609.02943)\n  * [窃取机器学习中的超参数](https:\u002F\u002Farxiv.org\u002Fabs\u002F1802.05351)\n  * [仿冒网络：通过随机未标记数据诱使供述来窃取黑盒模型的功能](https:\u002F\u002Farxiv.org\u002Fabs\u002F1806.05476)\n  * [MLaaS范式下的模型提取警告](https:\u002F\u002Farxiv.org\u002Fabs\u002F1711.07221)\n  * [模仿CNN：通过说服供述并结合随机未标记数据窃取知识](https:\u002F\u002Farxiv.org\u002Fabs\u002F1806.05476)\n  * [预测毒化：迈向防御深度神经网络模型窃取攻击的方法](https:\u002F\u002Farxiv.org\u002Fabs\u002F1906.10908)\n  * [通过时间侧信道窃取神经网络](https:\u002F\u002Farxiv.org\u002Fabs\u002F1812.11720)\n  * [针对归纳图神经网络的模型窃取攻击](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.08331)\n  * [高精度与高保真度的神经网络提取](https:\u002F\u002Farxiv.org\u002Fabs\u002F1909.01838)\n  * [毒化大规模训练数据集是可行的](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.10149)\n  * [多项式时间密码分析法提取神经网络模型](https:\u002F\u002Feprint.iacr.org\u002F2023\u002F1526)\n  * [针对文本到图像生成模型的提示特异性毒化攻击](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.13828)\n  * [优秀数据毒化与后门攻击资源库](https:\u002F\u002Fgithub.com\u002Fpenghui-yang\u002Fawesome-data-poisoning-and-backdoor-attacks)：一份精心整理的论文及资源列表，涉及数据毒化、后门攻击及其防御措施。[![星标数](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_d7feb193321b.png)](https:\u002F\u002Fgithub.com\u002Fpenghui-yang\u002Fawesome-data-poisoning-and-backdoor-attacks)\n  * [BackdoorBox](https:\u002F\u002Fgithub.com\u002FTHUYimingLi\u002FBackdoorBox)：一个开源的Python工具箱，用于后门攻击与防御。[![星标数](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_adf559535dad.png)](https:\u002F\u002Fgithub.com\u002FTHUYimingLi\u002FBackdoorBox)\n  * [窃取部分生产级语言模型](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.06634)\n  * [硬标签密码分析法提取神经网络模型](https:\u002F\u002Feprint.iacr.org\u002F2024\u002F1403)\n  * 
[https:\u002F\u002Fwww.anthropic.com\u002Fnews\u002Fdetecting-and-preventing-distillation-attacks](https:\u002F\u002Fwww.anthropic.com\u002Fnews\u002Fdetecting-and-preventing-distillation-attacks)\n\n##### ⬅️ 还原（或推断）⬅️\n\n其目的是逆转机器学习模型的信息流。\n\n![推断攻击](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_6362764efa6e.png)\n\n这类攻击使攻击者能够了解原本并未打算公开的模型内部信息。\n\n它们还可以帮助我们获取训练数据或作为模型统计特征的信息。\n\n主要有三种类型：\n\n  * **成员身份推断攻击（MIA）**：攻击者试图确定某个样本是否曾被用于训练过程。\n\n  * **属性推断攻击（PIA）**：攻击者旨在提取那些在训练阶段并未明确编码为特征的统计特性。\n\n  * **重构**：攻击者尝试从训练集中重建一个或多个样本及其对应的标签。也称为还原。\n\n\n###### 🛡️ 防御措施 🛡️\n\n* 使用先进的密码学技术。应对措施包括[差分隐私](https:\u002F\u002Fen.wikipedia.org\u002Fwiki\u002FDifferential_privacy)、[同态加密](https:\u002F\u002Fen.wikipedia.org\u002Fwiki\u002FHomomorphic_encryption)和[安全多方计算](https:\u002F\u002Fen.wikipedia.org\u002Fwiki\u002FSecure_multi-party_computation)。\n\n  * 由于过拟合与隐私之间的关系，使用诸如[Dropout](https:\u002F\u002Fen.wikipedia.org\u002Fwiki\u002FDilution_(neural_networks))之类的正则化技术。\n\n  * [模型压缩](https:\u002F\u002Fmedium.com\u002Fgsi-technology\u002Fan-overview-of-model-compression-techniques-for-deep-learning-in-space-3fd8d4ce84e5)已被提议作为抵御重建攻击的一种防御手段。\n\n###### 🔗 有用链接 🔗\n\n  * [针对机器学习模型的成员推理攻击](https:\u002F\u002Farxiv.org\u002Fabs\u002F1610.05820)\n  * [利用置信度信息的模型逆向攻击及基本防御措施](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F2810103.2813677)\n  * [记住过多信息的机器学习模型](https:\u002F\u002Farxiv.org\u002Fabs\u002F1709.07886)\n  * [ML-Leaks：独立于模型和数据的成员推理攻击及其在机器学习模型上的防御](https:\u002F\u002Farxiv.org\u002Fabs\u002F1806.01246)\n  * [GAN下的深度模型：协作式深度学习中的信息泄露](https:\u002F\u002Farxiv.org\u002Fabs\u002F1702.07464)\n  * [LOGAN：针对生成模型的成员推理攻击](https:\u002F\u002Fpetsymposium.org\u002Fpopets\u002F2019\u002Fpopets-2019-0008.php)\n  * [过拟合、鲁棒性与恶意算法：机器学习中潜在隐私风险原因的研究](https:\u002F\u002Fcontent.iospress.com\u002Farticles\u002Fjournal-of-computer-security\u002Fjcs191362)\n  * [深度学习的全面隐私分析：被动与主动白盒推理攻击下的独立及联邦学习](https:\u002F\u002Farxiv.org\u002Fabs\u002F1812.00910)\n  * 
[针对协作学习的推理攻击](https:\u002F\u002Farxiv.org\u002Fabs\u002F1805.04049)\n  * [秘密分享者：评估与测试神经网络中的意外记忆](https:\u002F\u002Farxiv.org\u002Fabs\u002F1802.08232)\n  * [迈向机器学习中的安全与隐私科学](https:\u002F\u002Farxiv.org\u002Fabs\u002F1611.03814)\n  * [MemGuard：通过对抗样本防御黑盒成员推理攻击](https:\u002F\u002Farxiv.org\u002Fabs\u002F1909.10594)\n  * [从大型语言模型中提取训练数据](https:\u002F\u002Farxiv.org\u002Fabs\u002F2012.07805)\n  * [利用置换不变表示对全连接神经网络进行属性推理攻击](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3243734.3243834)\n  * [从扩散模型中提取训练数据](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.13188)\n  * [基于人类脑活动的潜在扩散模型高分辨率图像重建](https:\u002F\u002Fwww.biorxiv.org\u002Fcontent\u002F10.1101\u002F2022.11.18.517004v1)\n  * [在低误报条件下窃取并绕过恶意软件分类器和杀毒软件](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fabs\u002Fpii\u002FS0167404823001025)\n  * [基于对抗方法的真实指纹呈现攻击](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10295513)\n  * [主动对抗测试](https:\u002F\u002Fgithub.com\u002Fgoogle-research\u002Factive-adversarial-tests)：提升对抗鲁棒性评估的信心。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_da359b286997.png)](https:\u002F\u002Fgithub.com\u002Fgoogle-research\u002Factive-adversarial-tests)\n  * [GPT越狱状态](https:\u002F\u002Fgithub.com\u002Ftg12\u002Fgpt_jailbreak_status)：关于OpenAI GPT语言模型越狱状态的更新。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_053b74118c66.png)](https:\u002F\u002Fgithub.com\u002Ftg12\u002Fgpt_jailbreak_status)\n  * [LLM成员推理的速度提升一个数量级](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.14513)\n  * [GPT-oss泄露的关于OpenAI训练数据的信息](https:\u002F\u002Ffi-le.net\u002Foss\u002F)\n  * [关于联邦学习中主动梯度反转攻击的可检测性](https:\u002F\u002Farxiv.org\u002Fabs\u002F2511.10502)\n\n##### 💉 毒化 
💉\n\n其目标是通过使机器学习模型的准确性降低来破坏训练集。\n\n![毒化攻击](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_87a289069531.png)\n\n这种攻击在对训练数据实施时很难被发现，因为攻击可以在使用相同训练数据的不同模型之间传播。\n\n攻击者试图通过修改决策边界来破坏模型的可用性，从而产生错误的预测，或者在模型中创建后门。在后一种情况下，模型在大多数情况下表现正常（返回预期的预测结果），但当遇到攻击者专门设计的某些输入时，却会产生非预期的结果。攻击者可以操纵预测结果，并借此发动未来的攻击。\n\n##### 🔓 后门 🔓\n\n[BadNets](https:\u002F\u002Farxiv.org\u002Fabs\u002F1708.06733)是机器学习模型中最简单的后门类型。此外，即使将模型重新训练用于与原始模型不同的任务（迁移学习），BadNets仍然能够保留在模型中。\n\n需要注意的是，**公开的预训练模型可能包含后门**。\n\n###### 🛡️ 防御措施 🛡️\n\n  * 检测受污染的数据，并结合数据净化技术。\n\n  * 鲁棒的训练方法。\n\n  * 特定的防御措施。\n    * [Neural Cleanse：识别并缓解神经网络中的后门攻击](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F8835365)\n    * [STRIP：一种防御深度神经网络木马攻击的方法](https:\u002F\u002Farxiv.org\u002Fabs\u002F1902.06531)\n    * [通过激活聚类检测深度神经网络中的后门攻击](https:\u002F\u002Farxiv.org\u002Fabs\u002F1811.03728)\n    * [ABS：通过人工脑刺激扫描神经网络中的后门](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3319535.3363216)\n    * [DeepInspect：深度神经网络的黑盒木马检测与缓解框架](https:\u002F\u002Fwww.ijcai.org\u002Fproceedings\u002F2019\u002F647)\n    * [通过生成式分布建模防御神经网络后门](https:\u002F\u002Farxiv.org\u002Fabs\u002F1910.04749)\n    * [人脸识别系统中后门攻击及其防御的综合综述](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F10480615)\n    * [DataElixir：通过扩散模型净化受污染的数据集以缓解后门攻击](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.11057)\n\n###### 🔗 有用链接 🔗\n\n* [针对支持向量机的投毒攻击](https:\u002F\u002Farxiv.org\u002Fabs\u002F1206.6389)\n  * [利用数据投毒对深度学习系统进行定向后门攻击](https:\u002F\u002Farxiv.org\u002Fabs\u002F1712.05526)\n  * [神经网络中的木马攻击](https:\u002F\u002Fwww.semanticscholar.org\u002Fpaper\u002FTrojaning-Attack-on-Neural-Networks-Liu-Ma\u002F08f7ac64b420210aa46fcbbdb0f206215f2e0644)\n  * [精细剪枝：防御针对深度神经网络的后门攻击](https:\u002F\u002Farxiv.org\u002Fabs\u002F1805.12185)\n  * [毒蛙！针对神经网络的定向清洁标签投毒攻击](https:\u002F\u002Farxiv.org\u002Fabs\u002F1804.00792)\n  * [后门攻击中的频谱特征](https:\u002F\u002Farxiv.org\u002Fabs\u002F1811.00636)\n  * 
[深度神经网络中的隐性后门攻击](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3319535.3354209)\n  * [Regula Sub-rosa：深度神经网络中的隐性后门攻击](https:\u002F\u002Farxiv.org\u002Fabs\u002F1905.10447)\n  * [隐藏触发器后门攻击](https:\u002F\u002Farxiv.org\u002Fabs\u002F1910.00033)\n  * [深度神经网络中可迁移的清洁标签投毒攻击](https:\u002F\u002Farxiv.org\u002Fabs\u002F1905.05897)\n  * [TABOR：一种高度精确的方法，用于检测和修复人工智能系统中的木马后门](https:\u002F\u002Farxiv.org\u002Fabs\u002F1908.01763)\n  * [通过反向梯度优化实现深度学习算法的投毒](https:\u002F\u002Farxiv.org\u002Fabs\u002F1708.08689)\n  * [机器学习何时会失效？逃避与投毒攻击的广义可迁移性](https:\u002F\u002Farxiv.org\u002Fabs\u002F1803.06975)\n  * [针对数据投毒攻击的认证防御](https:\u002F\u002Farxiv.org\u002Fabs\u002F1706.03691)\n  * [输入感知的动态后门攻击](https:\u002F\u002Farxiv.org\u002Fabs\u002F2010.08138)\n  * [如何对联邦学习植入后门](https:\u002F\u002Farxiv.org\u002Fabs\u002F1807.00459)\n  * [在机器学习模型中植入难以检测的后门](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.06974)\n  * [愚弄AI！](https:\u002F\u002Ffooltheai.mybluemix.net\u002F)：黑客可以使用后门污染训练数据，使AI模型错误分类图像。了解IBM研究人员如何判断数据是否被投毒，并猜测这些数据集中隐藏了哪些后门。你能猜出后门吗？\n  * [后门工具箱](https:\u002F\u002Fgithub.com\u002Fvtu81\u002Fbackdoor-toolbox)：一个用于后门攻击与防御的紧凑工具箱。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_c487a033e1b5.png)](https:\u002F\u002Fgithub.com\u002Fvtu81\u002Fbackdoor-toolbox)\n  * [LaserGuider：一种基于激光的针对深度神经网络的物理后门攻击](https:\u002F\u002Fwww.arxiv.org\u002Fabs\u002F2412.03993)\n  * [通过海绵投毒实施的能量-延迟攻击](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fabs\u002Fpii\u002FS0020025525000374)\n  * [ShadowCoT：用于LLM中隐蔽推理后门的认知劫持](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.05605)\n  * [少量样本即可投毒任何规模的LLM](https:\u002F\u002Fwww.anthropic.com\u002Fresearch\u002Fsmall-samples-poison)\n\n##### 🏃‍♂️ 逃避攻击 
🏃‍♂️\n\n攻击者会在机器学习模型的输入上添加微小的扰动（以噪声的形式），使其分类错误（对抗样本）。\n\n![逃避攻击](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_59b021b36c02.png)\n\n它们类似于投毒攻击，但主要区别在于，逃避攻击试图利用模型在推理阶段的弱点。\n\n攻击者的目标是让对抗样本对人类来说几乎无法察觉。\n\n根据对手期望的输出，可以执行两种类型的攻击：\n\n  * **定向攻击**：攻击者旨在获得自己选择的预测结果。\n\n    ![定向攻击](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_c21a80e25365.png)\n\n  * **非定向攻击**：攻击者意图实现错误分类。\n\n    ![非定向攻击](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_34478e8e2f5a.png)\n\n最常见的攻击是**白盒攻击**：\n\n  * [L-BFGS](https:\u002F\u002Farxiv.org\u002Fabs\u002F1312.6199)\n  * [FGSM](https:\u002F\u002Farxiv.org\u002Fabs\u002F1412.6572)\n  * [BIM](https:\u002F\u002Farxiv.org\u002Fabs\u002F1607.02533)\n  * [JSMA](https:\u002F\u002Farxiv.org\u002Fabs\u002F1511.07528)\n  * [Carlini & Wagner (C&W)](https:\u002F\u002Farxiv.org\u002Fabs\u002F1608.04644)\n  * [NewtonFool](https:\u002F\u002Fandrewxiwu.github.io\u002Fpublic\u002Fpapers\u002F2017\u002FJWJ17-objective-metrics-and-gradient-descent-based-algorithms-for-adversarial-examples-in-machine-learning.pdf)\n  * [EAD](https:\u002F\u002Farxiv.org\u002Fabs\u002F1709.04114)\n  * [UAP](https:\u002F\u002Farxiv.org\u002Fabs\u002F1610.08401)\n\n###### 🛡️ 防御措施 🛡️\n\n  * 对抗训练，即在训练过程中生成对抗样本，使模型学会识别对抗样本的特征，从而提高其对这类攻击的鲁棒性。\n  * 对输入进行变换。\n  * 梯度掩蔽\u002F正则化。[效果不佳](https:\u002F\u002Farxiv.org\u002Fabs\u002F1802.00420)。\n  * 弱防御措施。\n  * [提示注入防御](https:\u002F\u002Fgithub.com\u002Ftldrsec\u002Fprompt-injection-defenses)：所有实用且提出的提示注入防御措施。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_b4a656d05236.png)](https:\u002F\u002Fgithub.com\u002Ftldrsec\u002Fprompt-injection-defenses)\n  * [Lakera PINT基准测试](https:\u002F\u002Fgithub.com\u002Flakeraai\u002Fpint-benchmark)：提示注入测试（PINT）基准提供了一种中立的方式来评估提示注入检测系统的性能，例如Lakera 
Guard，而无需依赖这些工具可能用来优化评估表现的已知公开数据集。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_7ee39731865b.png)](https:\u002F\u002Fgithub.com\u002Flakeraai\u002Fpint-benchmark)\n  * [恶魔推理](https:\u002F\u002Fgithub.com\u002FAI-Voodoo\u002FDevil_Inference)：一种通过观察Phi-3 Instruct模型在特定输入下的注意力分布来对抗性地评估该模型的方法。这种方法促使模型采取“恶魔心态”，从而生成暴力性质的输出。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_97fd5d70dcd0.png)](https:\u002F\u002Fgithub.com\u002FAI-Voodoo\u002FDevil_Inference)\n  * [空中对抗攻击检测：从数据集到防御](https:\u002F\u002Farxiv.org\u002Fabs\u002F2509.09296)\n  * [利用双曲几何检测并净化有害提示](https:\u002F\u002Fopenreview.net\u002Fforum?id=G8HnUTlMpt)\n\n###### 🔗 有用链接 🔗\n\n* [针对机器学习的实用黑盒攻击](https:\u002F\u002Farxiv.org\u002Fabs\u002F1602.02697)\n  * [深度学习在对抗性环境中的局限性](https:\u002F\u002Farxiv.org\u002Fabs\u002F1511.07528)\n  * [迈向评估神经网络的鲁棒性](https:\u002F\u002Farxiv.org\u002Fabs\u002F1608.04644)\n  * [蒸馏作为防御深度神经网络对抗扰动的方法](https:\u002F\u002Farxiv.org\u002Fabs\u002F1511.04508)\n  * [物理世界中的对抗样本](https:\u002F\u002Farxiv.org\u002Fabs\u002F1607.02533)\n  * [集成对抗训练：攻击与防御](https:\u002F\u002Farxiv.org\u002Fabs\u002F1705.07204)\n  * [迈向抗对抗攻击的深度学习模型](https:\u002F\u002Farxiv.org\u002Fabs\u002F1706.06083)\n  * [神经网络的有趣性质](https:\u002F\u002Farxiv.org\u002Fabs\u002F1312.6199)\n  * [解释并利用对抗样本](https:\u002F\u002Farxiv.org\u002Fabs\u002F1412.6572)\n  * [深入研究可迁移的对抗样本与黑盒攻击](https:\u002F\u002Farxiv.org\u002Fabs\u002F1611.02770)\n  * [大规模的对抗性机器学习](https:\u002F\u002Farxiv.org\u002Fabs\u002F1611.01236)\n  * [有限查询与信息下的黑盒对抗攻击](https:\u002F\u002Farxiv.org\u002Fabs\u002F1804.08598)\n  * [特征挤压：检测深度神经网络中的对抗样本](https:\u002F\u002Farxiv.org\u002Fabs\u002F1704.01155)\n  * [基于决策的对抗攻击：对黑盒机器学习模型的可靠攻击](https:\u002F\u002Farxiv.org\u002Fabs\u002F1712.04248)\n  * [通过动量增强对抗攻击](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fpapers\u002FDong_Boosting_Adversarial_Attacks_CVPR_2018_paper.pdf)\n  * 
[可迁移对抗样本的空间](https:\u002F\u002Farxiv.org\u002Fabs\u002F1704.03453)\n  * [利用输入变换对抗对抗图像](https:\u002F\u002Farxiv.org\u002Fabs\u002F1711.00117)\n  * [Defense-GAN：使用生成模型保护分类器免受对抗攻击](https:\u002F\u002Farxiv.org\u002Fabs\u002F1805.06605)\n  * [合成鲁棒的对抗样本](https:\u002F\u002Farxiv.org\u002Fabs\u002F1707.07397)\n  * [通过随机化缓解对抗效应](https:\u002F\u002Farxiv.org\u002Fabs\u002F1711.01991)\n  * [关于检测对抗扰动的研究](https:\u002F\u002Farxiv.org\u002Fabs\u002F1702.04267)\n  * [对抗补丁](https:\u002F\u002Farxiv.org\u002Fabs\u002F1712.09665)\n  * [PixelDefend：利用生成模型理解并防御对抗样本](https:\u002F\u002Farxiv.org\u002Fabs\u002F1710.10766)\n  * [仅用一个像素即可愚弄深度神经网络的攻击](https:\u002F\u002Farxiv.org\u002Fabs\u002F1710.08864)\n  * [高效防御对抗攻击的方法](https:\u002F\u002Farxiv.org\u002Fabs\u002F1707.06728)\n  * [深度学习视觉分类任务中针对物理世界的鲁棒攻击](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F8578273)\n  * [针对深度神经网络的对抗扰动用于恶意软件分类](https:\u002F\u002Farxiv.org\u002Fabs\u002F1606.04435)\n  * [超越点云的三维对抗攻击](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.12146)\n  * [对抗扰动可欺骗深度伪造检测器](https:\u002F\u002Farxiv.org\u002Fabs\u002F2003.10596)\n  * [对抗性深度伪造：评估深度伪造检测器对对抗样本的脆弱性](https:\u002F\u002Farxiv.org\u002Fabs\u002F2002.12749)\n  * [语音控制系统漏洞综述](https:\u002F\u002Farxiv.org\u002Fabs\u002F1803.09156)\n  * [FastWordBug：一种快速生成针对NLP应用的对抗性文本的方法](https:\u002F\u002Farxiv.org\u002Fabs\u002F2002.00760)\n  * [ADAS的幽灵：保护高级驾驶辅助系统免受瞬间幽灵攻击](https:\u002F\u002Fwww.nassiben.com\u002Fphantoms)\n  * [llm-attacks](https:\u002F\u002Fgithub.com\u002Fllm-attacks\u002Fllm-attacks)：对齐语言模型的通用且可迁移攻击。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_5f5110ec16a7.png)](https:\u002F\u002Fgithub.com\u002Fllm-attacks\u002Fllm-attacks)\n  * [AI模型攻击：提示注入 vs 供应链中毒](https:\u002F\u002Fblog.mithrilsecurity.io\u002Fattacks-on-ai-models-prompt-injection-vs-supply-chain-poisoning\u002F)\n  * [针对集成LLM的应用程序的提示注入攻击](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.05499.pdf)\n  * 
[garak](https:\u002F\u002Fgithub.com\u002FNVIDIA\u002Fgarak)：LLM漏洞扫描工具。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_a9151d361735.png)](https:\u002F\u002Fgithub.com\u002FNVIDIA\u002Fgarak)\n  * [promptfoo](https:\u002F\u002Fgithub.com\u002Fpromptfoo\u002Fpromptfoo)：开源LLM红队工具，包含100多种攻击类型。用于LLM的红队演练、渗透测试及漏洞扫描。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_299c9048c490.png)](https:\u002F\u002Fgithub.com\u002Fpromptfoo\u002Fpromptfoo)\n  * [PyTorch中的简单对抗变换](https:\u002F\u002Fdavidstutz.de\u002Fsimple-adversarial-transformations-in-pytorch\u002F)\n  * [ChatGPT插件：通过图片和跨插件请求伪造进行数据外泄](https:\u002F\u002Fembracethered.com\u002Fblog\u002Fposts\u002F2023\u002Fchatgpt-webpilot-data-exfil-via-markdown-injection\u002F)\n  * [图像劫持：对抗图像可在运行时控制生成模型](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.00236)\n  * [多重攻击：多张图片+相同的对抗攻击→多个目标标签](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.03792)\n  * [ACTIVE：迈向高度可迁移的3D物理伪装，实现车辆的通用且鲁棒规避](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.07009)\n  * [GPTs的LLM红队：提示泄漏、API泄漏、文档泄漏](https:\u002F\u002Fadversa.ai\u002Fblog\u002Fllm-red-teaming-gpts-prompt-leaking-api-leaking-documents-leaking\u002F)\n  * [人类可制作的对抗样本](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.06474)\n  * [大型语言模型中的多语言越狱挑战](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.06474)\n  * [利用视觉对抗样本滥用大型语言模型中的工具](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.03185)\n  * [AutoDAN：针对大型语言模型的可解释梯度基对抗攻击](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.15140)\n  * [Multimodal Injection](https:\u002F\u002Fgithub.com\u002Febagdasa\u002Fmultimodal_injection)：（滥用）图像和声音，在多模态LLM中进行间接指令注入。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_0e1982488540.png)](https:\u002F\u002Fgithub.com\u002Febagdasa\u002Fmultimodal_injection)\n  * 
[JailbreakingLLMs](https:\u002F\u002Fgithub.com\u002Fpatrickrchao\u002FJailbreakingLLMs)：在二十次查询内越狱黑箱大型语言模型。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_47e8a4d45833.png)](https:\u002F\u002Fgithub.com\u002Fpatrickrchao\u002FJailbreakingLLMs)\n  * [攻击之树：自动越狱黑箱LLM](https:\u002F\u002Fassets-global.website-files.com\u002F62a8db3f7f80ab5d3420c03a\u002F656eaaed8e762c7543693902_Robust_Intelligence_Blackbox_Attacks_on_LLMs.pdf)\n  * [GPTs](https:\u002F\u002Fgithub.com\u002Flinexjlin\u002FGPTs)：泄露的GPTs提示。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_ad1d1e50932c.png)](https:\u002F\u002Fgithub.com\u002Flinexjlin\u002FGPTs)\n  * [AI Exploits](https:\u002F\u002Fgithub.com\u002Fprotectai\u002Fai-exploits)：负责任披露漏洞的真实世界AI\u002FML漏洞集合。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_f40eb8872c05.png)](https:\u002F\u002Fgithub.com\u002Fprotectai\u002Fai-exploits)\n  * [LLM代理可自主入侵网站](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.06664v1)\n  * [Cloudflare宣布推出AI防火墙](https:\u002F\u002Fblog.cloudflare.com\u002Ffirewall-for-ai)\n  * [PromptInject](https:\u002F\u002Fgithub.com\u002Fagencyenterprise\u002FPromptInject)：以模块化方式组装提示的框架，用于定量分析LLM对对抗性提示攻击的鲁棒性。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_84a99ac59563.png)](https:\u002F\u002Fgithub.com\u002Fagencyenterprise\u002FPromptInject)\n  * [LLM红队：对抗、编程和语言学方法 vs ChatGPT、Claude、Mistral、Grok、LLAMA和Gemini](https:\u002F\u002Fadversa.ai\u002Fblog\u002Fllm-red-teaming-vs-grok-chatgpt-claude-gemini-bing-mistral-llama\u002F)\n  * [指令层级：训练LLM优先处理特权指令](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.13208)\n  * [银行LLM代理（GPT-4，Langchain）的提示注入\u002F越狱](https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=5rXVg8cxne4)\n  * [GitHub 
Copilot聊天：从提示注入到数据外泄](https:\u002F\u002Fembracethered.com\u002Fblog\u002Fposts\u002F2024\u002Fgithub-copilot-chat-prompt-injection-data-exfiltration\u002F?s=35)\n  * [对抗样本在扩散模型流形中存在错位](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.06637)\n  * [图转文逻辑越狱：你的想象力可以帮助你做任何事](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.02534)\n  * [缓解“密钥骨架”——一种新型生成式AI越狱技术](https:\u002F\u002Fwww.microsoft.com\u002Fen-us\u002Fsecurity\u002Fblog\u002F2024\u002F06\u002F26\u002Fmitigating-skeleton-key-a-new-type-of-generative-ai-jailbreak-technique\u002F)\n  * [图像混淆基准](https:\u002F\u002Fgithub.com\u002Fgoogle-deepmind\u002Fimage_obfuscation_benchmark)：该仓库包含用于评估模型在图像混淆基准上表现的代码，该基准首次发表于《对抗性图像混淆的鲁棒性基准测试》(https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.12993)。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_e73077a5a2bf.png)](https:\u002F\u002Fgithub.com\u002Fgoogle-deepmind\u002Fimage_obfuscation_benchmark)\n  * [利用符号数学越狱大型语言模型](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.11445)\n  * [越狱时刻的对抗性推理](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.01633)\n  * [我们如何评估AI系统中提示注入攻击的风险](https:\u002F\u002Fsecurity.googleblog.com\u002F2025\u002F01\u002Fhow-we-estimate-risk-from-prompt.html)\n  * [生成式AI的对抗性滥用](https:\u002F\u002Fcloud.google.com\u002Fblog\u002Ftopics\u002Fthreat-intelligence\u002Fadversarial-misuse-generative-ai)\n  * [通过设计抵御提示注入](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.18813)\n  * [采用分层防御策略缓解提示注入攻击](https:\u002F\u002Fsecurity.googleblog.com\u002F2025\u002F06\u002Fmitigating-prompt-injection-attacks.html)\n  * [逻辑层提示控制注入（LPCI）：代理系统中的一种新型安全漏洞类别](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.10457)\n  * [针对攻击者的提示注入工程：利用GitHub Copilot](https:\u002F\u002Fblog.trailofbits.com\u002F2025\u002F08\u002F06\u002Fprompt-injection-engineering-for-attackers-exploiting-github-copilot\u002F)\n  * [对抗性提示的现状](https:\u002F\u002Fblog.securitybreak.io\u002Fthe-state-of-adversarial-prompts-84c364b5d860)\n  * 
[TransferBench：基于集成的黑盒迁移攻击基准测试](https:\u002F\u002Ftransferbench.github.io\u002F)\n  * [注意力追踪器：检测LLM中的提示注入攻击](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.00348v2)\n  * [攻击者后手出击：更强的自适应攻击可绕过针对LLM越狱和提示注入的防御措施](https:\u002F\u002Farxiv.org\u002Fabs\u002F2510.09023)\n  * [将日历邀请武器化：对Google Gemini的语义攻击](https:\u002F\u002Fwww.miggo.io\u002Fpost\u002Fweaponizing-calendar-invites-a-semantic-attack-on-google-gemini)\n  * [GTIG AI威胁追踪器：提炼、实验与（持续）整合AI用于对抗性用途](https:\u002F\u002Fcloud.google.com\u002Fblog\u002Ftopics\u002Fthreat-intelligence\u002Fdistillation-experimentation-integration-ai-adversarial-use)\n  * [Cline是如何被攻陷的：Cline供应链攻击中的提示注入和悬空提交](https:\u002F\u002Fmurraycole.com\u002Fposts\u002Fcline-compromise-prompt-injection-supply-chain-attack)\n  * [Aguara：AI代理技能与MCP服务器的安全扫描工具](https:\u002F\u002Faguarascan.com\u002F)\n\n#### 🛠️ 工具 🛠️\n\n| 名称 | 类型 | 支持的算法 | 支持的攻击类型 | 攻击\u002F防御 | 支持的框架 | 流行度 |\n| ---------- | :----------: | :----------: | :----------: | :----------: | :----------: | :----------: |\n| [Cleverhans](https:\u002F\u002Fgithub.com\u002Fcleverhans-lab\u002Fcleverhans) | 图像 | [深度学习](https:\u002F\u002Fen.wikipedia.org\u002Fwiki\u002FDeep_learning) | 欺骗 | 攻击 | [Tensorflow](https:\u002F\u002Fwww.tensorflow.org), [Keras](https:\u002F\u002Fkeras.io), [JAX](https:\u002F\u002Fgithub.com\u002Fgoogle\u002Fjax) | [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_cead982b157d.png)](https:\u002F\u002Fgithub.com\u002Fcleverhans-lab\u002Fcleverhans)|\n| [Foolbox](https:\u002F\u002Fgithub.com\u002Fbethgelab\u002Ffoolbox) | 图像 | 深度学习 | 欺骗 | 攻击 | Tensorflow, [PyTorch](https:\u002F\u002Fpytorch.org), JAX | [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_d126634879ca.png)](https:\u002F\u002Fgithub.com\u002Fbethgelab\u002Ffoolbox)|\n| [ART](https:\u002F\u002Fgithub.com\u002FTrusted-AI\u002Fadversarial-robustness-toolbox) | 任意类型（图像、表格数据、音频等） | 
深度学习、[SVM](https:\u002F\u002Fen.wikipedia.org\u002Fwiki\u002FSupport_vector_machine)、[LR](https:\u002F\u002Fen.wikipedia.org\u002Fwiki\u002FLogistic_regression)等 | 任意（提取、推理、投毒、欺骗） | 双方 | Tensorflow、Keras、Pytorch、[Scikit Learn](https:\u002F\u002Fscikit-learn.org) | [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_a7ecbf6cf806.png)](https:\u002F\u002Fgithub.com\u002FTrusted-AI\u002Fadversarial-robustness-toolbox)|\n| [TextAttack](https:\u002F\u002Fgithub.com\u002FQData\u002FTextAttack) | 文本 | 深度学习 | 欺骗 | 攻击 | Keras、[HuggingFace](https:\u002F\u002Fhuggingface.co\u002F) | [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_dc1cff1a5a3f.png)](https:\u002F\u002Fgithub.com\u002FQData\u002FTextAttack)|\n| [Advertorch](https:\u002F\u002Fgithub.com\u002FBorealisAI\u002Fadvertorch) | 图像 | 深度学习 | 欺骗 | 双方 | --- | [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_c6092ce01adf.png)](https:\u002F\u002Fgithub.com\u002FBorealisAI\u002Fadvertorch)|\n| [AdvBox](https:\u002F\u002Fgithub.com\u002Fadvboxes\u002FAdvBox) | 图像 | 深度学习 | 欺骗 | 双方 | PyTorch、Tensorflow、[MxNet](https:\u002F\u002Fmxnet.apache.org) | [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_bf0cb81f51eb.png)](https:\u002F\u002Fgithub.com\u002Fadvboxes\u002FAdvBox)|\n| [DeepRobust](https:\u002F\u002Fgithub.com\u002FDSE-MSU\u002FDeepRobust) | 图像、图 | 深度学习 | 欺骗 | 双方 | PyTorch | [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_ffb018312546.png)](https:\u002F\u002Fgithub.com\u002FDSE-MSU\u002FDeepRobust)|\n| [Counterfit](https:\u002F\u002Fgithub.com\u002FAzure\u002Fcounterfit) | 任意 | 任意 | 欺骗 | 攻击 | --- | 
[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_c8973bb11171.png)](https:\u002F\u002Fgithub.com\u002FAzure\u002Fcounterfit)|\n| [Adversarial Audio Examples](https:\u002F\u002Fgithub.com\u002Fcarlini\u002Faudio_adversarial_examples) | 音频 | [DeepSpeech](https:\u002F\u002Fgithub.com\u002Fmozilla\u002FDeepSpeech) | 欺骗 | 攻击 | --- | [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_b19a32a9ba4e.png)](https:\u002F\u002Fgithub.com\u002Fcarlini\u002Faudio_adversarial_examples)|\n\n###### ART\n\n[对抗鲁棒性工具箱](https:\u002F\u002Fgithub.com\u002FTrusted-AI\u002Fadversarial-robustness-toolbox)，简称ART，是一个开源的对抗机器学习库，用于测试机器学习模型的鲁棒性。\n\n![ART logo](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_590b613806bb.png)\n\n它使用Python开发，实现了提取、逆向、投毒和欺骗等攻击与防御方法。\n\nART支持最流行的框架：Tensorflow、Keras、PyTorch、MxNet以及ScikitLearn等众多框架。\n\n它不仅限于处理以图像为输入的模型，还支持其他类型的数据，如音频、视频、表格数据等。\n\n> [使用ART学习对抗机器学习的研讨会 🇪🇸](https:\u002F\u002Fgithub.com\u002Fjiep\u002Fadversarial-machine-learning)\n\n###### Cleverhans\n\n[Cleverhans](https:\u002F\u002Fgithub.com\u002Fcleverhans-lab\u002Fcleverhans)是一个用于执行欺骗攻击并测试图像模型深度学习鲁棒性的库。\n\n![Cleverhans logo](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_ff3e571006f8.png)\n\n它使用Python开发，并与Tensorflow、Torch和JAX框架集成。\n\n它实现了多种攻击方法，如L-BFGS、FGSM、JSMA、C&W等。\n\n\n\n## 🔧 使用 🔧\n\n人工智能被用于完成恶意任务并增强传统攻击手段。\n\n### 🕵️‍♂️ 渗透测试 🕵️‍♂️\n\n  * [GyoiThon](https:\u002F\u002Fgithub.com\u002Fgyoisamurai\u002FGyoiThon): 新一代渗透测试工具，用于Web服务器的情报收集工具。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_b028ccdff5e8.png)](https:\u002F\u002Fgithub.com\u002Fgyoisamurai\u002FGyoiThon)\n  * [Cochise](https:\u002F\u002Fgithub.com\u002Fandreashappe\u002Fcochise\u002F): 使用LLM代理对微软Windows Active 
Directory进行自主渗透测试（以[GOAD](https:\u002F\u002Fgithub.com\u002FOrange-Cyberdefense\u002FGOAD)为测试平台）。\n  * [HackingBuddyGPT](https:\u002F\u002Fgithub.com\u002Fipa-lab\u002FhackingBuddyGPT): 大型语言模型与渗透测试的结合。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_e6038f5060b7.png)](https:\u002F\u002Fgithub.com\u002Fipa-lab\u002FhackingBuddyGPT)\n  * [Deep Exploit](https:\u002F\u002Fgithub.com\u002F13o-bbr-bbq\u002Fmachine_learning_security\u002Ftree\u002Fmaster\u002FDeepExploit): 基于深度强化学习的全自动渗透测试工具。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_17c673ee0ff0.png)](https:\u002F\u002Fgithub.com\u002F13o-bbr-bbq\u002Fmachine_learning_security)\n  * [AutoPentest-DRL](https:\u002F\u002Fgithub.com\u002Fcrond-jaist\u002FAutoPentest-DRL): 利用深度强化学习实现自动化渗透测试。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_118fb23a3093.png)](https:\u002F\u002Fgithub.com\u002Fcrond-jaist\u002FAutoPentest-DRL)\n  * [DeepGenerator](https:\u002F\u002Fgithub.com\u002F13o-bbr-bbq\u002Fmachine_learning_security\u002Ftree\u002Fmaster\u002FGenerator): 利用遗传算法和生成对抗网络，全自动生成用于Web应用评估的注入代码。\n  * [Eyeballer](https:\u002F\u002Fgithub.com\u002FBishopFox\u002Feyeballer): Eyeballer适用于大规模网络渗透测试，能够在海量Web主机中快速找到“有趣”的目标。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_17d245b14954.png)](https:\u002F\u002Fgithub.com\u002FBishopFox\u002Feyeballer)\n  * [Nebula](https:\u002F\u002Fgithub.com\u002Fberylliumsec\u002Fnebula): 基于AI的道德黑客助手。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_c8e5a0c2e847.png)](https:\u002F\u002Fgithub.com\u002Fberylliumsec\u002Fnebula)\n  * [AI-OPS](https:\u002F\u002Fgithub.com\u002FantoninoLorenzo\u002FAI-OPS): 
基于开源大型语言模型的渗透测试AI助手。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_f03952292ab4.png)](https:\u002F\u002Fgithub.com\u002FantoninoLorenzo\u002FAI-OPS)\n  * [LLM能否攻陷企业网络？](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.04227): 自主模拟入侵的Active Directory网络渗透测试\n  * [LLM代理团队可利用零日漏洞](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.01637)\n  * [开源LLM漏洞扫描器的洞察与当前不足：比较分析](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.16527)\n  * [AI代理与网络安全专业人员在真实渗透测试中的对比](https:\u002F\u002Farxiv.org\u002Fabs\u002F2512.09882)\n  * [CAI](https:\u002F\u002Fgithub.com\u002Faliasrobotics\u002Fcai): 一款开放且适合漏洞赏金计划的网络安全AI。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_025f282dc6f2.png)](https:\u002F\u002Fgithub.com\u002Faliasrobotics\u002Fcai)\n  * [Shannon](https:\u002F\u002Fgithub.com\u002FKeygraphHQ\u002Fshannon): Shannon Lite是一款针对Web应用和API的自主白盒AI渗透测试工具。它会分析你的源代码，识别攻击向量，并执行真实的漏洞利用来证明潜在漏洞，从而防止它们在生产环境中被利用。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_797731749188.png)](https:\u002F\u002Fgithub.com\u002FKeygraphHQ\u002Fshannon)\n\n### 🦠 恶意软件 🦠\n\n  * [DeepLocker](https:\u002F\u002Fi.blackhat.com\u002Fus-18\u002FThu-August-9\u002Fus-18-Kirat-DeepLocker-Concealing-Targeted-Attacks-with-AI-Locksmithing.pdf): IBM实验室在Black Hat大会上展示的利用AI锁匠技术隐藏定向攻击的方法。\n  * [恶意软件中使用的人工智能概述](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-031-17030-0_4): 精选的AI恶意软件资源列表。\n  * [DeepObfusCode](https:\u002F\u002Farxiv.org\u002Fabs\u002F1909.01837): 通过序列到序列网络进行源代码混淆。\n  * [AutoCAT](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.08025): 利用强化学习自动探索缓存定时攻击。\n  * [基于AI的僵尸网络](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.02223): 一种基于博弈论的AI驱动僵尸网络攻击防御方法。\n  * [SECML_Malware](https:\u002F\u002Fgithub.com\u002Fpralab\u002Fsecml_malware): 
用于对Windows恶意软件检测器发起对抗性攻击的Python库。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_25ba5928a447.png)](https:\u002F\u002Fgithub.com\u002Fpralab\u002Fsecml_malware)\n  * [Transcendent-release](https:\u002F\u002Fgithub.com\u002Fs2labres\u002Ftranscendent-release): 使用共形评估检测影响恶意软件检测的概念漂移。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_8f40119f1d0c.png)](https:\u002F\u002Fgithub.com\u002Fs2labres\u002Ftranscendent-release)\n\n### 🗺️ OSINT 🗺️\n\n  * [SNAP_R](https:\u002F\u002Fgithub.com\u002Fzerofox-oss\u002FSNAP_R): 自动生成社交媒体上的鱼叉式钓鱼帖子。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_157a6ae970cc.png)](https:\u002F\u002Fgithub.com\u002Fzerofox-oss\u002FSNAP_R)\n  * [SpyScrap](https:\u002F\u002Fgithub.com\u002FRuthGnz\u002FSpyScrap): SpyScrap结合了面部识别技术来筛选结果，并利用自然语言处理从用户出现的网站中提取重要实体。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_fb89388d04d0.png)](https:\u002F\u002Fgithub.com\u002FRuthGnz\u002FSpyScrap)\n\n### 📧 钓鱼邮件 📧\n\n  * [DeepDGA](https:\u002F\u002Fgithub.com\u002Froreagan\u002FDeepDGA): DeepDGA的实现：对抗性调优的域名生成与检测。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_2df0c0617342.png)](https:\u002F\u002Fgithub.com\u002Froreagan\u002FDeepDGA)\n  * [诈骗代理：AI代理如何模拟人类水平的诈骗电话](https:\u002F\u002Farxiv.org\u002Fabs\u002F2508.06457)\n\n### 🕵 威胁情报 🕵\n\n  * [从沙子到豪宅：利用LLM实现全自动全生命周期网络攻击构建](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.16928)\n\n### ⚙️ 逆向工程 ⚙️\n\n  * [我们在约40MB的二进制文件中隐藏了后门，并让AI+Ghidra去寻找它们](https:\u002F\u002Fquesma.com\u002Fblog\u002Fintroducing-binaryaudit\u002F)\n  * [恶意软件逆向工程不再是人类的任务！](https:\u002F\u002Fblog.securitybreak.io\u002Fmalware-reverse-engineering-is-no-longer-a-human-problem-5441e4a0564f)\n  * 
[GhidraMCP](https:\u002F\u002Fgithub.com\u002FLaurieWired\u002FGhidraMCP): Ghidra的MCP服务器。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_d608775d14e3.png)](https:\u002F\u002Fgithub.com\u002FLaurieWired\u002FGhidraMCP)\n  * [ghidra-mcp](https:\u002F\u002Fgithub.com\u002Fbethington\u002Fghidra-mcp): 生产级Ghidra MCP服务器——包含179个MCP工具、147个GUI端点和172个无头端点，集成Ghidra服务器，支持跨二进制文档传输、批量操作、AI文档工作流，并可通过Docker部署实现AI驱动的逆向工程。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_8cc3e5f58d31.png)](https:\u002F\u002Fgithub.com\u002Fbethington\u002Fghidra-mcp)\n\n### 🌀 侧信道 🌀\n\n  * [SCAAML](https:\u002F\u002Fgithub.com\u002Fgoogle\u002Fscaaml): 机器学习辅助的侧信道攻击。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_b065123332fd.png)](https:\u002F\u002Fgithub.com\u002Fgoogle\u002Fscaaml)\n\n### 👨‍🎤 生成式AI 👨‍🎤\n\n#### 🔊 音频 🔊\n\n##### 🛠️ 工具 🛠️\n  * [deep-voice-conversion](https:\u002F\u002Fgithub.com\u002Fandabi\u002Fdeep-voice-conversion): 基于 TensorFlow 的深度神经网络语音转换（语音风格迁移）工具。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_8365bdc35c92.png)](https:\u002F\u002Fgithub.com\u002Fandabi\u002Fdeep-voice-conversion)\n  * [tacotron](https:\u002F\u002Fgithub.com\u002Fkeithito\u002Ftacotron): Google Tacotron 语音合成的 TensorFlow 实现，包含预训练模型（非官方）。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_a7e138c937e5.png)](https:\u002F\u002Fgithub.com\u002Fkeithito\u002Ftacotron)\n  * [Real-Time-Voice-Cloning](https:\u002F\u002Fgithub.com\u002FCorentinJ\u002FReal-Time-Voice-Cloning): 在 5 秒内克隆一段声音，实时生成任意语音。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_b709d230bd06.png)](https:\u002F\u002Fgithub.com\u002FCorentinJ\u002FReal-Time-Voice-Cloning)\n  * [mimic2](https:\u002F\u002Fgithub.com\u002FMycroftAI\u002Fmimic2): 基于 Tacotron 架构的文本转语音引擎，最初由 Keith Ito 
实现。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_50270b326a3a.png)](https:\u002F\u002Fgithub.com\u002FMycroftAI\u002Fmimic2)\n  * [Neural-Voice-Cloning-with-Few-Samples](https:\u002F\u002Fgithub.com\u002FSharad24\u002FNeural-Voice-Cloning-with-Few-Samples): 百度发表的少样本神经网络语音克隆研究论文的实现。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_215ff7e3fd90.png)](https:\u002F\u002Fgithub.com\u002FSharad24\u002FNeural-Voice-Cloning-with-Few-Samples)\n  * [Vall-E](https:\u002F\u002Fgithub.com\u002Fenhuiz\u002Fvall-e): 音频语言模型 VALL-E 的非官方 PyTorch 实现。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_36f8152f853a.png)](https:\u002F\u002Fgithub.com\u002Fenhuiz\u002Fvall-e)\n  * [voice-changer](https:\u002F\u002Fgithub.com\u002Fw-okada\u002Fvoice-changer): 实时语音变换器。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_d056e5149a6e.png)](https:\u002F\u002Fgithub.com\u002Fw-okada\u002Fvoice-changer)\n  * [Retrieval-based-Voice-Conversion-WebUI](https:\u002F\u002Fgithub.com\u002FRVC-Project\u002FRetrieval-based-Voice-Conversion-WebUI): 基于 VITS 的易用型语音转换框架。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_f2fd23d689a1.png)](https:\u002F\u002Fgithub.com\u002FRVC-Project\u002FRetrieval-based-Voice-Conversion-WebUI)\n  * [Audiocraft](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Faudiocraft): Audiocraft 是一个用于音频处理与生成的深度学习库。它包含最先进的 EnCodec 音频压缩器\u002F分词器，以及 MusicGen——一种简单可控、可通过文本和旋律条件生成音乐的语言模型。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_d5227e702e6b.png)](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Faudiocraft)\n  * [VALL-E-X](https:\u002F\u002Fgithub.com\u002FPlachtaa\u002FVALL-E-X): 微软 VALL-E X 零样本 TTS 
模型的开源实现。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_6dc6a3829a02.png)](https:\u002F\u002Fgithub.com\u002FPlachtaa\u002FVALL-E-X)\n  * [OpenVoice](https:\u002F\u002Fgithub.com\u002Fmyshell-ai\u002FOpenVoice): MyShell 提供的即时语音克隆服务。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_607e89514c79.png)](https:\u002F\u002Fgithub.com\u002Fmyshell-ai\u002FOpenVoice)\n  * [MeloTTS](https:\u002F\u002Fgithub.com\u002Fmyshell-ai\u002FMeloTTS): MyShell.ai 推出的高质量多语言文本转语音库，支持英语、西班牙语、法语、中文、日语和韩语。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_04779a223b84.png)](https:\u002F\u002Fgithub.com\u002Fmyshell-ai\u002FMeloTTS)\n  * [VoiceCraft](https:\u002F\u002Fgithub.com\u002Fjasonppy\u002FVoiceCraft): 零样本语音编辑及野外环境下的文本转语音技术。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_81ef25aa3faf.png)](https:\u002F\u002Fgithub.com\u002Fjasonppy\u002FVoiceCraft)\n  * [Parler-TTS](https:\u002F\u002Fgithub.com\u002Fhuggingface\u002Fparler-tts): 高质量 TTS 模型的推理与训练库。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_ea5b16ff41cc.png)](https:\u002F\u002Fgithub.com\u002Fhuggingface\u002Fparler-tts)\n  * [ChatTTS](https:\u002F\u002Fgithub.com\u002F2noise\u002FChatTTS): 用于日常对话的生成式语音模型。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_77099da6e800.png)](https:\u002F\u002Fgithub.com\u002F2noise\u002FChatTTS)\n\n\n\n##### 💡 应用场景 💡\n\n* [Lip2Wav](https:\u002F\u002Fgithub.com\u002FRudrabha\u002FLip2Wav): 仅通过唇部动作生成高质量语音。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_10a8ac34115d.png)](https:\u002F\u002Fgithub.com\u002FRudrabha\u002FLip2Wav)\n  * 
[AudioLDM：基于潜在扩散模型的文本到音频生成](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002Fhaoheliu\u002Faudioldm-text-to-audio-generation)\n  * [deepvoice3_pytorch](https:\u002F\u002Fgithub.com\u002Fr9y9\u002Fdeepvoice3_pytorch)：基于卷积神经网络的文本到语音合成模型的PyTorch实现。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_92d03664ac23.png)](https:\u002F\u002Fgithub.com\u002Fr9y9\u002Fdeepvoice3_pytorch)\n  * [🎸 Riffusion](https:\u002F\u002Fgithub.com\u002Friffusion\u002Friffusion)：用于实时音乐生成的稳定扩散模型。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_b713c87bca37.png)](https:\u002F\u002Fgithub.com\u002Friffusion\u002Friffusion)\n  * [whisper.cpp](https:\u002F\u002Fgithub.com\u002Fggerganov\u002Fwhisper.cpp)：OpenAI Whisper模型的C\u002FC++移植版本。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_feafd115570e.png)](https:\u002F\u002Fgithub.com\u002Fggerganov\u002Fwhisper.cpp)\n  * [TTS](https:\u002F\u002Fgithub.com\u002Fcoqui-ai\u002FTTS)：🐸💬——一个在研究和生产中久经考验的深度学习文本到语音工具包。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_6978d66a0b5f.png)](https:\u002F\u002Fgithub.com\u002Fcoqui-ai\u002FTTS)\n  * [YourTTS](https:\u002F\u002Fgithub.com\u002FEdresson\u002FYourTTS)：面向所有人的零样本多说话者TTS及零样本语音转换技术。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_4eaff639ee28.png)](https:\u002F\u002Fgithub.com\u002FEdresson\u002FYourTTS)\n  * [TorToiSe](https:\u002F\u002Fgithub.com\u002Fneonbjb\u002Ftortoise-tts)：一款以质量为重训练的多语音TTS系统。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_c48e5c91a7cd.png)](https:\u002F\u002Fgithub.com\u002Fneonbjb\u002Ftortoise-tts)\n  * 
[DiffSinger](https:\u002F\u002Fgithub.com\u002FMoonInTheRiver\u002FDiffSinger)：基于浅层扩散机制的歌声合成（SVS与TTS）。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_ffb61e2ff633.png)](https:\u002F\u002Fgithub.com\u002FMoonInTheRiver\u002FDiffSinger)\n  * [WaveNet声码器](https:\u002F\u002Fgithub.com\u002Fr9y9\u002Fwavenet_vocoder)：WaveNet声码器的实现，可根据语言学或声学特征生成高质量的原始语音样本。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_2077d7ac2875.png)](https:\u002F\u002Fgithub.com\u002Fr9y9\u002Fwavenet_vocoder)\n  * [Deepvoice3_pytorch](https:\u002F\u002Fgithub.com\u002Fr9y9\u002Fdeepvoice3_pytorch)：基于卷积神经网络的文本到语音合成模型的PyTorch实现。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_92d03664ac23.png)](https:\u002F\u002Fgithub.com\u002Fr9y9\u002Fdeepvoice3_pytorch)\n  * [eSpeak NG 文本转语音](https:\u002F\u002Fgithub.com\u002Fespeak-ng\u002Fespeak-ng)：eSpeak NG是一款开源语音合成器，支持超过一百种语言和口音。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_951d08509296.png)](https:\u002F\u002Fgithub.com\u002Fespeak-ng\u002Fespeak-ng)\n  * [RealChar](https:\u002F\u002Fgithub.com\u002FShaunwei\u002FRealChar)：实时创建、自定义并与你的AI角色\u002F伙伴对话（一体化代码库！）。使用LLM OpenAI GPT3.5\u002F4、Anthropic Claude2、Chroma向量数据库、Whisper语音转文本以及ElevenLabs文本转语音，在任何地方（移动端、网页端和终端）与AI进行自然流畅的对话。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_2586763c892b.png)](https:\u002F\u002Fgithub.com\u002FShaunwei\u002FRealChar)\n  * [少量样本下的神经语音克隆](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2018\u002Fhash\u002F4559912e7a94a9c32b09d894f2bc3c82-Abstract.html)\n  * [NAUTILUS：多功能语音克隆系统](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F9246264)\n  * [学习流利地说外语：多语言语音合成与跨语言语音克隆](https:\u002F\u002Farxiv.org\u002Fabs\u002F1907.04448)\n  * 
[当善变恶时：利用智能手表推断按键输入](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F2810103.2813668)\n  * [KeyListener：通过声学信号推断触摸屏QWERTY键盘上的按键输入](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F8737591)\n  * [这个声音并不存在：关于语音合成、音频深度伪造及其检测](https:\u002F\u002Fthis-voice-does-not-exist.com)\n  * [AudioSep](https:\u002F\u002Fgithub.com\u002FAudio-AGI\u002FAudioSep)：分离你所描述的任何内容。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_21f3532adb92.png)](https:\u002F\u002Fgithub.com\u002FAudio-AGI\u002FAudioSep)\n  * [stable-audio-tools](https:\u002F\u002Fgithub.com\u002FStability-AI\u002Fstable-audio-tools)：用于条件音频生成的生成模型。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_31beb227de90.png)](https:\u002F\u002Fgithub.com\u002FStability-AI\u002Fstable-audio-tools)\n  * [GPT-SoVITS-WebUI](https:\u002F\u002Fgithub.com\u002FRVC-Boss\u002FGPT-SoVITS)：只需1分钟的语音数据即可训练出优秀的TTS模型！（少样本语音克隆）。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_d53e2061e932.png)](https:\u002F\u002Fgithub.com\u002FRVC-Boss\u002FGPT-SoVITS)\n  * [Hybrid-Net](https:\u002F\u002Fgithub.com\u002FDoMusic\u002FHybrid-Net)：实时音频源分离，生成歌词、和弦和节拍。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_3645f3924f4d.png)](https:\u002F\u002Fgithub.com\u002FDoMusic\u002FHybrid-Net)\n  * [CosyVoice](https:\u002F\u002Fgithub.com\u002FFunAudioLLM\u002FCosyVoice)：多语言大型语音生成模型，提供推理、训练和部署的全栈能力。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_d5cc7d678579.png)](https:\u002F\u002Fgithub.com\u002FFunAudioLLM\u002FCosyVoice)\n  * [EasyVolcap](https:\u002F\u002Fgithub.com\u002Fzju3dv\u002FEasyVolcap)：加速神经体积视频研究。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_e77fc9f6d2e7.png)](https:\u002F\u002Fgithub.com\u002Fzju3dv\u002FEasyVolcap)\n\n\n\n##### 🔎 
检测 🔎\n  * [fake-voice-detection](https:\u002F\u002Fgithub.com\u002Fdessa-oss\u002Ffake-voice-detection)：利用时间卷积检测音频深度伪造。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_2af37eee0e0e.png)](https:\u002F\u002Fgithub.com\u002Fdessa-oss\u002Ffake-voice-detection)\n  * [基于新型CLS-LBP特征和LSTM的鲁棒语音欺骗检测系统](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS1319157822000684)\n  * [语音欺骗检测器：统一的反欺骗框架](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fabs\u002Fpii\u002FS0957417422002330)\n  * [保护语音驱动界面免受虚假（克隆）音频攻击](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F8695320)\n  * [DeepSonar：迈向有效且鲁棒的AI合成虚假语音检测](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3394171.3413716)\n  * [以AI对抗AI：利用深度学习检测虚假语音](https:\u002F\u002Fwww.aes.org\u002Fe-lib\u002Fonline\u002Fbrowse.cfm?elib=20479)\n  * [现代音频深度伪造检测方法综述：挑战与未来方向](https:\u002F\u002Fwww.mdpi.com\u002F1999-4893\u002F15\u002F5\u002F155)\n\n#### 📷 图像 📷\n\n##### 🛠️ 工具 🛠️\n\n* [StyleGAN](https:\u002F\u002Fgithub.com\u002FNVlabs\u002Fstylegan): StyleGAN - 官方 TensorFlow 实现。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_2837b6642473.png)](https:\u002F\u002Fgithub.com\u002FNVlabs\u002Fstylegan)\n  * [StyleGAN2](https:\u002F\u002Fgithub.com\u002FNVlabs\u002Fstylegan2): StyleGAN2 - 官方 TensorFlow 实现。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_2837b6642473.png2)](https:\u002F\u002Fgithub.com\u002FNVlabs\u002Fstylegan2)\n  * [stylegan2-ada-pytorch](https:\u002F\u002Fgithub.com\u002FNVlabs\u002Fstylegan2-ada-pytorch): StyleGAN2-ADA - 官方 PyTorch 实现。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_2837b6642473.png2-ada-pytorch)](https:\u002F\u002Fgithub.com\u002FNVlabs\u002Fstylegan2-ada-pytorch)\n  * 
[StyleGAN-nada](https:\u002F\u002Fgithub.com\u002Frinongal\u002FStyleGAN-nada): 基于 CLIP 的图像生成器领域自适应。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_bb974aaf9a63.png)](https:\u002F\u002Fgithub.com\u002Frinongal\u002FStyleGAN-nada)\n  * [StyleGAN3](https:\u002F\u002Fgithub.com\u002FNVlabs\u002Fstylegan3): StyleGAN3 的官方 PyTorch 实现。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_2837b6642473.png3)](https:\u002F\u002Fgithub.com\u002FNVlabs\u002Fstylegan3)\n  * [Imaginaire](https:\u002F\u002Fgithub.com\u002FNVlabs\u002Fimaginaire): Imaginaire 是一个 PyTorch 库，包含了 NVIDIA 研发的多种图像和视频合成方法的优化实现。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_c0e656282edb.png)](https:\u002F\u002Fgithub.com\u002FNVlabs\u002Fimaginaire)\n  * [ffhq-dataset](https:\u002F\u002Fgithub.com\u002FNVlabs\u002Fffhq-dataset): Flickr-Faces-HQ 数据集 (FFHQ)。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_d18a6e6d2ce7.png)](https:\u002F\u002Fgithub.com\u002FNVlabs\u002Fffhq-dataset)\n  * [DALLE2-pytorch](https:\u002F\u002Fgithub.com\u002Flucidrains\u002FDALLE2-pytorch): OpenAI 更新的文本到图像合成神经网络 DALL-E 2 的 PyTorch 实现。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_fef5371e23be.png)](https:\u002F\u002Fgithub.com\u002Flucidrains\u002FDALLE2-pytorch)\n  * [ImaginAIry](https:\u002F\u002Fgithub.com\u002Fbrycedrennan\u002FimaginAIry): AI 想象的图像。Python 风格的稳定扩散图像生成工具。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_7afbfe55ddd9.png)](https:\u002F\u002Fgithub.com\u002Fbrycedrennan\u002FimaginAIry)\n  * [Lama Cleaner](https:\u002F\u002Fgithub.com\u002FSanster\u002Flama-cleaner): 基于 SOTA AI 
模型的图像修复工具。可以移除照片中的任何不需要的对象、瑕疵或人物，也可以擦除并替换照片中的内容（基于稳定扩散模型）。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_ef14c31dd23c.png)](https:\u002F\u002Fgithub.com\u002FSanster\u002Flama-cleaner)\n  * [Invertible-Image-Rescaling](https:\u002F\u002Fgithub.com\u002Fpkuxmq\u002FInvertible-Image-Rescaling): 论文《可逆图像缩放》的 PyTorch 实现。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_68392506d4ad.png)](https:\u002F\u002Fgithub.com\u002Fpkuxmq\u002FInvertible-Image-Rescaling)\n  * [DifFace](https:\u002F\u002Fgithub.com\u002FzsyOAOA\u002FDifFace): 基于扩散误差收缩的盲人面部修复（PyTorch）。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_f5d8c34f6969.png)](https:\u002F\u002Fgithub.com\u002FzsyOAOA\u002FDifFace)\n  * [CodeFormer](https:\u002F\u002Fgithub.com\u002Fsczhou\u002FCodeFormer): 基于码本查找变换器的鲁棒盲人面部修复。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_6990fdd47a1f.png)](https:\u002F\u002Fgithub.com\u002Fsczhou\u002FCodeFormer)\n  * [Custom Diffusion](https:\u002F\u002Fgithub.com\u002Fadobe-research\u002Fcustom-diffusion): 文本到图像扩散模型的多概念自定义。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_04220cde65c4.png)](https:\u002F\u002Fgithub.com\u002Fadobe-research\u002Fcustom-diffusion)\n  * [Diffusers](https:\u002F\u002Fgithub.com\u002Fhuggingface\u002Fdiffusers): 🤗 Diffusers：用于图像和音频生成的最先进扩散模型，基于 PyTorch。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_b60a9ebbb187.png)](https:\u002F\u002Fgithub.com\u002Fhuggingface\u002Fdiffusers)\n  * [Stable Diffusion](https:\u002F\u002Fgithub.com\u002FStability-AI\u002Fstablediffusion): 
使用潜在扩散模型进行高分辨率图像合成。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_39a38a99e1b8.png)](https:\u002F\u002Fgithub.com\u002FStability-AI\u002Fstablediffusion)\n  * [InvokeAI](https:\u002F\u002Fgithub.com\u002Finvoke-ai\u002FInvokeAI): InvokeAI 是 Stable Diffusion 模型领域的领先创作引擎，赋能专业人士、艺术家和爱好者使用最新的 AI 驱动技术生成和创作视觉媒体。该解决方案提供行业领先的 WebUI，支持通过 CLI 进行终端操作，并作为多个商业产品的基础。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_a6845f85c5ac.png)](https:\u002F\u002Fgithub.com\u002Finvoke-ai\u002FInvokeAI)\n  * [Stable Diffusion web UI](https:\u002F\u002Fgithub.com\u002FAUTOMATIC1111\u002Fstable-diffusion-webui): Stable Diffusion 的 Web 界面。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_d8fb7745e064.png)](https:\u002F\u002Fgithub.com\u002FAUTOMATIC1111\u002Fstable-diffusion-webui)\n  * [Stable Diffusion Infinity](https:\u002F\u002Fgithub.com\u002Flkwq007\u002Fstablediffusion-infinity): 在无限画布上使用 Stable Diffusion 进行扩展绘画。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_0f221aa918ed.png)](https:\u002F\u002Fgithub.com\u002Flkwq007\u002Fstablediffusion-infinity)\n  * [Fast Stable Diffusion](https:\u002F\u002Fgithub.com\u002FTheLastBen\u002Ffast-stable-diffusion): 快速稳定扩散 + DreamBooth。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_5da23b21a34c.png)](https:\u002F\u002Fgithub.com\u002FTheLastBen\u002Ffast-stable-diffusion)\n  * [GET3D](https:\u002F\u002Fgithub.com\u002Fnv-tlabs\u002FGET3D): 一种从图像中学习的高质量 3D 纹理形状生成模型。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_6e2c448b23f6.png)](https:\u002F\u002Fgithub.com\u002Fnv-tlabs\u002FGET3D)\n  * [Awesome AI Art Image Synthesis](https:\u002F\u002Fgithub.com\u002Faltryne\u002Fawesome-ai-art-image-synthesis): 一份关于 AI 
艺术和图像合成的优秀工具、创意、提示工程工具、合作项目、模型和辅助资源的列表。涵盖 Dalle2、MidJourney、StableDiffusion 以及开源工具。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_c80931e26cbe.png)](https:\u002F\u002Fgithub.com\u002Faltryne\u002Fawesome-ai-art-image-synthesis)\n  * [Stable Diffusion](https:\u002F\u002Fgithub.com\u002FCompVis\u002Fstable-diffusion): 一种潜在的文本到图像扩散模型。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_b9c2448bd799.png)](https:\u002F\u002Fgithub.com\u002FCompVis\u002Fstable-diffusion)\n  * [Weather Diffusion](https:\u002F\u002Fgithub.com\u002FIGITUGraz\u002FWeatherDiffusion): “利用基于补丁的去噪扩散模型恢复恶劣天气条件下的视觉”相关代码。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_5d3a19f5187d.png)](https:\u002F\u002Fgithub.com\u002FIGITUGraz\u002FWeatherDiffusion)\n  * [DF-GAN](https:\u002F\u002Fgithub.com\u002Ftobran\u002FDF-GAN): 一种简单而有效的文本到图像合成基线。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_e4d2b218ec7a.png)](https:\u002F\u002Fgithub.com\u002Ftobran\u002FDF-GAN)\n  * [Dall-E Playground](https:\u002F\u002Fgithub.com\u002Fsaharmor\u002Fdalle-playground): 一个使用 Stable Diffusion（过去曾使用 DALL-E Mini）根据任意文本提示生成图像的平台。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_48360bd4b478.png)](https:\u002F\u002Fgithub.com\u002Fsaharmor\u002Fdalle-playground)\n  * [MM-CelebA-HQ-Dataset](https:\u002F\u002Fgithub.com\u002FIIGROUP\u002FMM-CelebA-HQ-Dataset): 一个大规模人脸图像数据集，可用于文本到图像生成、文本引导的图像编辑、素描到图像生成、用于人脸生成和编辑的 GAN、图像描述以及 VQA。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_7cb33e8fc679.png)](https:\u002F\u002Fgithub.com\u002FIIGROUP\u002FMM-CelebA-HQ-Dataset)\n  * [Deep Daze](https:\u002F\u002Fgithub.com\u002Flucidrains\u002Fdeep-daze): 一个简单的命令行工具，使用 OpenAI 的 CLIP 和 
Siren（隐式神经表示网络）进行文本到图像生成。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_9a5a46d4ed2e.png)](https:\u002F\u002Fgithub.com\u002Flucidrains\u002Fdeep-daze)\n  * [StyleMapGAN](https:\u002F\u002Fgithub.com\u002Fnaver-ai\u002FStyleMapGAN): 利用 GAN 中潜在空间的维度实现实时图像编辑。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_92de36041298.png)](https:\u002F\u002Fgithub.com\u002Fnaver-ai\u002FStyleMapGAN)\n  * [Kandinsky-2](https:\u002F\u002Fgithub.com\u002Fai-forever\u002FKandinsky-2): 多语言文本到图像潜在扩散模型。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_68211636bfa6.png)](https:\u002F\u002Fgithub.com\u002Fai-forever\u002FKandinsky-2)\n  * [DragGAN](https:\u002F\u002Fgithub.com\u002FXingangPan\u002FDragGAN): 在生成式图像流形上进行交互式的基于点的操作。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_cbf575c28d90.png)](https:\u002F\u002Fgithub.com\u002FXingangPan\u002FDragGAN)\n  * [Segment Anything](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fsegment-anything): 该仓库提供了运行 SegmentAnything Model (SAM) 推理的代码、下载训练好的模型检查点的链接，以及展示如何使用该模型的示例笔记本。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_d11dd8330bb4.png)](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fsegment-anything)\n  * [Segment Anything 2](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fsegment-anything-2): 该仓库提供了运行 Meta Segment Anything Model 2 (SAM 2) 推理的代码、下载训练好的模型检查点的链接，以及展示如何使用该模型的示例笔记本。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_d11dd8330bb4.png-2)](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fsegment-anything-2)\n  * [MobileSAM](https:\u002F\u002Fgithub.com\u002FChaoningZhang\u002FMobileSAM): 这是 MobileSAM 项目的官方代码，旨在使 SAM 
更轻量级，适用于移动应用及其他场景！[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_dc933ca96ce8.png)](https:\u002F\u002Fgithub.com\u002FChaoningZhang\u002FMobileSAM)\n  * [FastSAM](https:\u002F\u002Fgithub.com\u002FCASIA-IVA-Lab\u002FFastSAM): 快速分割一切。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_dfd2dda2287c.png)](https:\u002F\u002Fgithub.com\u002FCASIA-IVA-Lab\u002FFastSAM)\n  * [Infinigen](https:\u002F\u002Fgithub.com\u002Fprinceton-vl\u002Finfinigen): 使用程序化生成创建无限逼真的世界。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_01fc32428313.png)](https:\u002F\u002Fgithub.com\u002Fprinceton-vl\u002Finfinigen)\n  * [DALL·E 3](https:\u002F\u002Fopenai.com\u002Fdall-e-3)\n  * [StreamDiffusion](https:\u002F\u002Fgithub.com\u002Fcumulo-autumn\u002FStreamDiffusion): 一种面向管道级别的实时交互式生成解决方案。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_755ccf9c583d.png)](https:\u002F\u002Fgithub.com\u002Fcumulo-autumn\u002FStreamDiffusion)\n  * [AnyDoor](https:\u002F\u002Fgithub.com\u002Fcumulo-autumn\u002FStreamDiffusion): 零样本对象级图像定制。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_755ccf9c583d.png)](https:\u002F\u002Fgithub.com\u002Fcumulo-autumn\u002FStreamDiffusion)\n  * [DiT](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FDiT): 基于 Transformer 的可扩展扩散模型。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_5b9fc9553744.png)](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FDiT)\n  * [BrushNet](https:\u002F\u002Fgithub.com\u002FTencentARC\u002FBrushNet): 一种即插即用的图像修复模型，采用分解的双分支扩散机制。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_739b7953742a.png)](https:\u002F\u002Fgithub.com\u002FTencentARC\u002FBrushNet)\n  * 
[OOTDiffusion](https:\u002F\u002Fgithub.com\u002Flevihsu\u002FOOTDiffusion): 基于潜在扩散的可控虚拟试穿融合。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_227a4447fc29.png)](https:\u002F\u002Fgithub.com\u002Flevihsu\u002FOOTDiffusion)\n  * [VAR](https:\u002F\u002Fgithub.com\u002FFoundationVision\u002FVAR): “视觉自回归建模：通过下一尺度预测实现可扩展图像生成”的官方实现。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_0f133dc215a3.png)](https:\u002F\u002Fgithub.com\u002FFoundationVision\u002FVAR)\n  * [Imagine Flash: 加速 Emu 扩散模型的反向蒸馏](https:\u002F\u002Fai.meta.com\u002Fresearch\u002Fpublications\u002Fimagine-flash-accelerating-emu-diffusion-models-with-backward-distillation\u002F)\n\n##### 💡 应用 💡\n\n* [ArtLine](https:\u002F\u002Fgithub.com\u002Fvijishmadhavan\u002FArtLine)：基于深度学习的线稿肖像生成项目。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_e0fd89ec9fec.png)](https:\u002F\u002Fgithub.com\u002Fvijishmadhavan\u002FArtLine)\n  * [Depix](https:\u002F\u002Fgithub.com\u002Fbeurtschipper\u002FDepix)：从像素化截图中恢复密码。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_b830ebd72ef4.png)](https:\u002F\u002Fgithub.com\u002Fbeurtschipper\u002FDepix)\n  * [让老照片重焕生机](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002FBringing-Old-Photos-Back-to-Life)：老照片修复（官方 PyTorch 实现）。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_a0d65e4bb9ef.png)](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002FBringing-Old-Photos-Back-to-Life)\n  * [Rewriting](https:\u002F\u002Fgithub.com\u002Fdavidbau\u002Frewriting)：交互式工具，可直接编辑 GAN 的规则，以合成添加、删除或修改对象的场景。例如将 StyleGANv2 改造为拥有夸张眉毛或戴帽子的马匹。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_90beb292c482.png)](https:\u002F\u002Fgithub.com\u002Fdavidbau\u002Frewriting)\n  * 
[Fawkes](https:\u002F\u002Fgithub.com\u002FShawn-Shan\u002Ffawkes)：用于对抗人脸识别系统的隐私保护工具。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_5e4ce70a505a.png)](https:\u002F\u002Fgithub.com\u002FShawn-Shan\u002Ffawkes)\n  * [Pulse](https:\u002F\u002Fgithub.com\u002Fadamian98\u002Fpulse)：通过探索生成模型的潜在空间实现自监督照片超分辨率。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_e677f1524657.png)](https:\u002F\u002Fgithub.com\u002Fadamian98\u002Fpulse)\n  * [HiDT](https:\u002F\u002Fgithub.com\u002Fsaic-mdal\u002FHiDT)：论文《无需领域标签的高分辨率白天图像转换》的官方仓库。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_fa1e1999455e.png)](https:\u002F\u002Fgithub.com\u002Fsaic-mdal\u002FHiDT)\n  * [3D Photo Inpainting](https:\u002F\u002Fgithub.com\u002Fvt-vl-lab\u002F3d-photo-inpainting)：使用上下文感知分层深度修复技术进行 3D 摄影。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_80768c91edbd.png)](https:\u002F\u002Fgithub.com\u002Fvt-vl-lab\u002F3d-photo-inpainting)\n  * [SteganoGAN](https:\u002F\u002Fgithub.com\u002FDAI-Lab\u002FSteganoGAN)：一种利用对抗训练生成隐写图像的工具。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_c2a6f04a5db8.png)](https:\u002F\u002Fgithub.com\u002FDAI-Lab\u002FSteganoGAN)\n  * [Stylegan-T](https:\u002F\u002Fgithub.com\u002Fautonomousvision\u002Fstylegan-t)：释放 GAN 力量，实现快速的大规模文本到图像合成。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_86541602c7b8.png)](https:\u002F\u002Fgithub.com\u002Fautonomousvision\u002Fstylegan-t)\n  * [MegaPortraits](https:\u002F\u002Fgithub.com\u002FSamsungLabs\u002FMegaPortraits)：一次性生成百万像素级神经网络头像。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_d36ddb911409.png)](https:\u002F\u002Fgithub.com\u002FSamsungLabs\u002FMegaPortraits)\n  * 
[eg3d](https:\u002F\u002Fgithub.com\u002FNVlabs\u002Feg3d)：高效的几何感知 3D 生成对抗网络。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_3339378251fe.png)](https:\u002F\u002Fgithub.com\u002FNVlabs\u002Feg3d)\n  * [TediGAN](https:\u002F\u002Fgithub.com\u002FIIGROUP\u002FTediGAN)：TediGAN 的 PyTorch 实现：文本引导的多样化人脸图像生成与操控。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_1a92854930e8.png)](https:\u002F\u002Fgithub.com\u002FIIGROUP\u002FTediGAN)\n  * [DALLE-pytorch](https:\u002F\u002Fgithub.com\u002Flucidrains\u002FDALLE-pytorch)：OpenAI 的文本到图像 Transformer DALL-E 在 PyTorch 中的实现\u002F复现。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_87972aef1ad0.png)](https:\u002F\u002Fgithub.com\u002Flucidrains\u002FDALLE-pytorch)\n  * [StyleNeRF](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FStyleNeRF)：这是 ICLR2022 论文《StyleNeRF：用于高分辨率图像合成的基于风格的 3D 感知生成器》的开源实现。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_1a6a7cedbddc.png)](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FStyleNeRF)\n  * [DeepSVG](https:\u002F\u002Fgithub.com\u002Falexandre01\u002Fdeepsvg)：论文《DeepSVG：面向矢量图形动画的层次化生成网络》的官方代码。包含用于 SVG 数据深度学习的 PyTorch 库。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_8867b6cb7087.png)](https:\u002F\u002Fgithub.com\u002Falexandre01\u002Fdeepsvg)\n  * [NUWA](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002FNUWA)：统一的 3D 变换器流水线，用于视觉合成。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_abaa13eabe66.png)](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002FNUWA)\n  * [Image-Super-Resolution-via-Iterative-Refinement](https:\u002F\u002Fgithub.com\u002FJanspiry\u002FImage-Super-Resolution-via-Iterative-Refinement)：PyTorch 
非官方实现的迭代细化超分辨率方法。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_7f78d979a551.png)](https:\u002F\u002Fgithub.com\u002FJanspiry\u002FImage-Super-Resolution-via-Iterative-Refinement)\n  * [Lama](https:\u002F\u002Fgithub.com\u002Fsaic-mdal\u002Flama)：🦙 LaMa 图像修复，采用傅里叶卷积实现对大尺寸遮罩的稳健修复。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_0fd3d00f2cf1.png)](https:\u002F\u002Fgithub.com\u002Fsaic-mdal\u002Flama)\n  * [Person_reID_baseline_pytorch](https:\u002F\u002Fgithub.com\u002Flayumi\u002FPerson_reID_baseline_pytorch)：PyTorch ReID：一个轻量、友好且强大的目标再识别基准实现。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_a715f49412d5.png)](https:\u002F\u002Fgithub.com\u002Flayumi\u002FPerson_reID_baseline_pytorch)\n  * [instruct-pix2pix](https:\u002F\u002Fgithub.com\u002Ftimothybrooks\u002Finstruct-pix2pix)：InstructPix2Pix 的 PyTorch 实现，这是一种基于指令的图像编辑模型。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_d61768ee768f.png)](https:\u002F\u002Fgithub.com\u002Ftimothybrooks\u002Finstruct-pix2pix)\n  * [GFPGAN](https:\u002F\u002Fgithub.com\u002FTencentARC\u002FGFPGAN)：GFPGAN 致力于开发适用于现实世界的人脸修复实用算法。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_6aa40e02a124.png)](https:\u002F\u002Fgithub.com\u002FTencentARC\u002FGFPGAN)\n  * [DeepVecFont](https:\u002F\u002Fgithub.com\u002Fyizhiwang96\u002Fdeepvecfont)：通过双模态学习合成高质量矢量字体。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_b9594c05d8a2.png)](https:\u002F\u002Fgithub.com\u002Fyizhiwang96\u002Fdeepvecfont)\n  * [Stargan v2 Tensorflow](https:\u002F\u002Fgithub.com\u002Fclovaai\u002Fstargan-v2-tensorflow)：官方 TensorFlow 
实现。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_251b83ad5cb4.png)](https:\u002F\u002Fgithub.com\u002Fclovaai\u002Fstargan-v2-tensorflow)\n  * [StyleGAN2 蒸馏](https:\u002F\u002Fgithub.com\u002FEvgenyKashin\u002Fstylegan2-distillation)：成对的图像到图像翻译任务，基于 StyleGAN2 生成的合成数据进行训练，在图像操控方面优于现有方法。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_70de04dcdcaa.png)](https:\u002F\u002Fgithub.com\u002FEvgenyKashin\u002Fstylegan2-distillation)\n  * [从扩散模型中提取训练数据](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.13188)\n  * [Mann-E - Mann-E（波斯语：مانی）是一个艺术生成模型，基于 Stable Diffusion 1.5 的权重以及从 Pinterest 上收集的艺术素材](https:\u002F\u002Fopencognitives.com\u002Fmann-e)\n  * [端到端训练的 CNN 编码器-解码器网络用于图像隐写术](https:\u002F\u002Farxiv.org\u002Fabs\u002F1711.07201)\n  * [Grounded-Segment-Anything](https:\u002F\u002Fgithub.com\u002FIDEA-Research\u002FGrounded-Segment-Anything)：将 Grounding DINO 与 Segment Anything、Stable Diffusion、Tag2Text、BLIP、Whisper 和 ChatBot 相结合——能够自动检测、分割并根据图像、文本和音频输入生成任何内容。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_f9a3977979e4.png)](https:\u002F\u002Fgithub.com\u002FIDEA-Research\u002FGrounded-Segment-Anything)\n  * [AnimateDiff](https:\u002F\u002Fgithub.com\u002Fguoyww\u002FAnimateDiff)：无需特定调优即可为您的个性化文本到图像扩散模型添加动画效果。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_a2c309bf3f27.png)](https:\u002F\u002Fgithub.com\u002Fguoyww\u002FAnimateDiff)\n  * [BasicSR](https:\u002F\u002Fgithub.com\u002FXPixelGroup\u002FBasicSR)：用于超分辨率、去噪、去模糊等任务的开源图像和视频修复工具箱。目前包括 EDSR、RCAN、SRResNet、SRGAN、ESRGAN、EDVR、BasicVSR、SwinIR、ECBSR 等，并支持 StyleGAN2 和 DFDNet。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_9a32c804dab7.png)](https:\u002F\u002Fgithub.com\u002FXPixelGroup\u002FBasicSR)\n  * 
[Real-ESRGAN](https:\u002F\u002Fgithub.com\u002Fxinntao\u002FReal-ESRGAN)：Real-ESRGAN 致力于开发通用图像\u002F视频修复的实用算法。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_e08feb501df3.png)](https:\u002F\u002Fgithub.com\u002Fxinntao\u002FReal-ESRGAN)\n  * [ESRGAN](https:\u002F\u002Fgithub.com\u002Fxinntao\u002FESRGAN)：增强版 SRGAN。在 PIRM 感知超分辨率挑战赛中夺冠。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_f052cb7dc6de.png)](https:\u002F\u002Fgithub.com\u002Fxinntao\u002FESRGAN)\n  * [MixNMatch](https:\u002F\u002Fgithub.com\u002FYuheng-Li\u002FMixNMatch)：用于条件图像生成的多因子解耦与编码。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_170285a1fb48.png)](https:\u002F\u002Fgithub.com\u002FYuheng-Li\u002FMixNMatch)\n  * [Clarity-upscaler](https:\u002F\u002Fgithub.com\u002Fphilz1337x\u002Fclarity-upscaler)：为所有人重新构想的图像超分辨率工具。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_82f6875dbe87.png)](https:\u002F\u002Fgithub.com\u002Fphilz1337x\u002Fclarity-upscaler)\n  * [一步扩散与分布匹配蒸馏](https:\u002F\u002Ftianweiy.github.io\u002Fdmd\u002F)\n  * [隐形缝合](https:\u002F\u002Fgithub.com\u002Fpaulengstler\u002Finvisible-stitch)：通过深度修复生成平滑的 3D 场景。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_168b24e7b433.png)](https:\u002F\u002Fgithub.com\u002Fpaulengstler\u002Finvisible-stitch)\n  * [SSR](https:\u002F\u002Fgithub.com\u002FDaLi-Jack\u002FSSR-code)：单视图高保真形状与纹理的 3D 场景重建。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_4ead0277cb02.png)](https:\u002F\u002Fgithub.com\u002FDaLi-Jack\u002FSSR-code)\n  * 
[InvSR](https:\u002F\u002Fgithub.com\u002FzsyOAOA\u002FInvSR)：通过扩散反演实现任意步数的图像超分辨率。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_513527908081.png)](https:\u002F\u002Fgithub.com\u002FzsyOAOA\u002FInvSR)\n  * [REPARO](https:\u002F\u002Fgithub.com\u002FVincentHancoder\u002FREPARO)：通过可微分的 3D 布局对齐生成组合式 3D 资产。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_c6b1627b107d.png)](https:\u002F\u002Fgithub.com\u002FVincentHancoder\u002FREPARO)\n  * [Gen3DSR](https:\u002F\u002Fgithub.com\u002FAndreeaDogaru\u002FGen3DSR)：从单视图出发，通过分治法实现可推广的 3D 场景重建。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_61494c3f06bb.png)](https:\u002F\u002Fgithub.com\u002FAndreeaDogaru\u002FGen3DSR)\n  * [ml-sharp](https:\u002F\u002Fgithub.com\u002Fapple\u002Fml-sharp)：在不到一秒钟内完成清晰的单目视图合成。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_5f6421e37b95.png)](https:\u002F\u002Fgithub.com\u002Fapple\u002Fml-sharp)\n\n##### 🔎 检测 🔎\n\n  * [stylegan3-detector](https:\u002F\u002Fgithub.com\u002FNVlabs\u002Fstylegan3-detector): StyleGAN3 合成图像检测。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_2837b6642473.png)](https:\u002F\u002Fgithub.com\u002FNVlabs\u002Fstylegan3-detector)\n  * [stylegan2-projecting-images](https:\u002F\u002Fgithub.com\u002Fwoctezuma\u002Fstylegan2-projecting-images): 使用 StyleGAN2 将图像投影到潜在空间。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_c5a596e1b8bd.png)](https:\u002F\u002Fgithub.com\u002Fwoctezuma\u002Fstylegan2-projecting-images)\n  * [FALdetector](https:\u002F\u002Fgithub.com\u002FPeterWang512\u002FFALdetector): 通过脚本化 Photoshop 检测经过 Photoshop 
处理的人脸。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_3aab12b955a2.png)](https:\u002F\u002Fgithub.com\u002FPeterWang512\u002FFALdetector)\n  * [B-Free](https:\u002F\u002Fgithub.com\u002Fgrip-unina\u002FB-Free): 一种无偏见的训练范式，用于更通用的 AI 生成图像检测。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_a4e5a315106b.png)](https:\u002F\u002Fgithub.com\u002Fgrip-unina\u002FB-Free)\n  * [多模态模型生成图像的检测](https:\u002F\u002Fwww.bsi.bund.de\u002FSharedDocs\u002FDownloads\u002FEN\u002FBSI\u002FKI\u002FDetection_Images_Multi-Modal_Models.html)\n\n#### 🎥 视频 🎥\n\n##### 🛠️ 工具 🛠️\n\n  * [DeepFaceLab](https:\u002F\u002Fgithub.com\u002Fiperov\u002FDeepFaceLab): DeepFaceLab 是领先的深度伪造制作软件。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_ffdf630caee6.png)](https:\u002F\u002Fgithub.com\u002Fiperov\u002FDeepFaceLab)\n  * [faceswap](https:\u002F\u002Fgithub.com\u002Fdeepfakes\u002Ffaceswap): 适用于所有人的深度伪造软件。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_8502b17a5690.png)](https:\u002F\u002Fgithub.com\u002Fdeepfakes\u002Ffaceswap)\n  * [dot](https:\u002F\u002Fgithub.com\u002Fsensity-ai\u002Fdot): 深度伪造攻击工具包。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_f092de96d911.png)](https:\u002F\u002Fgithub.com\u002Fsensity-ai\u002Fdot)\n  * [SimSwap](https:\u002F\u002Fgithub.com\u002Fneuralchen\u002FSimSwap): 一个基于单一训练模型的任意人脸交换框架，可用于图像和视频！[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_7d37f167cdb8.png)](https:\u002F\u002Fgithub.com\u002Fneuralchen\u002FSimSwap)\n  * [faceswap-GAN](https:\u002F\u002Fgithub.com\u002Fshaoanlu\u002Ffaceswap-GAN): 一种去噪自编码器 + 
对抗损失和注意力机制的人脸交换方法。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_f5ac3a96ed2e.png)](https:\u002F\u002Fgithub.com\u002Fshaoanlu\u002Ffaceswap-GAN)\n  * [Celeb DeepFakeForensics](https:\u002F\u002Fgithub.com\u002Fyuezunli\u002Fceleb-deepfakeforensics): 一个大规模且具有挑战性的深度伪造取证数据集。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_4ad798cadfd2.png)](https:\u002F\u002Fgithub.com\u002Fyuezunli\u002Fceleb-deepfakeforensics)\n  * [VGen](https:\u002F\u002Fgithub.com\u002Fdamo-vilab\u002Fi2vgen-xl): 基于扩散模型构建的综合性视频生成生态系统。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_8b455332876f.png)](https:\u002F\u002Fgithub.com\u002Fdamo-vilab\u002Fi2vgen-xl)\n  * [MuseV](https:\u002F\u002Fgithub.com\u002FTMElyralab\u002FMuseV): 基于视觉条件并行去噪技术，实现无限长度、高保真度的虚拟人视频生成。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_b9a5ea7d8936.png)](https:\u002F\u002Fgithub.com\u002FTMElyralab\u002FMuseV)\n  * [GLEE](https:\u002F\u002Fgithub.com\u002FFoundationVision\u002FGLEE): 面向图像和视频的大规模通用对象基础模型。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_0abbec4a530d.png)](https:\u002F\u002Fgithub.com\u002FFoundationVision\u002FGLEE)\n  * [T-Rex](https:\u002F\u002Fgithub.com\u002FIDEA-Research\u002FT-Rex): 通过文本-视觉提示协同作用实现通用目标检测。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_afd74fa03067.png)](https:\u002F\u002Fgithub.com\u002FIDEA-Research\u002FT-Rex)\n  * [DynamiCrafter](https:\u002F\u002Fgithub.com\u002FDoubiiu\u002FDynamiCrafter): 利用视频扩散先验对开放域图像进行动画化处理。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_a52c5c971e0e.png)](https:\u002F\u002Fgithub.com\u002FDoubiiu\u002FDynamiCrafter)\n  * [Mora](https:\u002F\u002Fgithub.com\u002Flichao-sun\u002FMora): 更接近 Sora 
的通用视频生成模型。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_743828bf09cc.png)](https:\u002F\u002Fgithub.com\u002Flichao-sun\u002FMora)\n\n##### 💡 应用 💡\n\n* [face2face-demo](https:\u002F\u002Fgithub.com\u002Fdatitran\u002Fface2face-demo): 基于人脸关键点学习并将其转换为面部的pix2pix演示。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_3f1a2e218b00.png)](https:\u002F\u002Fgithub.com\u002Fdatitran\u002Fface2face-demo)\n  * [Faceswap-Deepfake-Pytorch](https:\u002F\u002Fgithub.com\u002FOldpan\u002FFaceswap-Deepfake-Pytorch): 使用PyTorch实现的人脸交换或深度伪造。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_a19bf3c65399.png)](https:\u002F\u002Fgithub.com\u002FOldpan\u002FFaceswap-Deepfake-Pytorch)\n  * [Point-E](https:\u002F\u002Fgithub.com\u002Fopenai\u002Fpoint-e): 用于3D模型合成的点云扩散模型。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_a759f7d92327.png)](https:\u002F\u002Fgithub.com\u002Fopenai\u002Fpoint-e)\n  * [EGVSR](https:\u002F\u002Fgithub.com\u002FThmen\u002FEGVSR): 高效且通用的视频超分辨率技术。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_265823cc8865.png)](https:\u002F\u002Fgithub.com\u002FThmen\u002FEGVSR)\n  * [STIT](https:\u002F\u002Fgithub.com\u002Frotemtzaban\u002FSTIT): 时间拼接：基于GAN的真实视频人脸编辑。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_03945486701b.png)](https:\u002F\u002Fgithub.com\u002Frotemtzaban\u002FSTIT)\n  * [BackgroundMattingV2](https:\u002F\u002Fgithub.com\u002FPeterL1n\u002FBackgroundMattingV2): 实时高分辨率背景抠图。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_8b5eb04a2ec8.png)](https:\u002F\u002Fgithub.com\u002FPeterL1n\u002FBackgroundMattingV2)\n  * [MODNet](https:\u002F\u002Fgithub.com\u002FZHKKKe\u002FMODNet): 
无需三元图的实时人像抠图解决方案。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_3614ac2ed8a8.png)](https:\u002F\u002Fgithub.com\u002FZHKKKe\u002FMODNet)\n  * [Background-Matting](https:\u002F\u002Fgithub.com\u002Fsenguptaumd\u002FBackground-Matting): 背景抠图：世界即你的绿幕。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_2e40ba2095f4.png)](https:\u002F\u002Fgithub.com\u002Fsenguptaumd\u002FBackground-Matting)\n  * [First Order Model](https:\u002F\u002Fgithub.com\u002FAliaksandrSiarohin\u002Ffirst-order-model): 该仓库包含论文《用于图像动画的一阶运动模型》的源代码。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_636a3c95999e.png)](https:\u002F\u002Fgithub.com\u002FAliaksandrSiarohin\u002Ffirst-order-model)\n  * [Articulated Animation](https:\u002F\u002Fgithub.com\u002Fsnap-research\u002Farticulated-animation): 该仓库包含CVPR'2021论文《关节动画的运动表示》的源代码。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_314ae3a610c4.png)](https:\u002F\u002Fgithub.com\u002Fsnap-research\u002Farticulated-animation)\n  * [Real Time Person Removal](https:\u002F\u002Fgithub.com\u002Fjasonmayes\u002FReal-Time-Person-Removal): 使用TensorFlow.js在网页浏览器中实时从复杂背景中移除人物。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_427a826fa4f3.png)](https:\u002F\u002Fgithub.com\u002Fjasonmayes\u002FReal-Time-Person-Removal)\n  * [AdaIN-style](https:\u002F\u002Fgithub.com\u002Fxunhuang1995\u002FAdaIN-style): 使用自适应实例归一化实现实时任意风格迁移。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_a0da3a66896e.png)](https:\u002F\u002Fgithub.com\u002Fxunhuang1995\u002FAdaIN-style)\n  * [Frame Interpolation](https:\u002F\u002Fgithub.com\u002Fgoogle-research\u002Fframe-interpolation): 
大运动帧插值。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_af68f33f0121.png)](https:\u002F\u002Fgithub.com\u002Fgoogle-research\u002Fframe-interpolation)\n  * [Awesome-Image-Colorization](https:\u002F\u002Fgithub.com\u002FMarkMoHR\u002FAwesome-Image-Colorization): 📚 基于深度学习的图像着色和视频着色论文集。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_bddba8efb649.png)](https:\u002F\u002Fgithub.com\u002FMarkMoHR\u002FAwesome-Image-Colorization)\n  * [SadTalker](https:\u002F\u002Fgithub.com\u002FOpenTalker\u002FSadTalker): 学习用于风格化音频驱动单张图片说话人脸动画的真实3D运动系数。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_90d38a28c9ce.png)](https:\u002F\u002Fgithub.com\u002FOpenTalker\u002FSadTalker)\n  * [roop](https:\u002F\u002Fgithub.com\u002Fs0md3v\u002Froop): 一键式深度伪造（人脸交换）。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_9ccd1142c02a.png)](https:\u002F\u002Fgithub.com\u002Fs0md3v\u002Froop)\n  * [StableVideo](https:\u002F\u002Fgithub.com\u002Frese1f\u002FStableVideo): 文本驱动的一致性感知扩散视频编辑。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_3bfda99e9393.png)](https:\u002F\u002Fgithub.com\u002Frese1f\u002FStableVideo)\n  * [MagicEdit](https:\u002F\u002Fgithub.com\u002Fmagic-research\u002Fmagic-edit): 高保真、时间一致的视频编辑。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_2a5b107b0dce.png)](https:\u002F\u002Fgithub.com\u002Fmagic-research\u002Fmagic-edit)\n  * [Rerender_A_Video](https:\u002F\u002Fgithub.com\u002Fwilliamyang1991\u002FRerender_A_Video): 零样本文本引导的视频到视频翻译。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_d76043f3b34c.png)](https:\u002F\u002Fgithub.com\u002Fwilliamyang1991\u002FRerender_A_Video)\n  * 
[DreamEditor](https:\u002F\u002Fgithub.com\u002Fzjy526223908\u002FDreamEditor): 基于神经场的文本驱动3D场景编辑。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_6ed1dd370e29.png)](https:\u002F\u002Fgithub.com\u002Fzjy526223908\u002FDreamEditor)\n  * [4K4D](https:\u002F\u002Fgithub.com\u002Fzju3dv\u002F4K4D): 4K分辨率下的实时4D视图合成。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_58bc214f0b25.png)](https:\u002F\u002Fgithub.com\u002Fzju3dv\u002F4K4D)\n  * [AnimateAnyone](https:\u002F\u002Fgithub.com\u002FHumanAIGC\u002FAnimateAnyone): 用于角色动画的一致且可控的图像到视频合成。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_ad690dcdc4be.png)](https:\u002F\u002Fgithub.com\u002FHumanAIGC\u002FAnimateAnyone)\n  * [Moore-AnimateAnyone](https:\u002F\u002Fgithub.com\u002FMooreThreads\u002FMoore-AnimateAnyone): 该仓库复现了AnimateAnyone。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_46ecf7c87453.png)](https:\u002F\u002Fgithub.com\u002FMooreThreads\u002FMoore-AnimateAnyone)\n  * [audio2photoreal](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Faudio2photoreal): 从音频到照片级逼真化身：在对话中合成人类形象。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_3622dac23f83.png)](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Faudio2photoreal)\n  * [MagicVideo-V2: 多阶段高美学视频生成](https:\u002F\u002Fmagicvideov2.github.io\u002F)\n  * [LWM](https:\u002F\u002Fgithub.com\u002FLargeWorldModel\u002FLWM): 一种通用的大上下文多模态自回归模型。它使用RingAttention在大量多样化的长视频和书籍数据上训练，能够进行语言、图像和视频的理解与生成。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_885e74a5722a.png)](https:\u002F\u002Fgithub.com\u002FLargeWorldModel\u002FLWM)\n  * [AniPortrait](https:\u002F\u002Fgithub.com\u002FZejun-Yang\u002FAniPortrait): 
音频驱动的写实人像动画合成。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_40fe1bb94134.png)](https:\u002F\u002Fgithub.com\u002FZejun-Yang\u002FAniPortrait)\n  * [Champ](https:\u002F\u002Fgithub.com\u002Ffudan-generative-vision\u002Fchamp): 基于3D参数化指导的可控且一致的人像动画。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_95ca3fa3cd67.png)](https:\u002F\u002Fgithub.com\u002Ffudan-generative-vision\u002Fchamp)\n  * [Streamv2v](https:\u002F\u002Fgithub.com\u002FJeff-LiangF\u002Fstreamv2v): 借助特征库实现的流式视频到视频翻译。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_78d51488c23f.png)](https:\u002F\u002Fgithub.com\u002FJeff-LiangF\u002Fstreamv2v)\n  * [Deep-Live-Cam](https:\u002F\u002Fgithub.com\u002Fhacksider\u002FDeep-Live-Cam): 仅需一张图片即可实现实时人脸交换和一键式视频深度伪造。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_ecea61a7ee71.png)](https:\u002F\u002Fgithub.com\u002Fhacksider\u002FDeep-Live-Cam)\n  * [Sapiens](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fsapiens): 人类视觉模型的基础。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_8acfacf134af.png)](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fsapiens)\n  * [ViVid-1-to-3](https:\u002F\u002Fgithub.com\u002Fubc-vision\u002Fvivid123): 利用视频扩散模型进行新颖视图合成。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_5e1c4eb28192.png)](https:\u002F\u002Fgithub.com\u002Fubc-vision\u002Fvivid123)\n  * [VGGT](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fvggt): 视觉几何基础Transformer。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_f1630ef894ee.png)](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fvggt)\n  * [LayerPano3D](https:\u002F\u002Fgithub.com\u002F3DTopia\u002FLayerPano3D): 
分层3D全景图，用于超沉浸式场景生成。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_e39e841b89da.png)](https:\u002F\u002Fgithub.com\u002F3DTopia\u002FLayerPano3D)\n  * [RealmDreamer](https:\u002F\u002Fgithub.com\u002Fjaidevshriram\u002Frealmdreamer): 基于文本驱动的3D场景生成，结合修复和深度扩散技术。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_df07669ceb2a.png)](https:\u002F\u002Fgithub.com\u002Fjaidevshriram\u002Frealmdreamer)\n\n##### 🔎 检测 🔎\n\n  * [FaceForensics++](https:\u002F\u002Fgithub.com\u002Fondyari\u002FFaceForensics): FaceForensics 数据集。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_e4fcfc597027.png)](https:\u002F\u002Fgithub.com\u002Fondyari\u002FFaceForensics)\n  * [DeepFake-Detection](https:\u002F\u002Fgithub.com\u002Fdessa-oss\u002FDeepFake-Detection): 致力于真正有效的深度伪造检测。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_11104d5e3681.png)](https:\u002F\u002Fgithub.com\u002Fdessa-oss\u002FDeepFake-Detection)\n  * [fakeVideoForensics](https:\u002F\u002Fgithub.com\u002Fjiep\u002FfakeVideoForensics): 检测深度伪造视频。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_f1f371bdbcff.png)](https:\u002F\u002Fgithub.com\u002Fjiep\u002FfakeVideoForensics)\n  * [Deepfake-Detection](https:\u002F\u002Fgithub.com\u002FHongguLiu\u002FDeepfake-Detection): 基于 Faceforensics++ 的 PyTorch 实现的深度伪造检测。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_61ffd00b26c6.png)](https:\u002F\u002Fgithub.com\u002FHongguLiu\u002FDeepfake-Detection)\n  * [SeqDeepFake](https:\u002F\u002Fgithub.com\u002Frshaojimmy\u002FSeqDeepFake): SeqDeepFake 的 PyTorch 
代码：检测并恢复序列式深度伪造篡改。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_3f63c4f7f8ff.png)](https:\u002F\u002Fgithub.com\u002Frshaojimmy\u002FSeqDeepFake)\n  * [PCL-I2G](https:\u002F\u002Fgithub.com\u002Fjtchen0528\u002FPCL-I2G): 非官方实现：学习自一致性以进行深度伪造检测。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_32076212bdfb.png)](https:\u002F\u002Fgithub.com\u002Fjtchen0528\u002FPCL-I2G)\n  * [DFDC 深度伪造挑战赛](https:\u002F\u002Fgithub.com\u002Fselimsef\u002Fdfdc_deepfake_challenge): DFDC 挑战赛的获奖解决方案。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_1f1a5205d864.png)](https:\u002F\u002Fgithub.com\u002Fselimsef\u002Fdfdc_deepfake_challenge)\n  * [POI-Forensics](https:\u002F\u002Fgithub.com\u002Fgrip-unina\u002Fpoi-forensics): 音频-视觉感兴趣人物的深度伪造检测。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_d462136468ab.png)](https:\u002F\u002Fgithub.com\u002Fgrip-unina\u002Fpoi-forensics)\n  * [标准化深度伪造检测：专家为何认为其重要](https:\u002F\u002Fantispoofing.org\u002Fdeepfake-detection-standardization-origin-goals-and-implementation\u002F)\n  * [想识别深度伪造吗？看看他们眼睛里的“星星”](https:\u002F\u002Fras.ac.uk\u002Fnews-and-press\u002Fnews\u002Fwant-spot-deepfake-look-stars-their-eyes)\n  * [适合实际应用吗？现实世界中的深度伪造检测](https:\u002F\u002Farxiv.org\u002Fabs\u002F2510.16556)\n\n#### 📄 文本 📄\n\n##### 🛠️ 工具 🛠️\n  * [GLM-130B](https:\u002F\u002Fgithub.com\u002FTHUDM\u002FGLM-130B): 一个开源的双语预训练模型。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_0c300c05b08c.png)](https:\u002F\u002Fgithub.com\u002FTHUDM\u002FGLM-130B)\n  * [LongtermChatExternalSources](https:\u002F\u002Fgithub.com\u002Fdaveshap\u002FLongtermChatExternalSources): 
具有长期记忆和外部信息源的GPT-3聊天机器人。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_07632c7b31cf.png)](https:\u002F\u002Fgithub.com\u002Fdaveshap\u002FLongtermChatExternalSources)\n  * [sketch](https:\u002F\u002Fgithub.com\u002Fapproximatelabs\u002Fsketch): 一款能够理解数据内容的AI代码编写助手。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_1eb7e84649f8.png)](https:\u002F\u002Fgithub.com\u002Fapproximatelabs\u002Fsketch)\n  * [LangChain](https:\u002F\u002Fgithub.com\u002Fhwchase17\u002Flangchain): ⚡ 通过可组合性构建大型语言模型应用 ⚡。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_3d9f119a9861.png)](https:\u002F\u002Fgithub.com\u002Fhwchase17\u002Flangchain)\n  * [ChatGPT Wrapper](https:\u002F\u002Fgithub.com\u002Fmmabrouk\u002Fchatgpt-wrapper): 使用Python和Shell与ChatGPT交互的API。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_8fecce6b25e3.png)](https:\u002F\u002Fgithub.com\u002Fmmabrouk\u002Fchatgpt-wrapper)\n  * [openai-python](https:\u002F\u002Fgithub.com\u002Fopenai\u002Fopenai-python): OpenAI Python库为使用Python语言编写的应用程序提供了便捷的OpenAI API访问接口。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_279b886fa5aa.png)](https:\u002F\u002Fgithub.com\u002Fopenai\u002Fopenai-python)\n  * [Beto](https:\u002F\u002Fgithub.com\u002Fdccuchile\u002Fbeto): BERT模型的西班牙语版本。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_51c9bfcf0700.png)](https:\u002F\u002Fgithub.com\u002Fdccuchile\u002Fbeto)\n  * [GPT-Code-Clippy](https:\u002F\u002Fgithub.com\u002FCodedotAl\u002Fgpt-code-clippy): GPT-Code-Clippy (GPT-CC) 是GitHub 
Copilot的开源版本，基于GPT-3的语言模型，称为GPT-Codex。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_45635dea4017.png)](https:\u002F\u002Fgithub.com\u002FCodedotAl\u002Fgpt-code-clippy)\n  * [GPT Neo](https:\u002F\u002Fgithub.com\u002FEleutherAI\u002Fgpt-neo): 使用mesh-tensorflow库实现的模型并行GPT-2和GPT-3风格模型。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_e98cb9b2c238.png)](https:\u002F\u002Fgithub.com\u002FEleutherAI\u002Fgpt-neo)\n  * [ctrl](https:\u002F\u002Fgithub.com\u002Fsalesforce\u002Fctrl): 用于可控生成的条件Transformer语言模型。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_eb1dfa509913.png)](https:\u002F\u002Fgithub.com\u002Fsalesforce\u002Fctrl)\n  * [Llama](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fllama): LLaMA模型的推理代码。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_ab83522a6fbc.png)](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fllama)\n  * [Llama2](https:\u002F\u002Fai.meta.com\u002Fllama\u002F)\n  * [Llama Guard 3](https:\u002F\u002Fllama.meta.com\u002Fdocs\u002Fmodel-cards-and-prompt-formats\u002Fllama-guard-3\u002F)\n  * [UL2 20B](https:\u002F\u002Fai.googleblog.com\u002F2022\u002F10\u002Ful2-20b-open-source-unified-language.html): 一个开源的统一语言学习模型\n  * [burpgpt](https:\u002F\u002Fgithub.com\u002Faress31\u002Fburpgpt): 一个Burp Suite扩展，集成了OpenAI的GPT，用于执行额外的被动扫描以发现高度定制化的漏洞，并支持对任何类型的流量进行分析。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_7c940d52f583.png)](https:\u002F\u002Fgithub.com\u002Faress31\u002Fburpgpt)\n  * [Ollama](https:\u002F\u002Fgithub.com\u002Fjmorganca\u002Follama): 在本地快速启动并运行Llama 2及其他大型语言模型。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_8927bca96dc9.png)](https:\u002F\u002Fgithub.com\u002Fjmorganca\u002Follama)\n  * 
[SneakyPrompt](https:\u002F\u002Fgithub.com\u002FYuchen413\u002Ftext2image_safety): 突破文本到图像生成模型的安全限制。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_44c796ca15eb.png)](https:\u002F\u002Fgithub.com\u002FYuchen413\u002Ftext2image_safety)\n    * [Copilot-For-Security](https:\u002F\u002Fgithub.com\u002FAzure\u002FCopilot-For-Security): 一种由生成式AI驱动的安全解决方案，旨在以机器速度和规模提升防御者的效率和能力，从而改善安全成果，同时遵守负责任的AI原则。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_006bff948843.png)](https:\u002F\u002Fgithub.com\u002FAzure\u002FCopilot-For-Security)\n  * [LM Studio](https:\u002F\u002Flmstudio.ai\u002F): 发现、下载并运行本地大型语言模型\n  * [Bypass GPT: 将AI文本转换为人类风格的内容](https:\u002F\u002Fbypassgpt.ai\u002F)\n  * [MGM](https:\u002F\u002Fgithub.com\u002Fdvlab-research\u002FMGM): 该框架支持从2B到34B的一系列密集型和MoE大型语言模型（LLMs），同时具备图像理解、推理和生成能力。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_76c77103dfc7.png)](https:\u002F\u002Fgithub.com\u002Fdvlab-research\u002FMGM)\n  * [Secret Llama](https:\u002F\u002Fgithub.com\u002Fabi\u002Fsecret-llama): 完全私密的LLM聊天机器人，完全在浏览器中运行，无需服务器。支持Mistral和LLama 3。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_d6c514ceeeeb.png)](https:\u002F\u002Fgithub.com\u002Fabi\u002Fsecret-llama)\n  * [Llama3](https:\u002F\u002Fgithub.com\u002Fmeta-llama\u002Fllama3): Meta Llama 3的官方GitHub站点。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_a8cb82d0458c.png)](https:\u002F\u002Fgithub.com\u002Fmeta-llama\u002Fllama3)\n  * [Unsloth](https:\u002F\u002Fgithub.com\u002Funslothai\u002Funsloth): 以80%更少的内存，将Llama 3.3、Mistral、Phi-4、Qwen 2.5及Gemma 2的速度提升2倍！[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_78572a9ac59a.png)](https:\u002F\u002Fgithub.com\u002Funslothai\u002Funsloth)\n\n\n##### 🔎 检测 🔎\n\n  * 
[Detecting Fake Text](https:\u002F\u002Fgithub.com\u002FHendrikStrobelt\u002Fdetecting-fake-text): 巨型语言模型测试室。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_1430342545c2.png)](https:\u002F\u002Fgithub.com\u002FHendrikStrobelt\u002Fdetecting-fake-text)\n  * [Grover](https:\u002F\u002Fgithub.com\u002Frowanz\u002Fgrover): 用于防御神经网络假新闻的代码。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_46e6d9d0fd24.png)](https:\u002F\u002Fgithub.com\u002Frowanz\u002Fgrover)\n  * [Rebuff.ai](https:\u002F\u002Fgithub.com\u002Fprotectai\u002Frebuff): 提示注入检测器。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_54507297e69a.png)](https:\u002F\u002Fgithub.com\u002Fprotectai\u002Frebuff)\n  * [用于指示AI撰写文本的新AI分类器](https:\u002F\u002Fopenai.com\u002Fblog\u002Fnew-ai-classifier-for-indicating-ai-written-text\u002F)\n  * [揭秘四种神奇方法来检测AI生成文本（包括ChatGPT）](https:\u002F\u002Fmedium.com\u002F@itamargolan\u002Funcover-the-four-enchanted-ways-to-identify-ai-generated-text-including-chatgpts-4764847fd609)\n  * [GPTZero](https:\u002F\u002Fgptzero.me)\n  * [AI内容检测器（beta版）](https:\u002F\u002Fcopyleaks.com\u002Fai-content-detector)\n  * [大型语言模型的水印技术](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.10226)\n  * [能否可靠地检测出AI生成的文本？](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.11156)\n  * [GPT检测器对非英语母语写作者存在偏见](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.02819)\n  * [用ChatGPT，还是不用ChatGPT？这就是问题所在！](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.01487)\n  * [语言学家能否区分ChatGPT\u002F人工智能与人类写作？——一项关于研究伦理和学术出版的研究](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fabs\u002Fpii\u002FS2772766123000289)\n  * [ChatGPT就是胡扯](https:\u002F\u002Flink.springer.com\u002Farticle\u002F10.1007\u002Fs10676-024-09775-5)\n\n##### 💡 应用 💡\n\n* [handwrite](https:\u002F\u002Fgithub.com\u002Fbuiltree\u002Fhandwrite): Handwrite 
根据你的手写样本生成自定义字体。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_c69a1dabc61b.png)](https:\u002F\u002Fgithub.com\u002Fbuiltree\u002Fhandwrite)\n  * [GPT Sandbox](https:\u002F\u002Fgithub.com\u002Fshreyashankar\u002Fgpt3-sandbox): 该项目的目标是让用户仅用几行 Python 代码，就能利用新发布的 OpenAI GPT-3 API 创建酷炫的网页演示。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_8a48693a1cef.png)](https:\u002F\u002Fgithub.com\u002Fshreyashankar\u002Fgpt3-sandbox)\n  * [PassGAN](https:\u002F\u002Fgithub.com\u002Fbrannondorsey\u002FPassGAN): 一种基于深度学习的密码猜测方法。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_9ea5c0c1a8b4.png)](https:\u002F\u002Fgithub.com\u002Fbrannondorsey\u002FPassGAN)\n  * [GPT Index](https:\u002F\u002Fgithub.com\u002Fjerryjliu\u002Fgpt_index): GPT Index 是一个由一系列数据结构组成的项目，旨在使大型外部知识库更容易与 LLM 结合使用。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_c02dfd3db5b4.png)](https:\u002F\u002Fgithub.com\u002Fjerryjliu\u002Fgpt_index)\n  * [nanoGPT](https:\u002F\u002Fgithub.com\u002Fkarpathy\u002FnanoGPT): 训练\u002F微调中等规模 GPT 的最简单、最快的仓库。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_9fd4a3cee0bc.png)](https:\u002F\u002Fgithub.com\u002Fkarpathy\u002FnanoGPT)\n  * [whatsapp-gpt](https:\u002F\u002Fgithub.com\u002Fdanielgross\u002Fwhatsapp-gpt) [![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_1bd9dd1e0e63.png)](https:\u002F\u002Fgithub.com\u002Fdanielgross\u002Fwhatsapp-gpt)\n  * [ChatGPT Chrome 扩展](https:\u002F\u002Fgithub.com\u002Fgragland\u002Fchatgpt-chrome-extension): 一款 ChatGPT Chrome 扩展程序。将 ChatGPT 集成到互联网上的每一个文本框中。\n  * [Unilm](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002Funilm): 
跨任务、跨语言和跨模态的大规模自监督预训练。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_bef49fb02ea5.png)](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002Funilm)\n  * [minGPT](https:\u002F\u002Fgithub.com\u002Fkarpathy\u002FminGPT): OpenAI GPT（生成式预训练 Transformer）训练的极简 PyTorch 重实现。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_6348bf71a4cc.png)](https:\u002F\u002Fgithub.com\u002Fkarpathy\u002FminGPT)\n  * [CodeGeeX](https:\u002F\u002Fgithub.com\u002FTHUDM\u002FCodeGeeX): 一个开源的多语言代码生成模型。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_32f566baf18e.png)](https:\u002F\u002Fgithub.com\u002FTHUDM\u002FCodeGeeX)\n  * [OpenAI Cookbook](https:\u002F\u002Fgithub.com\u002Fopenai\u002Fopenai-cookbook): 使用 OpenAI API 的示例和指南。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_9e09be3428f5.png)](https:\u002F\u002Fgithub.com\u002Fopenai\u002Fopenai-cookbook)\n  * [🧠 Awesome ChatGPT Prompts](https:\u002F\u002Fgithub.com\u002Ff\u002Fawesome-chatgpt-prompts): 该仓库包含 ChatGPT 提示词精选，帮助用户更好地使用 ChatGPT。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_4fd1d8237a98.png)](https:\u002F\u002Fgithub.com\u002Ff\u002Fawesome-chatgpt-prompts)\n  * [Alice](https:\u002F\u002Fgithub.com\u002Fgreshake\u002FAlice): 让 ChatGPT 获得真正的终端访问权限。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_6979228961d6.png)](https:\u002F\u002Fgithub.com\u002Fgreshake\u002FAlice)\n  * [使用 ChatGPT 进行安全代码审查](https:\u002F\u002Fresearch.nccgroup.com\u002F2023\u002F02\u002F09\u002Fsecurity-code-review-with-chatgpt)\n  * [用户在 AI 助手的帮助下是否会编写更不安全的代码？](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.03622)\n  * [使用 ChatGPT 绕过 Gmail 的垃圾邮件过滤器](https:\u002F\u002Fneelc.org\u002Fposts\u002Fchatgpt-gmail-spam)\n  * [用于增强物联网密码安全性的循环 GAN 
密码破解器](https:\u002F\u002Fwww.mdpi.com\u002F1999-4893\u002F15\u002F5\u002F155)\n  * [PentestGPT](https:\u002F\u002Fgithub.com\u002FGreyDGL\u002FPentestGPT): 一款由 GPT 驱动的渗透测试工具。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_99fc142e3b61.png)](https:\u002F\u002Fgithub.com\u002FGreyDGL\u002FPentestGPT)\n  * [GPT Researcher](https:\u002F\u002Fgithub.com\u002Fassafelovic\u002Fgpt-researcher): 基于 GPT 的自主智能体，可针对任何给定主题进行在线全面研究。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_e015f00aab45.png)](https:\u002F\u002Fgithub.com\u002Fassafelovic\u002Fgpt-researcher)\n  * [GPT Engineer](https:\u002F\u002Fgithub.com\u002FAntonOsika\u002Fgpt-engineer): 指定你想要构建的内容，AI 会请求澄清，然后完成构建。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_3eefa5f312c0.png)](https:\u002F\u002Fgithub.com\u002FAntonOsika\u002Fgpt-engineer)\n  * [localpilot](https:\u002F\u002Fgithub.com\u002Fdanielgross\u002Flocalpilot): 在你的 Macbook 上一键本地使用 GitHub Copilot！[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_ae7dda45280b.png)](https:\u002F\u002Fgithub.com\u002Fdanielgross\u002Flocalpilot)\n  * [WormGPT](https:\u002F\u002Fthehackernews.com\u002F2023\u002F07\u002Fwormgpt-new-ai-tool-allows.html): 新型 AI 工具使网络犯罪分子能够发起复杂的网络攻击\n  * [PoisonGPT](https:\u002F\u002Fblog.mithrilsecurity.io\u002Fpoisongpt-how-we-hid-a-lobotomized-llm-on-hugging-face-to-spread-fake-news\u002F): 我们如何在 Hugging Face 上隐藏一台被“切除前额叶”的 LLM 来传播假新闻\n  * [PassGPT：使用大型语言模型进行密码建模和（引导式）生成](https:\u002F\u002Fjavirandor.github.io\u002Fassets\u002Fpdf\u002Fpassgpt2023rando.pdf)\n  * [DeepPass — 通过深度学习寻找密码](https:\u002F\u002Fposts.specterops.io\u002Fdeeppass-finding-passwords-with-deep-learning-4d31c534cd00)\n  * [GPTFuzz](https:\u002F\u002Fgithub.com\u002Fsherdencooper\u002FGPTFuzz): 
使用自动生成的越狱提示对大型语言模型进行红队测试。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_0b7733da74eb.png)](https:\u002F\u002Fgithub.com\u002Fsherdencooper\u002FGPTFuzz)\n  * [Open Interpreter](https:\u002F\u002Fgithub.com\u002FKillianLucas\u002Fopen-interpreter): OpenAI 的 Code Interpreter 在你的终端中本地运行。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_2a0215cc273f.png)](https:\u002F\u002Fgithub.com\u002FKillianLucas\u002Fopen-interpreter)\n  * [Eureka](https:\u002F\u002Fgithub.com\u002Feureka-research\u002FEureka): 通过大型语言模型编程实现人类级别的奖励设计。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_2512629c1525.png)](https:\u002F\u002Fgithub.com\u002Feureka-research\u002FEureka)\n  * [MetaCLIP](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FMetaCLIP): 揭秘 CLIP 数据。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_70212166e482.png)](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FMetaCLIP)\n  * [LLM OSINT](https:\u002F\u002Fgithub.com\u002Fsshh12\u002Fllm_osint): 利用 LLM 从互联网上收集信息，并基于这些信息执行任务的概念验证方法。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_c2ea78de870d.png)](https:\u002F\u002Fgithub.com\u002Fsshh12\u002Fllm_osint)\n  * [HackingBuddyGPT](https:\u002F\u002Fgithub.com\u002Fipa-lab\u002FhackingBuddyGPT): LLM 与渗透测试结合。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_e6038f5060b7.png)](https:\u002F\u002Fgithub.com\u002Fipa-lab\u002FhackingBuddyGPT)\n  * [ChatGPT-Jailbreaks](https:\u002F\u002Fgithub.com\u002FGabryB03\u002FChatGPT-Jailbreaks): ChatGPT（GPT-3.5）的官方越狱方法。在与 ChatGPT 
对话开始时发送一条长消息，即可获得具有攻击性、不道德、激进且接近人类的回答，支持英语和意大利语。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_fa8f2e45a7c9.png)](https:\u002F\u002Fgithub.com\u002FGabryB03\u002FChatGPT-Jailbreaks)\n  * [Magika](https:\u002F\u002Fgithub.com\u002Fgoogle\u002Fmagika): 使用深度学习检测文件内容类型。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_d7dc0c58ba14.png)](https:\u002F\u002Fgithub.com\u002Fgoogle\u002Fmagika)\n  * [Jan](https:\u002F\u002Fgithub.com\u002Fjanhq\u002Fjan): 一个开源的 ChatGPT 替代品，在你的电脑上 100% 离线运行。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_e10fb8a1192e.png)](https:\u002F\u002Fgithub.com\u002Fjanhq\u002Fjan)\n  * [LibreChat](https:\u002F\u002Fgithub.com\u002Fdanny-avila\u002FLibreChat): 增强版 ChatGPT 克隆：支持 OpenAI、Assistants API、Azure、Groq、GPT-4 Vision、Mistral、Bing、Anthropic、OpenRouter、Vertex AI、Gemini，可切换 AI 模型、支持消息搜索、LangChain、DALL-E-3、ChatGPT 插件、OpenAI Functions、安全的多用户系统、预设功能，完全开源，可自行托管。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_8203bf0255bc.png)](https:\u002F\u002Fgithub.com\u002Fdanny-avila\u002FLibreChat)\n  * [Lumina-T2X](https:\u002F\u002Fgithub.com\u002FAlpha-VLLM\u002FLumina-T2X): 一个统一的框架，用于文本到任意模态的生成。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_f960a584c947.png)](https:\u002F\u002Fgithub.com\u002FAlpha-VLLM\u002FLumina-T2X)\n\n### 📚 杂项 📚\n\n* [Awesome GPT + Security](https:\u002F\u002Fgithub.com\u002Fcckuailong\u002Fawesome-gpt-security): 一个精选的安全工具、实验案例及其他与大语言模型或GPT相关有趣内容的列表。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_25e605d9d2dc.png)](https:\u002F\u002Fgithub.com\u002Fcckuailong\u002Fawesome-gpt-security)\n  * [🚀 Awesome Reinforcement Learning for Cyber Security](https:\u002F\u002Fgithub.com\u002FLimmen\u002Fawesome-rl-for-cybersecurity): 
一个专注于强化学习在网络安全领域应用的资源精选列表。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_d4bbed6fa74a.png)](https:\u002F\u002Fgithub.com\u002FLimmen\u002Fawesome-rl-for-cybersecurity)\n  * [Awesome Machine Learning for Cyber Security](https:\u002F\u002Fgithub.com\u002Fjivoi\u002Fawesome-ml-for-cybersecurity): 一个关于机器学习在网络安全中应用的超赞工具和资源精选列表。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_d61ba77e081c.png)](https:\u002F\u002Fgithub.com\u002Fjivoi\u002Fawesome-ml-for-cybersecurity)\n  * [Hugging Face扩散模型课程](https:\u002F\u002Fgithub.com\u002Fhuggingface\u002Fdiffusion-models-class): Hugging Face扩散模型课程的相关资料。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_07c0b4cf5a5d.png)](https:\u002F\u002Fgithub.com\u002Fhuggingface\u002Fdiffusion-models-class)\n  * [Awesome-AI-Security](https:\u002F\u002Fgithub.com\u002FDeepSpaceHarbor\u002FAwesome-AI-Security): 一个AI安全资源的精选列表。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_9f742e059dd8.png)](https:\u002F\u002Fgithub.com\u002FDeepSpaceHarbor\u002FAwesome-AI-Security)\n  * [面向黑客的机器学习](https:\u002F\u002Fgithub.com\u002Fjohnmyleswhite\u002FML_for_Hackers): 与《面向黑客的机器学习》一书配套的代码。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_505c23463561.png)](https:\u002F\u002Fgithub.com\u002Fjohnmyleswhite\u002FML_for_Hackers)\n  * [Awful AI](https:\u002F\u002Fgithub.com\u002Fdaviddao\u002Fawful-ai): Awful AI是一个精选列表，用于追踪当前令人担忧的AI使用案例，旨在提高公众意识。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_5b231eabb7b3.png)](https:\u002F\u002Fgithub.com\u002Fdaviddao\u002Fawful-ai)\n  * [NIST AI风险管理框架手册](https:\u002F\u002Fpages.nist.gov\u002FAIRMF)\n  * [SoK：面向计算机安全应用的可解释机器学习](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.10605)\n  * 
[谁来评估评估者？——关于评估基于AI的攻击性代码生成器的自动化指标](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.06008)\n  * [漏洞优先级排序：一种进攻性安全方法](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.11182)\n  * [MITRE ATLAS™](https:\u002F\u002Fatlas.mitre.org)（人工智能系统对抗威胁态势图）\n  * [强化学习安全性及其在自动驾驶中的应用综述](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.06123)\n  * [如何避免机器学习陷阱：学术研究人员指南](https:\u002F\u002Farxiv.org\u002Fabs\u002F2108.02497)\n  * [AI安全与隐私相关活动精选列表](https:\u002F\u002Fgithub.com\u002FZhengyuZhao\u002FAI-Security-and-Privacy-Events)\n  * [NIST AI 100-2 E2025](https:\u002F\u002Fdoi.org\u002F10.6028\u002FNIST.AI.100-2e2025)：对抗性机器学习。攻击与缓解措施的分类与术语。\n  * [🇪🇸 RootedCon 2023 - 进攻性人工智能 - 我们该如何做好准备？](\u002Fslides\u002FRootedCon_2023.pdf)\n  * [AI系统的安全性：基础——对抗性深度学习](https:\u002F\u002Fwww.bsi.bund.de\u002FSharedDocs\u002FDownloads\u002FEN\u002FBSI\u002FKI\u002FSecurity-of-AI-systems_fundamentals.pdf)\n  * [超越安全措施：探索ChatGPT的安全风险](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.08005)\n  * [AI攻击面地图 v1.0](https:\u002F\u002Fdanielmiessler.com\u002Fblog\u002Fthe-ai-attack-surface-map-v1-0)\n  * [大型AI模型不可能的安全性](https:\u002F\u002Farxiv.org\u002Fabs\u002F2209.15259)\n  * [前沿AI监管：管理新兴公共安全风险](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.03718)\n  * [针对AI的良好网络安全实践多层框架](https:\u002F\u002Fwww.enisa.europa.eu\u002Fpublications\u002Fmultilayer-framework-for-good-cybersecurity-practices-for-ai)\n  * [谷歌推出安全AI框架](https:\u002F\u002Fblog.google\u002Ftechnology\u002Fsafety-security\u002Fintroducing-googles-secure-ai-framework\u002F)\n  * [OWASP大型语言模型十大风险](https:\u002F\u002Fowasp.org\u002Fwww-project-top-10-for-large-language-model-applications\u002Fassets\u002FPDF\u002FOWASP-Top-10-for-LLMs-2023-v1_0.pdf)\n  * [Awesome LLM Security](https:\u002F\u002Fgithub.com\u002Fcorca-ai\u002Fawesome-llm-security): 一个关于LLM安全的优秀工具、文档和项目的精选列表。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_ccfefed83528.png)](https:\u002F\u002Fgithub.com\u002Fcorca-ai\u002Fawesome-llm-security)\n  * 
一个在企业中安全使用LLM的框架。[第1部分：风险概述](https:\u002F\u002Fboringappsec.substack.com\u002Fp\u002Fedition-21-a-framework-to-securely)。[第2部分：风险管理](https:\u002F\u002Fboringappsec.substack.com\u002Fp\u002Fedition-22-a-framework-to-securely)。[第3部分：保护ChatGPT和GitHub Copilot](https:\u002F\u002Fboringappsec.substack.com\u002Fp\u002Fedition-23-a-framework-to-securely)。\n  * [大型语言模型代码生成的鲁棒性和可靠性研究](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.10335)\n  * [使用SynthID识别AI生成的图像](https:\u002F\u002Fwww.deepmind.com\u002Fblog\u002Fidentifying-ai-generated-images-with-synthid)\n  * [大型语言模型审计：三层方法](https:\u002F\u002Fpapers.ssrn.com\u002Fsol3\u002Fpapers.cfm?abstract_id=4361607)\n  * [解决短期与长期AI风险之争](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002Fs43681-023-00336-y.pdf)\n  * [FraudGPT：ChatGPT的反派化身](https:\u002F\u002Fnetenrich.com\u002Fblog\u002Ffraudgpt-the-villain-avatar-of-chatgpt)\n  * [AI风险——Schneier谈安全](https:\u002F\u002Fwww.schneier.com\u002Fblog\u002Farchives\u002F2023\u002F10\u002Fai-risks.html)\n  * [LLM用于非法目的：威胁、预防措施及漏洞](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.12833)\n  * [AI红队并非解决AI危害的一站式方案：关于利用红队进行AI问责制的建议](https:\u002F\u002Fdatasociety.net\u002Fwp-content\u002Fuploads\u002F2023\u002F10\u002FRecommendations-for-Using-Red-Teaming-for-AI-Accountability-PolicyBrief.pdf)\n  * [人工智能可信度分类体系](https:\u002F\u002Fcltc.berkeley.edu\u002Fwp-content\u002Fuploads\u002F2023\u002F01\u002FTaxonomy_of_AI_Trustworthiness.pdf)\n  * [快速进步时代下的AI风险管理](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.17688)\n  * [谷歌——践行我们对安全可靠AI的承诺](https:\u002F\u002Fblog.google\u002Ftechnology\u002Fsafety-security\u002Fgoogle-ai-security-expansion\u002F)\n  * [进攻性ML手册](https:\u002F\u002Fwiki.offsecml.com\u002FWelcome+to+the+Offensive+ML+Playbook)\n  * [揭秘生成式AI 🤖——一位安全研究员的笔记](https:\u002F\u002Fblog.openthreatresearch.com\u002Fdemystifying-generative-ai-a-security-researchers-notes\u002F)\n  * [GenAI-Security-Adventures](https:\u002F\u002Fgithub.com\u002FOTRF\u002FGenAI-Security-Adventures): 
一个开源项目，分享笔记、演示文稿以及用Jupyter Notebook呈现的多样化实验，旨在帮助你掌握大型语言模型的核心概念，并探索安全与自然语言处理之间引人入胜的交叉点。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_47631f80621c.png)](https:\u002F\u002Fgithub.com\u002FOTRF\u002FGenAI-Security-Adventures)\n  * [AI安全营](https:\u002F\u002Faisafety.camp\u002F)将你与研究负责人联系起来，共同开展项目——看看你的工作如何助力确保未来AI的安全。\n  * [安全AI系统开发指南](https:\u002F\u002Fwww.ncsc.gov.uk\u002Ffiles\u002FGuidelines-for-secure-AI-system-development.pdf)\n  * [人工智能与网络安全的方法。最佳实践报告](https:\u002F\u002Fwww.ccn-cert.cni.es\u002Fes\u002Finformes\u002Finformes-de-buenas-practicas-bp\u002F7192-ccn-cert-bp-30-approach-to-artificial-intelligence-and-cybersecurity\u002Ffile.html)\n  * [斯坦福安全、可靠且值得信赖的AI行政命令14110跟踪表](https:\u002F\u002Fdocs.google.com\u002Fspreadsheets\u002Fd\u002F1xOL4hkQ2pLR-IAs3awIiXjPLmhIeXyE5-giJ5nT-h1M\u002Fedit#gid=142633882)\n  * [Awesome ML Security](https:\u002F\u002Fgithub.com\u002Ftrailofbits\u002Fawesome-ml-security): 一个精选的机器学习安全参考文献、指南、工具等资源列表。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_243af7c11d15.png)](https:\u002F\u002Fgithub.com\u002Ftrailofbits\u002Fawesome-ml-security)\n  * [AI的可预测路径：2024年及以后AI的7个预期发展](https:\u002F\u002Fdanielmiessler.com\u002Fp\u002Fai-predictable-path-7-components-2024)\n  * [人工智能与网络安全](https:\u002F\u002Fwww.ismsforum.es\u002Fficheros\u002Fdescargas\u002Fisms-gt-ia-021707141605.pdf)（西班牙语版）\n  * [Vigil](https:\u002F\u002Fgithub.com\u002Fdeadbits\u002Fvigil-llm): 检测提示注入、越狱以及其他潜在危险的大语言模型输入。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_473e74477e17.png)](https:\u002F\u002Fgithub.com\u002Fdeadbits\u002Fvigil-llm)\n  * [生成式AI模型——对行业和当局的机遇与风险](https:\u002F\u002Fwww.bsi.bund.de\u002FSharedDocs\u002FDownloads\u002FEN\u002FBSI\u002FKI\u002FGenerative_AI_Models.pdf)\n  * 
[安全部署AI系统。部署安全且有弹性的AI系统的最佳实践](https:\u002F\u002Fmedia.defense.gov\u002F2024\u002FApr\u002F15\u002F2003439257\u002F-1\u002F-1\u002F0\u002FCSI-DEPLOYING-AI-SYSTEMS-SECURELY.PDF)\n  * [NIST AI 600-1：人工智能风险管理框架——生成式人工智能配置文件](https:\u002F\u002Fairc.nist.gov\u002Fdocs\u002FNIST.AI.600-1.GenAI-Profile.ipd.pdf)\n  * [:fr: ANSSI：生成式AI系统的安全建议](https:\u002F\u002Fcyber.gouv.fr\u002Fsites\u002Fdefault\u002Ffiles\u002Fdocument\u002FRecommandations_de_s%C3%A9curit%C3%A9_pour_un_syst%C3%A8me_d_IA_g%C3%A9n%C3%A9rative.pdf)\n  * [PyRIT](https:\u002F\u002Fgithub.com\u002FAzure\u002FPyRIT): 生成式AI的Python风险识别工具（PyRIT）是一个开放访问的自动化框架，旨在赋能安全专业人士和机器学习工程师主动发现其生成式AI系统中的风险。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_f12c2fd8bff9.png)](https:\u002F\u002Fgithub.com\u002FAzure\u002FPyRIT)\n  * [OWASP智能体AI](https:\u002F\u002Fgithub.com\u002Fprecize\u002FOWASP-Agentic-AI): 致力于制定OWASP智能体AI十大安全风险（AI代理安全）。[![stars](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_c7b36f8ad01c.png)](https:\u002F\u002Fgithub.com\u002Fprecize\u002FOWASP-Agentic-AI)\n  * [迈向保证安全的AI：确保稳健可靠AI系统的框架](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.06624)\n  * [定义真正的AI风险](https:\u002F\u002Fjosephthacker.com\u002Fai\u002F2024\u002F05\u002F19\u002Fdefining-real-ai-risks.html)\n  * [生成式AI的安全方法](https:\u002F\u002Faws.amazon.com\u002Fes\u002Fai\u002Fgenerative-ai\u002Fsecurity\u002F)\n  * [大型语言模型在网络安全中的应用](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-031-54827-7.pdf)\n  * [嘿，那是我的模型！介绍Chain & Hash：一种LLM指纹识别技术](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.10887)\n  * [生成式AI的滥用：基于真实数据的战术分类与洞察](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.13843)\n  * [AI风险库](https:\u002F\u002Fairisk.mit.edu\u002F)\n  * [重新审视AI红队](https:\u002F\u002Fcset.georgetown.edu\u002Farticle\u002Frevisiting-ai-red-teaming\u002F)\n  * 
[德法两国关于使用AI编程助手的建议](https:\u002F\u002Fwww.bsi.bund.de\u002FSharedDocs\u002FDownloads\u002FEN\u002FBSI\u002FKI\u002FANSSI_BSI_AI_Coding_Assistants.html)\n  * [用于识别大型语言模型输出的可扩展水印技术](https:\u002F\u002Fwww.nature.com\u002Farticles\u002Fs41586-024-08025-4)\n  * [从100款生成式AI产品红队测试中汲取的经验教训](https:\u002F\u002Fairedteamwhitepapers.blob.core.windows.net\u002Flessonswhitepaper\u002FMS_AIRT_Lessons_eBook.pdf)\n  * [LLM红队指南](https:\u002F\u002Fwww.promptfoo.dev\u002Fdocs\u002Fred-team\u002F)\n  * [多智能体安全领域的开放挑战：迈向安全的交互式AI代理系统](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.02077)\n  * [LLM解锁了利用漏洞获利的新途径](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.02077)\n  * [揭露真相：利用CPU缓存侧信道从大型语言模型中泄露令牌](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.00817)\n  * [利用大型语言模型增强自动修复代码漏洞的能力](https:\u002F\u002Fdoi.org\u002F10.1016\u002Fj.engappai.2024.109291)\n  * [你的大脑在ChatGPT面前：使用AI助手完成论文写作任务时的认知债务累积](https:\u002F\u002Farxiv.org\u002Fabs\u002F2506.08872)\n  * [AIRTBench：衡量语言模型中自主AI红队能力](https:\u002F\u002Farxiv.org\u002Fabs\u002F2506.14682)\n  * [Slopsquatting](https:\u002F\u002Fen.wikipedia.org\u002Fwiki\u002FSlopsquatting)\n  * [SP 800-53控制叠加用于保护AI系统概念文件](https:\u002F\u002Fcsrc.nist.gov\u002Fcsrc\u002Fmedia\u002FProjects\u002Fcosais\u002Fdocuments\u002FNIST-Overlays-SecuringAI-concept-paper.pdf)\n  * [双主体规则：AI代理安全的实用方法](https:\u002F\u002Fai.meta.com\u002Fblog\u002Fpractical-ai-agent-security\u002F)\n  * [ETSI EN 304 223 V2.1.1 (2025-12)](https:\u002F\u002Fwww.etsi.org\u002Fdeliver\u002Fetsi_en\u002F304200_304299\u002F304223\u002F02.01.01_60\u002Fen_304223v020101p.pdf)：人工智能安全保障（SAI）；AI模型和系统的基本网络安全要求\n  * [评估AGENTS.md：仓库级别的上下文文件对编码代理有帮助吗？](https:\u002F\u002Farxiv.org\u002Fabs\u002F2602.11988)\n  * [数字人脸操纵与检测手册](https:\u002F\u002Flink.springer.com\u002Fbook\u002F10.1007\u002F978-3-030-87664-7)\n  * [人工智能代理的安全考量](https:\u002F\u002Farxiv.org\u002Fabs\u002F2603.12230)\n\n## 📊 调查研究 📊\n\n  * [组织面临的攻击性人工智能威胁](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.15764)\n  * 
[网络空间中的人工智能：进攻与防御](https:\u002F\u002Fwww.mdpi.com\u002F2073-8994\u002F12\u002F3\u002F410)\n  * [对抗攻击与防御的综述](https:\u002F\u002Fietresearch.onlinelibrary.wiley.com\u002Fdoi\u002F10.1049\u002Fcit2.12028)\n  * [对抗深度学习：图像分类中的对抗攻击与防御机制综述](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9895425)\n  * [机器学习中的隐私攻击综述](https:\u002F\u002Farxiv.org\u002Fabs\u002F2007.07646)\n  * [迈向深度学习系统的安全威胁：综述](https:\u002F\u002Farxiv.org\u002Fabs\u002F1911.12562)\n  * [机器学习的安全威胁与防御技术综述：数据驱动视角](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F8290925)\n  * [SoK：机器学习中的安全与隐私](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F8406613)\n  * [对抗机器学习：人工智能赋能犯罪的兴起及其在垃圾邮件过滤器规避中的作用](https:\u002F\u002Fpapers.ssrn.com\u002Fsol3\u002Fpapers.cfm?abstract_id=4155496)\n  * [基于机器学习系统的威胁、漏洞与控制措施：综述与分类](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.07474)\n  * [对抗攻击与防御：综述](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1810.00069.pdf)\n  * [安全问题：对抗机器学习综述](https:\u002F\u002Farxiv.org\u002Fabs\u002F1810.07339)\n  * [用于恶意软件分析的对抗攻击综述](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2111.08223.pdf)\n  * [图像分类中的对抗机器学习：面向防御者的综述](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2009.03728.pdf)\n  * [模式识别中鲁棒对抗训练的综述：基础、理论与方法](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.14046)\n  * [大型语言模型中的隐私：攻击、防御及未来方向](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.10383)\n\n## 🗣 维护者  🗣\n\n\u003Ctable>\n  \u003Ctr>\n    \u003Ctd align=\"center\">\u003Ca href=\"https:\u002F\u002Fgithub.com\u002FMiguel000\">\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_a406d74a76f6.png\" width=\"150;\" alt=\"\"\u002F>\u003Cbr \u002F>\u003Csub>\u003Cb>Miguel Hernández\u003C\u002Fb>\u003C\u002Fsub>\u003C\u002Fa>\u003C\u002Ftd>\n    \u003Ctd align=\"center\">\u003Ca href=\"https:\u002F\u002Fgithub.com\u002Fjiep\">\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_readme_f64601ca6024.png\" width=\"150px;\" alt=\"\"\u002F>\u003Cbr 
\u002F>\u003Csub>\u003Cb>José Ignacio Escribano\u003C\u002Fb>\u003C\u002Fsub>\u003C\u002Fa>\u003C\u002Ftd>\n  \u003C\u002Ftr>\n\u003C\u002Ftable>\n\n## ©️ 许可证 ©️\n\n[![许可证：CC BY-SA 4.0](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FLicense-CC%20BY--SA%204.0-lightgrey.svg)](https:\u002F\u002Fcreativecommons.org\u002Flicenses\u002Fby-sa\u002F4.0\u002F)\n\n* [知识共享署名-相同方式共享4.0国际许可协议](LICENSE.txt)","# Offensive AI Compilation 快速上手指南\n\n`offensive-ai-compilation` 并非一个可执行的软件工具或代码库，而是一个**精选的资源列表（Curated List）**，旨在汇总关于“攻击性人工智能”（Offensive AI）的研究论文、工具和防御策略。因此，本项目**无需安装环境或运行命令**。\n\n本指南将指导开发者如何高效利用该资源库进行安全研究、渗透测试及模型防御。\n\n## 📁 环境准备\n\n由于本项目本质上是文档和资源索引，对系统环境无特殊要求：\n\n*   **操作系统**：任意支持现代浏览器的系统（Windows, macOS, Linux）。\n*   **前置依赖**：\n    *   Web 浏览器（推荐 Chrome, Firefox 或 Edge）。\n    *   Git（可选，用于克隆仓库到本地以便离线查阅或贡献）。\n    *   学术资源访问权限（部分链接指向 arXiv 或 ACM，国内用户可能需要配置学术加速或使用镜像站）。\n\n## 📥 获取与访问步骤\n\n你可以通过以下两种方式访问该资源列表：\n\n### 方式一：在线直接浏览（推荐）\n直接访问项目的 GitHub 页面查看整理好的目录和链接：\n1. 打开浏览器访问项目主页（通常在 GitHub 搜索 `offensive-ai-compilation`）。\n2. 利用页面右侧或顶部的目录导航（Contents）快速定位感兴趣的主题（如对抗样本、模型窃取、后门攻击等）。\n\n### 方式二：克隆到本地\n如果你希望离线阅读或参与贡献，可以使用 Git 克隆仓库：\n\n```bash\ngit clone https:\u002F\u002Fgithub.com\u002Fricardodominguezgaspar\u002Foffensive-ai-compilation.git\ncd offensive-ai-compilation\n```\n\n*注：请将上述 URL 替换为该项目实际的仓库地址。*\n\n## 🚀 基本使用指南\n\n本项目的核心用法是**按主题检索资源**。以下是针对中国开发者的常用场景指引：\n\n### 1. 研究对抗机器学习（Adversarial Machine Learning）\n如果你需要研究如何攻击或防御 AI 模型，请查阅 `🚫 Abuse` -> `🧠 Adversarial Machine Learning` 章节。\n*   **模型窃取 (Extraction)**：查找如何通过 API 请求还原模型参数。重点关注 `White-box` 和 `Black-box` 攻击案例及相关论文（如 *Knockoff Nets*）。\n*   **推理攻击 (Inversion)**：了解如何从模型中反推训练数据（成员推断、属性推断）。\n*   **投毒与后门 (Poisoning & Backdoors)**：研究如何在训练阶段植入恶意逻辑。\n\n### 2. 
寻找实战工具（Tools）\n在 `🛠️ Tools` 子章节中，你可以找到具体的开源攻击框架链接，例如：\n*   **ART (Adversarial Robustness Toolbox)**: IBM 开发的全面对抗样本库。\n*   **Cleverhans**: 经典的对抗样本基准库。\n*   **BackdoorBox**: 专门用于后门攻击与防御的 Python 工具箱。\n\n> **💡 国内加速建议**：\n> 文中提到的工具（如 ART, Cleverhans）通常托管在 GitHub 上。国内开发者在安装这些具体工具时，建议使用国内镜像源加速 pip 安装：\n> ```bash\n> pip install adversarial-robustness-toolbox -i https:\u002F\u002Fpypi.tuna.tsinghua.edu.cn\u002Fsimple\n> ```\n\n### 3. 生成式 AI 安全（Generative AI）\n针对当下热门的 AIGC 安全，请查阅 `👨‍🎤 Generative AI` 章节：\n*   **音频\u002F图像\u002F视频\u002F文本**：分别列出了伪造工具（Deepfake）、应用场景及检测方案（Detection）。\n*   适合从事内容风控、多模态模型安全的开发人员参考。\n\n### 4. 渗透测试与恶意软件（Pentesting & Malware）\n在 `🔧 Use` 章节下，你可以找到将 AI 应用于传统安全领域的资源：\n*   自动化渗透测试脚本。\n*   基于 AI 的恶意软件生成与免杀技术。\n*   AI 辅助的 OSINT（开源情报收集）和钓鱼攻击模拟。\n\n## 📝 总结\n`offensive-ai-compilation` 是你进入 AI 安全领域的**地图**。\n1.  **确定目标**：明确你是要研究攻击（红队）还是构建防御（蓝队）。\n2.  **定位章节**：通过目录找到对应的攻击类型（如 Evasion, Poisoning）。\n3.  **深入阅读**：点击链接阅读原始论文（arXiv）或下载相关工具代码进行复现。\n\n*注意：本资源库包含大量攻击性技术内容，请仅将其用于合法的安全研究、教育目的或授权的红队演练，严格遵守相关法律法规。*","某金融科技公司安全团队正在对内部部署的信贷审批 AI 模型进行红队测试，旨在评估其抗攻击能力并防止核心算法泄露。\n\n### 没有 offensive-ai-compilation 时\n- **资源搜集零散低效**：团队成员需花费数天在 GitHub、arXiv 和各类博客中手动搜索对抗样本生成、模型提取等攻击技术，难以确认资料的时效性与权威性。\n- **防御策略缺乏系统性**：面对潜在的“模型窃取”风险，团队仅知道概念，却找不到如 PRADA 或自适应误导等具体的防御架构实现方案，导致防护方案停留在理论层面。\n- **工具选型盲目**：在寻找用于模拟攻击的开源工具（如 ART 或 Cleverhans）时，因缺乏对比指引，容易集成过时或不兼容的库，增加了测试环境的搭建成本。\n- **知识盲区明显**：对于数据投毒、推理反转等高级攻击手段了解不足，无法全面覆盖测试场景，留下了严重的安全隐患。\n\n### 使用 offensive-ai-compilation 后\n- **一站式获取权威资源**：团队直接利用该清单中分类整理的“对抗性机器学习”板块，几分钟内即可锁定最新的攻击论文与案例研究，大幅缩短调研周期。\n- **精准落地防御措施**：针对模型提取风险，团队迅速参考清单中提供的差分隐私、集成学习及特定防御架构链接，快速制定了可落地的加固方案。\n- **高效集成测试工具**：通过清单推荐的经过验证的工具列表，团队直接部署了成熟的攻击框架进行模拟演练，确保了测试环境的专业性与稳定性。\n- **全覆盖风险排查**：借助清单对攻击类型（提取、反转、投毒、规避）的系统化梳理，团队构建了完整的测试矩阵，有效识别并修复了此前被忽视的逻辑漏洞。\n\noffensive-ai-compilation 将分散的攻防知识转化为结构化的行动指南，帮助安全团队从“盲目摸索”转变为“精准防御”，显著提升了 AI 系统的安全性评估效率。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjiep_offensive-ai-compilation_bf8200d7.png","jiep","José Ignacio 
Escribano","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002Fjiep_f64601ca.png",null,"Spain","https:\u002F\u002Fgithub.com\u002Fjiep",[80,84],{"name":81,"color":82,"percentage":83},"HTML","#e34c26",86.4,{"name":85,"color":86,"percentage":87},"SCSS","#c6538c",13.6,1363,158,"2026-04-18T05:24:08","CC-BY-SA-4.0",1,"","未说明",{"notes":96,"python":94,"dependencies":97},"该项目是一个资源列表（Awesome List），整理了关于攻击性人工智能（Offensive AI）的论文、工具和链接，本身不是一个可执行的软件工具，因此没有具体的运行环境、依赖库或硬件需求。用户需根据列表中提到的具体子项目（如 ART, Cleverhans, BackdoorBox 等）查阅其各自的文档以获取环境要求。",[],[14],[100,101,102,103,104],"offensive-ai","adversarial-machine-learning","ai-security","artificial-intelligence","compilation","2026-03-27T02:49:30.150509","2026-04-20T04:06:36.819381",[],[]]