[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-magpie-align--magpie":3,"tool-magpie-align--magpie":61},[4,18,26,36,44,53],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":17},4358,"openclaw","openclaw\u002Fopenclaw","OpenClaw 是一款专为个人打造的本地化 AI 助手，旨在让你在自己的设备上拥有完全可控的智能伙伴。它打破了传统 AI 助手局限于特定网页或应用的束缚，能够直接接入你日常使用的各类通讯渠道，包括微信、WhatsApp、Telegram、Discord、iMessage 等数十种平台。无论你在哪个聊天软件中发送消息，OpenClaw 都能即时响应，甚至支持在 macOS、iOS 和 Android 设备上进行语音交互，并提供实时的画布渲染功能供你操控。\n\n这款工具主要解决了用户对数据隐私、响应速度以及“始终在线”体验的需求。通过将 AI 部署在本地，用户无需依赖云端服务即可享受快速、私密的智能辅助，真正实现了“你的数据，你做主”。其独特的技术亮点在于强大的网关架构，将控制平面与核心助手分离，确保跨平台通信的流畅性与扩展性。\n\nOpenClaw 非常适合希望构建个性化工作流的技术爱好者、开发者，以及注重隐私保护且不愿被单一生态绑定的普通用户。只要具备基础的终端操作能力（支持 macOS、Linux 及 Windows WSL2），即可通过简单的命令行引导完成部署。如果你渴望拥有一个懂你",349277,3,"2026-04-06T06:32:30",[13,14,15,16],"Agent","开发框架","图像","数据工具","ready",{"id":19,"name":20,"github_repo":21,"description_zh":22,"stars":23,"difficulty_score":10,"last_commit_at":24,"category_tags":25,"status":17},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,"2026-04-05T11:01:52",[14,15,13],{"id":27,"name":28,"github_repo":29,"description_zh":30,"stars":31,"difficulty_score":32,"last_commit_at":33,"category_tags":34,"status":17},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",151314,2,"2026-04-11T23:32:58",[14,13,35],"语言模型",{"id":37,"name":38,"github_repo":39,"description_zh":40,"stars":41,"difficulty_score":32,"last_commit_at":42,"category_tags":43,"status":17},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",108322,"2026-04-10T11:39:34",[14,15,13],{"id":45,"name":46,"github_repo":47,"description_zh":48,"stars":49,"difficulty_score":32,"last_commit_at":50,"category_tags":51,"status":17},6121,"gemini-cli","google-gemini\u002Fgemini-cli","gemini-cli 是一款由谷歌推出的开源 AI 命令行工具，它将强大的 Gemini 大模型能力直接集成到用户的终端环境中。对于习惯在命令行工作的开发者而言，它提供了一条从输入提示词到获取模型响应的最短路径，无需切换窗口即可享受智能辅助。\n\n这款工具主要解决了开发过程中频繁上下文切换的痛点，让用户能在熟悉的终端界面内直接完成代码理解、生成、调试以及自动化运维任务。无论是查询大型代码库、根据草图生成应用，还是执行复杂的 Git 操作，gemini-cli 都能通过自然语言指令高效处理。\n\n它特别适合广大软件工程师、DevOps 
人员及技术研究人员使用。其核心亮点包括支持高达 100 万 token 的超长上下文窗口，具备出色的逻辑推理能力；内置 Google 搜索、文件操作及 Shell 命令执行等实用工具；更独特的是，它支持 MCP（模型上下文协议），允许用户灵活扩展自定义集成，连接如图像生成等外部能力。此外，个人谷歌账号即可享受免费的额度支持，且项目基于 Apache 2.0 协议完全开源，是提升终端工作效率的理想助手。",100752,"2026-04-10T01:20:03",[52,13,15,14],"插件",{"id":54,"name":55,"github_repo":56,"description_zh":57,"stars":58,"difficulty_score":32,"last_commit_at":59,"category_tags":60,"status":17},4721,"markitdown","microsoft\u002Fmarkitdown","MarkItDown 是一款由微软 AutoGen 团队打造的轻量级 Python 工具，专为将各类文件高效转换为 Markdown 格式而设计。它支持 PDF、Word、Excel、PPT、图片（含 OCR）、音频（含语音转录）、HTML 乃至 YouTube 链接等多种格式的解析，能够精准提取文档中的标题、列表、表格和链接等关键结构信息。\n\n在人工智能应用日益普及的今天，大语言模型（LLM）虽擅长处理文本，却难以直接读取复杂的二进制办公文档。MarkItDown 恰好解决了这一痛点，它将非结构化或半结构化的文件转化为模型“原生理解”且 Token 效率极高的 Markdown 格式，成为连接本地文件与 AI 分析 pipeline 的理想桥梁。此外，它还提供了 MCP（模型上下文协议）服务器，可无缝集成到 Claude Desktop 等 LLM 应用中。\n\n这款工具特别适合开发者、数据科学家及 AI 研究人员使用，尤其是那些需要构建文档检索增强生成（RAG）系统、进行批量文本分析或希望让 AI 助手直接“阅读”本地文件的用户。虽然生成的内容也具备一定可读性，但其核心优势在于为机器",93400,"2026-04-06T19:52:38",[52,14],{"id":62,"github_repo":63,"name":64,"description_en":65,"description_zh":66,"ai_summary_zh":66,"readme_en":67,"readme_zh":68,"quickstart_zh":69,"use_case_zh":70,"hero_image_url":71,"owner_login":72,"owner_name":73,"owner_avatar_url":74,"owner_bio":75,"owner_company":76,"owner_location":76,"owner_email":76,"owner_twitter":76,"owner_website":77,"owner_url":78,"languages":79,"stars":92,"forks":93,"last_commit_at":94,"license":95,"difficulty_score":10,"env_os":96,"env_gpu":97,"env_ram":96,"env_deps":98,"category_tags":103,"github_topics":104,"view_count":32,"oss_zip_url":76,"oss_zip_packed_at":76,"status":17,"created_at":118,"updated_at":119,"faqs":120,"releases":121},6727,"magpie-align\u002Fmagpie","magpie","[ICLR 2025] Alignment Data Synthesis from Scratch by Prompting Aligned LLMs with Nothing. Your efficient and high-quality synthetic data generation pipeline!","Magpie 是一个专为大语言模型打造的高效合成数据生成流水线，旨在从零开始构建高质量的“对齐数据”。在传统方法中，生成训练数据往往依赖繁琐的提示词工程或需要人工预设种子问题，而 Magpie 独创性地解决了这一痛点。它无需任何外部输入，直接利用已对齐大模型自身的预查询模板（pre-query templates），就能自动激发出完整的用户提问及对应的模型回答。\n\n这种“无中生有”的技术亮点，不仅大幅降低了数据准备门槛，还确保了生成数据在风格和逻辑上与目标模型高度一致，非常适合 AI 研究人员和开发者使用。无论是希望微调专属模型的企业团队，还是致力于探索模型对齐机制的学术研究者，都能通过 Magpie 快速获取大规模、多样化的训练数据集。目前，该项目已基于 Llama、Qwen、Gemma 等主流模型发布了多个百万级数据集，并开源了性能优异的对齐模型，为社区提供了宝贵的资源支持。如果你正在寻找一种简单、低成本且高质量的方式来扩充模型训练数据，Magpie 值得尝试。","\u003C!-- # 🐦 Magpie -->\n\n[![Magpie](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fmagpie-align_magpie_readme_ce820bf37bdb.png)](https:\u002F\u002Fmagpie-align.github.io\u002F)\n\n[![arXiv](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FarXiv-paper-b31b1b.svg)](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.08464) [![License: MIT](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FLicense-MIT-yellow.svg)](https:\u002F\u002Fopensource.org\u002Flicenses\u002FMIT) [![Data License](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FData%20License-CC%20By%20NC%204.0-red.svg)](https:\u002F\u002Fhuggingface.co\u002FMagpie-Align) [![Spaces](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F🤗-Open%20in%20Spaces-blue)](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002Fdavanstrien\u002Fmagpie)\n\nThis is the official repository for ICLR 2025 paper \"[Alignment Data Synthesis from Scratch by Prompting Aligned LLMs with Nothing](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.08464)\". Magpie generates high-quality alignment data by prompting aligned LLMs with their pre-query templates. Unlike many existing synthetic data generation methods, Magpie doesn't rely on prompt engineering or seed questions for generating synthetic data. 
Instead, it uses the prompt template of an aligned LLM to generate both the user query and an LLM response.\n\n- 🤗 [**Huggingface (Models and Datasets)**](https:\u002F\u002Fhuggingface.co\u002FMagpie-Align)\n- 🧭 [**Dataset Navigation**](navigation.md)\n- 🕸️ [**Website**](https:\u002F\u002Fmagpie-align.github.io\u002F)\n- 📄 [**Technical Report**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.08464)\n- 🤗 [**Magpie Demo**](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002Fdavanstrien\u002Fmagpie) (Thanks a lot for the implementation from @davanstrien!)\n- 🐦 [**Chat with Magpie**](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002Fflydust\u002FChat-with-Magpie)\n\n## 🐦 News\n- [2025\u002F01\u002F22] Magpie paper is accepted by ICLR 2025! \n- [2025\u002F01\u002F09] Magpie Reasoning V2 dataset is out! [250K](https:\u002F\u002Fhuggingface.co\u002Fcollections\u002FMagpie-Align\u002Fmagpie-reasoning-datasets-67790a13b91035bc42693885) from Llama, Skywork-o1 and QwQ! This time, we focus on CoT 🤯\n- [2025\u002F01\u002F01] Magpie Llama-3.3 dataset is out! [1M](https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002FMagpie-Align\u002FMagpie-Llama-3.3-Pro-1M-v0.1) from Llama-3.3-70B-Instruct! Happy New Year!\n- [2024\u002F10\u002F20] Magpie Qwen2.5 dataset is out! [1M](https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002FMagpie-Align\u002FMagpie-Qwen2.5-Pro-1M-v0.1) from Qwen2.5 72B!\n- [2024\u002F09\u002F17] Ship two new models with SOTA performance: 𝙼𝚊𝚐𝚙𝚒𝚎𝙻𝙼-𝙲𝚑𝚊𝚝 (4B & 8B)! See collection [here](https:\u002F\u002Fhuggingface.co\u002Fcollections\u002FMagpie-Align\u002Fmagpielm-66e2221f31fa3bf05b10786a)!\n- [2024\u002F08\u002F19] Three preference optimization datasets, [Magpie-Air-DPO-100K-v0.1](https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002FMagpie-Align\u002FMagpie-Air-DPO-100K-v0.1), [Magpie-Pro-DPO-100K-v0.1](https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002FMagpie-Align\u002FMagpie-Pro-DPO-100K-v0.1), and [Magpie-Llama-3.1-Pro-DPO-100K-v0.1](https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002FMagpie-Align\u002FMagpie-Llama-3.1-Pro-DPO-100K-v0.1) are out! \n- [2024\u002F07\u002F25] Magpie Llama-3.1 dataset is out! [1M](https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002FMagpie-Align\u002FMagpie-Llama-3.1-Pro-1M-v0.1) from Llama-3.1-70B-Instruct! More friendly license compared with Llama-3 😃!\n- [2024\u002F07\u002F21] Magpie Gemma2 dataset is out! [534K](https:\u002F\u002Fhuggingface.co\u002Fcollections\u002FMagpie-Align\u002Fmagpie-gemma2-datasets-669da6aff21b09fdcecbd6ea) from Gemma-2-27b-it!\n- [2024\u002F07\u002F19] [Llama-3-8B-Magpie-Align-v0.3](https:\u002F\u002Fhuggingface.co\u002FMagpie-Align\u002FLlama-3-8B-Magpie-Align-v0.3) is out with enhanced Chinese question-answering ability, thanks to our new [Chinese instruction dataset](https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002FMagpie-Align\u002FMagpie-Qwen2-Pro-200K-Chinese)!\n- [2024\u002F07\u002F14] [Llama-3-8B-Magpie-Align-v0.2](https:\u002F\u002Fhuggingface.co\u002FMagpie-Align\u002FLlama-3-8B-Magpie-Align-v0.2) is out with enhanced reasoning ability, thanks to our new [reasoning booster dataset](https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002FMagpie-Align\u002FMagpie-Reasoning-150K)!\n- [2024\u002F07\u002F04] Magpie Qwen2 dataset is out! 
[1M](https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002FMagpie-Align\u002FMagpie-Qwen2-Pro-1M-v0.1) from Qwen2 72B and [3M](https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002FMagpie-Align\u002FMagpie-Qwen2-Air-3M-v0.1) from Qwen2 7B.\n- [2024\u002F07\u002F03] 🏆 Our open aligned model, [Llama-3-8B-Magpie-Align-v0.1](https:\u002F\u002Fhuggingface.co\u002FMagpie-Align\u002FLlama-3-8B-Magpie-Align-v0.1) is out! It is 🏆 the **best \u003C30B Model** in [AI2 WildBench Leaderboard](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002Fallenai\u002FWildBench)! Even better than the official [Meta-Llama-3-8B-Instruct](https:\u002F\u002Fhuggingface.co\u002Fmeta-llama\u002FMeta-Llama-3-8B-Instruct) model!\n- [2024\u002F06\u002F24] Magpie Phi 3 dataset is out! [1M](https:\u002F\u002Fhuggingface.co\u002Fcollections\u002FMagpie-Align\u002Fmagpie-phi3-667a7a45f1a406cd61685d64) from Phi 3 Medium.\n- [2024\u002F06\u002F12] Magpie Llama-3 dataset is out! [1M](https:\u002F\u002Fhuggingface.co\u002Fcollections\u002FMagpie-Align\u002Fmagpie-pro-6666b0e713e5f5c09554876f) from Llama-3 70B and [3M](https:\u002F\u002Fhuggingface.co\u002Fcollections\u002FMagpie-Align\u002Fmagpie-air-6666b11a32021655a27f86c0) from Llama-3 8B.\n- [2024\u002F06\u002F12] [Magpie technical report]((https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.08464)) is out! Let's make high-quality alignment data open for all!\n\n## Magpie Supports\n\nCurrently, Magpie has been tested on the **Llama-3**, **Qwen2**, **Phi 3** and **Gemma-2** series. Please [submit an issue](https:\u002F\u002Fgithub.com\u002Fmagpie-align\u002Fmagpie\u002Fissues\u002Fnew) for more model support.\n\n|Model Family | Magpie | Magpie Scripts | Datasets | Size |\n|-------------|:------:|:-------|:-------|:-------|\n| [Llama 3.3](https:\u002F\u002Fhuggingface.co\u002Fcollections\u002Fmeta-llama\u002Fllama-33-67531d5c405ec5d08a852000)     | ✅ | [70B](scripts\u002Fmagpie-llama3.3-70b.sh) | [70B](https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002FMagpie-Align\u002FMagpie-Llama-3.3-Pro-1M-v0.1) | 1M |\n| [Llama 3.1](https:\u002F\u002Fhuggingface.co\u002Fcollections\u002Fmeta-llama\u002Fllama-31-669fc079a0c406a149a5738f)     | ✅ * | [8B](scripts\u002Fmagpie-llama3.1-8b.sh),[70B](scripts\u002Fmagpie-llama3.1-70b.sh) | [70B](https:\u002F\u002Fhuggingface.co\u002Fcollections\u002FMagpie-Align\u002Fmagpie-llama31-datasets-66a45ed727be07f53c8ff294),[405B(Argilla)](https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002Fargilla\u002Fmagpie-ultra-v0.1) | 1M |\n| [Llama 3](https:\u002F\u002Fhuggingface.co\u002Fcollections\u002Fmeta-llama\u002Fmeta-llama-3-66214712577ca38149ebb2b6)     | ✅ | [8B](scripts\u002Fmagpie-llama3-8b.sh),[70B](scripts\u002Fmagpie-llama3-70b.sh) | [8B](https:\u002F\u002Fhuggingface.co\u002Fcollections\u002FMagpie-Align\u002Fmagpie-air-6666b11a32021655a27f86c0),[70B](https:\u002F\u002Fhuggingface.co\u002Fcollections\u002FMagpie-Align\u002Fmagpie-pro-6666b0e713e5f5c09554876f) | 3M + 1M |\n| [Qwen2.5](https:\u002F\u002Fhuggingface.co\u002Fcollections\u002FQwen\u002Fqwen25-66e81a666513e518adb90d9e)     | ✅ | [3B](scripts\u002Fmagpie-qwen2.5-3b.sh),[7B](scripts\u002Fmagpie-qwen2.5-7b.sh),[14B](scripts\u002Fmagpie-qwen2.5-14b.sh),[32B](scripts\u002Fmagpie-qwen2.5-32b.sh),[72B](scripts\u002Fmagpie-qwen2.5-72b.sh) | [72B](https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002FMagpie-Align\u002FMagpie-Qwen2.5-Pro-1M-v0.1) | 1M | \n| [Qwen2](https:\u002F\u002Fhuggingface.co\u002Fcollections\u002FQwen\u002Fqwen2-6659360b33528ced941e557f)     | ✅ | 
[7B](scripts\u002Fmagpie-qwen2-7b.sh),[72B](scripts\u002Fmagpie-qwen2-72b.sh),[Math 7B](scripts\u002Fmagpie-qwen2-math-7b.sh) | [7B](https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002FMagpie-Align\u002FMagpie-Qwen2-Air-3M-v0.1),[72B](https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002FMagpie-Align\u002FMagpie-Qwen2-Pro-1M-v0.1) | 3M + 1M |\n| [Phi 3](https:\u002F\u002Fhuggingface.co\u002Fcollections\u002Fmicrosoft\u002Fphi-3-6626e15e9585a200d2d761e3)     | ✅ | [mini](scripts\u002Fmagpie-phi3mini.sh),[small](scripts\u002Fmagpie-phi3small.sh),[medium](scripts\u002Fmagpie-phi3medium.sh) | [medium](https:\u002F\u002Fhuggingface.co\u002Fcollections\u002FMagpie-Align\u002Fmagpie-phi3-667a7a45f1a406cd61685d64) | 1M |\n| [Gemma-2](https:\u002F\u002Fhuggingface.co\u002Fcollections\u002Fgoogle\u002Fgemma-2-release-667d6600fd5220e7b967f315)    | ✅ ** | [9B](magpie-gemma2-9b.sh),[27B](scripts\u002Fmagpie-gemma2-27b.sh) | [27B](https:\u002F\u002Fhuggingface.co\u002Fcollections\u002FMagpie-Align\u002Fmagpie-gemma2-datasets-669da6aff21b09fdcecbd6ea) | 534K |\n| [Gemma-1.1](https:\u002F\u002Fhuggingface.co\u002Fcollections\u002Fgoogle\u002Fgemma-release-65d5efbccdbb8c4202ec078b)    | ⭕️ | [7B](scripts\u002Fmagpie-gemma7b.sh)\n| [Llama 2](https:\u002F\u002Fhuggingface.co\u002Fcollections\u002Fmeta-llama\u002Fllama-2-family-661da1f90a9d678b6f55773b)   | ⭕️ | [7B](scripts\u002Fmagpie-llama2-7b.sh),[70B](scripts\u002Fmagpie-llama2-70b.sh)\n| [Vicuna](https:\u002F\u002Flmsys.org\u002Fblog\u002F2023-03-30-vicuna\u002F)   | ⭕️ | [7B](scripts\u002Fmagpie-vicuna-7b.sh)\n| [Mistral](https:\u002F\u002Fhuggingface.co\u002Fmistralai\u002FMistral-7B-Instruct-v0.3)   | ⭕️ | [7B](scripts\u002Fmagpie-mistral7b.sh)\n| [Yi](https:\u002F\u002Fhuggingface.co\u002Fcollections\u002F01-ai\u002Fyi-15-2024-05-663f3ecab5f815a3eaca7ca8)    | ⭕️ | [34B](scripts\u002Fmagpie-yi34b.sh)\n| [DeepSeek Coder](https:\u002F\u002Fhuggingface.co\u002Fcollections\u002Fdeepseek-ai\u002Fdeepseekcoder-v2-666bf4b274a5f556827ceeca) | ⭕️ | [Coder V2 Lite](https:\u002F\u002Fgithub.com\u002Fmagpie-align\u002Fmagpie\u002Fblob\u002Fmain\u002Fscripts\u002Fmagpie-deepseek-coderv2-lite.sh)  \n\n- ✅: It works great! (**\\*** Apply a logits processor to surpress markdown; **\\*\\*** Apply a [filter](exp\u002Fstr_utils.py) before generating responses.)\n- ⭕️: It works! We can get something interesting, but we may need to design an additional logit processor and\u002For a filter.\n- ❌: Not work.\n- ❓: Untested.\n\nThe navigation of all available Magpie datasets can be found [here](navigation.md).\n\nWe hope Magpie can contribute to the democratization of AI with enhanced transparency of model alignment processes!\n\n## Abstract\n\u003Cdetails>\u003Csummary>Click Here\u003C\u002Fsummary>\nHigh-quality instruction data is critical for aligning large language models (LLMs). Although some models, such as Llama-3-Instruct, have open weights, their alignment data remain private, which hinders the democratization of AI. High human labor costs and a limited, predefined scope for prompting prevent existing open-source data creation methods from scaling effectively, potentially limiting the diversity and quality of public alignment datasets. Is it possible to synthesize high-quality instruction data at scale by extracting it directly from an aligned LLM? We present a self-synthesis method for generating large-scale alignment data named Magpie. 
Our key observation is that aligned LLMs like Llama-3-Instruct can generate a user query when we input only the left-side templates up to the position reserved for user messages, thanks to their auto-regressive nature. We use this method to prompt Llama-3-Instruct and generate 4 million instructions along with their corresponding responses. We perform a comprehensive analysis of the extracted data and select 300K high-quality instances. To compare Magpie data with other public instruction datasets, we fine-tune Llama-3-8B-Base with each dataset and evaluate the performance of the fine-tuned models. Our results indicate that in some tasks, models fine-tuned with Magpie perform comparably to the official Llama-3-8B-Instruct, despite the latter being enhanced with 10 million data points through supervised fine-tuning (SFT) and subsequent feedback learning. We also show that using Magpie solely for SFT can surpass the performance of previous public datasets utilized for both SFT and preference optimization, such as direct preference optimization with UltraFeedback. This advantage is evident on alignment benchmarks such as AlpacaEval, ArenaHard, and WildBench.\n\u003C\u002Fdetails>\u003Cbr>\n\n## Overview\n\n![Overview](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fmagpie-align_magpie_readme_4794709f18e7.png)\n\n## Installation\n\n**Build environment**\n```\ngit clone https:\u002F\u002Fgithub.com\u002Fmagpie-align\u002Fmagpie.git\ncd magpie\nconda create -n magpie python=3.10 -y\nconda activate magpie\npip install -r requirements.txt\n```\n\n**Get access to Llama-3 models from 🤗 Huggingface**\n\nYou can apply for Llama-3 model access [here](https:\u002F\u002Fhuggingface.co\u002Fmeta-llama\u002FMeta-Llama-3-8B-Instruct). To log in from the terminal, enter:\n```\nhuggingface-cli login\n```\nthen enter your Huggingface private key beginning with \"hf_\".\n\n## Toy Example\n\n**Play with Jupyter Notebook**\n\nThe toy example can be found in [`demo.ipynb`](demo.ipynb). Have fun! \n\n\u003Ca target=\"_blank\" href=\"https:\u002F\u002Fcolab.research.google.com\u002Fgithub\u002Fmagpie-align\u002Fmagpie\u002Fblob\u002Fmain\u002Fdemo.ipynb\">\n  \u003Cimg src=\"https:\u002F\u002Fcolab.research.google.com\u002Fassets\u002Fcolab-badge.svg\" alt=\"Open In Colab\"\u002F>\n\u003C\u002Fa>\n\n## Batched SFT Data Generation\nWe use Llama-3-8B-Instruct as an example to demonstrate the batched SFT data generation process. To run batched generation, you can simply run:\n```\ncd scripts\nbash magpie.sh\n```\nThe script will generate both instructions and responses in the data folder. It has been tested on an RTX 4090 24G GPU. If you are using GPUs with less memory, consider implementing [quantization](https:\u002F\u002Fdocs.vllm.ai\u002Fen\u002Flatest\u002Fquantization\u002Ffp8.html).\n\nWe also provide scripts for other models in the [`scripts`](scripts) folder. You can use [this](#magpie-supports) navigation to find specific Magpie scripts. Note that for model sizes greater than 8B, you may need 4*A100 GPUs to run the scripts.\n\n### Batched Multi-turn Data Generation \\[Optional\\]\nAfter generating instruction-response pairs, you can extend them to multi-turn conversations. To do so, simply run the following command:\n```\nbash magpie-multi-turn.sh ***_ins_res.json\n```\nwhere `***_ins_res.json` is the single-turn instruction-response pairs generated in the previous step.\n\n## Dataset Filtering\n### 1. 
Tagging\nTo tag the generated instruction-response pairs, you can run:\n```\ncd scripts\nbash unitag.sh ***_ins_res.json all\n```\nThis script will automatically generate quality, difficulty, task category, safety, reward, and language for the generated dataset. You can also generate one tag at a time. For example, if you just want to generate the safety label using device 0, you can run:\n```\ncd scripts\nbash unitag.sh ***_ins_res.json safety 0\n```\n### 2. Data Concatenation and Converting\nYou may generate datasets with different generation configurations. We provide a Jupyter notebook [here](data_sft\u002Fdata_concatenation.ipynb) for concatenating all datasets and converting them to ShareGPT format, which is fully supported by [Axolotl](https:\u002F\u002Fgithub.com\u002FOpenAccess-AI-Collective\u002Faxolotl) for fine-tuning.\n\n### 3. Removing Repetition\nOnce you have a full dataset converted to ShareGPT format, you can calculate the minimum neighbor distance of each instruction and remove repetitions. To do so, run:\n```\ncd exp\npython gen_dis.py --input_file ***_sharegpt.jsonl\n```\nwhere `***_sharegpt.jsonl` is the dataset path obtained in the previous step. The Python script will take care of building the FAISS index and calculating the minimum distance. \n\n### 4. Design and Apply Your Filter\nWe provide a Jupyter notebook [here](data_sft\u002Fdata_filter.ipynb) for simple filtering. You can adjust the filtering parameters to design and apply your own filter based on your needs.\n\n## Preference Data Generation\n\nTo generate preference data, first prepare filtered instructions following the steps outlined above. For the expected format, please refer to our example [here](data_po\u002Fexample_instructions.jsonl).\n\nNext, please use our provided scripts [here](scripts\u002Fmagpie_example_po.sh) to generate multiple responses and compute their corresponding rewards. 
Finally, you can process the data and upload it to Huggingface using [this Jupyter notebook](data_po\u002Fprocess_po.ipynb).\n\n## Fine-tuning\nPlease take a look at the [recipes](recipes\u002F) directory for instructions and our Magpie model recipes.\n\n## Citation\n\nIf you find the model, data, or code useful, please cite our paper 🤩:\n```\n@article{xu2024magpie,\n  title={Magpie: Alignment Data Synthesis from Scratch by Prompting Aligned LLMs with Nothing},\n  author={Zhangchen Xu and Fengqing Jiang and Luyao Niu and Yuntian Deng and Radha Poovendran and Yejin Choi and Bill Yuchen Lin},\n  journal={ArXiv},\n  year={2024},\n  volume={abs\u002F2406.08464},\n  url={https:\u002F\u002Fapi.semanticscholar.org\u002FCorpusID:270391432}\n}\n```\n\n## Star History\n\n[![Star History Chart](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fmagpie-align_magpie_readme_803099aa9898.png)](https:\u002F\u002Fstar-history.com\u002F#magpie-align\u002Fmagpie&Date)\n","\u003C!-- # 🐦 喜鹊 -->\n\n[![喜鹊](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fmagpie-align_magpie_readme_ce820bf37bdb.png)](https:\u002F\u002Fmagpie-align.github.io\u002F)\n\n[![arXiv](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FarXiv-paper-b31b1b.svg)](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.08464) [![License: MIT](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FLicense-MIT-yellow.svg)](https:\u002F\u002Fopensource.org\u002Flicenses\u002FMIT) [![数据许可](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FData%20License-CC%20By%20NC%204.0-red.svg)](https:\u002F\u002Fhuggingface.co\u002FMagpie-Align) [![Spaces](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F🤗-Open%20in%20Spaces-blue)](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002Fdavanstrien\u002Fmagpie)\n\n这是ICLR 2025论文“通过使用对齐大模型的预查询模板从零开始合成对齐数据”（https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.08464）的官方仓库。喜鹊通过使用对齐大模型的预查询模板来提示这些模型，从而生成高质量的对齐数据。与许多现有的合成数据生成方法不同，喜鹊不依赖于提示工程或种子问题来生成合成数据。相反，它直接利用对齐大模型的提示模板来同时生成用户查询和大模型的回答。\n\n- 🤗 [**Huggingface（模型和数据集）**](https:\u002F\u002Fhuggingface.co\u002FMagpie-Align)\n- 🧭 [**数据集导航**](navigation.md)\n- 🕸️ [**官网**](https:\u002F\u002Fmagpie-align.github.io\u002F)\n- 📄 [**技术报告**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.08464)\n- 🤗 [**喜鹊演示**](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002Fdavanstrien\u002Fmagpie)（非常感谢@davanstrien的实现！）\n- 🐦 [**与喜鹊聊天**](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002Fflydust\u002FChat-with-Magpie)\n\n## 🐦 新闻\n- [2025\u002F01\u002F22] 喜鹊论文已被ICLR 2025接收！\n- [2025\u002F01\u002F09] 喜鹊推理V2数据集发布！来自Llama、Skywork-o1和QwQ的[25万条](https:\u002F\u002Fhuggingface.co\u002Fcollections\u002FMagpie-Align\u002Fmagpie-reasoning-datasets-67790a13b91035bc42693885)！这次我们专注于思维链（CoT）🤯\n- [2025\u002F01\u002F01] 喜鹊Llama-3.3数据集发布！来自Llama-3.3-70B-Instruct的[100万条](https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002FMagpie-Align\u002FMagpie-Llama-3.3-Pro-1M-v0.1)！新年快乐！\n- [2024\u002F10\u002F20] 喜鹊Qwen2.5数据集发布！来自Qwen2.5 72B的[100万条](https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002FMagpie-Align\u002FMagpie-Qwen2.5-Pro-1M-v0.1)！\n- [2024\u002F09\u002F17] 推出两款性能达到SOTA的新模型：𝙼𝚊𝚐𝚙𝚒𝚎𝙻𝙼-𝙲𝚑𝚊𝚝（4B和8B）！请查看合集[这里](https:\u002F\u002Fhuggingface.co\u002Fcollections\u002FMagpie-Align\u002Fmagpielm-66e2221f31fa3bf05b10786a)！\n- [2024\u002F08\u002F19] 
三份偏好优化数据集——[Magpie-Air-DPO-100K-v0.1](https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002FMagpie-Align\u002FMagpie-Air-DPO-100K-v0.1)、[Magpie-Pro-DPO-100K-v0.1](https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002FMagpie-Align\u002FMagpie-Pro-DPO-100K-v0.1)以及[Magpie-Llama-3.1-Pro-DPO-100K-v0.1](https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002FMagpie-Align\u002FMagpie-Llama-3.1-Pro-DPO-100K-v0.1)——均已发布！\n- [2024\u002F07\u002F25] 喜鹊Llama-3.1数据集发布！来自Llama-3.1-70B-Instruct的[100万条](https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002FMagpie-Align\u002FMagpie-Llama-3.1-Pro-1M-v0.1)！相比Llama-3，许可证更加友好😃！\n- [2024\u002F07\u002F21] 喜鹊Gemma2数据集发布！来自Gemma-2-27b-it的[53.4万条](https:\u002F\u002Fhuggingface.co\u002Fcollections\u002FMagpie-Align\u002Fmagpie-gemma2-datasets-669da6aff21b09fdcecbd6ea)！\n- [2024\u002F07\u002F19] [Llama-3-8B-Magpie-Align-v0.3](https:\u002F\u002Fhuggingface.co\u002FMagpie-Align\u002FLlama-3-8B-Magpie-Align-v0.3)发布，中文问答能力得到增强，这得益于我们新推出的[中文指令数据集](https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002FMagpie-Align\u002FMagpie-Qwen2-Pro-200K-Chinese)！\n- [2024\u002F07\u002F14] [Llama-3-8B-Magpie-Align-v0.2](https:\u002F\u002Fhuggingface.co\u002FMagpie-Align\u002FLlama-3-8B-Magpie-Align-v0.2)发布，推理能力进一步提升，这归功于我们新推出的[推理增强数据集](https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002FMagpie-Align\u002FMagpie-Reasoning-150K)！\n- [2024\u002F07\u002F04] 喜鹊Qwen2数据集发布！来自Qwen2 72B的[100万条](https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002FMagpie-Align\u002FMagpie-Qwen2-Pro-1M-v0.1)，以及来自Qwen2 7B的[300万条](https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002FMagpie-Align\u002FMagpie-Qwen2-Air-3M-v0.1)。\n- [2024\u002F07\u002F03] 🏆 我们的开源对齐模型[Llama-3-8B-Magpie-Align-v0.1](https:\u002F\u002Fhuggingface.co\u002FMagpie-Align\u002FLlama-3-8B-Magpie-Align-v0.1)发布！它在[AI2 WildBench排行榜](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002Fallenai\u002FWildBench)中被评为🏆 **\u003C30B模型中的最佳**！甚至优于官方的[Meta-Llama-3-8B-Instruct](https:\u002F\u002Fhuggingface.co\u002Fmeta-llama\u002FMeta-Llama-3-8B-Instruct)模型！\n- [2024\u002F06\u002F24] 喜鹊Phi 3数据集发布！来自Phi 3 Medium的[100万条](https:\u002F\u002Fhuggingface.co\u002Fcollections\u002FMagpie-Align\u002Fmagpie-phi3-667a7a45f1a406cd61685d64)！\n- [2024\u002F06\u002F12] 喜鹊Llama-3数据集发布！来自Llama-3 70B的[100万条](https:\u002F\u002Fhuggingface.co\u002Fcollections\u002FMagpie-Align\u002Fmagpie-pro-6666b0e713e5f5c09554876f)，以及来自Llama-3 8B的[300万条](https:\u002F\u002Fhuggingface.co\u002Fcollections\u002FMagpie-Align\u002Fmagpie-air-6666b11a32021655a27f86c0)。\n- [2024\u002F06\u002F12] [喜鹊技术报告](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.08464)发布！让我们共同推动高质量对齐数据的开放共享吧！\n\n## 喜鹊支持\n\n目前，喜鹊已在 **Llama-3**、**Qwen2**、**Phi 3** 和 **Gemma-2** 系列模型上进行了测试。如需更多模型支持，请 [提交问题](https:\u002F\u002Fgithub.com\u002Fmagpie-align\u002Fmagpie\u002Fissues\u002Fnew)。\n\n| 模型家族 | 喜鹊 | 喜鹊脚本 | 数据集 | 规模 |\n|-------------|:------:|:-------|:-------|:-------|\n| [Llama 3.3](https:\u002F\u002Fhuggingface.co\u002Fcollections\u002Fmeta-llama\u002Fllama-33-67531d5c405ec5d08a852000)     | ✅ | [70B](scripts\u002Fmagpie-llama3.3-70b.sh) | [70B](https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002FMagpie-Align\u002FMagpie-Llama-3.3-Pro-1M-v0.1) | 1M |\n| [Llama 3.1](https:\u002F\u002Fhuggingface.co\u002Fcollections\u002Fmeta-llama\u002Fllama-31-669fc079a0c406a149a5738f)     | ✅ * | [8B](scripts\u002Fmagpie-llama3.1-8b.sh),[70B](scripts\u002Fmagpie-llama3.1-70b.sh) | 
[70B](https:\u002F\u002Fhuggingface.co\u002Fcollections\u002FMagpie-Align\u002Fmagpie-llama31-datasets-66a45ed727be07f53c8ff294),[405B(Argilla)](https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002Fargilla\u002Fmagpie-ultra-v0.1) | 1M |\n| [Llama 3](https:\u002F\u002Fhuggingface.co\u002Fcollections\u002Fmeta-llama\u002Fmeta-llama-3-66214712577ca38149ebb2b6)     | ✅ | [8B](scripts\u002Fmagpie-llama3-8b.sh),[70B](scripts\u002Fmagpie-llama3-70b.sh) | [8B](https:\u002F\u002Fhuggingface.co\u002Fcollections\u002FMagpie-Align\u002Fmagpie-air-6666b11a32021655a27f86c0),[70B](https:\u002F\u002Fhuggingface.co\u002Fcollections\u002FMagpie-Align\u002Fmagpie-pro-6666b0e713e5f5c09554876f) | 3M + 1M |\n| [Qwen2.5](https:\u002F\u002Fhuggingface.co\u002Fcollections\u002FQwen\u002Fqwen25-66e81a666513e518adb90d9e)     | ✅ | [3B](scripts\u002Fmagpie-qwen2.5-3b.sh),[7B](scripts\u002Fmagpie-qwen2.5-7b.sh),[14B](scripts\u002Fmagpie-qwen2.5-14b.sh),[32B](scripts\u002Fmagpie-qwen2.5-32b.sh),[72B](scripts\u002Fmagpie-qwen2.5-72b.sh) | [72B](https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002FMagpie-Align\u002FMagpie-Qwen2.5-Pro-1M-v0.1) | 1M | \n| [Qwen2](https:\u002F\u002Fhuggingface.co\u002Fcollections\u002FQwen\u002Fqwen2-6659360b33528ced941e557f)     | ✅ | [7B](scripts\u002Fmagpie-qwen2-7b.sh),[72B](scripts\u002Fmagpie-qwen2-72b.sh),[Math 7B](scripts\u002Fmagpie-qwen2-math-7b.sh) | [7B](https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002FMagpie-Align\u002FMagpie-Qwen2-Air-3M-v0.1),[72B](https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002FMagpie-Align\u002FMagpie-Qwen2-Pro-1M-v0.1) | 3M + 1M |\n| [Phi 3](https:\u002F\u002Fhuggingface.co\u002Fcollections\u002Fmicrosoft\u002Fphi-3-6626e15e9585a200d2d761e3)     | ✅ | [mini](scripts\u002Fmagpie-phi3mini.sh),[small](scripts\u002Fmagpie-phi3small.sh),[medium](scripts\u002Fmagpie-phi3medium.sh) | [medium](https:\u002F\u002Fhuggingface.co\u002Fcollections\u002FMagpie-Align\u002Fmagpie-phi3-667a7a45f1a406cd61685d64) | 1M |\n| [Gemma-2](https:\u002F\u002Fhuggingface.co\u002Fcollections\u002Fgoogle\u002Fgemma-2-release-667d6600fd5220e7b967f315)    | ✅ ** | [9B](magpie-gemma2-9b.sh),[27B](scripts\u002Fmagpie-gemma2-27b.sh) | [27B](https:\u002F\u002Fhuggingface.co\u002Fcollections\u002FMagpie-Align\u002Fmagpie-gemma2-datasets-669da6aff21b09fdcecbd6ea) | 534K |\n| [Gemma-1.1](https:\u002F\u002Fhuggingface.co\u002Fcollections\u002Fgoogle\u002Fgemma-release-65d5efbccdbb8c4202ec078b)    | ⭕️ | [7B](scripts\u002Fmagpie-gemma7b.sh)\n| [Llama 2](https:\u002F\u002Fhuggingface.co\u002Fcollections\u002Fmeta-llama\u002Fllama-2-family-661da1f90a9d678b6f55773b)   | ⭕️ | [7B](scripts\u002Fmagpie-llama2-7b.sh),[70B](scripts\u002Fmagpie-llama2-70b.sh)\n| [Vicuna](https:\u002F\u002Flmsys.org\u002Fblog\u002F2023-03-30-vicuna\u002F)   | ⭕️ | [7B](scripts\u002Fmagpie-vicuna-7b.sh)\n| [Mistral](https:\u002F\u002Fhuggingface.co\u002Fmistralai\u002FMistral-7B-Instruct-v0.3)   | ⭕️ | [7B](scripts\u002Fmagpie-mistral7b.sh)\n| [Yi](https:\u002F\u002Fhuggingface.co\u002Fcollections\u002F01-ai\u002Fyi-15-2024-05-663f3ecab5f815a3eaca7ca8)    | ⭕️ | [34B](scripts\u002Fmagpie-yi34b.sh)\n| [DeepSeek Coder](https:\u002F\u002Fhuggingface.co\u002Fcollections\u002Fdeepseek-ai\u002Fdeepseekcoder-v2-666bf4b274a5f556827ceeca) | ⭕️ | [Coder V2 Lite](https:\u002F\u002Fgithub.com\u002Fmagpie-align\u002Fmagpie\u002Fblob\u002Fmain\u002Fscripts\u002Fmagpie-deepseek-coderv2-lite.sh)  \n\n- ✅: 效果极佳！(**\\*** 应用 logits 处理器以抑制 Markdown；**\\*\\*** 在生成响应前应用 [过滤器](exp\u002Fstr_utils.py)。)\n- ⭕️: 可行！我们可以得到一些有趣的结果，但可能需要设计额外的 
logits 处理器和\u002F或过滤器。\n- ❌: 不可行。\n- ❓: 未测试。\n\n所有可用的喜鹊数据集导航可在此处找到 [这里](navigation.md)。\n\n我们希望喜鹊能够通过提升模型对齐过程的透明度，为人工智能的民主化贡献力量！\n\n## 摘要\n\u003Cdetails>\u003Csummary>点击此处\u003C\u002Fsummary>\n高质量的指令数据对于对齐大型语言模型（LLMs）至关重要。尽管一些模型，例如 Llama-3-Instruct，其权重是公开的，但其对齐数据仍然保密，这阻碍了人工智能的民主化。高昂的人工成本以及提示范围的有限性和预定义性，使得现有的开源数据生成方法难以有效扩展，从而可能限制公共对齐数据集的多样性和质量。是否有可能通过直接从已对齐的 LLM 中提取数据，大规模合成高质量的指令数据呢？我们提出了一种名为“喜鹊”的大规模对齐数据自合成方法。我们的关键观察是，像 Llama-3-Instruct 这样的已对齐模型，由于其自回归特性，只需输入到用户消息位置之前的左侧模板，就能生成用户查询。我们利用这种方法提示 Llama-3-Instruct，生成了 400 万个指令及其对应的响应。我们对提取的数据进行了全面分析，并选择了 30 万个高质量实例。为了将喜鹊数据与其他公开指令数据集进行比较，我们使用每个数据集对 Llama-3-8B-Base 进行微调，并评估微调后模型的性能。结果表明，在某些任务中，使用喜鹊数据微调的模型表现与官方的 Llama-3-8B-Instruct 相当，尽管后者通过监督式微调（SFT）和后续的反馈学习增强了 1000 万条数据。我们还表明，仅使用喜鹊进行 SFT 的效果可以超越以往同时用于 SFT 和偏好优化的公开数据集，例如使用 UltraFeedback 进行直接偏好优化。这一优势在 AlpacaEval、ArenaHard 和 WildBench 等对齐基准测试中尤为明显。\n\u003C\u002Fdetails>\u003Cbr>\n\n## 概述\n\n![概述](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fmagpie-align_magpie_readme_4794709f18e7.png)\n\n## 安装\n\n**构建环境**\n```\ngit clone https:\u002F\u002Fgithub.com\u002Fmagpie-align\u002Fmagpie.git\ncd magpie\nconda create -n magpie python=3.10 -y\nconda activate magpie\npip install -r requirements.txt\n```\n\n**获取 🤗 Huggingface 上的 Llama-3 模型访问权限**\n\n您可以在 [这里](https:\u002F\u002Fhuggingface.co\u002Fmeta-llama\u002FMeta-Llama-3-8B-Instruct)申请 Llama-3 模型的访问权限。要在终端登录，请输入：\n```\nhuggingface-cli login\n```\n然后输入以 \"hf_\" 开头的您的 Huggingface 私钥。\n\n## 玩具示例\n\n**使用 Jupyter Notebook 玩一玩**\n\n玩具示例可在 [`demo.ipynb`](demo.ipynb) 中找到。祝您玩得开心！\n\n\u003Ca target=\"_blank\" href=\"https:\u002F\u002Fcolab.research.google.com\u002Fgithub\u002Fmagpie-align\u002Fmagpie\u002Fblob\u002Fmain\u002Fdemo.ipynb\">\n  \u003Cimg src=\"https:\u002F\u002Fcolab.research.google.com\u002Fassets\u002Fcolab-badge.svg\" alt=\"在 Colab 中打开\"\u002F>\n\u003C\u002Fa>\n\n## 批量SFT数据生成\n我们以Llama-3-8B-Instruct为例，演示批量SFT数据生成流程。要运行批量生成，只需执行：\n```\ncd scripts\nbash magpie.sh\n```\n该脚本会在data文件夹中生成指令和响应数据。此流程已在RTX 4090 24G显卡上测试通过。若您使用的显卡显存较小，建议实施[量化](https:\u002F\u002Fdocs.vllm.ai\u002Fen\u002Flatest\u002Fquantization\u002Ffp8.html)。\n\n我们还在[`scripts`](scripts)文件夹中提供了其他模型的脚本。您可通过[此处](#magpie-supports)导航找到特定的Magpie脚本。请注意，对于超过8B参数量的模型，可能需要4张A100显卡才能运行这些脚本。\n\n### 批量多轮数据生成［可选］\n在生成指令-响应对之后，您可以将其扩展为多轮对话。只需运行以下命令：\n```\nbash magpie-multi-turn.sh ***_ins_res.json\n```\n其中`***_ins_res.json`是上一步生成的单轮指令-响应对文件。\n\n## 数据集过滤\n### 1. 标注\n要为生成的指令-响应对打标签，可以运行：\n```\ncd scripts\nbash unitag.sh ***_ins_res.json all\n```\n该脚本将自动为生成的数据集添加质量、难度、任务类别、安全性、奖励和语言等标签。您也可以逐个生成标签。例如，如果您只想使用设备0来生成安全标签，可以运行：\n```\ncd scripts\nbash unitag.sh ***_ins_res.json safety 0\n```\n\n### 2. 数据拼接与转换\n您可能会生成采用不同生成配置的数据集。我们在此提供了一个Jupyter笔记本[链接](data_sft\u002Fdata_concatenation.ipynb)，用于将所有数据集拼接并转换为ShareGPT格式，该格式受到[Axolotl](https:\u002F\u002Fgithub.com\u002FOpenAccess-AI-Collective\u002Faxolotl)的全面支持，可用于微调。\n\n### 3. 去重\n当您已将完整数据集转换为ShareGPT格式后，可以计算每条指令的最小邻近距离，并去除重复内容。为此，请运行：\n```\ncd exp\npython gen_dis.py --input_file ***_sharegpt.jsonl\n```\n其中`***_sharegpt.jsonl`是上一步得到的数据集路径。该Python脚本将负责构建FAISS索引并计算最小距离。\n\n### 4. 
设计并应用您的过滤器\n我们在此提供了一个Jupyter笔记本[链接](data_sft\u002Fdata_filter.ipynb)，用于进行简单的过滤操作。您可以根据自身需求调整过滤参数，设计并应用您自己的过滤策略。\n\n## 偏好数据生成\n\n要生成偏好数据，首先需按照上述步骤准备经过筛选的指令。有关预期格式，请参考我们的示例[链接](data_po\u002Fexample_instructions.jsonl)。\n\n接下来，请使用我们提供的脚本[链接](scripts\u002Fmagpie_example_po.sh)生成多个响应，并计算相应的奖励值。最后，您可以通过[这个Jupyter笔记本](data_po\u002Fprocess_po.ipynb)对数据进行处理，并将其上传至Huggingface。\n\n## 微调\n请查看[recipes](recipes\u002F)目录，获取相关说明及我们的Magpie模型配方。\n\n## 引用\n如果您认为本模型、数据或代码有所帮助，请引用我们的论文🤩：\n```\n@article{xu2024magpie,\n  title={Magpie: Alignment Data Synthesis from Scratch by Prompting Aligned LLMs with Nothing},\n  author={Zhangchen Xu and Fengqing Jiang and Luyao Niu and Yuntian Deng and Radha Poovendran and Yejin Choi and Bill Yuchen Lin},\n  journal={ArXiv},\n  year={2024},\n  volume={abs\u002F2406.08464},\n  url={https:\u002F\u002Fapi.semanticscholar.org\u002FCorpusID:270391432}\n}\n```\n\n## 星标历史\n\n[![星标历史图](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fmagpie-align_magpie_readme_803099aa9898.png)](https:\u002F\u002Fstar-history.com\u002F#magpie-align\u002Fmagpie&Date)","# Magpie 快速上手指南\n\nMagpie 是一个无需种子问题或复杂提示工程，即可从对齐的大语言模型（如 Llama-3、Qwen2 等）中直接合成高质量指令微调（SFT）数据的工具。它利用模型自身的自回归特性，仅通过输入预查询模板即可生成用户提问和模型回答。\n\n## 环境准备\n\n*   **操作系统**: Linux \u002F macOS (Windows 需使用 WSL)\n*   **Python 版本**: 3.10\n*   **硬件要求**: 推荐 NVIDIA GPU (示例脚本已在 RTX 4090 24G 上测试)。显存较小的用户建议在生成脚本中启用量化选项。\n*   **前置依赖**:\n    *   Conda (推荐用于环境管理)\n    *   Git\n    *   Hugging Face 账号 (如需使用 Llama-3 等受限模型，需提前申请权限)\n\n## 安装步骤\n\n1.  **克隆仓库**\n    ```bash\n    git clone https:\u002F\u002Fgithub.com\u002Fmagpie-align\u002Fmagpie.git\n    cd magpie\n    ```\n\n2.  **创建并激活虚拟环境**\n    ```bash\n    conda create -n magpie python=3.10 -y\n    conda activate magpie\n    ```\n\n3.  **安装依赖包**\n    *(注：若下载速度慢，可配置 pip 使用国内镜像源，如清华源)*\n    ```bash\n    pip install -r requirements.txt -i https:\u002F\u002Fpypi.tuna.tsinghua.edu.cn\u002Fsimple\n    ```\n\n4.  **登录 Hugging Face**\n    如果您计划使用 Llama-3 系列模型，必须先登录以验证访问权限：\n    ```bash\n    huggingface-cli login\n    ```\n    按提示输入您的 Hugging Face Token（以 `hf_` 开头）。\n\n## 基本使用\n\n### 方式一：体验示例 (Jupyter Notebook)\n最适合快速理解原理的方式是运行官方提供的 Demo。\n\n*   **本地运行**:\n    打开 `demo.ipynb` 文件并逐个单元格执行。\n*   **在线运行 (Colab)**:\n    点击 [Open In Colab](https:\u002F\u002Fcolab.research.google.com\u002Fgithub\u002Fmagpie-align\u002Fmagpie\u002Fblob\u002Fmain\u002Fdemo.ipynb) 直接在浏览器中体验。\n\n### 方式二：批量生成数据 (命令行)\n以下以 `Llama-3-8B-Instruct` 为例，演示如何批量生成指令微调数据。\n\n1.  进入脚本目录：\n    ```bash\n    cd scripts\n    ```\n\n2.  运行生成脚本：\n    ```bash\n    bash magpie.sh\n    ```\n    *说明：该脚本将自动调用模型，生成的指令（instructions）和回复（responses）将保存在 `data` 文件夹中。*\n\n3.  
**适配其他模型**:\n    Magpie 支持多种模型系列。请根据下表选择对应的脚本运行：\n\n    | 模型系列 | 脚本示例 |\n    | :--- | :--- |\n    | Llama-3.1 \u002F 3.3 | `bash magpie-llama3.1-8b.sh` \u002F `bash magpie-llama3.3-70b.sh` |\n    | Qwen2.5 | `bash magpie-qwen2.5-7b.sh` \u002F `bash magpie-qwen2.5-72b.sh` |\n    | Qwen2 | `bash magpie-qwen2-7b.sh` |\n    | Phi 3 | `bash magpie-phi3medium.sh` |\n    | Gemma-2 | `bash magpie-gemma2-27b.sh` |\n\n    > **注意**: 对于部分模型（如带 `*` 标记的 Llama 3.1 或带 `**` 标记的 Gemma-2），可能需要在脚本中应用特定的 logits processor 或过滤器以获得最佳效果，具体请参考 `scripts` 目录下的脚本注释。","某初创团队正在研发一款垂直领域的医疗咨询助手，急需大量高质量的医患对话数据来微调模型，但面临真实患者隐私数据不可用、人工标注成本过高的困境。\n\n### 没有 magpie 时\n- **种子问题依赖重**：团队必须手动编写成千上万个“种子问题”作为生成起点，不仅耗时耗力，还容易陷入思维定势，导致数据多样性不足。\n- **提示词工程复杂**：为了引导模型生成符合医疗规范的问答对，需要反复调试复杂的 Prompt 模板，稍有不慎就会产出幻觉严重或格式错误的数据。\n- **数据分布单一**：由于依赖人工设定的种子，生成的数据往往集中在常见病症，缺乏长尾罕见病例的覆盖，导致模型在面对复杂咨询时表现不佳。\n- **迭代周期漫长**：从构思问题到清洗数据，整个流程需要数周时间，严重拖慢了模型版本的迭代速度，难以快速响应业务需求。\n\n### 使用 magpie 后\n- **零样本自动激发**：magpie 直接利用对齐模型的预查询模板（pre-query template），无需任何种子问题或人工提示，即可自动“无中生有”地生成自然的用户提问和专家回答。\n- **流程极简高效**：省去了繁琐的提示词调试环节，团队只需运行流水线，就能批量获得结构完整、逻辑严密的医疗对话数据，将准备时间从数周缩短至数小时。\n- **长尾覆盖全面**：得益于模型自身的知识泛化能力，magpie 生成的数据天然涵盖了大量罕见病案和复杂咨询场景，显著提升了训练数据的分布广度。\n- **质量可控可靠**：生成的数据直接源自已对齐的强模型（如 Llama-3 或 Qwen2.5），确保了医学建议的准确性和安全性，大幅降低了后期人工清洗的成本。\n\nmagpie 通过“无中生有”的合成机制，让团队在零隐私风险下低成本获得了百万级高质量医疗指令数据，彻底打破了领域模型冷启动的数据瓶颈。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fmagpie-align_magpie_ce820bf3.png","magpie-align","Magpie","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002Fmagpie-align_ce920659.png","",null,"https:\u002F\u002Fmagpie-align.github.io\u002F","https:\u002F\u002Fgithub.com\u002Fmagpie-align",[80,84,88],{"name":81,"color":82,"percentage":83},"Python","#3572A5",42.6,{"name":85,"color":86,"percentage":87},"Shell","#89e051",33,{"name":89,"color":90,"percentage":91},"Jupyter Notebook","#DA5B0B",24.4,841,67,"2026-04-06T13:24:14","MIT","未说明","需要 NVIDIA GPU，测试环境为 RTX 4090 (24GB 显存)。若显存较小需使用量化技术。",{"notes":99,"python":100,"dependencies":101},"建议使用 conda 创建名为 'magpie' 的虚拟环境。运行前需在 Hugging Face 申请 Llama-3 等模型访问权限并登录。脚本已在单张 RTX 4090 上测试通过，低显存用户需自行实现量化逻辑。","3.10",[102],"requirements.txt 中定义的依赖库",[16,35,14],[105,106,107,108,109,110,111,112,113,114,115,116,117],"alignment","llama2","llama3","llm","nlp","paper","phi3","qwen2","synthetic-data","synthetic-dataset-generation","dataset","gemma","supervised-finetuning","2026-03-27T02:49:30.150509","2026-04-12T07:52:05.534211",[],[]]