[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-promptdriven--pdd":3,"tool-promptdriven--pdd":64},[4,17,27,35,44,52],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":16},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,3,"2026-04-05T11:01:52",[13,14,15],"开发框架","图像","Agent","ready",{"id":18,"name":19,"github_repo":20,"description_zh":21,"stars":22,"difficulty_score":23,"last_commit_at":24,"category_tags":25,"status":16},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",140436,2,"2026-04-05T23:32:43",[13,15,26],"语言模型",{"id":28,"name":29,"github_repo":30,"description_zh":31,"stars":32,"difficulty_score":23,"last_commit_at":33,"category_tags":34,"status":16},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 
绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",107662,"2026-04-03T11:11:01",[13,14,15],{"id":36,"name":37,"github_repo":38,"description_zh":39,"stars":40,"difficulty_score":10,"last_commit_at":41,"category_tags":42,"status":16},4292,"Deep-Live-Cam","hacksider\u002FDeep-Live-Cam","Deep-Live-Cam 是一款专注于实时换脸与视频生成的开源工具，用户仅需一张静态照片，即可通过“一键操作”实现摄像头画面的即时变脸或制作深度伪造视频。它有效解决了传统换脸技术流程繁琐、对硬件配置要求极高以及难以实时预览的痛点，让高质量的数字内容创作变得触手可及。\n\n这款工具不仅适合开发者和技术研究人员探索算法边界，更因其极简的操作逻辑（仅需三步：选脸、选摄像头、启动），广泛适用于普通用户、内容创作者、设计师及直播主播。无论是为了动画角色定制、服装展示模特替换，还是制作趣味短视频和直播互动，Deep-Live-Cam 都能提供流畅的支持。\n\n其核心技术亮点在于强大的实时处理能力，支持口型遮罩（Mouth Mask）以保留使用者原始的嘴部动作，确保表情自然精准；同时具备“人脸映射”功能，可同时对画面中的多个主体应用不同面孔。此外，项目内置了严格的内容安全过滤机制，自动拦截涉及裸露、暴力等不当素材，并倡导用户在获得授权及明确标注的前提下合规使用，体现了技术发展与伦理责任的平衡。",88924,"2026-04-06T03:28:53",[13,14,15,43],"视频",{"id":45,"name":46,"github_repo":47,"description_zh":48,"stars":49,"difficulty_score":23,"last_commit_at":50,"category_tags":51,"status":16},3704,"NextChat","ChatGPTNextWeb\u002FNextChat","NextChat 是一款轻量且极速的 AI 助手，旨在为用户提供流畅、跨平台的大模型交互体验。它完美解决了用户在多设备间切换时难以保持对话连续性，以及面对众多 AI 模型不知如何统一管理的痛点。无论是日常办公、学习辅助还是创意激发，NextChat 都能让用户随时随地通过网页、iOS、Android、Windows、MacOS 或 Linux 端无缝接入智能服务。\n\n这款工具非常适合普通用户、学生、职场人士以及需要私有化部署的企业团队使用。对于开发者而言，它也提供了便捷的自托管方案，支持一键部署到 Vercel 或 Zeabur 等平台。\n\nNextChat 的核心亮点在于其广泛的模型兼容性，原生支持 Claude、DeepSeek、GPT-4 及 Gemini Pro 等主流大模型，让用户在一个界面即可自由切换不同 AI 能力。此外，它还率先支持 MCP（Model Context Protocol）协议，增强了上下文处理能力。针对企业用户，NextChat 
提供专业版解决方案，具备品牌定制、细粒度权限控制、内部知识库整合及安全审计等功能，满足公司对数据隐私和个性化管理的高标准要求。",87618,"2026-04-05T07:20:52",[13,26],{"id":53,"name":54,"github_repo":55,"description_zh":56,"stars":57,"difficulty_score":23,"last_commit_at":58,"category_tags":59,"status":16},2268,"ML-For-Beginners","microsoft\u002FML-For-Beginners","ML-For-Beginners 是由微软推出的一套系统化机器学习入门课程，旨在帮助零基础用户轻松掌握经典机器学习知识。这套课程将学习路径规划为 12 周，包含 26 节精炼课程和 52 道配套测验，内容涵盖从基础概念到实际应用的完整流程，有效解决了初学者面对庞大知识体系时无从下手、缺乏结构化指导的痛点。\n\n无论是希望转型的开发者、需要补充算法背景的研究人员，还是对人工智能充满好奇的普通爱好者，都能从中受益。课程不仅提供了清晰的理论讲解，还强调动手实践，让用户在循序渐进中建立扎实的技能基础。其独特的亮点在于强大的多语言支持，通过自动化机制提供了包括简体中文在内的 50 多种语言版本，极大地降低了全球不同背景用户的学习门槛。此外，项目采用开源协作模式，社区活跃且内容持续更新，确保学习者能获取前沿且准确的技术资讯。如果你正寻找一条清晰、友好且专业的机器学习入门之路，ML-For-Beginners 将是理想的起点。",84991,"2026-04-05T10:45:23",[14,60,43,61,15,62,26,13,63],"数据工具","插件","其他","音频",{"id":65,"github_repo":66,"name":67,"description_en":68,"description_zh":69,"ai_summary_zh":70,"readme_en":71,"readme_zh":72,"quickstart_zh":73,"use_case_zh":74,"hero_image_url":75,"owner_login":76,"owner_name":77,"owner_avatar_url":78,"owner_bio":79,"owner_company":80,"owner_location":80,"owner_email":80,"owner_twitter":81,"owner_website":82,"owner_url":83,"languages":84,"stars":109,"forks":110,"last_commit_at":111,"license":112,"difficulty_score":23,"env_os":113,"env_gpu":114,"env_ram":114,"env_deps":115,"category_tags":123,"github_topics":124,"view_count":23,"oss_zip_url":80,"oss_zip_packed_at":80,"status":16,"created_at":136,"updated_at":137,"faqs":138,"releases":139},4181,"promptdriven\u002Fpdd","pdd","Prompt Driven Development Command Line Interface","pdd（Prompt-Driven Development）是一款专为 AI 时代打造的命令行开发工具，旨在通过自然语言提示词驱动完整的代码生成与维护流程。它主要解决了传统开发中从需求理解到代码实现、测试验证环节耗时较长的问题，让开发者能够直接将 GitHub 议题（Issue）转化为可运行的代码或测试用例。\n\n这款工具特别适合希望提升开发效率的软件工程师、全栈开发者以及热衷于探索 AI 辅助编程的技术团队。用户只需在终端输入简单指令，如 `pdd change` 或 `pdd bug`，即可自动执行包含需求分析、架构设计、代码编写及测试生成的多步工作流。\n\npdd 的核心亮点在于其强大的“智能体（Agentic）”能力。它不仅支持自动生成代码和 UI 测试，还能进行探索性测试、契约验证及无障碍审计等复杂任务。此外，pdd 提供本地 Web 界面用于可视化项目管理，并拥有独特的 `sync` 
命令，能够自动化整个开发生命周期，提供实时视觉反馈和精细的状态管理。无论是修复漏洞、实现新功能还是从产品需求文档生成架构文件，pdd 都能通过标准化的多步骤流程协助开发者高效完成，让编程变得更加直","pdd（Prompt-Driven Development）是一款专为 AI 时代打造的命令行开发工具，旨在通过自然语言提示词驱动完整的代码生成与维护流程。它主要解决了传统开发中从需求理解到代码实现、测试验证环节耗时较长的问题，让开发者能够直接将 GitHub 议题（Issue）转化为可运行的代码或测试用例。\n\n这款工具特别适合希望提升开发效率的软件工程师、全栈开发者以及热衷于探索 AI 辅助编程的技术团队。用户只需在终端输入简单指令，如 `pdd change` 或 `pdd bug`，即可自动执行包含需求分析、架构设计、代码编写及测试生成的多步工作流。\n\npdd 的核心亮点在于其强大的“智能体（Agentic）”能力。它不仅支持自动生成代码和 UI 测试，还能进行探索性测试、契约验证及无障碍审计等复杂任务。此外，pdd 提供本地 Web 界面用于可视化项目管理，并拥有独特的 `sync` 命令，能够自动化整个开发生命周期，提供实时视觉反馈和精细的状态管理。无论是修复漏洞、实现新功能还是从产品需求文档生成架构文件，pdd 都能通过标准化的多步骤流程协助开发者高效完成，让编程变得更加直观和流畅。","# PDD (Prompt-Driven Development) Command Line Interface\n\n![PDD-CLI Version](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Fpdd--cli-v0.0.179-blue) [![Discord](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FDiscord-join%20chat-7289DA.svg?logo=discord&logoColor=white)](https:\u002F\u002Fdiscord.gg\u002FYp4RTh8bG7)\n\n## Introduction\n\nPDD (Prompt-Driven Development) is a toolkit for AI-powered code generation and maintenance.\n\n**Getting started is simple:**\n\n```bash\n# Install and run\nuv tool install pdd-cli\npdd setup\npdd connect\n```\n\nThis launches a web interface at `localhost:9876` where you can:\n- Implement GitHub issues automatically\n- Generate and test code from prompts\n- Manage your PDD projects visually\n\n\u003Cp align=\"center\">\n  \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fpromptdriven_pdd_readme_913020f6e87e.gif\" alt=\"PDD Handpaint Demo\" \u002F>\n\u003C\u002Fp>\n\nFor CLI users, PDD also offers powerful **agentic commands** that implement GitHub issues automatically:\n- `pdd change \u003Cissue-url>` - Implement feature requests (12-step workflow)\n- `pdd bug \u003Cissue-url>` - Create failing tests for bugs\n- `pdd fix \u003Cissue-url>` - Fix the failing tests\n- `pdd generate \u003Cissue-url>` - Generate architecture.json from a PRD issue (11-step workflow)\n- `pdd test \u003Cissue-url>` - Generate UI 
tests from issue descriptions (18-step workflow with exploratory testing, contract validation, accessibility audits)\n\nFor prompt-based workflows, the **`sync`** command automates the complete development cycle with intelligent decision-making, real-time visual feedback, and sophisticated state management.\n\n## Whitepaper\n\nFor a detailed explanation of the concepts, architecture, and benefits of Prompt-Driven Development, please refer to our full whitepaper. This document provides an in-depth look at the PDD philosophy, its advantages over traditional development, and includes benchmarks and case studies.\n\n[Read the Full Whitepaper with Benchmarks](docs\u002Fwhitepaper_with_benchmarks\u002Fwhitepaper_w_benchmarks.md)\n\nAlso see the Prompt‑Driven Development Doctrine for core principles and practices: [docs\u002Fprompt-driven-development-doctrine.md](docs\u002Fprompt-driven-development-doctrine.md)\n\n## Installation\n\n### Prerequisites for macOS\n\nOn macOS, you'll need to install some prerequisites before installing PDD:\n\n1. **Install Xcode Command Line Tools** (required for Python compilation):\n   ```bash\n   xcode-select --install\n   ```\n\n2. **Install Homebrew** (recommended package manager for macOS):\n   ```bash\n   \u002Fbin\u002Fbash -c \"$(curl -fsSL https:\u002F\u002Fraw.githubusercontent.com\u002FHomebrew\u002Finstall\u002FHEAD\u002Finstall.sh)\"\n   ```\n   \n   After installation, add Homebrew to your PATH:\n   ```bash\n   echo 'eval \"$(\u002Fopt\u002Fhomebrew\u002Fbin\u002Fbrew shellenv)\"' >> ~\u002F.zprofile && eval \"$(\u002Fopt\u002Fhomebrew\u002Fbin\u002Fbrew shellenv)\"\n   ```\n\n3. **Install Python** (if not already installed):\n   ```bash\n   # Check if Python is installed\n   python3 --version\n   \n   # If Python is not found, install it via Homebrew\n   brew install python\n   ```\n   \n   **Note**: Recent versions of macOS no longer ship with Python pre-installed. PDD requires Python 3.8 or higher. 
The `brew install python` command installs the latest Python 3 version.\n\n### Recommended Method: uv\n\nWe recommend installing PDD using the [uv package manager](https:\u002F\u002Fgithub.com\u002Fastral-sh\u002Fuv) for better dependency management and automatic environment configuration:\n\n```bash\n# Install uv if you haven't already \ncurl -LsSf https:\u002F\u002Fastral.sh\u002Fuv\u002Finstall.sh | sh\n\n# Install PDD using uv tool install\nuv tool install pdd-cli\n```\n\nThis installation method ensures:\n- Faster installations with optimized dependency resolution\n- Automatic environment setup without manual configuration\n- Proper handling of the PDD_PATH environment variable\n- Better isolation from other Python packages\n\nThe PDD CLI will be available immediately after installation without requiring any additional environment configuration.\n\nVerify installation:\n```bash\npdd --version\n```\n\nWith the CLI on your `PATH`, continue with:\n```bash\npdd setup\n```\nThe command detects agentic CLI tools, scans for API keys, configures models, and seeds local configuration files.\nIf you postpone this step, the CLI detects the missing setup artifacts the first time you run another command and shows a reminder banner so you can complete it later (the banner is suppressed once `~\u002F.pdd\u002Fapi-env` exists or when your project already provides credentials via `.env` or `.pdd\u002F`).\n\n### Alternative: pip Installation\n\nIf you prefer using pip, you can install PDD with:\n```bash\npip install pdd-cli\n```\n\n\n## Advanced Installation Options\n\n### Virtual Environment Installation\n```bash\n# Create virtual environment\npython -m venv pdd-env\n\n# Activate environment\n# On Windows:\npdd-env\\Scripts\\activate\n# On Unix\u002FMacOS:\nsource pdd-env\u002Fbin\u002Factivate\n\n# Install PDD\npip install pdd-cli\n```\n\n\n\n## Getting Started\n\n### Option 1: Web Interface (Recommended)\n\nThe easiest way to use PDD is through the web 
interface:\n\n```bash\n# 1. Install PDD\ncurl -LsSf https:\u002F\u002Fastral.sh\u002Fuv\u002Finstall.sh | sh\nuv tool install pdd-cli\n\n# 2. Run setup (API keys, shell completion)\npdd setup\n\n# 3. Launch the web interface\npdd connect\n```\n\nThis opens a browser-based interface where you can:\n- **Run Commands**: Execute `pdd change`, `pdd bug`, `pdd fix`, `pdd sync` etc. visually\n- **Browse Files**: View and edit prompts, code, and tests in your project\n- **Remote Access**: Access your session from any browser via PDD Cloud (use `--local-only` to disable)\n\n### Option 2: Issue-Driven CLI\n\nFor CLI enthusiasts, implement GitHub issues directly:\n\n**Prerequisites:**\n1. **GitHub CLI** - Required for issue access:\n   ```bash\n   brew install gh && gh auth login\n   ```\n\n2. **One Agentic CLI** - Required to run the workflows (install at least one):\n   - **Claude Code**: `npm install -g @anthropic-ai\u002Fclaude-code` (requires `ANTHROPIC_API_KEY`)\n   - **Gemini CLI**: `npm install -g @google\u002Fgemini-cli` (requires `GOOGLE_API_KEY` or `GEMINI_API_KEY`)\n   - **Codex CLI**: `npm install -g @openai\u002Fcodex` (requires `OPENAI_API_KEY`)\n\n**Usage:**\n```bash\n# Implement a feature request\npdd change https:\u002F\u002Fgithub.com\u002Fowner\u002Frepo\u002Fissues\u002F123\n\n# Or fix a bug\npdd bug https:\u002F\u002Fgithub.com\u002Fowner\u002Frepo\u002Fissues\u002F456\npdd fix https:\u002F\u002Fgithub.com\u002Fowner\u002Frepo\u002Fissues\u002F456\n```\n\n### Option 3: Manual Prompt Workflow\n\nFor learning PDD fundamentals or working with existing prompt files:\n\n```bash\ncd your-project\npdd sync module_name  # Full automated workflow\n```\n\nSee the [Hello Example](#-quickstart-hello-example) below for a step-by-step introduction.\n\n---\n\n## 🚀 Quickstart (Hello Example)\n\nIf you want to understand PDD fundamentals, follow this manual example to see it in action.\n\n1. 
**Install prerequisites** (macOS\u002FLinux):\n   ```bash\n   xcode-select --install      # macOS only\n   curl -LsSf https:\u002F\u002Fastral.sh\u002Fuv\u002Finstall.sh | sh\n   uv tool install pdd-cli\n   pdd --version\n   ```\n\n2. **Clone repo**\n\n   ```bash\n     # Clone the repository (if not already done)\n    git clone https:\u002F\u002Fgithub.com\u002Fpromptdriven\u002Fpdd.git\n    cd pdd\u002Fexamples\u002Fhello\n   ```\n\n3. **Set one API key** (choose your provider):\n   ```bash\n   export GEMINI_API_KEY=\"your-gemini-key\"\n   # OR\n   export OPENAI_API_KEY=\"your-openai-key\"\n   ```\n\n### Post-Installation Setup (Required first step after installation)\n\nRun the comprehensive setup wizard:\n```bash\npdd setup\n```\n\nThe setup wizard runs these steps:\n  1.  Detects agentic CLI tools (Claude, Gemini, Codex) and offers installation and API key configuration if needed\n  2. Scans for API keys across `.env`, and `~\u002F.pdd\u002Fapi-env.*`, and the shell environment; prompts to add one if none are found\n  3. Configures models from a reference CSV `data\u002Fllm_model.csv` of top models (ELO ≥ 1400) across all LiteLLM-supported providers  based on your available keys\n  4. Optionally creates a `.pddrc` project config\n  5. Tests the first available model with a real LLM call \n  6. Prints a structured summary (CLIs, keys, models, test result)\n\nThe wizard can be re-run at any time to update keys, add providers, or reconfigure settings.\n\n> **Important:** After setup completes, source the API environment file so your keys take effect in the current terminal session:\n> ```bash\n> source ~\u002F.pdd\u002Fapi-env.zsh   # or api-env.bash, depending on your shell\n> ```\n> New terminal windows will load keys automatically.\n\nIf you skip this step, the first regular pdd command you run will detect the missing setup files and print a reminder banner so you can finish onboarding later.\n\n5. 
**Run Hello**:\n   ```bash\n   cd ..\u002Fhello\n   pdd --force generate hello_python.prompt\n   python3 hello.py\n   ```\n\n    ✅ Expected output:\n    ```\n    hello\n    ```\n\n\n\n## Cloud vs Local Execution\n\nPDD commands can be run either in the cloud or locally. By default, all commands run in the cloud mode, which provides several advantages:\n\n- No need to manage API keys locally\n- Access to more powerful models\n- Shared examples and improvements across the PDD community\n- Automatic updates and improvements\n- Better cost optimization\n\n### Cloud Authentication\n\nWhen running in cloud mode (default), PDD uses GitHub Single Sign-On (SSO) for authentication. On first use, you'll be prompted to authenticate:\n\n1. PDD will open your default browser to the GitHub login page\n2. Log in with your GitHub account\n3. Authorize PDD Cloud to access your GitHub profile\n4. Once authenticated, you can return to your terminal to continue using PDD\n\nThe authentication token is securely stored locally and automatically refreshed as needed.\n\n### Local Mode Requirements\n\nWhen running in local mode with the `--local` flag, you'll need to set up API keys for the language models:\n\n```bash\n# For OpenAI\nexport OPENAI_API_KEY=your_api_key_here\n\n# For Anthropic\nexport ANTHROPIC_API_KEY=your_api_key_here\n\n# For other supported providers (LiteLLM supports multiple LLM providers)\nexport PROVIDER_API_KEY=your_api_key_here\n```\n\nAdd these to your `.bashrc`, `.zshrc`, or equivalent for persistence.\n\nPDD's local mode uses LiteLLM (version 1.75.5 or higher) for interacting with language models, providing:\n\n- Support for multiple model providers (OpenAI, Anthropic, Google\u002FVertex AI, and more)\n- Automatic model selection based on strength settings\n- Response caching for improved performance\n- Smart token usage tracking and cost estimation\n- Interactive API key acquisition when keys are missing\n\nWhen keys are missing, PDD will prompt for them 
interactively and securely store them in your local `.env` file.\n\n### Local Model Configuration\n\nPDD uses a CSV file to configure model selection and capabilities. This configuration is loaded from:\n\n1. User-specific configuration: `~\u002F.pdd\u002Fllm_model.csv` (takes precedence if it exists)\n2. Project-specific configuration: `\u003CPROJECT_ROOT>\u002F.pdd\u002Fllm_model.csv`\n3. Package default: Bundled with PDD installation (fallback when no local configurations exist)\n\nThe CSV includes columns for:\n- `provider`: The LLM provider (e.g., \"openai\", \"anthropic\", \"google\")\n- `model`: The LiteLLM model identifier (e.g., \"gpt-4\", \"claude-3-opus-20240229\")\n- `input`\u002F`output`: Costs per million tokens\n- `coding_arena_elo`: ELO rating for coding ability\n- `api_key`: The environment variable name for the required API key\n- `structured_output`: Whether the model supports structured JSON output\n- `reasoning_type`: Support for reasoning capabilities (\"none\", \"budget\", or \"effort\")\n\nFor a concrete, up-to-date reference of supported models and example rows, see the bundled CSV in this repository: [pdd\u002Fdata\u002Fllm_model.csv](pdd\u002Fdata\u002Fllm_model.csv).\n\nFor proper model identifiers to use in your custom configuration, refer to the [LiteLLM Model List](https:\u002F\u002Fdocs.litellm.ai\u002Fdocs\u002Fproviders) documentation. LiteLLM typically uses model identifiers in the format `provider\u002Fmodel_name` (e.g., \"openai\u002Fgpt-4\", \"anthropic\u002Fclaude-3-opus-20240229\").\n\n## Troubleshooting Common Installation Issues\n\n1. **Command not found**\n   ```bash\n   # Add to PATH if needed\n   export PATH=\"$HOME\u002F.local\u002Fbin:$PATH\"\n   ```\n\n2. **Permission errors**\n   ```bash\n   # Install with user permissions\n   pip install --user pdd-cli\n   ```\n\n3. 
**macOS-specific issues**\n   - **Xcode Command Line Tools not found**: Run `xcode-select --install` to install the required development tools\n   - **Homebrew not found**: Install Homebrew using the command in the prerequisites section above\n   - **Python not found or wrong version**: Install Python 3 via Homebrew: `brew install python`\n   - **Permission denied during compilation**: Ensure Xcode Command Line Tools are properly installed and you have write permissions to the installation directory\n   - **uv installation fails**: Try installing uv through Homebrew: `brew install uv`\n   - **Python version conflicts**: If you have multiple Python versions, ensure `python3` points to Python 3.8+: `which python3 && python3 --version`\n\n## Version\n\nCurrent version: 0.0.179\n\nTo check your installed version, run:\n```\npdd --version\n```\nPDD includes an auto-update feature to ensure you always have access to the latest features and security patches. You can control this behavior using an environment variable (see \"Auto-Update Control\" section below).\n\n## Supported Programming Languages\n\nPDD supports a wide range of programming languages, including but not limited to:\n- Python\n- JavaScript\n- TypeScript\n- Java\n- C++\n- Ruby\n- Go\n\nThe specific language is often determined by the prompt file's naming convention or specified in the command options.\n\n## Prompt File Naming Convention\n\nPrompt files in PDD follow this specific naming format:\n```\n\u003Cbasename>_\u003Clanguage>.prompt\n```\nWhere:\n- `\u003Cbasename>` is the base name of the file or project\n- `\u003Clanguage>` is the programming language or context of the prompt file\n\nExamples:\n- `factorial_calculator_python.prompt` (basename: factorial_calculator, language: python)\n- `responsive_layout_css.prompt` (basename: responsive_layout, language: css)\n- `data_processing_pipeline_python.prompt` (basename: data_processing_pipeline, language: python)\n\n## Prompt-Driven Development 
Philosophy\n\n### Core Concepts\n\nPrompt-Driven Development (PDD) inverts traditional software development by treating prompts as the primary artifact - not code. This paradigm shift has profound implications:\n\n1. **Prompts as Source of Truth**: \n   In traditional development, source code is the ground truth that defines system behavior. In PDD, the prompts are authoritative, with code being a generated artifact.\n\n2. **Natural Language Over Code**:\n   Prompts are written primarily in natural language, making them more accessible to non-programmers and clearer in expressing intent.\n\n3. **Regenerative Development**:\n   When changes are needed, you modify the prompt and regenerate code, rather than directly editing the code. This maintains the conceptual integrity between requirements and implementation.\n\n4. **Intent Preservation**:\n   Prompts capture the \"why\" behind code in addition to the \"what\" - preserving design rationale in a way that comments often fail to do.\n\n### Mental Model\n\nTo work effectively with PDD, adopt these mental shifts:\n\n1. **Prompt-First Thinking**:\n   Always start by defining what you want in a prompt before generating any code.\n\n2. **Bidirectional Flow**:\n   - Prompt → Code: The primary direction (generation)\n   - Code → Prompt: Secondary but crucial (keeping prompts in sync with code changes)\n\n3. **Modular Prompts**:\n   Just as you modularize code, you should modularize prompts into self-contained units that can be composed.\n\n4. **Integration via Examples**:\n   Modules integrate through their examples, which serve as interfaces, allowing for token-efficient references.\n\n### PDD Workflows: Conceptual Understanding\n\nEach workflow in PDD addresses a fundamental development need:\n\n1. 
**Initial Development Workflow**\n   - **Purpose**: Creating functionality from scratch\n   - **Conceptual Flow**: Define dependencies → Generate implementation → Create interfaces → Ensure runtime functionality → Verify correctness\n   \n   This workflow embodies the prompt-to-code pipeline, moving from concept to tested implementation.\n\n2. **Code-to-Prompt Update Workflow**\n   - **Purpose**: Maintaining prompt as source of truth when code changes\n   - **Conceptual Flow**: Sync code changes to prompt → Identify impacts → Propagate changes\n   \n   This workflow ensures the information flow from code back to prompts, preserving prompts as the source of truth.\n\n3. **Debugging Workflows**\n   - **Purpose**: Resolving different types of issues\n   - **Conceptual Types**:\n     - **Context Issues**: Addressing misunderstandings in prompt interpretation\n     - **Runtime Issues**: Fixing execution failures\n     - **Logical Issues**: Correcting incorrect behavior\n     - **Traceability Issues**: Connecting code problems back to prompt sections\n   \n   These workflows recognize that different errors require different resolution approaches.\n\n4. **Refactoring Workflow**\n   - **Purpose**: Improving prompt organization and reusability\n   - **Conceptual Flow**: Extract functionality → Ensure dependencies → Create interfaces\n   \n   This workflow parallels code refactoring but operates at the prompt level.\n\n5. **Multi-Prompt Architecture Workflow**\n   - **Purpose**: Coordinating systems with multiple prompts\n   - **Conceptual Flow**: Detect conflicts → Resolve incompatibilities → Regenerate code → Update interfaces → Verify system\n   \n   This workflow addresses the complexity of managing multiple interdependent prompts.\n\n6. **Enhancement Phase**: Use Feature Enhancement when adding capabilities to existing modules.\n\n### Workflow Selection Principles\n\nThe choice of workflow should be guided by your current development phase:\n\n1. 
**Creation Phase**: Use Initial Development when building new functionality.\n\n2. **Maintenance Phase**: Use Code-to-Prompt Update when existing code changes.\n\n3. **Problem-Solving Phase**: Choose the appropriate Debugging workflow based on the issue type:\n   - Preprocess → Generate for prompt interpretation issues\n   - Crash for runtime errors\n   - Bug → Fix for logical errors\n   - Trace for locating problematic prompt sections\n\n4. **Restructuring Phase**: Use Refactoring when prompts grow too large or complex.\n\n5. **System Design Phase**: Use Multi-Prompt Architecture when coordinating multiple components.\n\n6. **Enhancement Phase**: Use Feature Enhancement when adding capabilities to existing modules.\n\n### PDD Design Patterns\n\nEffective PDD employs these recurring patterns:\n\n1. **Dependency Injection via Auto-deps**:\n   Automatically including relevant dependencies in prompts.\n\n2. **Interface Extraction via Example**:\n   Creating minimal reference implementations for reuse.\n\n3. **Bidirectional Traceability**:\n   Maintaining connections between prompt sections and generated code.\n\n4. **Test-Driven Prompt Fixing**:\n   Using tests to guide prompt improvements when fixing issues.\n\n5. **Hierarchical Prompt Organization**:\n   Structuring prompts from high-level architecture to detailed implementations.\n\n## Basic Usage\n\n```\npdd [GLOBAL OPTIONS] COMMAND [OPTIONS] [ARGS]...\n```\n\n## Command Overview\n\nHere is a brief overview of the main commands provided by PDD. 
Click the command name to jump to its detailed section:\n\n### Command Relationships\n\nThe following diagram shows how PDD commands interact:\n\n```mermaid\ngraph TB\n    subgraph Entry Points\n        connect[\"pdd connect (Web UI - Recommended)\"]\n        cli[\"Direct CLI\"]\n        ghapp[\"GitHub App\"]\n    end\n\n    gen_url[\"pdd generate &lt;url&gt;\"]\n\n    subgraph sync workflow\n        sync[\"pdd sync\"]\n        s_deps[\"auto-deps\"]\n        s_gen[\"generate\"]\n        s_example[\"example\"]\n        s_crash[\"crash\"]\n        s_verify[\"verify\"]\n        s_test[\"test\"]\n        s_fix[\"fix\"]\n        s_update[\"update\"]\n    end\n\n    checkup[\"pdd checkup &lt;url&gt;\"]\n    test_url[\"pdd test &lt;url&gt;\"]\n    bug_url[\"pdd bug &lt;url&gt;\"]\n    fix_url[\"pdd fix &lt;url&gt;\"]\n    change[\"pdd change &lt;url&gt;\"]\n    sync_url[\"pdd sync &lt;url&gt;\"]\n\n    connect --> gen_url\n    cli --> gen_url\n    ghapp --> gen_url\n    gen_url --> sync\n    sync --> s_deps\n    s_deps --> s_gen\n    s_gen --> s_example\n    s_example --> s_crash\n    s_crash --> s_verify\n    s_verify --> s_test\n    s_test --> s_fix\n    s_fix --> s_update\n    sync --> checkup\n    checkup --> test_url\n    checkup --> bug_url\n    checkup --> change\n    test_url --> fix_url\n    bug_url --> fix_url\n    change --> sync_url\n    sync_url -.-> sync\n```\n\n**Key concepts:**\n- **Entry points**: `pdd connect` (web UI), direct CLI, or the GitHub App\n- **Start**: `pdd generate \u003Curl>` scaffolds architecture, prompts, and `.pddrc` from a PRD GitHub issue\n- **Core loop**: `pdd sync` runs the full auto-deps → generate → example → crash → verify → test → fix → update cycle for each module\n- **Health check**: `pdd checkup \u003Curl>` identifies what needs attention next\n- **Defect path**: `test \u003Curl>` or `bug \u003Curl>` surfaces failing tests → `fix \u003Curl>` resolves them\n- **Feature path**: `change \u003Curl>` implements the feature → `sync 
\u003Curl>` re-runs sync across affected modules\n\n### Getting Started\n- **[`connect`](#18-connect)**: **[RECOMMENDED]** Launch web interface for visual PDD interaction\n- **[`setup`](#post-installation-setup-required-first-step-after-installation)**: Configure API keys and shell completion\n\n### Agentic Commands (Issue-Driven)\n- **[`change`](#8-change)**: Implement feature requests from GitHub issues (12-step workflow)\n- **[`bug`](#14-bug)**: Analyze bugs and create failing tests from GitHub issues\n- **[`checkup`](#17-checkup)**: Run automated project health check from a GitHub issue (8-step workflow)\n- **[`fix`](#6-fix)**: Fix failing tests (supports issue-driven and manual modes)\n- **[`sync`](#1-sync)**: Multi-module parallel sync from a GitHub issue (when passed a URL instead of basename)\n- **[`test`](#4-test)**: Generate UI tests from GitHub issues (18-step workflow in agentic mode)\n\n### Core Commands (Prompt-Based)\n- **[`sync`](#1-sync)**: **[PRIMARY FOR PROMPT WORKFLOWS]** Automated prompt-to-code cycle\n- **[`generate`](#2-generate)**: Creates runnable code from a prompt file; supports parameterized prompts via `-e\u002F--env`\n- **[`example`](#3-example)**: Generates a compact example showing how to use functionality defined in a prompt\n- **[`test`](#4-test)**: Generates or enhances unit tests for a code file and its prompt\n- **[`update`](#9-update)**: Updates the original prompt file based on modified code\n- **[`verify`](#16-verify)**: Verifies functional correctness by running a program and judging output against intent\n- **[`crash`](#12-crash)**: Fixes errors in a code module and its calling program that caused a crash\n\n### Prompt Management\n- **[`preprocess`](#5-preprocess)**: Preprocesses prompt files, handling includes, comments, and other directives\n- **[`split`](#7-split)**: Splits large prompt files into smaller, more manageable ones\n- **[`extracts prune`](#21-extracts)**: Garbage-collect orphaned extracts cache entries\n- 
**[`auto-deps`](#15-auto-deps)**: Analyzes and inserts needed dependencies into a prompt file\n- **[`detect`](#10-detect)**: Analyzes prompts to determine which ones need changes based on a description\n- **[`conflicts`](#11-conflicts)**: Finds and suggests resolutions for conflicts between two prompt files\n- **[`trace`](#13-trace)**: Finds the corresponding line number in a prompt file for a given code line\n\n### Utility Commands\n- **[`auth`](#19-auth)**: Manages authentication with PDD Cloud\n- **[`sessions`](#20-pdd-sessions---manage-remote-sessions)**: Manage remote sessions for `connect`\n\n### User Story Prompt Tests\nPDD can validate prompt changes against user stories stored as Markdown files. This uses `detect` under the hood: a story **passes** when `detect` returns no required prompt changes.\n\nDefaults:\n- Stories live in `user_stories\u002F` and match `story__*.md`.\n- Prompts are loaded from `prompts\u002F` (excluding `*_llm.prompt` by default).\n\nOverrides:\n- `PDD_USER_STORIES_DIR` sets the stories directory.\n- `PDD_PROMPTS_DIR` sets the prompts directory.\n\nCommands:\n- `pdd detect --stories` runs the validation suite.\n- `pdd change` runs story validation after prompt modifications and fails if any story fails.\n- `pdd fix user_stories\u002Fstory__*.md` applies a single story to prompts and re-validates it.\n- `pdd test \u003Cprompt_1.prompt> [prompt_2.prompt ...]` generates a `story__*.md` file and links those prompts.\n- `pdd test user_stories\u002Fstory__*.md` updates prompt links for an existing story file.\n\nStory prompt linkage:\n- Stories may include optional metadata to scope validation to a subset of prompts:\n  `\u003C!-- pdd-story-prompts: prompts\u002Fa_python.prompt, prompts\u002Fb_python.prompt -->`\n- If metadata is missing, `pdd detect --stories` validates against the full prompt set.\n- In `--stories` mode, when `detect` identifies impacted prompts, PDD caches links back into the story metadata for future deterministic 
runs.\n\nTemplate:\n- See `user_stories\u002Fstory__template.md` for a starter format.\n## Global Options\n\nThese options can be used with any command:\n\n- `--force`: Skip all interactive prompts (file overwrites, API key requests). Useful for CI\u002Fautomation.\n- `--strength FLOAT`: Set the strength of the AI model (0.0 to 1.0, default is 0.5).\n  - 0.0: Cheapest available model\n  - 0.5: Default base model\n  - 1.0: Most powerful model (highest ELO rating)\n- `--time FLOAT`: Controls the reasoning allocation for LLM models supporting reasoning capabilities (0.0 to 1.0, default is 0.25).\n  - For models with specific reasoning token limits (e.g., 64k), a value of `1.0` utilizes the maximum available tokens.\n  - For models with discrete effort levels, `1.0` corresponds to the highest effort level.\n  - Values between 0.0 and 1.0 scale the allocation proportionally.\n- `--temperature FLOAT`: Set the temperature of the AI model (default is 0.0).\n- `--verbose`: Increase output verbosity for more detailed information. 
Includes token count and context window usage for each LLM call.\n- `--quiet`: Decrease output verbosity for minimal information.\n- `--output-cost PATH_TO_CSV_FILE`: Enable cost tracking and output a CSV file with usage details.\n- `--review-examples`: Review and optionally exclude few-shot examples before command execution.\n- `--local`: Run commands locally instead of in the cloud.\n- `--core-dump`: Capture a debug bundle for this run so it can be replayed and analyzed later.\n- `report-core`: Report a bug by creating a GitHub issue with the core dump file.\n- `--context CONTEXT_NAME`: Override automatic context detection and use the specified context from `.pddrc`.\n- `--list-contexts`: List all available contexts defined in `.pddrc` and exit.\n\n### Core Dump Debug Bundles\n\nIf something goes wrong and you want the PDD team to be able to reproduce it, you can run any command with a core dump enabled:\n\n```bash\npdd --core-dump sync factorial_calculator\npdd --core-dump crash prompts\u002Fcalc_python.prompt src\u002Fcalc.py examples\u002Frun_calc.py crash_errors.log\n```\n\nWhen `--core-dump` is set, PDD:\n\n- Captures the full CLI command and arguments\n- Records relevant logs and internal trace information for that run\n- Bundles the prompt(s), generated code, and key metadata needed to replay the issue\n\nAt the end of the run, PDD prints the path to the core dump bundle.  \nAttach that bundle when you open a GitHub issue or send a bug report so maintainers can quickly reproduce and diagnose your problem.\n\n#### `report-core` Command\n\nThe `report-core` command helps you report a bug by creating a GitHub issue with the core dump file. It simplifies the reporting process by automatically collecting relevant files and information.\n\n**Usage:**\n```bash\npdd report-core [OPTIONS] [CORE_FILE]\n```\n\n**Arguments:**\n- `CORE_FILE`: The path to the core dump file (e.g., `.pdd\u002Fcore_dumps\u002Fpdd-core-....json`). 
If omitted, the most recent core dump is used.\n\n**Options:**\n- `--api`: Create the issue directly via the GitHub API instead of opening a browser. This enables automatic Gist creation for attached files.\n- `--repo OWNER\u002FREPO`: Override the target repository (default: `promptdriven\u002Fpdd`).\n- `--description`, `-d TEXT`: A short description of what went wrong.\n\n**Authentication:**\n\nTo use the `--api` flag, you need to be authenticated with GitHub. PDD checks for credentials in the following order:\n\n1.  **GitHub CLI**: `gh auth token` (recommended)\n2.  **Environment Variables**: `GITHUB_TOKEN` or `GH_TOKEN`\n3.  **Legacy**: `PDD_GITHUB_TOKEN`\n\n**File Tracking & Gists:**\n\nWhen using `--api`, PDD will:\n1.  Collect all relevant files (prompts, code, tests, configs, meta files).\n2.  Create a **private GitHub Gist** containing these files.\n3.  Link the Gist in the created issue.\n\nThis ensures that all necessary context is available for debugging while keeping the issue body clean. If you don't use `--api`, files will be truncated to fit within the URL length limits of the browser-based submission.\n\n---\n\n### Context Selection Flags\n\n- `--list-contexts` reads the nearest `.pddrc` (searching upward from the current directory), prints the available contexts one per line, and exits immediately with status 0. No auto‑update checks or subcommands run when this flag is present.\n- `--context CONTEXT_NAME` is validated early against the same `.pddrc` source of truth. If the name is unknown, the CLI raises a `UsageError` and exits with code 2 before running auto‑update or subcommands.\n- Precedence for configuration is: CLI options > `.pddrc` context > environment variables > defaults. See Configuration for details.\n\n## Auto-Update Control\n\nPDD automatically updates itself to ensure you have the latest features and security patches. 
However, you can control this behavior using the `PDD_AUTO_UPDATE` environment variable:\n\n```bash\n# Disable auto-updates\nexport PDD_AUTO_UPDATE=false\n\n# Enable auto-updates (default behavior)\nexport PDD_AUTO_UPDATE=true\n```\n\nFor persistent settings, add this environment variable to your shell's configuration file (e.g., `.bashrc` or `.zshrc`).\n\nThis is particularly useful in:\n- Production environments where version stability is crucial\n- CI\u002FCD pipelines where consistent behavior is required\n- Version-sensitive projects that require specific PDD versions\n\n## AI Model Information\n\nPDD uses a large language model to generate and manipulate code. The `--strength` and `--temperature` options allow you to control the model's output:\n\n- Strength: Determines how powerful\u002Fexpensive a model should be used. Higher values (closer to 1.0) result in high performance models with better capabilities (selected by ELO rating), while lower values (closer to 0.0) select more cost-effective models.\n- Temperature: Controls the randomness of the output. Higher values increase diversity but may lead to less coherent results, while lower values produce more focused and deterministic outputs.\n- Time: (Optional, controlled by `--time FLOAT`) For models supporting reasoning, this scales the allocated reasoning resources (e.g., tokens or effort level) between minimum (0.0) and maximum (1.0), with a default of 0.25.\n\nWhen running in local mode, PDD uses LiteLLM to select and interact with language models based on a configuration file that includes:\n- Input and output costs per million tokens\n- ELO ratings for coding ability\n- Required API key environment variables\n- Structured output capability flags\n- Reasoning capabilities (budget-based or effort-based)\n\n## Output Cost Tracking\n\nPDD includes a feature for tracking and reporting the cost of operations. 
When enabled, it generates a CSV file with usage details for each command execution.\n\n### Usage\n\nTo enable cost tracking, use the `--output-cost` option with any command:\n\n```\npdd --output-cost PATH_TO_CSV_FILE [COMMAND] [OPTIONS] [ARGS]...\n```\n\nThe `PATH_TO_CSV_FILE` should be the desired location and filename for the CSV output.\n\n### Cost Calculation and Presentation\n\nPDD calculates costs based on the AI model usage for each operation. Costs are presented in USD (United States Dollars) and are calculated using the following factors:\n\n1. Model strength: Higher strength settings generally result in higher costs.\n2. Input size: Larger inputs (e.g., longer prompts or code files) typically incur higher costs.\n3. Operation complexity: Some operations (like `fix` and `crash` with multiple iterations) may be more costly than simpler operations.\n\nThe exact cost per operation is determined by the LiteLLM integration using the provider's current pricing model. PDD uses an internal pricing table that is regularly updated to reflect the most current rates.\n\n### CSV Output\n\nThe generated CSV file includes the following columns:\n- timestamp: The date and time of the command execution\n- model: The AI model used for the operation\n- command: The PDD command that was executed\n- cost: The estimated cost of the operation in USD (e.g., 0.05 for 5 cents). 
This will be zero for local models or operations that do not use a LLM.\n- input_files: A list of input files involved in the operation\n- output_files: A list of output files generated or modified by the operation\n\nThis comprehensive output allows for detailed tracking of not only the cost and type of operations but also the specific files involved in each PDD command execution.\n\n### Environment Variable\n\nYou can set a default location for the cost output CSV file using the environment variable:\n\n- **`PDD_OUTPUT_COST_PATH`**: Default path for the cost tracking CSV file.\n\nIf this environment variable is set, the CSV file will be saved to the specified path by default, unless overridden by the `--output-cost` option. For example, if `PDD_OUTPUT_COST_PATH=\u002Fpath\u002Fto\u002Fcost\u002Freports\u002F`, the CSV file will be saved in that directory with a default filename.\n\n### Cost Budgeting\n\nFor commands that support it (like the `fix` command), you can set a maximum budget using the `--budget` option. This helps prevent unexpected high costs, especially for operations that might involve multiple AI model calls.\n\nExample:\n```\npdd [GLOBAL OPTIONS] fix --budget 5.0 [OTHER OPTIONS] [ARGS]...\n```\nThis sets a maximum budget of $5.00 for the fix operation.\n\n## Commands\n\nHere are the main commands provided by PDD:\n\n### 1. sync\n\n**[PRIMARY COMMAND]** Automatically execute the complete PDD workflow loop for a given basename. This command implements the entire synchronized cycle from the whitepaper, intelligently determining what steps are needed and executing them in the correct order with real-time visual feedback and sophisticated state management.\n\n```bash\n# Single-module sync\npdd [GLOBAL OPTIONS] sync [OPTIONS] BASENAME\n\n# Agentic multi-module sync from a GitHub issue\npdd [GLOBAL OPTIONS] sync [OPTIONS] GITHUB_ISSUE_URL\n```\n\nImportant: Sync frequently overwrites generated files to keep outputs up to date. 
In most real runs, include the global `--force` flag to allow overwrites without interactive confirmation:\n\n```\npdd --force sync BASENAME\n```\n\nArguments:\n- `BASENAME`: The base name for the prompt file (e.g., \"factorial_calculator\" for \"factorial_calculator_python.prompt\")\n\nOptions:\n- `--max-attempts INT`: Maximum number of fix attempts in any iterative loop (default is 3)\n- `--budget FLOAT`: Maximum total cost allowed for the entire sync process (default is $20.0)\n- `--skip-verify`: Skip the functional verification step\n- `--skip-tests`: Skip unit test generation and fixing\n- `--target-coverage FLOAT`: Desired code coverage percentage (default is 90.0)\n- `--dry-run`: Display real-time sync analysis for this basename instead of running sync operations. This performs the same state analysis as a normal sync run but without acquiring exclusive locks or executing any operations, allowing inspection even when another sync process is active.\n- `--no-steer`: Disable interactive steering of sync operations.\n- `--steer-timeout FLOAT`: Timeout in seconds for steering prompts (default: 8.0).\n\n**Real-time Progress Animation**:\nThe sync command provides live visual feedback showing:\n- Current operation being executed (auto-deps, generate, example, crash, verify, test, fix, update)\n- File status indicators with color coding:\n  - Green: File exists and up-to-date\n  - Yellow: File being processed\n  - Red: File has errors or missing\n  - Blue: File analysis in progress\n- Running cost totals and time elapsed\n- Progress through the workflow steps\n\n**Language Detection**:\nThe sync command automatically detects the programming language by scanning for existing prompt files matching the pattern `{basename}_{language}.prompt` in the prompts directory. 
For example:\n- `factorial_calculator_python.prompt` → generates `factorial_calculator.py`\n- `factorial_calculator_typescript.prompt` → generates `factorial_calculator.ts`\n- `factorial_calculator_javascript.prompt` → generates `factorial_calculator.js`\n\nIf multiple development language prompt files exist for the same basename, sync will process all of them.\n\n**Language Filtering**: The sync command only processes development languages (python, javascript, typescript, java, cpp, etc.) and excludes runtime languages (LLM). Files ending in `_llm.prompt` are used for internal processing only and cannot form valid development units since they lack associated code, examples, and tests required for the sync workflow.\n\n**Advanced Configuration Integration**:\n- **Automatic Context Detection**: Detects project structure and applies appropriate settings from `.pddrc`\n- **Configuration Hierarchy**: CLI options > .pddrc context > environment variables > defaults\n- **Multi-language Support**: Automatically processes all language variants of a basename\n- **Intelligent Path Resolution**: Uses sophisticated directory management for complex project structures\n- Context-specific settings include output paths, default language, model parameters, coverage targets, and budgets\n\n**Workflow Logic**:\n\nThe sync command automatically detects what files exist and executes the appropriate workflow:\n\n1. **auto-deps**: Find and inject relevant dependencies into the prompt — both code examples and documentation files (schema docs, API docs, etc.). Removes redundant inline content that duplicates included documents.\n2. **generate**: Create or update the code module from the prompt\n3. **example**: Generate usage example if it doesn't exist or is outdated\n4. **crash**: Fix any runtime errors to make code executable\n5. **verify**: Run functional verification against prompt intent (unless --skip-verify)\n6. 
**test**: Generate comprehensive unit tests if they don't exist (unless --skip-tests). Auth modules get auth-specific test patterns (mock OAuth servers, JWT fixtures, token lifecycle testing)\n7. **fix**: Resolve any bugs found by unit tests\n8. **update**: Back-propagate any learnings to the prompt file\n\n**Advanced Decision Making**:\n- **Fingerprint-based Change Detection**: Uses content hashes and timestamps to precisely detect what changed\n- **LLM-powered Conflict Resolution**: For complex scenarios with multiple file changes, uses AI to determine the best approach\n- **Persistent State Tracking**: Maintains sync history and learns from previous operations\n- **Smart Lock Management**: Prevents concurrent sync operations with automatic stale lock cleanup\n- Detects which files already exist and are up-to-date\n- Skips unnecessary steps (e.g., won't regenerate code if prompt hasn't changed)\n- Uses git integration to detect changes and determine incremental vs full regeneration\n- Accumulates tests over time rather than replacing them (in a single test file per target)\n- Automatically handles dependencies between steps\n\n**Robust State Management**:\n- **Fingerprint Files**: Maintains `.pdd\u002Fmeta\u002F{basename}_{language}.json` with operation history\n- **Run Reports**: Tracks test results, coverage, and execution status  \n- **Lock Management**: Prevents race conditions with file-descriptor based locking\n- **Git Integration**: Leverages version control for change detection and rollback safety\n\n**The `.pdd` Directory**:\nPDD uses a `.pdd` directory in your project root to store various metadata and configuration files:\n- `.pdd\u002Fmeta\u002F` - Contains fingerprint files, run reports, and sync logs\n- `.pdd\u002Flocks\u002F` - Stores lock files to prevent concurrent operations\n- `.pdd\u002Fllm_model.csv` - Project-specific LLM model configuration (optional)\n\nThis directory should typically be added to version control (except for lock files), as 
it contains important project state information.\n\n**Environment Variables**:\nAll existing PDD output path environment variables are respected, allowing the sync command to save files in the appropriate locations for your project structure.\n\n**Sync State Analysis**:\nThe sync command maintains detailed decision-making logs which you can view using the `--dry-run` option:\n\n```bash\n# View current sync state analysis (non-blocking)\npdd sync --dry-run calculator\n\n# View detailed LLM reasoning for complex scenarios\npdd --verbose sync --dry-run calculator\n```\n\n**Analysis Contents Include**:\n- Current file state and fingerprint comparisons\n- Real-time decision reasoning (heuristic-based vs LLM-powered analysis)\n- Operation recommendations with confidence levels\n- Estimated costs for recommended operations\n- Lock status and potential conflicts\n- State management details\n\nThe `--dry-run` option performs live analysis of the current project state, making it safe to run even when another sync operation is in progress. This differs from viewing historical logs - it shows what sync would decide to do right now based on current file states.\n\nUse `--verbose` with `--dry-run` to see detailed LLM reasoning for complex multi-file change scenarios and advanced state analysis.\n\n**When to use**: This is the recommended starting point for most PDD workflows. Use sync when you want to ensure all artifacts (code, examples, tests) are up-to-date and synchronized with your prompt files. 
The command embodies the PDD philosophy by treating the workflow as a batch process that developers can launch and return to later, freeing them from constant supervision.\n\nExamples:\n```bash\n# Complete workflow with progress animation and intelligent decision-making\npdd --force sync factorial_calculator\n\n# Advanced sync with higher budget, custom coverage, and full visual feedback\npdd --force sync --budget 15.0 --target-coverage 95.0 data_processor\n\n# Quick sync with animation showing real-time status updates\npdd --force sync --skip-verify --budget 5.0 web_scraper\n\n# Multi-language sync with fingerprint-based change detection\npdd --force sync multi_language_module\n\n# View comprehensive sync analysis with decision analysis\npdd sync --dry-run factorial_calculator\n\n# View detailed sync analysis with LLM reasoning for complex conflict resolution\npdd --verbose sync --dry-run factorial_calculator\n\n# Monitor what sync would do without executing (with state analysis)\npdd sync --dry-run calculator\n\n# Context-aware examples with automatic configuration detection\ncd backend && pdd --force sync calculator     # Uses backend context settings with animation\ncd frontend && pdd --force sync dashboard     # Uses frontend context with real-time feedback\npdd --context backend --force sync calculator # Explicit context override with visual progress\n```\n\n**Agentic Multi-Module Sync (GitHub Issue Mode)**:\n\nWhen a GitHub issue URL is passed instead of a basename, sync enters agentic mode:\n1. **Module Identification**: Fetches the issue content and uses an LLM to identify which modules need syncing\n2. **Dependency Validation**: Validates architecture.json dependencies and applies corrections if needed\n3. **Parallel Execution**: Dispatches parallel sync via `AsyncSyncRunner` with dependency-aware scheduling (max 4 concurrent workers)\n4. 
**Live Progress**: Posts and updates a GitHub comment with real-time module sync status\n\n```bash\n# Sync modules identified from a GitHub issue (parallel, dependency-aware)\npdd sync https:\u002F\u002Fgithub.com\u002Fmyorg\u002Fmyrepo\u002Fissues\u002F100\n\n# With additional timeout for large modules\npdd sync --timeout-adder 60 https:\u002F\u002Fgithub.com\u002Fmyorg\u002Fmyrepo\u002Fissues\u002F100\n```\n\nOptions (agentic mode):\n- `--timeout-adder FLOAT`: Add additional seconds to each module's timeout (default: 0.0)\n- `--no-github-state`: Disable GitHub state persistence, use local-only\n\n**Cross-Machine Resume**: Workflow state is stored in a hidden GitHub comment, enabling resume from any machine. Use `--no-github-state` to disable.\n\n### 2. generate\n\nCreate runnable code from a prompt file. This command produces the full implementation code that fulfills all requirements in the prompt. When changes are detected between the current prompt and its last committed version, it can automatically perform incremental updates rather than full regeneration.\n\n```bash\n# Basic usage\npdd [GLOBAL OPTIONS] generate [OPTIONS] PROMPT_FILE\n```\n\nArguments:\n- `PROMPT_FILE`: The filename of the prompt file used to generate the code.\n\nOptions:\n- `--output LOCATION`: Specify where to save the generated code. Supports `${VAR}`\u002F`$VAR` expansion from `-e\u002F--env`. The default file name is `\u003Cbasename>.\u003Clanguage_file_extension>`. If an environment variable `PDD_GENERATE_OUTPUT_PATH` is set, the file will be saved in that path unless overridden by this option.\n- `--original-prompt FILENAME`: The original prompt file used to generate the existing code. If not specified, the command automatically uses the last committed version of the prompt file from git.\n- `--incremental`: Force incremental patching even if changes are significant. 
This option is only valid when an output location is specified and the file exists.\n- `--unit-test FILENAME`: Path to a unit test file. If provided, automatic test discovery is disabled and only the content of this file is included in the prompt, instructing the model to generate code that passes the specified tests.\n- `--exclude-tests`: Do not automatically include test files found in the default tests directory.\n\n**Parameter Variables (-e\u002F--env)**:\nPass key=value pairs to parameterize a prompt so one prompt can generate multiple variants (e.g., multiple files) by invoking `generate` repeatedly with different values.\n\n- Syntax: `-e KEY=VALUE` or `--env KEY=VALUE` (repeatable).\n- Docker-style env fallback: `-e KEY` reads `VALUE` from the current process environment variable `KEY`.\n- Scope: Applies to `generate`.\n- Precedence: Values passed with `-e\u002F--env` override same‑named OS environment variables during template expansion for this command.\n\n**Templating**:\nPrompt files and `--output` values may reference variables using `$VAR` or `${VAR}`. Only variables explicitly provided via `-e\u002F--env` (or via env fallback with `-e KEY`) are substituted; all other dollar-prefixed text is left unchanged. No escaping is required for ordinary `$` usage.\n\n- In prompt content: `$VAR` and `${VAR}` are replaced only when `VAR` was provided.\n- In output path: When using `--output`, PDD also expands `$VAR`\u002F`${VAR}` using the same variable set.\n- Unknowns: Placeholders without a provided value are left unchanged. 
If you pass `-e KEY` (no value) and `KEY` exists in the OS environment, that environment value is used.\n\nExamples:\n```\n# Basic parameterized generation (Python module)\npdd generate -e MODULE=orders --output 'src\u002F${MODULE}.py' prompts\u002Fmodule_python.prompt\n\n# Generate multiple files from the same prompt\npdd generate -e MODULE=orders   --output 'src\u002F${MODULE}.py' prompts\u002Fmodule_python.prompt\npdd generate -e MODULE=payments --output 'src\u002F${MODULE}.py' prompts\u002Fmodule_python.prompt\npdd generate -e MODULE=customers --output 'src\u002F${MODULE}.py' prompts\u002Fmodule_python.prompt\n\n# Multiple variables\npdd generate -e MODULE=orders -e PACKAGE=core --output 'src\u002F${PACKAGE}\u002F${MODULE}.py' prompts\u002Fmodule_python.prompt\n\n# Docker-style env fallback (reads MODULE from your shell env)\nexport MODULE=orders\npdd generate -e MODULE --output 'src\u002F${MODULE}.py' prompts\u002Fmodule_python.prompt\n```\n\nShell quoting options:\n- Quote `KEY=VALUE` if the value contains spaces or shell-special characters: `-e \"DISPLAY_NAME=Order Processor\"`.\n- PDD-side expansion (portable): prevent shell expansion and let PDD expand using `-e\u002F--env` — e.g., `--output 'src\u002F${MODULE}.py'`.\n- Shell-side expansion (familiar): set an env var and let the shell expand `--output`, while still passing `-e KEY` so prompts get the same value — e.g.,\n  - `export MODULE=orders && pdd generate -e MODULE --output \"src\u002F$MODULE.py\" prompts\u002Fmodule_python.prompt`\n  - Or inline for POSIX shells: `MODULE=orders pdd generate -e MODULE --output \"src\u002F$MODULE.py\" prompts\u002Fmodule_python.prompt`\n  - Note: PowerShell\u002FWindows shells differ; PDD-side expansion is more portable across shells.\n\n**Git Integration**:\n- When the command detects changes between the current prompt and its last committed version, it automatically considers incremental generation if the output file exists.\n- If incremental generation is 
performed, both the current prompt and code files are staged with `git add` (if not already committed\u002Fadded) to ensure you can roll back if needed.\n- Full regeneration always happens for new files (when there's no existing output file to update) or when the existing output file is deleted.\n\n**When to use**: Choose this command when implementing new functionality from scratch or updating existing code based on prompt changes. The command will automatically detect changes and determine whether to use incremental patching or full regeneration based on the significance of the changes.\n\nExamples:\n```\n# Basic generation with automatic git-based change detection\n# (incremental if output file exists, full generation if it doesn't)\npdd [GLOBAL OPTIONS] generate --output src\u002Fcalculator.py calculator_python.prompt \n\n# Force incremental patching (requires output file to exist)\npdd [GLOBAL OPTIONS] generate --incremental --output src\u002Fcalculator.py calculator_python.prompt\n\n# Force full regeneration (just delete the output file first)\nrm src\u002Fcalculator.py  # Delete the file\npdd [GLOBAL OPTIONS] generate --output src\u002Fcalculator.py calculator_python.prompt\n\n# Specify a different original prompt (bypassing git detection)\npdd [GLOBAL OPTIONS] generate --output src\u002Fcalculator.py  --original-prompt old_calculator_python.prompt calculator_python.prompt\n```\n\n**Agentic Architecture Mode:**\n\nWhen the positional argument is a GitHub issue URL instead of a prompt file, `generate` enters agentic architecture mode. The issue body serves as the PRD (Product Requirements Document), and an 11-step agentic workflow generates `architecture.json`, `.pddrc`, and prompt files automatically.\n\n```bash\npdd generate https:\u002F\u002Fgithub.com\u002Fowner\u002Frepo\u002Fissues\u002F42\n```\n\nThe 11-step workflow:\n\n**Analysis & Generation (Steps 1-8):**\n1. **Analyze PRD**: Extract features, tech stack, and requirements from the issue content\n2. 
**Deep Analysis**: Feature decomposition, module boundaries, shared concerns\n3. **Research**: Web search for tech stack documentation and best practices\n4. **Design**: Module breakdown with dependency graph and priority ordering (auth modules are separated into dedicated concerns with low priority numbers)\n5. **Research Dependencies**: Find relevant API docs and code examples per module\n6. **Generate**: Produce complete `architecture.json` and scaffolding files\n7. **Generate .pddrc**: Create project configuration with context-specific paths\n8. **Generate Prompts**: Create prompt files for each module in `architecture.json`\n\n**Validation (Steps 9-11):**\n9. **Completeness Validation**: Verify all modules have prompts and dependencies\n10. **Sync Validation**: Run `pdd sync --dry-run` on each module to catch path issues\n11. **Dependency Validation**: Preprocess prompts to verify `\u003Cinclude>` tags resolve\n\nEach validation step retries up to 3 times with automatic fixes before proceeding.\n\n**Options:**\n- `--skip-prompts`: Skip prompt file generation (steps 8-11), only generate `architecture.json` and `.pddrc`\n\nPrerequisites:\n- `gh` CLI must be installed and authenticated\n- The issue must contain a PRD describing the project scope\n\n**Workflow Resumption**: Re-running `pdd generate \u003Cissue-url>` resumes from the last completed step. State is persisted to GitHub issue comments for cross-machine resume.\n\n**Hard Stops**: The workflow stops if the PRD content is insufficient, the tech stack is ambiguous, or clarification is needed. 
Address the issue and re-run.\n\nExample:\n```bash\npdd generate https:\u002F\u002Fgithub.com\u002Fmyorg\u002Fmyrepo\u002Fissues\u002F42\n# Generates: architecture.json, architecture_diagram.html, .pddrc, prompts\u002F*.prompt\n\n# Skip prompt generation (faster, just architecture)\npdd generate --skip-prompts https:\u002F\u002Fgithub.com\u002Fmyorg\u002Fmyrepo\u002Fissues\u002F42\n# Generates: architecture.json, architecture_diagram.html, .pddrc\n```\n\n#### Prompt Templates\n\nTemplates are reusable prompt files that generate a specific artifact (code, JSON, tests, etc.). Templates carry human\u002FCLI metadata in YAML front matter (parsed by the CLI and not sent to the LLM), while the body stays concise and model‑focused.\n\n- Front matter (human\u002FCLI):\n  - name, description, version, tags, language, output\n  - variables: schema for `-e\u002F--env` (required\u002Foptional, type, examples)\n  - usage: copyable `pdd generate` commands\n  - discover (optional): CLI‑executed file discovery (root, patterns, exclude, caps)\n  - output_schema (optional): JSON shape used by the CLI for validation and by `pdd templates show`\n- Prompt body (LLM):\n  - Includes to hydrate context: `\u003Cinclude>${VAR}\u003C\u002Finclude>`, `\u003Cinclude-many>${LIST}\u003C\u002Finclude-many>`\n  - Crisp instructions and an explicit output contract; no human usage notes or discovery logic\n\nQuick examples (templates)\n\n```\n# Minimal (PRD required)\npdd generate -e PRD_FILE=docs\u002Fspecs.md --output architecture.json \\\n  pdd\u002Ftemplates\u002Farchitecture\u002Farchitecture_json.prompt\n\n# With extra context\npdd generate -e PRD_FILE=docs\u002Fspecs.md -e TECH_STACK_FILE=docs\u002Ftech_stack.md \\\n  -e DOC_FILES='docs\u002Fux.md,docs\u002Fcomponents.md' \\\n  -e INCLUDE_FILES='src\u002Fapp.py,src\u002Fapi.py,frontend\u002Fapp\u002Flayout.tsx' \\\n  --output architecture.json pdd\u002Ftemplates\u002Farchitecture\u002Farchitecture_json.prompt\n\n# Multiple variants\npdd 
generate -e PRD_FILE=docs\u002Fspecs.md -e APP_NAME=Shop   --output apps\u002Fshop\u002Farchitecture.json   pdd\u002Ftemplates\u002Farchitecture\u002Farchitecture_json.prompt\npdd generate -e PRD_FILE=docs\u002Fspecs.md -e APP_NAME=Admin  --output apps\u002Fadmin\u002Farchitecture.json  pdd\u002Ftemplates\u002Farchitecture\u002Farchitecture_json.prompt\npdd generate -e PRD_FILE=docs\u002Fspecs.md -e APP_NAME=Public --output apps\u002Fpublic\u002Farchitecture.json pdd\u002Ftemplates\u002Farchitecture\u002Farchitecture_json.prompt\n\n# 4) Use variables in the output path\n# 5) Use shell env fallback for convenience\nexport APP=shop\npdd generate -e APP -e PRD_FILE=docs\u002Fspecs.md --output 'apps\u002F${APP}\u002Farchitecture.json' pdd\u002Ftemplates\u002Farchitecture\u002Farchitecture_json.prompt\n```\n\nTips for authoring templates\n\n- Put human guidance in YAML front matter (variables with examples, usage, notes); keep the prompt body model‑focused.\n- Use `\u003Cinclude>`\u002F`\u003Cinclude-many>` for curated context; prefer specs\u002Fconfigs over large code dumps.\n- Parameterized includes: pass file paths via `-e`, e.g. `\u003Cinclude>${PRD_FILE}\u003C\u002Finclude>`; the engine resolves includes after variable expansion.\n- If your template outputs a specific filename, show example commands with `--output`.\n\nBehavior notes\n\n- Variable expansion only applies to variables explicitly passed via `-e\u002F--env` (or via the env fallback with `-e KEY`). 
Other `$NAME` occurrences remain unchanged.\n- `--output` also accepts `$VAR`\u002F`${VAR}` from the same set of variables.\n- If you omit `--output`, PDD derives the filename from the prompt basename and detected language extension; set `PDD_GENERATE_OUTPUT_PATH` to direct outputs to a common directory.\n\nTemplates: Commands\n\n- Front matter is parsed (not sent to the LLM) and powers:\n  - Variables schema and validation\n  - Usage examples (rendered by `pdd templates show`)\n  - Optional `discover` settings (executed by the CLI with caps)\n  - Optional `output_schema` for validation\n- Commands:\n  - `pdd templates list [--json] [--filter tag=...]`\n  - `pdd templates show \u003Cname>`\n  - `pdd templates copy \u003Cname> --to prompts\u002F`\n  - `pdd generate --template \u003Cname> [-e KEY=VALUE...] [--output PATH]`\n\n#### Built-In Templates\n\nPDD can distribute a curated set of popular templates as part of the package to help you get started quickly (e.g., frontend\u002FNext.js, backend\u002FFlask, data\u002FETL).\n\nWhere built-ins live (packaged)\n\n- Under the installed package at `pdd\u002Ftemplates\u002F\u003Ccategory>\u002F**\u002F*.prompt` (plus optional README\u002Findex files). 
When installed from PyPI, these are included as package data.\n\nIncluded starter templates\n\n- `architecture\u002Farchitecture_json.prompt`: Universal architecture generator (requires `-e PRD_FILE=...`; supports optional `TECH_STACK_FILE`, `DOC_FILES`, `INCLUDE_FILES`).\n\n**LLM Toggle Functionality:**\n\nAll templates support the `llm` parameter to control whether LLM generation runs:\n\n- **`llm=true`** (default): Full generation with LLM + post-processing\n- **`llm=false`**: Skip LLM generation, run only post-processing\n\n**Architecture JSON Template Features:**\n\nThe `architecture\u002Farchitecture_json` template includes automatic **Mermaid diagram generation**:\n\n- **Post-processing**: Automatically converts the generated JSON into an interactive HTML Mermaid diagram\n- **Visualization**: Creates `architecture_diagram.html` with color-coded modules (frontend\u002Fbackend\u002Fshared)\n- **Interactive**: Hover tooltips show module details, dependencies, and descriptions\n- **Self-contained**: HTML file works offline with embedded Mermaid library\n\n**Example Commands:**\n\n```bash\n# Full generation (LLM + post-processing + Mermaid HTML)\npdd generate --template architecture\u002Farchitecture_json \\\n  -e PRD_FILE=docs\u002Fspecs.md \\\n  -e APP_NAME=\"MyApp\" \\\n  --output architecture.json\n# Results in: architecture.json + architecture_diagram.html\n\n# Post-processing only (skip LLM, generate HTML from existing JSON)\npdd generate --template architecture\u002Farchitecture_json \\\n  -e APP_NAME=\"MyApp\" \\\n  -e llm=false \\\n  --output architecture.json\n# Results in: architecture_diagram.html (from existing architecture.json)\n```\n\n**Context URLs (optional field):**\n\nArchitecture entries support an optional `context_urls` array that associates web documentation references with each module. 
When prompts are generated from the architecture (via `generate_prompt`), these URLs are emitted as `\u003Cweb>` tags in the Dependencies section, enabling the LLM to fetch relevant API documentation during code generation.\n\n```json\n{\n  \"filename\": \"orders_api_Python.prompt\",\n  \"dependencies\": [\"models_Python.prompt\"],\n  \"context_urls\": [\n    {\"url\": \"https:\u002F\u002Ffastapi.tiangolo.com\u002Ftutorial\u002Ffirst-steps\u002F\", \"purpose\": \"FastAPI routing patterns\"},\n    {\"url\": \"https:\u002F\u002Fdocs.pydantic.dev\u002Flatest\u002Fconcepts\u002Fmodels\u002F\", \"purpose\": \"Pydantic model validation\"}\n  ],\n  ...\n}\n```\n\nThe `context_urls` field is populated automatically by the agentic architecture workflow (step 5: research dependencies) but can also be added manually to any architecture entry.\n\nFront Matter (YAML) metadata\n\n- Templates include YAML front matter with human-readable metadata:\n  - `name`, `description`, `version`, `tags`: docs and discovery\n  - `language`, `output`: defaults for `generate`\n  - `variables`: parameter schema for `-e\u002F--env` (type, required, default)\n\nExample (architecture template):\n\n```\n---\nname: architecture\u002Farchitecture_json\ndescription: Unified architecture template for multiple stacks\nversion: 1.0.0\ntags: [architecture, template, json]\nlanguage: json\noutput: architecture.json\nvariables:\n  TECH_STACK:\n    required: false\n    type: string\n    description: Target tech stack for interface shaping and conventions.\n    examples: [nextjs, python, fastapi, flask, django, node, go]\n  API_STYLE:\n    required: false\n    type: string\n    description: API style for backends.\n    examples: [rest, graphql]\n  APP_NAME:\n    required: false\n    type: string\n    description: Optional app name for context.\n    example: Shop\n  PRD_FILE:\n    required: true\n    type: path\n    description: Primary product requirements document (PRD) describing scope and goals.\n    
example_paths: [PRD.md, docs\u002Fspecs.md, docs\u002Fproduct\u002Fprd.md]\n    example_content: |\n      Title: Order Management MVP\n      Goals: Enable customers to create and track orders end-to-end.\n      Key Features:\n        - Create Order: id, user_id, items[], total, status\n        - View Order: details page with status timeline\n        - List Orders: filter by status, date, user\n      Non-Functional Requirements:\n        - P95 latency \u003C 300ms for read endpoints\n        - Error rate \u003C 0.1%\n  TECH_STACK_FILE:\n    required: false\n    type: path\n    description: Tech stack overview (languages, frameworks, infrastructure, and tools).\n    example_paths: [docs\u002Ftech_stack.md, docs\u002Farchitecture\u002Fstack.md]\n    example_content: |\n      Backend: Python (FastAPI), Postgres (SQLAlchemy), PyTest\n      Frontend: Next.js (TypeScript), shadcn\u002Fui, Tailwind CSS\n      API: REST\n      Auth: Firebase Auth (GitHub Device Flow), JWT for API\n      Infra: Vercel (frontend), Cloud Run (backend), Cloud SQL (Postgres)\n      Observability: OpenTelemetry traces, Cloud Logging\n  DOC_FILES:\n    required: false\n    type: list\n    description: Additional documentation files (comma\u002Fnewline-separated).\n    example_paths: [docs\u002Fux.md, docs\u002Fcomponents.md]\n    example_content: |\n      Design overview, patterns and constraints\n  INCLUDE_FILES:\n    required: false\n    type: list\n    description: Specific source files to include (comma\u002Fnewline-separated).\n    example_paths: [src\u002Fapp.py, src\u002Fapi.py, frontend\u002Fapp\u002Flayout.tsx, frontend\u002Fapp\u002Fpage.tsx]\n  usage:\n    generate:\n      - name: Minimal (PRD only)\n        command: pdd generate -e PRD_FILE=docs\u002Fspecs.md --output architecture.json pdd\u002Ftemplates\u002Farchitecture\u002Farchitecture_json.prompt\n      - name: With tech stack overview\n        command: pdd generate -e PRD_FILE=docs\u002Fspecs.md -e 
TECH_STACK_FILE=docs\u002Ftech_stack.md --output architecture.json pdd\u002Ftemplates\u002Farchitecture\u002Farchitecture_json.prompt\n  discover:\n    enabled: false\n    max_per_pattern: 5\n    max_total: 10\n---\n```\n\nNotes\n\n- YAML front matter is parsed and not sent to the LLM. Use `pdd templates show` to view variables, usage, discover, and output schema. Pass variables via `-e` at the CLI.\n\nTemplate Variables (reference)\n\n- Architecture (`architecture\u002Farchitecture_json.prompt`)\n  - `PRD_FILE` (path, required): Primary spec\u002FPRD file path\n  - `TECH_STACK_FILE` (path, optional): Tech stack overview file (includes API style; e.g., docs\u002Ftech_stack.md)\n  - `APP_NAME` (string, optional): App name for context\n  - `DOC_FILES` (list, optional): Comma\u002Fnewline-separated list of additional doc paths\n  - `INCLUDE_FILES` (list, optional): Comma\u002Fnewline-separated list of source files to include\n  - `SCAN_PATTERNS` (list, optional): Discovery patterns defined in front matter `discover` and executed by the CLI\n  - `SCAN_ROOT` (path, optional): Discovery root defined in front matter `discover`\n\nNotes\n\n- These variables are declared in YAML front matter at the top of each template for clarity and future CLI discovery. Until the CLI parses front matter, pass values via `-e` as shown in examples.\n\nCopy-and-generate\n\n- Copy the desired template(s) into your project’s `prompts\u002F` folder, then use `pdd generate` as usual. 
This keeps prompts versioned with your repo so you can edit and evolve them.\n- Quick copy (Python one‑liner; run from your project root):\n\n```\npython - \u003C\u003C'PY'\nfrom importlib.resources import files\nimport shutil, os\n\ndst_dir = 'prompts\u002Farchitecture'\nsrc_dir = files('pdd').joinpath('templates\u002Farchitecture')\nos.makedirs(dst_dir, exist_ok=True)\n\nfor p in src_dir.rglob('*.prompt'):\n    shutil.copy(p, dst_dir)\nprint(f'Copied built-in templates from {src_dir} -> {dst_dir}')\nPY\n\n# Then generate from the copied prompt(s)\npdd generate --output architecture.json prompts\u002Farchitecture\u002Farchitecture_json.prompt\n```\n\nUnified template examples\n\n```\n# Frontend (Next.js) — interface.page.route and component props\npdd generate \\\n  -e APP_NAME=Shop \\\n  # (routes are inferred from PRD\u002Ftech stack\u002Ffiles)\n  -e PRD_FILE=docs\u002Fspecs.md \\\n  -e DOC_FILES='docs\u002Fux.md,docs\u002Fcomponents.md' \\\n  -e TECH_STACK_FILE=docs\u002Ftech_stack.md \\\n  # discovery, if needed, is configured in template YAML and executed by the CLI\n  --output architecture.json \\\n  pdd\u002Ftemplates\u002Farchitecture\u002Farchitecture_json.prompt\n\n# Backend (Python) — interface.module.functions or interface.api.endpoints\npdd generate \\\n  -e PRD_FILE=docs\u002Fbackend-spec.md \\\n  -e TECH_STACK_FILE=docs\u002Ftech_stack.md \\\n  -e INCLUDE_FILES='src\u002Fapp.py,src\u002Fapi.py,pyproject.toml' \\\n  --output architecture.json \\\n  pdd\u002Ftemplates\u002Farchitecture\u002Farchitecture_json.prompt\n```\n\n**Interface Schema**\n\n- Core keys (every item):\n  - `reason`, `description`, `dependencies`, `priority`, `filename`, optional `tags`.\n- Interface object (typed, include only what applies):\n  - `type`: `component` | `page` | `module` | `api` | `graphql` | `cli` | `job` | `message` | `config`\n  - `component`: `props[]`, optional `emits[]`, `context[]`\n  - `page`: `route`, optional `params[]`, `layout`, and `dataSources[]` 
where each entry is an object with required `kind` (e.g., `api`, `query`) and `source` (URL or identifier), plus optional `method`, `description`, `auth`, `inputs[]`, `outputs[]`, `refreshInterval`, `notes`\n  - `module`: `functions[]` with `name`, `signature`, optional `returns`, `errors`, `sideEffects`\n  - `api`: `endpoints[]` with `method`, `path`, optional `auth`, `requestSchema`, `responseSchema`, `errors`\n  - `graphql`: optional `sdl`, or `operations` with `queries[]`, `mutations[]`, `subscriptions[]`\n  - `cli`: `commands[]` with `name`, optional `args[]`, `flags[]`, `exitCodes[]`; optional `io` (`stdin`, `stdout`)\n  - `job`: `trigger` (cron\u002Fevent), optional `inputs[]`, `outputs[]`, `retryPolicy`\n  - `message`: `topics[]` with `name`, `direction` (`publish`|`subscribe`), optional `schema`, `qos`\n  - `config`: `keys[]` with `name`, `type`, optional `default`, `required`, `source` (`env`|`file`|`secret`)\n  - Optional: `version`, `stability` (`experimental`|`stable`)\n\nExamples:\n\n```json\n{\n  \"reason\": \"Top-level products page\",\n  \"description\": \"...\",\n  \"dependencies\": [\"layout_tsx.prompt\"],\n  \"priority\": 1,\n  \"filename\": \"page_tsx.prompt\",\n  \"tags\": [\"frontend\",\"nextjs\"],\n  \"interface\": {\n    \"type\": \"page\",\n    \"page\": {\"route\": \"\u002Fproducts\", \"params\": [{\"name\":\"id\",\"type\":\"string\"}]},\n    \"component\": {\"props\": [{\"name\":\"initialProducts\",\"type\":\"Product[]\",\"required\":true}]}\n  }\n}\n```\n\n```json\n{\n  \"reason\": \"Order service module\",\n  \"description\": \"...\",\n  \"dependencies\": [\"db_python.prompt\"],\n  \"priority\": 1,\n  \"filename\": \"orders_python.prompt\",\n  \"tags\": [\"backend\",\"python\"],\n  \"interface\": {\n    \"type\": \"module\",\n    \"module\": {\n      \"functions\": [\n        {\"name\": \"load_orders\", \"signature\": \"def load_orders(user_id: str) -> list[Order]\"},\n        {\"name\": \"create_order\", \"signature\": \"def 
create_order(dto: OrderIn) -> Order\"}\n      ]\n    }\n  }\n}\n```\n\n```json\n{\n  \"reason\": \"Orders HTTP API\",\n  \"description\": \"...\",\n  \"dependencies\": [\"orders_python.prompt\"],\n  \"priority\": 2,\n  \"filename\": \"api_python.prompt\",\n  \"tags\": [\"backend\",\"api\"],\n  \"interface\": {\n    \"type\": \"api\",\n    \"api\": {\n      \"endpoints\": [\n        {\n          \"method\": \"GET\",\n          \"path\": \"\u002Forders\u002F{id}\",\n          \"auth\": \"bearer\",\n          \"responseSchema\": {\"type\":\"object\",\"properties\":{\"id\":{\"type\":\"string\"}}},\n          \"errors\": [\"404 Not Found\",\"401 Unauthorized\"]\n        }\n      ]\n    }\n  }\n}\n```\n\nNotes and recommendations\n\n- Treat copied templates as a starting point; edit them to match your stack and conventions.\n- Keep templates under version control along with your code to preserve the prompt‑as‑source‑of‑truth model.\n- If you maintain your own template set, store them under `prompts\u002F\u003Corg_or_team>\u002F...` and compose with `\u003Cinclude>` to maximize reuse.\n\nTemplates: additional UX\n\n- Goals:\n  - Discover, inspect, and vendor templates without manual file paths.\n  - Validate required variables and surface defaults from template metadata.\n  - Support a search order so project templates can override packaged ones.\n\n- Commands:\n  - `pdd templates list [--json] [--filter tag=frontend]` to discover templates\n  - `pdd templates show \u003Cname> [--raw]` to view metadata and variables\n  - `pdd templates copy \u003Cname> --to prompts\u002F` to vendor into your repo\n  - `pdd generate --template \u003Cname> [-e KEY=VALUE...] 
[--output PATH]`\n\n- Example usage:\n```\n# Discover and inspect\npdd templates list --filter tag=frontend\npdd templates show frontend\u002Fnextjs_architecture_json\n\n# Vendor and customize\npdd templates copy frontend\u002Fnextjs_architecture_json --to prompts\u002Ffrontend\u002F\n\n# Generate without specifying a file path\npdd generate --template frontend\u002Fnextjs_architecture_json \\\n  -e APP_NAME=Shop \\\n  # routes are inferred from PRD\u002Ftech stack\u002Ffiles\n  --output architecture.json\n```\n\n- Search order:\n  - Project: `.\u002Fprompts\u002F**` (allows team overrides)\n  - `.pddrc` paths: any configured `templates.paths`\n  - Packaged: `pdd\u002Ftemplates\u002F**` (built‑ins)\n  - Optional: `$PDD_PATH\u002Fprompts\u002F**` (org‑level packs)\n\n- Template front matter:\n  - YAML metadata at the top of `.prompt` files to declare `name`, `description`, `tags`, `version`, `language`, default `output`, and `variables` (with `required`, `default`, `type` such as `string` or `json`).\n  - CLI precedence: values from `-e\u002F--env` override front‑matter defaults; unknowns are validated and surfaced to the user.\n  - Example:\n    ```\n    ---\n    name: frontend\u002Fnextjs_architecture_json\n    description: Generate a Next.js architecture.json file from app metadata\n    tags: [frontend, nextjs, json]\n    version: 1.0.0\n    language: json\n    output: architecture.json\n    variables:\n      APP_NAME: { required: true }\n      ROUTES:   { type: json, default: [] }\n    ---\n    ...prompt body...\n    ```\n\n### 3. example\n\nCreate a compact example demonstrating how to use functionality defined in a prompt. 
Similar to a header file or API documentation, this produces minimal, token-efficient code that shows the interface without implementation details.\n\n```\npdd [GLOBAL OPTIONS] example [OPTIONS] PROMPT_FILE CODE_FILE\n```\n\nArguments:\n- `PROMPT_FILE`: The filename of the prompt file that generated the code.\n- `CODE_FILE`: The filename of the existing code file.\n\nOptions:\n- `--output LOCATION`: Specify where to save the generated example code. The default file name is `\u003Cbasename>_example.\u003Clanguage_file_extension>`. If an environment variable `PDD_EXAMPLE_OUTPUT_PATH` is set, the file will be saved in that path unless overridden by this option.\n- `--format FORMAT`: Output format for the generated example (default: `code`). Valid values:\n  - `code`: Uses the language-specific file extension (e.g., `.py` for Python, `.js` for JavaScript)\n  - `md`: Generates markdown format with `.md` extension\n  When `--format` is specified with an explicit `--output` path, the format option constrains the output file extension accordingly.\n\nWhere used:\n- Dependency references: Examples serve as lightweight (token efficient) interface references for other prompts and can be included as dependencies of a generate target.\n- Sanity checks: The example program is typically used as the runnable program for `crash` and `verify`, providing a quick end-to-end sanity check that the generated code runs and behaves as intended.\n- Auto-deps integration: The `auto-deps` command can scan example files (e.g., `examples\u002F**\u002F*.py`) and insert relevant references into prompts. Based on each example’s content (imports, API usage, filenames), it identifies useful development units to include as dependencies.\n\n**When to use**: Choose this command when creating reusable references that other prompts can efficiently import. 
This produces token-efficient examples that are easier to reuse across multiple prompts compared to including full implementations.\n\nExample:\n```\npdd [GLOBAL OPTIONS] example --output examples\u002Ffactorial_calculator_example.py factorial_calculator_python.prompt src\u002Ffactorial_calculator.py\n```\n\n### 4. test\n\nGenerate or enhance unit tests for a given code file and its corresponding prompt file. Also supports **agentic mode** for generating UI tests from GitHub issues.\n\n#### Agentic Mode (UI Test Generation)\n\nGenerate UI tests from a GitHub issue. The issue describes what needs to be tested (a webpage, CLI, or desktop app), and an agentic workflow analyzes the target, creates a test plan, and generates comprehensive UI tests.\n\n```\npdd [GLOBAL OPTIONS] test \u003Cgithub-issue-url>\n```\n\n**How it works (18-step workflow with GitHub comments):**\n\n1. **Duplicate check** - Search for existing issues describing the same test requirements. If found, merge content and close the duplicate.\n\n2. **Documentation check** - Review repo documentation and codebase to understand what needs to be tested. Identifies OpenAPI\u002FSwagger specs if present.\n\n3. **Analyze & clarify** - Determine if enough information exists in the issue to create tests. Posts comment requesting clarification if needed.\n\n4. **Detect frontend** - Identify the test type: web UI, CLI, desktop app, or API. Determines the appropriate testing framework.\n\n5. **Create test plan** - Design a comprehensive test plan and verify it's achievable.\n\n5b. **Enhance test plan** - Add contract validation test cases (from OpenAPI\u002FSwagger specs) and accessibility test cases (for web apps using `@axe-core\u002Fplaywright` at WCAG 2.1 AA level).\n\n6. **Assess coverage** *(web only, requires `playwright-cli`)* - Compare requirements against the enhanced test plan to identify gaps needing manual testing.\n\n7. 
**Create manual testing checklist** *(web only)* - Generate a checklist using three strategies: page-by-page exhaustive testing, user-story walkthroughs, and accessibility spot-checks.\n\n8. **Manual testing execution** *(web only)* - Execute checklist items via `playwright-cli` commands. Runs serially in CLI mode or in parallel via Cloud Batch when `PDD_CLOUD_RUN=true`.\n\n9. **Create regression tests** *(web only)* - Generate automated tests that reproduce bugs found in Step 8.\n\n10. **Validate regression tests** *(web only)* - Confirm regression tests fail against current code (proving bugs exist).\n\n11. **Loop check** *(web only)* - Check checklist completion. Loops back to Step 8 if items remain (max 3 iterations).\n\n12. **Generate tests** - Create tests in a worktree from the enhanced plan, including behavioral, contract, and accessibility tests.\n\n13. **Run tests** - Execute all generated tests against the target.\n\n14. **Fix & iterate** - Fix any failing tests and re-run until they pass.\n\n15. **Validate tests against plan** - Cross-reference the enhanced plan against generated tests. Generate missing tests for any unimplemented cases.\n\n16. **Run newly generated tests** - Run and fix tests created in Step 15 (if any).\n\n17. **Submit PR** - Create a draft PR with enhanced description including test plan coverage ratio, contract test summary, accessibility audit summary, and manual testing summary.\n\n**Execution Modes:**\n\n| Mode | Steps 6-11 behavior |\n|------|---------------------|\n| **CLI** (`pdd test \u003Curl>`) | Serial: Runs each checklist chunk one at a time |\n| **GitHub App** (`PDD_CLOUD_RUN=true`) | Parallel: Fans out to Cloud Batch spot VMs |\n\n**Prerequisites:**\n- Steps 6-11 (manual\u002Fexploratory testing) require `playwright-cli` in PATH. 
If not found, these steps are skipped with a warning.\n- Steps 6-11 only run for web test types (`TEST_TYPE: web`).\n\n**Agentic Options:**\n- `--timeout-adder FLOAT`: Add additional seconds to each step's timeout (default: 0.0)\n- `--no-github-state`: Disable GitHub issue comment-based state persistence, use local-only\n- `--manual`: Use legacy prompt-based mode instead of agentic mode\n\n**Environment Variables:**\n- `PDD_CLOUD_RUN=true`: Enable parallel execution mode for manual testing (Steps 6-11)\n- `PDD_NO_GITHUB_STATE=1`: Disable GitHub state persistence\n\n**Cross-Machine Resume**: By default, workflow state is stored in a hidden comment on the GitHub issue, enabling resume from any machine. Use `--no-github-state` to disable this feature.\n\n**Example (Agentic Mode):**\n```bash\n# Generate UI tests from a GitHub issue\npdd test https:\u002F\u002Fgithub.com\u002Fmyorg\u002Fmyrepo\u002Fissues\u002F789\n\n# Resume after answering clarifying questions\npdd test https:\u002F\u002Fgithub.com\u002Fmyorg\u002Fmyrepo\u002Fissues\u002F789\n```\n\n**Next Step - Fixing Test Issues:**\n\nIf the generated tests reveal issues that need code fixes, use `pdd fix` with the same issue URL:\n\n```bash\npdd fix https:\u002F\u002Fgithub.com\u002Fmyorg\u002Fmyrepo\u002Fissues\u002F789\n```\n\n---\n\n#### Manual Mode (Prompt-Based)\n\nGenerate or enhance unit tests for a given code file and its corresponding prompt file.\n\nTest organization:\n- For each target `\u003Cbasename>`, PDD maintains a single test file (by default named `test_\u003Cbasename>.\u003Clanguage_extension>` and typically placed under a tests directory).\n- New tests accumulate in that same file over time rather than being regenerated from scratch. 
When augmenting tests, PDD can merge additions into the existing file (see `--merge`).\n\n```\npdd [GLOBAL OPTIONS] test [OPTIONS] PROMPT_FILE CODE_OR_EXAMPLE_FILE\npdd [GLOBAL OPTIONS] test --manual [OPTIONS] PROMPT_FILE CODE_OR_EXAMPLE_FILE\n```\n\nArguments:\n- `PROMPT_FILE`: The filename of the prompt file that generated the code.\n- `CODE_OR_EXAMPLE_FILE`: The filename of the code implementation or example file. Files ending with `_example` are treated as example files for TDD-style test generation.\n\nOptions:\n- `--output LOCATION`: Specify where to save the generated test file. The default file name is `test_\u003Cbasename>.\u003Clanguage_file_extension>`. If an output file with the specified name already exists, a new file with a numbered suffix (e.g., `test_calculator_1.py`) will be created instead of overwriting.\n- `--language`: Specify the programming language. Defaults to the language specified by the prompt file name.\n- `--coverage-report PATH`: Path to the coverage report file for existing tests. When provided, generates additional tests to improve coverage.\n- `--existing-tests PATH [PATH...]`: Path(s) to the existing unit test file(s). Required when using --coverage-report. 
Multiple paths can be provided.\n- `--target-coverage FLOAT`: Desired code coverage percentage to achieve (default is 90.0).\n- `--merge`: When used with --existing-tests, merges new tests with existing test file instead of creating a separate file.\n\n#### Story Mode\n\nGenerate or update user stories and link them to touched prompts.\n\n```\npdd [GLOBAL OPTIONS] test prompts\u002Fupload_python.prompt prompts\u002Fnotify_python.prompt\npdd [GLOBAL OPTIONS] test user_stories\u002Fstory__my_flow.md\n```\n\nBehavior:\n- If input is one or more `.prompt` files, PDD generates `user_stories\u002Fstory__\u003Cname>.md`.\n- During story generation, PDD runs prompt detection and auto-links touched prompts in `pdd-story-prompts` metadata.\n- If generation-time detection finds no touched prompts, metadata falls back to the provided prompt-file inputs.\n- If `pdd-story-prompts` metadata already exists and resolves cleanly, PDD keeps it unchanged.\n- If metadata is missing (or stale), PDD runs prompt detection and writes:\n  `\u003C!-- pdd-story-prompts: prompt_a_python.prompt, prompt_b_python.prompt -->`\n- This enables deterministic prompt-subset validation in `pdd detect --stories`.\n\n#### Providing Command-Specific Context\n\nWhile prompts are the primary source of instructions, some PDD commands (like `test` and `example`) can be further guided by project-specific context files. These commands may automatically look for conventional files (e.g., `context\u002Ftest.prompt`, `context\u002Fexample.prompt`) in the current working directory during their internal prompt preprocessing phase.\n\nIf found, the content of these context files is included (using the `\u003Cinclude>` mechanism described in the `preprocess` section) into the internal prompt used by the command. 
This allows you to provide specific instructions tailored to your project, such as:\n\n- Specifying required import statements.\n- Suggesting preferred testing frameworks or libraries.\n- Providing project-specific coding conventions or patterns.\n\n**Example:** Creating a file named `context\u002Ftest.prompt` with the content:\n```\nPlease ensure all tests use the 'unittest' framework and import the main module as 'from my_module import *'.\n```\ncould influence the output of the `pdd test` command when run in the same directory.\n\n**Note:** This feature relies on the internal implementation of specific PDD commands incorporating the necessary `\u003Cinclude>` tags for these conventional context files. It is primarily used by `test` and `example` but may be adopted by other commands in the future. Check the specific command documentation or experiment to confirm if a command utilizes this pattern.\n\n#### Basic Examples:\n\n1. Generate initial unit tests:\n```\npdd [GLOBAL OPTIONS] test --output tests\u002Ftest_factorial_calculator.py factorial_calculator_python.prompt src\u002Ffactorial_calculator.py\n```\n\n2. Generate tests from an example file (TDD-style):\n```\npdd [GLOBAL OPTIONS] test --output tests\u002Ftest_calculator.py calculator_python.prompt examples\u002Fcalculator_example.py\n```\n\n3. Generate additional tests to improve coverage (with multiple existing test files):\n```\npdd [GLOBAL OPTIONS] test --coverage-report coverage.xml --existing-tests tests\u002Ftest_calculator.py --existing-tests tests\u002Ftest_calculator_edge_cases.py --output tests\u002Ftest_calculator_enhanced.py calculator_python.prompt src\u002Fcalculator.py\n```\n\n4. 
Improve coverage and merge with existing tests:\n```\npdd [GLOBAL OPTIONS] test --coverage-report coverage.xml --existing-tests tests\u002Ftest_calculator.py --merge --target-coverage 95.0 calculator_python.prompt src\u002Fcalculator.py\n```\n\n#### Coverage Analysis Strategy\n\nWhen coverage options are provided, the test command will:\n1. Analyze the coverage report to identify:\n   - Uncovered lines and branches\n   - Partially tested conditions\n   - Missing edge cases\n\n2. Generate additional test cases prioritizing:\n   - Complex uncovered code paths\n   - Error conditions\n   - Boundary values\n   - Integration points\n\n3. Maintain consistency with:\n   - Existing test style and patterns\n   - Project's testing conventions\n   - Original prompt's intentions\n\n### 5. preprocess\n\nPreprocess prompt files and save the results.\n\n```\npdd [GLOBAL OPTIONS] preprocess [OPTIONS] PROMPT_FILE\n```\n\nArguments:\n- `PROMPT_FILE`: The filename of the prompt file to preprocess.\n\nOptions:\n- `--output LOCATION`: Specify where to save the preprocessed prompt file. The default file name is `\u003Cbasename>_\u003Clanguage>_preprocessed.prompt`.\n- `--xml`: Automatically insert XML delimiters for long and complex prompt files to structure the content better. With this option prompts are only preprocessed to insert in XML delimiters, but not preprocessed otherwise.\n- `--recursive`: Recursively preprocess all prompt files in the prompt file.\n- `--double`: Curly brackets will be doubled.\n- `--exclude`: List of keys to exclude from curly bracket doubling.\n\n#### XML-like Tags\n\nPDD supports the following XML-like tags in prompt files. Note: XML-like tags (`\u003Cinclude>`, `\u003Cinclude-many>`, `\u003Cshell>`, `\u003Cweb>`) are left untouched inside fenced code blocks (``` or ~~~) or inline single backticks so documentation examples remain literal.\n\n1. **`include`**: Includes file content into the prompt. The file path is always the tag body. 
Optional attributes extract specific parts instead of the full file:\n   ```xml\n   \u003Cinclude>.\u002Fpath\u002Fto\u002Ffile.txt\u003C\u002Finclude>\n   \u003Cinclude select=\"def:foo,class:Bar\">src\u002Futils.py\u003C\u002Finclude>\n   \u003Cinclude select=\"class:Handler\" mode=\"interface\">src\u002Fapi.py\u003C\u002Finclude>\n   \u003Cinclude query=\"authentication flow\">docs\u002Fapi_reference.md\u003C\u002Finclude>\n   ```\n   - `select=` — deterministic structural extraction (functions, classes, line ranges, headings, regex, JSON\u002FYAML paths). Composable via comma-separation.\n   - `mode=\"interface\"` — Python-only. Extracts signatures and docstrings with bodies replaced by `...`.\n   - `query=` — LLM-powered semantic extraction, cached in `.pdd\u002Fextracts\u002F`.\n   - `optional` — when present on an `\u003Cinclude ...>` tag, a missing file resolves to an empty string (`\"\"`) during non-recursive preprocessing (while still logging a warning).\n   - When both `select=` and `query=` are present, `select=` wins (no LLM cost).\n\n   This mechanism is also used internally by some commands (like `test` and `example`) to automatically incorporate project-specific context files if they exist in conventional locations (e.g., `context\u002Ftest.prompt`). See 'Providing Command-Specific Context' for details. For the full selector reference, see the [Prompting Guide](docs\u002Fprompting_guide.md#selective-includes).\n\n2. **`pdd`**: Indicates a comment that will be removed from the preprocessed prompt, including the tags themselves.\n   ```xml\n   \u003Cpdd>This is a comment that won't appear in the preprocessed output\u003C\u002Fpdd>\n   ```\n\n3. **`shell`**: Executes shell commands and includes their output in the prompt, removing the shell tags.\n   ```xml\n   \u003Cshell>ls -la\u003C\u002Fshell>\n   ```\n\n4. 
**`web`**: Scrapes a web page and includes its markdown content in the prompt, removing the web tags.\n   ```xml\n   \u003Cweb>https:\u002F\u002Fexample.com\u003C\u002Fweb>\n   ```\n\n#### Triple Backtick Includes\n\nPDD supports two ways of including external content:\n\n1. **Triple backtick includes**: Replaces angle brackets in triple backticks with the content of the specified file.\n   ````\n   ```\n   \u003C.\u002Fpath\u002Fto\u002Ffile.txt>\n   ```\n   This will be recursively processed until there are no more angle brackets in triple backticks.\n\n2. **XML include tags**: As described above.\n\n#### Curly Bracket Handling\n\nWhen using the `--double` option:\n\n- Single curly brackets are doubled if they're not already doubled\n- Already doubled brackets are preserved\n- Nested curly brackets are properly handled\n- Special handling is applied for code blocks (JSON, JavaScript, TypeScript, Python)\n- Multiline variables with curly brackets receive special handling\n\nUse the `--exclude` option to specify keys that should be excluded from curly bracket doubling. This option **only applies** if the **entire string** inside a pair of single curly braces **exactly matches** one of the excluded keys.\n\nFor example, with `--exclude model`:\n- `{model}` remains `{model}` (excluded due to exact match).\n- `{model_name}` is doubled, as 'model_name' is not an exact match for 'model'.\n- `{api_model}` is doubled, not an exact match.\n- Braces containing other content, even if related to the key (e.g., `var={key}_value`), will generally still follow doubling rules unless the inner `{key}` itself is excluded.\n\nExample command usage:\n```\npdd [GLOBAL OPTIONS] preprocess --output preprocessed\u002Ffactorial_calculator_python_preprocessed.prompt --recursive --double --exclude model,temperature factorial_calculator_python.prompt\n```\n\n### 6. fix\n\nFix errors in code and unit tests. 
Supports two modes: **Agentic E2E Fix** (default when given a GitHub URL) for multi-dev-unit test fixing, and **Manual mode** for single dev-unit fixing with explicit file arguments.\n\n**Agentic E2E Fix Mode (GitHub URL):**\n```\npdd [GLOBAL OPTIONS] fix [OPTIONS] \u003CGITHUB_ISSUE_URL>\n```\n\n**Manual Mode (file arguments):**\n```\npdd [GLOBAL OPTIONS] fix --manual [OPTIONS] PROMPT_FILE CODE_FILE UNIT_TEST_FILE ERROR_FILE\n```\n\n#### Manual Mode Arguments\n- `PROMPT_FILE`: The filename of the prompt file that generated the code under test.\n- `CODE_FILE`: The filename of the code file to be fixed.\n- `UNIT_TEST_FILES`: The filename(s) of the unit test file(s). Multiple files can be provided, and each will be processed individually.\n- `ERROR_FILE`: The filename containing the unit test runtime error messages. Optional and does not need to exist when used with the `--loop` command.\n\n#### Common Options\n- `--manual`: Use manual mode with explicit file arguments (required for legacy\u002Fsingle dev-unit fixing).\n- `--verbose`: Show detailed output during processing.\n- `--quiet`: Suppress all output except errors.\n- `--protect-tests\u002F--no-protect-tests`: When enabled, prevents the LLM from modifying test files. The LLM will treat tests as read-only specifications and only fix the code. This is especially useful when tests created by `pdd bug` are known to be correct. Default: `--no-protect-tests`.\n\n#### Agentic E2E Fix Options\n- `--timeout-adder FLOAT`: Additional seconds to add to each step's timeout (default: 0.0).\n- `--max-cycles INT`: Maximum number of outer loop cycles before giving up (default: 5).\n- `--resume\u002F--no-resume`: Resume from saved state if available (default: `--resume`).\n- `--force`: Override the branch mismatch safety check. 
By default, the command aborts if the current git branch doesn't match the expected branch from the issue (to prevent accidentally modifying the wrong codebase).\n\n#### Manual Mode Options\n- `--output-test LOCATION`: Specify where to save the fixed unit test file. The default file name is `test_\u003Cbasename>_fixed.\u003Clanguage_file_extension>`. **Warning: If multiple `UNIT_TEST_FILES` are provided along with this option, only the fixed content of the last processed test file will be saved to this location, overwriting previous results. For individual fixed files, omit this option.**\n- `--output-code LOCATION`: Specify where to save the fixed code file. The default file name is `\u003Cbasename>_fixed.\u003Clanguage_file_extension>`. If an environment variable `PDD_FIX_CODE_OUTPUT_PATH` is set, the file will be saved in that path unless overridden by this option.\n- `--output-results LOCATION`: Specify where to save the results of the error fixing process. The default file name is `\u003Cbasename>_fix_results.log`. If an environment variable `PDD_FIX_RESULTS_OUTPUT_PATH` is set, the file will be saved in that path unless overridden by this option.\n- `--loop`: Enable iterative fixing process.\n  - `--verification-program PATH`: Specify the path to a Python program that verifies if the code still runs correctly.\n  - `--max-attempts INT`: Set the maximum number of fix attempts before giving up (default is 3).\n  - `--budget FLOAT`: Set the maximum cost allowed for the fixing process (default is $5.0).\n- `--auto-submit`: Automatically submit the example if all unit tests pass during the fix loop.\n\nWhen the `--loop` option is used, the fix command will attempt to fix errors through multiple iterations. It will use the specified verification program to check if the code runs correctly after each fix attempt. 
The process will continue until either the errors are fixed, the maximum number of attempts is reached, or the budget is exhausted.\n\nOutputs:\n- Fixed unit test file(s).\n- Fixed code file.\n- Results file containing the LLM model's output with unit test results.\n- Print out of results when using '--loop' containing:\n  - Success status (boolean)\n  - Total number of fix attempts made\n  - Total cost of all fix attempts\n- This will also create intermediate versions of the unit test and code files for the different iterations with timestamp-based naming (e.g., `basename_1_0_3_0_20250402_124442.py`, `standalone_test_1_0_3_0_20250402_124442.py`).\n\nExample:\n```\npdd [GLOBAL OPTIONS] fix --output-code src\u002Ffactorial_calculator_fixed.py --output-results results\u002Ffactorial_fix_results.log factorial_calculator_python.prompt src\u002Ffactorial_calculator.py tests\u002Ftest_factorial_calculator.py tests\u002Ftest_factorial_calculator_edge_cases.py errors.log\n```\nIn this example, `pdd fix` will be run for each test file, and the fixed test files will be saved as `tests\u002Ftest_factorial_calculator_fixed.py` and `tests\u002Ftest_factorial_calculator_edge_cases_fixed.py`.\n\n\n#### Agentic Fallback Mode\n\n(This feature is also available for the `crash` and `verify` command.)\n\nFor particularly difficult bugs that the standard iterative fix process cannot resolve, `pdd fix` offers a powerful agentic fallback mode. When activated, it invokes a project-aware CLI agent to attempt a fix with a much broader context.\n\n**How it Works:**\nIf the standard fix loop completes all its attempts and fails to make the tests pass, the agentic fallback will take over. 
It constructs a detailed set of instructions and delegates the fixing task to a dedicated CLI agent like Google's Gemini, Anthropic's Claude, or OpenAI's Codex.\n\n**How to Use:**\n\nThis feature only takes effect when `--loop` is set.\n\nWhen the `--loop` flag is set, agentic fallback is enabled by default:\n```bash\npdd [GLOBAL OPTIONS] fix --manual --loop [OTHER OPTIONS] PROMPT_FILE CODE_FILE UNIT_TEST_FILE\n```\n\nOr you may want to enable it explicitly\n\n```bash\npdd [GLOBAL OPTIONS] fix --manual --loop --agentic-fallback [OTHER OPTIONS] PROMPT_FILE CODE_FILE UNIT_TEST_FILE\n```\n\nTo disable this feature while using `--loop`, add `--no-agentic-fallback` to turn it off.\n\n```bash\npdd [GLOBAL OPTIONS] fix --manual --loop --no-agentic-fallback [OTHER OPTIONS] PROMPT_FILE CODE_FILE UNIT_TEST_FILE\n```\n\n**Prerequisites:**\nFor the agentic fallback to function, you need to have at least one of the supported agent CLIs installed and the corresponding API key configured in your environment. The agents are tried in the following order of preference:\n\n1.  **Anthropic Claude:**\n    *   Requires the `claude` CLI to be installed and in your `PATH`.\n    *   Requires the `ANTHROPIC_API_KEY` environment variable to be set.\n2.  **Google Gemini:**\n    *   Requires the `gemini` CLI to be installed and in your `PATH`.\n    *   Requires the `GOOGLE_API_KEY` or `GEMINI_API_KEY` environment variable to be set.\n3.  **OpenAI Codex\u002FGPT:**\n    *   Requires the `codex` CLI to be installed and in your `PATH`.\n    *   Requires the `OPENAI_API_KEY` environment variable to be set.\n\nYou can configure these keys using `pdd setup` or by setting them in your shell's environment.\n\n#### Agentic E2E Fix Mode\n\nFor fixing end-to-end tests that span multiple dev units, use the agentic E2E fix mode by passing a GitHub issue URL (typically created by `pdd bug`). 
This mode orchestrates an 11-step iterative workflow to fix both unit tests and e2e tests across your codebase, including post-push CI validation and code cleanup.\n\n**How it Works:**\n\nThe workflow analyzes the GitHub issue to extract test information, then iteratively fixes failing tests:\n\n1. **Run Unit Tests**: Execute unit tests from the issue and run `pdd fix` on each failing test sequentially\n2. **Run E2E Tests**: Execute end-to-end tests to identify failures; stop if all pass\n3. **Root Cause Analysis**: Analyze failures against documentation to determine if issues are in code, tests, or both\n4. **Fix E2E Tests**: If e2e tests themselves are incorrect, fix them and return to step 2\n5. **Identify Dev Units**: Determine which dev units are involved in the failures\n6. **Create Unit Tests**: For code bugs, create or append unit tests for the affected dev units\n7. **Verify Tests**: Run new unit tests to confirm they detect the bugs and will pass once fixed\n8. **Run PDD Fix**: Execute `pdd fix` sequentially on failing unit tests for each dev unit\n9. **Verify All**: Final verification that all tests pass locally\n10. **CI Validation**: Poll external CI, retrieve logs on failure, and run an LLM fix loop to remediate CI-specific issues (lint, artifacts, build)\n11. **Code Cleanup**: Review all changes from the workflow and clean up code quality issues (debug statements, unused imports, duplicated code); revert if tests fail\n\n**Resumable Operations:**\n\nState is automatically persisted, allowing you to resume interrupted workflows. Use `--no-resume` to start fresh.\n\n**Cross-Machine Resume**: By default, workflow state is stored in a hidden comment on the GitHub issue, enabling resume from any machine. If you start the workflow on machine A, you can continue from machine B by checking out the branch and running `pdd fix` again. Use `--no-github-state` to disable this feature and use local-only state persistence. 
You can also set `PDD_NO_GITHUB_STATE=1` environment variable.\n\n**Example:**\n```bash\n# Fix tests from a GitHub issue (agentic mode)\npdd fix https:\u002F\u002Fgithub.com\u002Fmyorg\u002Fmyrepo\u002Fissues\u002F42\n\n# With custom timeout and max cycles\npdd fix --timeout-adder 30 --max-cycles 10 https:\u002F\u002Fgithub.com\u002Fmyorg\u002Fmyrepo\u002Fissues\u002F42\n\n# Configure CI retries and validation\npdd fix --ci-retries 5 https:\u002F\u002Fgithub.com\u002Fmyorg\u002Fmyrepo\u002Fissues\u002F42\n\n# Skip post-push CI validation entirely\npdd fix --skip-ci https:\u002F\u002Fgithub.com\u002Fmyorg\u002Fmyrepo\u002Fissues\u002F42\n\n# Start fresh (ignore saved state)\npdd fix --no-resume https:\u002F\u002Fgithub.com\u002Fmyorg\u002Fmyrepo\u002Fissues\u002F42\n\n# Disable GitHub state persistence (local-only)\npdd fix --no-github-state https:\u002F\u002Fgithub.com\u002Fmyorg\u002Fmyrepo\u002Fissues\u002F42\n\n# Protect tests from modification (only fix code, not tests)\npdd fix --protect-tests https:\u002F\u002Fgithub.com\u002Fmyorg\u002Fmyrepo\u002Fissues\u002F42\n```\n\n**Prerequisites:**\n- The `gh` CLI must be installed and authenticated\n- At least one supported agent CLI (Claude, Gemini, or Codex) with API key configured\n- For CI validation, the current branch must have an open PR on GitHub\n\n**Relationship with `pdd bug`:**\n\nThis feature works seamlessly with issues processed by `pdd bug`. The typical workflow is:\n1. Use `pdd bug \u003Cissue_url>` to analyze a bug and generate failing unit tests\n2. Use `pdd fix \u003Cissue_url>` to iteratively fix the failing tests across all affected dev units\n\n### 7. 
split\n\nSplit large complex prompt files into smaller, more manageable prompt files.\n\n```\npdd [GLOBAL OPTIONS] split [OPTIONS] INPUT_PROMPT INPUT_CODE EXAMPLE_CODE\n```\n\nArguments:\n- `INPUT_PROMPT`: The filename of the large prompt file to be split.\n- `INPUT_CODE`: The filename of the code generated from the input prompt.\n- `EXAMPLE_CODE`: The filename of the example code that serves as the interface to the sub-module prompt file.\n\nOptions:\n- `--output-sub LOCATION`: Specify where to save the generated sub-prompt file. The default file name is `sub_\u003Cbasename>.prompt`. If an environment variable `PDD_SPLIT_SUB_PROMPT_OUTPUT_PATH` is set, the file will be saved in that path unless overridden by this option.\n- `--output-modified LOCATION`: Specify where to save the modified prompt file. The default file name is `modified_\u003Cbasename>.prompt`. If an environment variable `PDD_SPLIT_MODIFIED_PROMPT_OUTPUT_PATH` is set, the file will be saved in that path unless overridden by this option.\n\nExample:\n```\npdd [GLOBAL OPTIONS] split --output-sub prompts\u002Fsub_data_processing.prompt --output-modified prompts\u002Fmodified_main_pipeline.prompt data_processing_pipeline_python.prompt src\u002Fdata_pipeline.py examples\u002Fpipeline_interface.py \n```\n\n### 8. change\n\nImplement a change request from a GitHub issue using a 12-step agentic workflow. The workflow researches the feature, ensures requirements are clear (asking clarifying questions if needed), reviews architecture (asking for decisions if needed), analyzes documentation changes, identifies affected dev units, designs prompt modifications, implements them, runs a review loop to identify and fix issues, and creates a PR.\n\n**Agentic Mode (default):**\n```\npdd [GLOBAL OPTIONS] change GITHUB_ISSUE_URL\n```\n\nArguments:\n- `GITHUB_ISSUE_URL`: The URL of the GitHub issue describing the change request.\n\nThe 12-step workflow:\n1. **Duplicate Check**: Search for duplicate issues\n2. 
**Documentation Check**: Verify feature isn't already implemented\n3. **Research**: Web search to clarify specifications and find best practices\n4. **Clarification**: Ensure requirements are clear; ask questions with options if not (stops workflow until answered)\n5. **Documentation Changes**: Analyze what documentation updates are needed\n6. **Identify Dev Units**: Find affected prompts, code, examples, and tests\n7. **Architecture Review**: Identify architectural decisions; ask questions with options if needed (stops workflow until answered)\n8. **Analyze Changes**: Design prompt modifications\n9. **Implement Changes**: Modify prompts in an isolated git worktree\n10. **Identify Issues**: Review changes for problems (part of review loop)\n11. **Fix Issues**: Fix identified issues (part of review loop, max 5 iterations)\n12. **Create PR**: Create a pull request linking to the issue\n\n**Workflow Resumption**: Steps 4 and 7 may pause the workflow to ask clarifying or architectural questions. When this happens, answer the questions in the GitHub issue and run `pdd change` again. The workflow will resume from where it left off, skipping already-completed steps to save tokens.\n\n**Cross-Machine Resume**: By default, workflow state is stored in a hidden comment on the GitHub issue, enabling resume from any machine. If you start the workflow on machine A, you can continue from machine B by checking out the branch and running `pdd change` again. Use `--no-github-state` to disable this feature and use local-only state persistence. You can also set the `PDD_NO_GITHUB_STATE=1` environment variable to disable GitHub state globally.\n\n**Review Loop**: Steps 10-11 form a review loop that identifies and fixes issues iteratively. 
The loop runs until no issues are found (max 5 iterations).\n\n**Worktree Branching Behavior**: When running `pdd change` or `pdd bug`, a new git worktree is created based on your current HEAD:\n- **From main\u002Fmaster**: Branch is based on latest main - creates independent PR\n- **From feature branch**: Branch inherits commits from that branch - useful for stacked\u002Fdependent PRs\n\nIf you want independent changes, run the command from the main branch. A warning will be displayed when running from a non-main branch.\n\nExample (agentic mode):\n```bash\npdd change https:\u002F\u002Fgithub.com\u002Fmyorg\u002Fmyrepo\u002Fissues\u002F239\n```\n\nAfter the workflow completes, a PR is automatically created linking to the issue. The PR includes a `sync_order.sh` script that runs `pdd sync` commands in dependency order. Review the PR and run `.\u002Fsync_order.sh` after merge to regenerate code.\n\n**Manual Mode (legacy):**\n```\npdd [GLOBAL OPTIONS] change --manual [OPTIONS] CHANGE_PROMPT_FILE INPUT_CODE INPUT_PROMPT_FILE\n```\n\nArguments:\n- `CHANGE_PROMPT_FILE`: The filename containing the instructions on how to modify the input prompt file.\n- `INPUT_CODE`: The filename of the code that was generated from the input prompt file, or the directory containing the code files when used with the '--csv' option.\n- `INPUT_PROMPT_FILE`: The filename of the prompt file that will be modified. Required in standard mode; not used when using the '--csv' option.\n\nOptions:\n- `--budget FLOAT`: Set the maximum cost allowed for the change process (default is $5.0).\n- `--output LOCATION`: Specify where to save the modified prompt file. The default file name is `modified_\u003Cbasename>.prompt`. If an environment variable `PDD_CHANGE_OUTPUT_PATH` is set, the file will be saved in that path unless overridden by this option.\n- `--csv`: Use a CSV file for the change prompts instead of a single change prompt file. 
The CSV file should have columns: `prompt_name` and `change_instructions`. When this option is used, `INPUT_PROMPT_FILE` is not needed, and `INPUT_CODE` should be the directory where the code files are located. The command expects prompt names in the CSV to follow the `\u003Cbasename>_\u003Clanguage>.prompt` convention. For each `prompt_name` in the CSV, it will look for the corresponding code file (e.g., `\u003Cbasename>.\u003Clanguage_extension>`) within the specified `INPUT_CODE` directory. Output files will overwrite existing files unless `--output LOCATION` is specified. If `LOCATION` is a directory, the modified prompt files will be saved inside this directory using the default naming convention; otherwise, if a CSV filename is specified, the modified prompts will be saved in that CSV file with columns 'prompt_name' and 'modified_prompt'.\n\nExample (manual single prompt change):\n```\npdd [GLOBAL OPTIONS] change --manual --output modified_factorial_calculator_python.prompt changes_factorial.prompt src\u002Ffactorial_calculator.py factorial_calculator_python.prompt\n```\n\nExample (manual batch change using CSV):\n```\npdd [GLOBAL OPTIONS] change --manual --csv --output modified_prompts\u002F changes_batch.csv src\u002F\n```\n\n### 9. update\n\nUpdate prompts based on code changes. This command operates in two primary modes:\n\n**Agentic Prompt Optimization (Default)**\n\nThe `update` command uses an agentic AI (Claude Code, Gemini, or Codex) by default to produce compact, high-quality prompts. The agent has full file access and performs a 4-step optimization:\n\n1. **Assess Differences**: Reads the prompt (including all `\u003Cinclude>` files) and compares against the modified code\n2. **Filter Using Guide + Tests**: Consults `docs\u002Fprompting_guide.md` and existing tests to determine what belongs in the prompt\n3. **Remove Duplication**: Eliminates redundant content that duplicates included files\n4. 
**Validate**: Ensures the prompt is human-readable and can reliably regenerate the code\n\nThis produces prompts that are more concise while remaining clear to developers and reliable for code generation.\n\n**Prerequisites**: Requires one of these CLI tools installed and configured:\n- `claude` (Anthropic Claude Code)\n- `gemini` (Google Gemini CLI)\n- `codex` (OpenAI Codex CLI)\n\nIf no agentic CLI is available, the command automatically falls back to the legacy 2-stage LLM update process.\n\n**Test-Aware Updates**: When tests exist for a module (e.g., `test_my_module.py`, `test_my_module_1.py`), the agentic update automatically discovers and considers them. Behaviors verified by tests don't need to be explicitly specified in the prompt, resulting in more compact prompts.\n\n**Modes:**\n\n1.  **Repository-Wide Mode (Default)**: When run with no file arguments, `pdd update` scans the entire repository. It finds all code\u002Fprompt pairs, creates any missing prompt files, and updates all of them based on the latest Git changes. This is the easiest way to keep your entire project in sync.\n\n2.  **Single-File Mode**: When you provide file arguments, the command operates on a specific file. There are three distinct use cases for this mode:\n\n    **A) Prompt Generation \u002F Regeneration**\n    To generate a brand new prompt for a code file from scratch, or to regenerate an existing prompt, simply provide the path to that code file. This will create a new prompt file or overwrite an existing one.\n    ```bash\n    pdd update \u003Cpath\u002Fto\u002Fyour_code_file.py>\n    ```\n\n    **B) Prompt Update (using Git)**\n    To update an existing prompt by comparing the modified code against the version in your last commit. 
This requires the prompt file and the modified code file.\n    ```bash\n    pdd update --git \u003Cpath\u002Fto\u002Fprompt.prompt> \u003Cpath\u002Fto\u002Fmodified_code.py>\n    ```\n\n    **C) Prompt Update (Manual)**\n    To update an existing prompt by manually providing the original code, the modified code, and the prompt. This is for scenarios where Git history is not available or desired.\n    ```bash\n    pdd update \u003Cpath\u002Fto\u002Fprompt.prompt> \u003Cpath\u002Fto\u002Fmodified_code.py> \u003Cpath\u002Fto\u002Foriginal_code.py>\n    ```\n\n```bash\n# Repository-Wide Mode (no arguments)\npdd [GLOBAL OPTIONS] update\n\n# Single-File Mode: Examples\n# Generate\u002FRegenerate a prompt for a code file\npdd [GLOBAL OPTIONS] update src\u002Fmy_new_module.py\n\n# Update an existing prompt using Git history\npdd [GLOBAL OPTIONS] update --git factorial_calculator_python.prompt src\u002Fmodified_factorial_calculator.py\n\n# Update an existing prompt by manually providing original code\npdd [GLOBAL OPTIONS] update factorial_calculator_python.prompt src\u002Fmodified_factorial_calculator.py src\u002Foriginal_factorial_calculator.py\n\n# Repository-wide update filtered by extension\npdd [GLOBAL OPTIONS] update --extensions py,js\n```\n\nArguments:\n- `MODIFIED_CODE_FILE`: The filename of the code that was modified or for which a prompt should be generated\u002Fregenerated.\n- `INPUT_PROMPT_FILE`: (Optional) The filename of the prompt file that generated the original code. Required for true update scenarios (B and C).\n- `INPUT_CODE_FILE`: (Optional) The filename of the original code. Required for manual update (C), not required when using `--git` (B), and not applicable for generation (A).\n\n**Important**: By default, this command overwrites the original prompt file to maintain the core PDD principle of \"prompts as source of truth.\"\n\nOptions:\n- `--output LOCATION`: Specify where to save the updated prompt file. 
**If not specified, the original prompt file is overwritten to maintain it as the authoritative source of truth.** If an environment variable `PDD_UPDATE_OUTPUT_PATH` is set, it will be used only when `--output` is explicitly omitted and you want a different default location.\n- `--git`: Use git history to find the original code file, eliminating the need for the `INPUT_CODE_FILE` argument.\n- `--extensions EXTENSIONS`: In repository-wide mode, filter the update to only include files with the specified comma-separated extensions (e.g., `py,js,ts`).\n- `--simple`: Use the legacy 2-stage LLM update process instead of the default agentic mode. Useful when agentic CLIs are not available or for faster updates.\n\nExample (overwrite original prompt - default behavior):\n```\npdd [GLOBAL OPTIONS] update factorial_calculator_python.prompt src\u002Fmodified_factorial_calculator.py src\u002Foriginal_factorial_calculator.py\n# This overwrites factorial_calculator_python.prompt in place\n```\n\nExample (agentic vs simple mode):\n```bash\n# Default: Agentic mode (uses claude\u002Fgemini\u002Fcodex for intelligent optimization)\npdd update --git my_module_python.prompt src\u002Fmy_module.py\n\n# Legacy: Simple 2-stage LLM update (faster, no agentic CLI required)\npdd update --simple --git my_module_python.prompt src\u002Fmy_module.py\n```\n\n\n\n\n### 10. detect\n\nAnalyze a list of prompt files and a change description to determine which prompts need to be changed.\n\n```\npdd [GLOBAL OPTIONS] detect [OPTIONS] PROMPT_FILES... CHANGE_FILE\n```\n\nArguments:\n- `PROMPT_FILES`: A list of filenames of prompts that may need to be changed.\n- `CHANGE_FILE`: Filename whose content describes the changes that need to be analyzed and potentially applied to the prompts.\n\nOptions:\n- `--output LOCATION`: Specify where to save the CSV file containing the analysis results. The default file name is `\u003Cchange_file_basename>_detect.csv`.  
If an environment variable `PDD_DETECT_OUTPUT_PATH` is set, the file will be saved in that path unless overridden by this option.\n- `--stories`: Run user story validation mode. When set, positional `PROMPT_FILES... CHANGE_FILE` arguments are not allowed.\n- `--stories-dir DIR`: Directory containing `story__*.md` files (stories mode only).\n- `--prompts-dir DIR`: Directory containing `.prompt` files (stories mode only).\n- `--include-llm`: Include `*_llm.prompt` files in stories mode.\n- `--fail-fast\u002F--no-fail-fast`: Stop on the first failing story in stories mode (default: `--fail-fast`).\n  - In stories mode, PDD reads optional `pdd-story-prompts` metadata from each story to run prompt-subset (multi-prompt) validation.\n  - If metadata is missing, validation uses all prompts and can auto-cache detected prompt links in the story file.\n\nExample:\n```\npdd [GLOBAL OPTIONS] detect --output detect_results.csv factorial_calculator_python.prompt data_processing_python.prompt web_scraper_python.prompt changes_description.prompt\n```\n\n### 11. conflicts\n\nAnalyze two prompt files to find conflicts between them and suggest how to resolve those conflicts.\n\n```\npdd [GLOBAL OPTIONS] conflicts [OPTIONS] PROMPT1 PROMPT2\n```\n\nArguments:\n- `PROMPT1`: First prompt in the pair of prompts we are comparing.\n- `PROMPT2`: Second prompt in the pair of prompts we are comparing.\n\nOptions:\n- `--output LOCATION`: Specify where to save the CSV file containing the conflict analysis results. The default file name is `\u003Cprompt1_basename>_\u003Cprompt2_basename>_conflict.csv`.  
If an environment variable `PDD_CONFLICTS_OUTPUT_PATH` is set, the file will be saved in that path unless overridden by this option.\n\nExample:\n```\npdd [GLOBAL OPTIONS] conflicts --output conflicts_analysis.csv data_processing_module_python.prompt data_visualization_module_python.prompt \n```\n\nBoth the `detect` and `conflicts` commands generate a csv file with the following columns: `prompt_name` and `change_instructions`. This csv file can be used as input for the `change --csv` command.\n\n### 12. crash\n\nFix errors in a code module and its calling program that caused a program to crash.\n\n```\npdd [GLOBAL OPTIONS] crash [OPTIONS] PROMPT_FILE CODE_FILE PROGRAM_FILE ERROR_FILE\n```\n\nArguments:\n- `PROMPT_FILE`: Filename of the prompt file that generated the code module.\n- `CODE_FILE`: Filename of the code module that caused the crash and will be modified so it runs properly.\n- `PROGRAM_FILE`: Filename of the program that was running the code module. This file will also be modified if necessary to fix the crash.\n- `ERROR_FILE`: Filename of the file containing the errors from the program run.\n\nOptions:\n- `--output LOCATION`: Specify where to save the fixed code file. The default file name is `\u003Cbasename>_fixed.\u003Clanguage_extension>`. If an environment variable `PDD_CRASH_OUTPUT_PATH` is set, the file will be saved in that path unless overridden by this option.\n- `--output-program LOCATION`: Specify where to save the fixed program file. 
The default file name is `\u003Cprogram_basename>_fixed.\u003Clanguage_extension>`.\n- `--loop`: Enable iterative fixing process.\n  - `--max-attempts INT`: Set the maximum number of fix attempts before giving up (default is 3).\n  - `--budget FLOAT`: Set the maximum cost allowed for the fixing process (default is $5.0).\n- `--agentic-fallback \u002F --no-agentic-fallback`: Enable or disable the agentic fallback mode (default: enabled).\n\nWhen the `--loop` option is used, the crash command will attempt to fix errors through multiple iterations. It will use the program to check if the code runs correctly after each fix attempt. The process will continue until either the errors are fixed, the maximum number of attempts is reached, or the budget is exhausted.\n\nIf the iterative process fails, the agentic fallback mode will be triggered (unless disabled with `--no-agentic-fallback`). This mode uses a project-aware CLI agent to attempt a fix with a broader context. For this to work, you need to have at least one of the supported agent CLIs (Claude, Gemini, or Codex) installed and the corresponding API key configured in your environment.\n\nExample:\n```\npdd [GLOBAL OPTIONS] crash --output fixed_data_processor.py --output-program fixed_main_pipeline.py data_processing_module_python.prompt crashed_data_processor.py main_pipeline.py crash_errors.log\n```\n\nExample with loop option:\n```\npdd [GLOBAL OPTIONS] crash --loop --max-attempts 5 --budget 10.0 --output fixed_data_processor.py --output-program fixed_main_pipeline.py data_processing_module_python.prompt crashed_data_processor.py main_pipeline.py crash_errors.log\n```\n\n### 13. 
trace\n\nFind the associated line number between a prompt file and the generated code.\n\n```\npdd [GLOBAL OPTIONS] trace [OPTIONS] PROMPT_FILE CODE_FILE CODE_LINE\n```\n\nArguments:\n- `PROMPT_FILE`: Filename of the prompt file that generated the code.\n- `CODE_FILE`: Filename of the code file to be analyzed.\n- `CODE_LINE`: Line number in the code file that the debugger trace line is on.\n\nOptions:\n- `--output LOCATION`: Specify where to save the trace analysis results. The default file name is `\u003Cbasename>_trace_results.log`.\n\nExample:\n```\npdd [GLOBAL OPTIONS] trace --output trace_results.log factorial_calculator_python.prompt src\u002Ffactorial_calculator.py 42\n```\n\nThis will print out the line number in the prompt file for the associated code line.\n\n### 14. bug\n\nGenerate a unit test from a GitHub issue. The issue serves as the source of truth for both the error output and expected behavior. An agentic workflow analyzes the issue, reproduces the bug, and creates a failing test.\n\n```\npdd [GLOBAL OPTIONS] bug \u003Cgithub-issue-url>\npdd [GLOBAL OPTIONS] bug --manual PROMPT_FILE CODE_FILE PROGRAM_FILE CURRENT_OUTPUT DESIRED_OUTPUT\n```\n\n**How it works (step-by-step with GitHub comments):**\n\n1. **Duplicate check** - Search for existing issues describing the same problem. If found, merge content and close the duplicate. Posts comment with findings.\n\n2. **Documentation check** - Review repo documentation to determine if this is a bug or user error. Posts comment with findings.\n\n3. **Triage** - Assess if enough information is provided to proceed. If the issue already contains a detailed root cause analysis with file paths, line numbers, and causal explanation, fast-tracks to root cause analysis (skipping API research and reproduction). Posts comment requesting more info if needed.\n\n4. **Reproduce** - Attempt to reproduce the issue locally. Posts comment confirming reproduction (or failure to reproduce). 
Skipped when Step 3 fast-tracks.\n\n5. **Root cause analysis** - Run experiments to identify the root cause. Assesses whether the fix is localized or cross-cutting. Performs a variable reference audit to find sibling bugs in parallel code paths and a state symmetry check to detect save\u002Frestore asymmetries. Posts comment explaining the root cause.\n\n5.5. **Prompt classification** - Determine if the bug is in the code implementation or in the prompt specification itself. If the prompt is defective, auto-fix the prompt file. Posts comment with classification and any prompt changes. Defaults to \"code bug\" when uncertain.\n\n6. **Test plan** - Design a plan for creating tests to detect the problem. Enumerates all affected output channels and all distinct code paths (first-run, resume, retry, error recovery) to ensure complete coverage. Prefers appending tests to existing test files over creating new ones. Posts comment with the test plan.\n\n7. **Generate test** - Create the failing unit test. Posts comment with the generated test code.\n\n8. **Verify detection** - Confirm the unit test successfully detects the bug. Classifies whether an E2E test is needed (`E2E_NEEDED: yes|no`) based on bug scope. Posts comment confirming verification.\n\n9. **E2E test** - Generate and run end-to-end tests to verify the bug at integration level. Skipped deterministically when Step 8 outputs `E2E_NEEDED: no`, avoiding unnecessary LLM calls for purely internal bugs. Posts comment with E2E test results or skip reason.\n\n10. **Create draft PR** - Create a draft pull request with the failing tests and link it to the issue. 
Posts comment with PR link.\n\nArguments:\n- `ISSUE_URL`: GitHub issue URL (e.g., https:\u002F\u002Fgithub.com\u002Fowner\u002Frepo\u002Fissues\u002F123)\n\nOptions:\n- `--manual`: Use legacy mode with explicit file arguments (PROMPT_FILE, CODE_FILE, PROGRAM_FILE, CURRENT_OUTPUT, DESIRED_OUTPUT)\n- `--output LOCATION`: Specify where to save the generated unit test. Default: `test_\u003Cmodule>_bug.py`\n- `--language LANG`: Specify the programming language for the unit test (default is \"Python\").\n- `--timeout-adder FLOAT`: Add additional seconds to each step's timeout (default: 0.0)\n- `--no-github-state`: Disable GitHub issue comment-based state persistence, use local-only\n\n**Cross-Machine Resume**: By default, workflow state is stored in a hidden comment on the GitHub issue, enabling resume from any machine. Use `--no-github-state` to disable this feature. You can also set `PDD_NO_GITHUB_STATE=1` environment variable.\n\nExample:\n```bash\n# Agentic mode (recommended)\npdd bug https:\u002F\u002Fgithub.com\u002Fmyorg\u002Fmyrepo\u002Fissues\u002F42\n\n# Manual mode (legacy)\npdd bug --manual prompt.prompt code.py main.py current.txt desired.txt\n```\n\n**Next Step - Fixing the Bug:**\n\nAfter `pdd bug` creates failing tests and a draft PR, use `pdd fix` with the same issue URL to automatically fix the failing tests across all affected dev units:\n\n```bash\npdd fix https:\u002F\u002Fgithub.com\u002Fmyorg\u002Fmyrepo\u002Fissues\u002F42\n```\n\n**Tip:** If `pdd bug` correctly identified the bug and created valid failing tests, use `--protect-tests` to prevent `pdd fix` from modifying the tests. This ensures the LLM only fixes the code to make the tests pass:\n\n```bash\npdd fix --protect-tests https:\u002F\u002Fgithub.com\u002Fmyorg\u002Fmyrepo\u002Fissues\u002F42\n```\n\nSee the [fix command](#6-fix) documentation for details on the agentic E2E fix workflow.\n\n### 15. 
Default is \"project_dependencies.csv\". If the environment variable `PDD_AUTO_DEPS_CSV_PATH` is set, that path will be used unless overridden by this option.
Default: disabled.\n- `--no-dedup`: Skip the redundant inline content removal pass.\n- `--concurrency N`: Maximum number of parallel LLM calls for dependency analysis (default: 1).\n\nThe command uses a two-stage retrieval pipeline when candidates exceed 50:\n1. **Embedding search**: Embeds the prompt and candidate files, retrieving the top-50 candidates by cosine similarity\n2. **LLM reranking**: Uses LLM-as-judge to select the most relevant dependencies from candidates\n\nAfter inserting `\u003Cinclude>` directives, the command performs a **deduplication pass** that identifies and removes inline content in the prompt that semantically duplicates what the included documents already provide.\n\nThe command maintains a CSV file with the following columns:\n- `full_path`: The full path to the dependency file\n- `file_summary`: A one-sentence summary of the file's content and purpose\n- `key_exports`: List of key exports (functions, classes, constants) from the file\n- `dependencies`: List of modules\u002Fpackages the file depends on\n- `date`: Timestamp of when the file was last analyzed\n\n**Note:** Existing CSV files using the old 3-column format (without `key_exports` and `dependencies`) are automatically re-summarized on the next run.\n\nExamples:\n```\n# Search code examples and documentation files\npdd auto-deps --include-docs my_module_python.prompt \"context\u002F\"\n\n# Search only Python examples (skip doc discovery)\npdd auto-deps my_module_python.prompt \"context\u002F*_example.py\"\n\n# Force rescan with custom concurrency\npdd auto-deps --force-scan --concurrency 30 my_module_python.prompt \"context\u002F\"\n\n# Skip redundant content removal\npdd auto-deps --no-dedup my_module_python.prompt \"docs\u002F\"\n```\n\n### 16. verify\n\nVerifies the functional correctness of generated code by executing a specified program (often the output of the `example` command) and using an LLM to judge the program's output against the original prompt's intent. 
No separate expected output file is needed; the LLM determines if the behavior aligns with the prompt requirements. If verification fails, it iteratively attempts to fix the code based on the judged discrepancy, similar to how `fix` and `crash` operate with their respective error signals.\n\n```bash\npdd [GLOBAL OPTIONS] verify [OPTIONS] PROMPT_FILE CODE_FILE PROGRAM_FILE\n```\n\nArguments:\n- `PROMPT_FILE`: Filename of the prompt file that generated the code being verified.\n- `CODE_FILE`: Filename of the code file to be verified and potentially fixed.\n- `PROGRAM_FILE`: Filename of the executable program to run for verification (e.g., the example script generated by `pdd example`). The output of this program run will be judged by the LLM.\n\nOptions:\n- `--output-results LOCATION`: Specify where to save the verification and fixing results log. This log typically contains the final status (pass\u002Ffail), number of attempts, total cost, and potentially LLM reasoning or identified issues. Default: `\u003Cbasename>_verify_results.log`. If an environment variable `PDD_VERIFY_RESULTS_OUTPUT_PATH` is set, the file will be saved in that path unless overridden by this option.\n- `--output-code LOCATION`: Specify where to save the final code file after verification attempts (even if verification doesn't fully succeed). Default: `\u003Cbasename>_verified.\u003Clanguage_extension>`. If an environment variable `PDD_VERIFY_CODE_OUTPUT_PATH` is set, the file will be saved in that path unless overridden by this option.\n- `--output-program LOCATION`: Specify where to save the final program file after verification attempts (even if verification doesn't fully succeed). The default file name is `\u003Cprogram_basename>_verified.\u003Clanguage_extension>`. 
If an environment variable `PDD_VERIFY_PROGRAM_OUTPUT_PATH` is set, the file will be saved in that path unless overridden by this option.\n- `--max-attempts INT`: Set the maximum number of fix attempts within the verification loop before giving up (default is 3).\n- `--budget FLOAT`: Set the maximum cost allowed for the entire verification and iterative fixing process (default is $5.0).\n- `--agentic-fallback \u002F --no-agentic-fallback`: Enable or disable the agentic fallback mode (default: enabled).\n\nThe command operates iteratively if the initial run of `PROGRAM_FILE` produces output judged incorrect by the LLM based on the `PROMPT_FILE`. After each fix attempt on `CODE_FILE`, `PROGRAM_FILE` is re-run, and its output is re-evaluated. This continues until the output is judged correct, `--max-attempts` is reached, or the `--budget` is exhausted. Intermediate code files may be generated during the loop, similar to the `fix` command.\n\nOutputs:\n- Final code file at `--output-code` location (always written when specified, allowing inspection even if verification doesn't fully succeed).\n- Final program file at `--output-program` location (always written when specified, allowing inspection even if verification doesn't fully succeed).\n- Results log file at `--output-results` location detailing the process and outcome.\n- Potentially intermediate code files generated during the fixing loop (timestamp-based naming).\n\nExample:\n```bash\n# Verify calc.py by running examples\u002Frun_calc.py, judging its output against prompts\u002Fcalc_py.prompt\n# Attempt to fix up to 5 times with a $2.50 budget if verification fails.\npdd verify --max-attempts 5 --budget 2.5 --output-code src\u002Fcalc_verified.py --output-results results\u002Fcalc_verify.log prompts\u002Fcalc_py.prompt src\u002Fcalc.py examples\u002Frun_calc.py\n```\n\n**When to use**: Use `verify` after `generate` and `example` for an initial round of functional validation and automated fixing based on *LLM 
judgment of program output against the prompt*. This helps ensure the code produces results aligned with the prompt's intent for a key scenario before proceeding to more granular unit testing (`test`) or fixing specific runtime errors (`crash`) or unit test failures (`fix`).\n\n### 17. checkup\n\nRun an automated health check on a project from a GitHub issue. The checkup workflow explores the project, identifies problems (missing deps, build errors, interface mismatches, failing tests, orphan pages, inconsistent API patterns), optionally fixes them, writes regression and e2e tests, and creates a PR.\n\n```\npdd [GLOBAL OPTIONS] checkup [OPTIONS] GITHUB_ISSUE_URL\n```\n\nArguments:\n- `GITHUB_ISSUE_URL`: GitHub issue URL describing what to check (e.g., \"Check the entire CRM app\")\n\nOptions:\n- `--no-fix`: Report-only mode — discover and report issues without applying fixes\n- `--timeout-adder FLOAT`: Add additional seconds to each step's timeout (default: 0.0)\n- `--no-github-state`: Disable GitHub state persistence, use local-only\n\n**How it works (8-step workflow with iterative fix-verify loop):**\n\n1. **Discover** — Scan project structure, tech stack, and module inventory\n2. **Dependency Audit** — Check all imports resolve, no missing packages, no circular deps\n3. **Build Check** — Run build\u002Fcompile commands, check for syntax\u002Ftype errors\n4. **Interface Check** — Verify cross-module interfaces, frontend nav reachability, API call consistency\n5. **Test Execution** — Run full test suite, identify failures\n6. **Fix Issues** (3 sub-steps):\n   - 6a. Fix discovered issues (missing deps, imports, interfaces, build errors, orphan pages, API patterns)\n   - 6b. Write regression tests for every fix\n   - 6c. Write e2e\u002Fintegration tests for cross-module interactions\n7. **Verify** — Re-run build + tests to confirm all fixes work\n8. 
**Create PR** — Create a pull request with all fixes and tests\n\n**Iterative Fix-Verify Loop**: Steps 3-7 run in a loop (max 3 iterations). If step 7 finds remaining issues, the workflow loops back to step 3 for another pass. The loop exits when step 7 reports \"All Issues Fixed\" or max iterations are reached.\n\n**Git Worktree Isolation**: All fix steps run in an isolated git worktree (`checkup\u002Fissue-{N}` branch), keeping the user's working directory clean.\n\n**Cross-Machine Resume**: Workflow state is stored in a hidden GitHub comment, enabling resume from any machine. Use `--no-github-state` to disable.\n\n**Report-Only Mode**: Use `--no-fix` to run steps 1-5 and 7 without applying fixes — useful for auditing a project's health without making changes.\n\nEach step posts its findings as a comment on the GitHub issue, providing a detailed audit trail.\n\nExample:\n```bash\n# Full checkup with fixes\npdd checkup https:\u002F\u002Fgithub.com\u002Fmyorg\u002Fmyrepo\u002Fissues\u002F42\n\n# Report-only mode (no fixes applied)\npdd checkup --no-fix https:\u002F\u002Fgithub.com\u002Fmyorg\u002Fmyrepo\u002Fissues\u002F42\n\n# With extra timeout for large projects\npdd checkup --timeout-adder 120 https:\u002F\u002Fgithub.com\u002Fmyorg\u002Fmyrepo\u002Fissues\u002F42\n```\n\n### 18. connect\n\n**[RECOMMENDED ENTRY POINT]** Launches a web-based interface for PDD at `localhost:9876`.\n\nThe web interface provides:\n- **Command Execution**: Run any PDD command (`pdd change`, `pdd bug`, `pdd fix`, `pdd sync`, etc.) 
with visual feedback\n- **File Browser**: View and edit prompts, code, and tests in your project\n- **Remote Access**: Access your session from any browser via PDD Cloud\n- **Session Management**: Run multiple sessions with custom names\n\n```bash\npdd [GLOBAL OPTIONS] connect [OPTIONS]\n```\n\nOptions:\n- `--port INT`: Port to listen on (default: 9876).\n- `--host TEXT`: Host to bind to (default: 127.0.0.1).\n- `--allow-remote`: Allow non-localhost connections. When enabled, the server binds to 0.0.0.0 to accept external connections.\n- `--token TEXT`: Bearer token for authentication. Recommended when using `--allow-remote`.\n- `--no-browser`: Don't open the browser automatically when starting the server.\n- `--frontend-url TEXT`: Custom frontend URL to open instead of the default.\n- `--local-only`: Skip cloud registration and run in local-only mode. The session will not be accessible remotely via PDD Cloud.\n- `--session-name TEXT`: Custom session name for identification. Useful when running multiple sessions.\n\nThe command starts a FastAPI server and automatically opens the web interface in your default browser. The server also provides:\n- A REST API for programmatic access to PDD commands\n- API documentation at `http:\u002F\u002Flocalhost:9876\u002Fdocs`\n\n**Remote Session Registration:**\nBy default, `pdd connect` registers with PDD Cloud, allowing you to access your session remotely from any browser. 
The session is automatically deregistered on graceful shutdown (Ctrl+C).\n\nSecurity Notes:\n- By default, the server only accepts connections from localhost (127.0.0.1).\n- Using `--allow-remote` without `--token` will display a security warning and require confirmation.\n- For remote access, always use the `--token` option to require authentication.\n\nExample:\n```bash\n# Start the server with default settings (opens browser automatically)\npdd connect\n\n# Start on a custom port without opening the browser\npdd connect --port 8080 --no-browser\n\n# Allow remote connections with authentication\npdd connect --allow-remote --token \"your-secret-token\"\n\n# Run in local-only mode (no cloud registration)\npdd connect --local-only\n\n# Start with a custom session name for easy identification\npdd connect --session-name \"my-dev-server\"\n```\n\n**When to use**: Use `connect` when you prefer a graphical interface for working with PDD, when demonstrating PDD to others, or when integrating PDD with other tools that can communicate via REST APIs.\n\n### 19. auth\n\nManages authentication with PDD Cloud. The `auth` command provides subcommands for signing in, signing out, checking status, and retrieving authentication tokens.\n\n```bash\npdd [GLOBAL OPTIONS] auth SUBCOMMAND [OPTIONS]\n```\n\n#### Subcommands\n\n##### auth login\n\nSigns in to PDD Cloud. Opens a web browser to complete the authentication process with an ephemeral code.\n\n```bash\npdd auth login\n```\n\n##### auth status\n\nDisplays the active account and current authentication state. Exit code is 0 if authenticated, 1 otherwise.\n\n```bash\npdd auth status [OPTIONS]\n```\n\n**Options:**\n- `--verify`: Verify authentication by actually attempting to refresh the token. 
Without this flag, only cached credentials are checked.\n\n**Examples:**\n```bash\n# Quick check (uses cached credentials)\npdd auth status\n\n# Deep verification (attempts token refresh)\npdd auth status --verify\n```\n\n**Note:** If only a refresh token exists (no cached JWT), the status will show a warning that the token is expired and will refresh on next use. Use `--verify` to actually test if the refresh will succeed, or run `pdd auth login` to refresh the token immediately.\n\n##### auth logout\n\nRemoves the stored authentication configuration for a PDD Cloud account locally.\n\n```bash\npdd auth logout\n```\n\n##### auth token\n\nOutputs the authentication token for the current account. Useful for scripts or programmatic access to PDD Cloud.\n\n```bash\npdd auth token [OPTIONS]\n```\n\n**Options:**\n- `--format [raw|json]`: Output format for the token. Use `raw` for just the token string (default), or `json` for structured output including token and expiration time.\n\n**When to use**: Use `auth` commands to manage your PDD Cloud authentication state. Use `auth login` to authenticate before using cloud features, `auth status` to verify your current session, and `auth token` when you need to pass credentials to scripts or other tools.\n\n### 20. `pdd sessions` - Manage Remote Sessions\n\nThe `sessions` command group allows you to manage remote PDD sessions registered with PDD Cloud. Remote sessions enable you to control PDD instances running on other machines through the web frontend.\n\n#### List Sessions\n\n```bash\npdd sessions list\npdd sessions list --json\n```\n\nLists all active remote sessions associated with your authenticated account. 
Use `--json` for machine-readable output.\n\n#### Session Info\n\n```bash\npdd sessions info \u003Csession_id>\n```\n\nDisplays detailed information about a specific session including project name, cloud URL, status, and last heartbeat time.\n\n#### Cleanup Sessions\n\n```bash\npdd sessions cleanup --stale\npdd sessions cleanup --all\npdd sessions cleanup --all --force\n```\n\n**Options:**\n- `--stale`: Remove only stale sessions (no recent heartbeat)\n- `--all`: Remove all sessions for the current user\n- `--force`: Skip confirmation prompt\n\n**Note:** Sessions are automatically registered when running `pdd connect` (unless `--local-only` is specified) and deregistered on graceful shutdown. Use `pdd sessions cleanup` to manually remove orphaned sessions if a `pdd connect` instance was terminated ungracefully.\n\n**When to use**: Use `sessions list` to discover available remote sessions, `sessions info` to check session details, and `sessions cleanup` to remove stale or orphaned sessions.\n\n### 21. extracts\n\nThe `\u003Cinclude query=\"...\">file\u003C\u002Finclude>` tag in prompts triggers LLM-powered semantic extraction with automatic caching in `.pdd\u002Fextracts\u002F`. Results are **auto-refreshed**: if the source file changes, PDD automatically re-extracts and updates the cache upon processing the `\u003Cinclude ... query>` tag the next time.\n\n```bash\n# Remove orphaned cache entries not referenced by any prompt\npdd extracts prune\n```\n\n### 22. Firecrawl Web Scraping Cache\n\n**Automatic caching** for web content scraped via `\u003Cweb>` tags in prompts. 
Reduces API credit usage by caching results for 24 hours by default.\n\n**How it works:**\n- Transparent and automatic - no manual management needed\n- Cached content stored in `PROJECT_ROOT\u002F.pdd\u002Fcache\u002Ffirecrawl.db`\n- Expired entries automatically skipped when accessed\n- URL normalization (removes tracking parameters, case-insensitive matching)\n- Access tracking for LRU eviction when cache is full\n\n**Configuration (optional):**\n```bash\nexport FIRECRAWL_CACHE_ENABLE=false          # Disable caching (default: true)\nexport FIRECRAWL_CACHE_TTL_HOURS=48          # Cache for 48 hours (default: 24)\nexport FIRECRAWL_CACHE_MAX_SIZE_MB=200       # Max cache size in MB (default: 100)\nexport FIRECRAWL_CACHE_MAX_ENTRIES=2000      # Max number of entries (default: 1000)\nexport FIRECRAWL_CACHE_AUTO_CLEANUP=false    # Disable auto cleanup (default: true)\n```\n\n**Cache management commands:**\n```bash\npdd firecrawl-cache stats              # View cache statistics\npdd firecrawl-cache clear              # Clear all cached entries\npdd firecrawl-cache info               # View cache configuration\npdd firecrawl-cache check \u003Curl>        # Check if a URL is cached\n```\n\n**When to use**: Caching is automatic. Use `stats` to check cache status, `info` to view configuration, `check` to verify if a URL is cached, or `clear` to force re-scraping all URLs.\n\n## Example Review Process\n\nWhen the global `--review-examples` option is used with any command, PDD will present potential few-shot examples that might be used for the current operation. The review process follows these steps:\n\n1. PDD displays the inputs (but not the outputs) of potential few-shot examples.\n2. For each example, you can choose to:\n   - Accept the example (it will be used in the operation)\n   - Exclude the example (it won't be used in this or future operations)\n   - Skip the example (it won't be used in this operation but may be presented again in the future)\n3. 
**No Output Location**: If no output location is specified and no environment variable is set, the file will be saved in the current working directory with a default name determined by the command.
**Command-Specific Help**:\n   ```\n   pdd COMMAND --help\n   ```\n   Provides detailed help for a specific command, including available options and usage examples.\n\n## Additional Features\n\n- **Tab Completion**: `pdd setup` installs tab completion automatically. If you only need to refresh the completion script, run `pdd install_completion` directly.\n- **Colorized Output**: PDD provides colorized output for better readability in compatible terminals.\n\n\n## Configuration\n\nPDD supports multiple configuration methods to customize its behavior for different project structures and contexts.\n\n### Project Configuration File (.pddrc)\n\n**Recommended for multi-context projects** (e.g., monorepos with backend\u002Ffrontend)\n\nCreate a `.pddrc` file in your project root to define different contexts with their own settings:\n\n```yaml\n# .pddrc - committed to version control\nversion: \"1.0\"\ncontexts:\n  backend:\n    paths: [\"backend\u002F**\", \"api\u002F**\", \"server\u002F**\"]\n    defaults:\n      generate_output_path: \"backend\u002Fsrc\u002F\"\n      test_output_path: \"backend\u002Ftests\u002F\"\n      example_output_path: \"backend\u002Fexamples\u002F\"\n      default_language: \"python\"\n      target_coverage: 90.0\n      strength: 0.8\n  \n  frontend:\n    paths: [\"frontend\u002F**\", \"web\u002F**\", \"ui\u002F**\"]\n    defaults:\n      generate_output_path: \"frontend\u002Fsrc\u002F\"\n      test_output_path: \"frontend\u002F__tests__\u002F\"\n      example_output_path: \"frontend\u002Fexamples\u002F\"\n      default_language: \"typescript\"\n      target_coverage: 85.0\n      strength: 0.7\n  \n  shared:\n    paths: [\"shared\u002F**\", \"common\u002F**\", \"lib\u002F**\"]\n    defaults:\n      generate_output_path: \"shared\u002Flib\u002F\"\n      test_output_path: \"shared\u002Ftests\u002F\"\n      default_language: \"python\"\n      target_coverage: 95.0\n  \n  # Fallback for unmatched paths\n  default:\n    defaults:\n      
generate_output_path: \"src\u002F\"\n      test_output_path: \"tests\u002F\"\n      default_language: \"python\"\n      target_coverage: 90.0\n      strength: 0.5\n```\n\n**Context Detection**:\nPDD automatically detects the appropriate context based on:\n1. **Current directory path**: Matches against the `paths` patterns in each context\n2. **Manual override**: Use `--context CONTEXT_NAME` to specify explicitly\n3. **Fallback**: Uses `default` context if no path matches\n\n**Available Context Settings**:\n- `prompts_dir`: Directory where prompt files are located (default: \"prompts\")\n- `generate_output_path`: Where generated code files are saved\n- `test_output_path`: Where test files are saved\n- `example_output_path`: Where example files are saved\n- `default_language`: Default programming language for the context\n- `target_coverage`: Default test coverage target\n- `strength`: Default AI model strength (0.0-1.0)\n- `temperature`: Default AI model temperature\n- `budget`: Default budget for iterative commands\n- `max_attempts`: Default maximum attempts for fixing operations\n\n**Path Behavior**:\n- Paths ending with `\u002F` are treated as explicit directories and do **not** preserve subdirectory basenames (e.g., `commands\u002Fanalysis` -> `pdd\u002Fanalysis.py`).\n- Paths without trailing `\u002F` preserve subdirectory basenames when the path is an existing directory (e.g., `commands\u002Fanalysis` -> `pdd\u002Fcommands\u002Fanalysis.py`).\n\n**Usage Examples**:\n```bash\n# Auto-detect context from current directory\ncd backend && pdd --force sync calculator     # Uses backend context\ncd frontend && pdd --force sync dashboard     # Uses frontend context\n\n# Explicit context override\npdd --context backend sync calculator\npdd --context frontend sync dashboard\n\n# List available contexts\npdd --list-contexts\n```\n\n### Environment Variables\n\nPDD uses several environment variables to customize its behavior:\n\n#### Core Environment Variables\n\n- 
**`PDD_PATH`**: Points to the root directory of PDD. This is automatically set during pip installation to the directory where PDD is installed. You typically don't need to set this manually.\n- **`PDD_AUTO_UPDATE`**: Controls whether PDD automatically updates itself (default: true).\n- **`PDD_CONFIG_PATH`**: Override the default `.pddrc` file location (default: searches upward from current directory).\n- **`PDD_DEFAULT_CONTEXT`**: Default context to use when no context is detected (default: \"default\").\n- **`PDD_DEFAULT_LANGUAGE`**: Global default programming language when not specified in context (default: \"python\").\n\n#### Agentic Workflow Variables\n\n- **`CLAUDE_MODEL`**: Override the model used by Claude CLI in agentic workflows (e.g., `claude-sonnet-4-5-20250929`). When set, passes `--model` to the Claude CLI command. No default; only used if explicitly set.\n- **`PDD_USER_FEEDBACK`**: Inject user feedback from GitHub issue comments into agentic task instructions. Set by the GitHub App executor to pass feedback from previous execution attempts. No default.\n- **`PDD_GH_TOKEN_FILE`**: Path to a file containing a fresh GitHub App installation token. When set, the e2e fix orchestrator reads a new token from this file on push auth failure and retries once. The token file is written and refreshed by the cloud job runner (pdd_cloud). 
No default; only used in cloud-hosted job environments.\n\n#### Output Path Variables\n\n**Note**: When using `.pddrc` configuration, context-specific settings take precedence over these global environment variables.\n\n- **`PDD_PROMPTS_DIR`**: Default directory where prompt files are located (default: \"prompts\").\n- **`PDD_GENERATE_OUTPUT_PATH`**: Default path for the `generate` command.\n- **`PDD_EXAMPLE_OUTPUT_PATH`**: Default path for the `example` command.\n- **`PDD_TEST_OUTPUT_PATH`**: Default path for the unit test file.\n- **`PDD_TEST_COVERAGE_TARGET`**: Default target coverage percentage.\n- **`PDD_PREPROCESS_OUTPUT_PATH`**: Default path for the `preprocess` command.\n- **`PDD_FIX_TEST_OUTPUT_PATH`**: Default path for the fixed unit test files in the `fix` command.\n- **`PDD_FIX_CODE_OUTPUT_PATH`**: Default path for the fixed code files in the `fix` command.\n- **`PDD_FIX_RESULTS_OUTPUT_PATH`**: Default path for the results file generated by the `fix` command.\n- **`PDD_SPLIT_SUB_PROMPT_OUTPUT_PATH`**: Default path for the sub-prompts generated by the `split` command.\n- **`PDD_SPLIT_MODIFIED_PROMPT_OUTPUT_PATH`**: Default path for the modified prompts generated by the `split` command.\n- **`PDD_CHANGE_OUTPUT_PATH`**: Default path for the modified prompts generated by the `change` command.\n- **`PDD_UPDATE_OUTPUT_PATH`**: Default path for the updated prompts generated by the `update` command.\n- **`PDD_OUTPUT_COST_PATH`**: Default path for the cost tracking CSV file.\n- **`PDD_DETECT_OUTPUT_PATH`**: Default path for the CSV file generated by the `detect` command.\n- **`PDD_CONFLICTS_OUTPUT_PATH`**: Default path for the CSV file generated by the `conflicts` command.\n- **`PDD_CRASH_OUTPUT_PATH`**: Default path for the fixed code file generated by the `crash` command.\n- **`PDD_CRASH_PROGRAM_OUTPUT_PATH`**: Default path for the fixed program file generated by the `crash` command.\n- **`PDD_TRACE_OUTPUT_PATH`**: Default path for the trace analysis results 
generated by the `trace` command.\n- **`PDD_BUG_OUTPUT_PATH`**: Default path for the unit test file generated by the `bug` command.\n- **`PDD_AUTO_DEPS_OUTPUT_PATH`**: Default path for the modified prompt files generated by the `auto-deps` command.\n- **`PDD_AUTO_DEPS_CSV_PATH`**: Default path and filename for the CSV file used by the auto-deps command to store dependency information. If set, this overrides the default \"project_dependencies.csv\" filename.\n- **`PDD_AUTO_DEPS_CONCURRENCY`**: Default maximum number of parallel LLM calls for auto-deps dependency analysis (default: 1).\n- **`PDD_EMBEDDING_MODEL`**: Embedding model used for two-stage retrieval in auto-deps (default: `text-embedding-3-small`).\n- **`PDD_VERIFY_RESULTS_OUTPUT_PATH`**: Default path for the results log file generated by the `verify` command.\n- **`PDD_VERIFY_CODE_OUTPUT_PATH`**: Default path for the final code file generated by the `verify` command.\n- **`PDD_VERIFY_PROGRAM_OUTPUT_PATH`**: Default path for the final program file generated by the `verify` command.\n- **`PDD_CLOUD_TIMEOUT`**: Cloud request timeout in seconds. Default is 900 (15 minutes). Increase this value if you experience timeouts with long-running cloud operations.\n\n### Configuration Priority\n\nPDD resolves configuration settings in the following order (highest to lowest priority):\n\n1. **Command-line options** (e.g., `--output`, `--strength`)\n2. **Context-specific settings** (from `.pddrc` file)\n3. **Global environment variables** (e.g., `PDD_GENERATE_OUTPUT_PATH`)\n4. 
**Built-in defaults**\n\n### Migration from Environment Variables\n\nIf you're currently using environment variables, you can migrate to `.pddrc` configuration:\n\n```bash\n# Before: Environment variables\nexport PDD_GENERATE_OUTPUT_PATH=backend\u002Fsrc\u002F\nexport PDD_TEST_OUTPUT_PATH=backend\u002Ftests\u002F\nexport PDD_DEFAULT_LANGUAGE=python\n\n# After: .pddrc file\ncontexts:\n  default:\n    defaults:\n      generate_output_path: \"backend\u002Fsrc\u002F\"\n      test_output_path: \"backend\u002Ftests\u002F\" \n      default_language: \"python\"\n```\n\nThe `.pddrc` approach is recommended for team projects as it ensures consistent configuration across all team members and can be version controlled.\n\n\n### Model Configuration (`llm_model.csv`)\n\nPDD uses a CSV file (`llm_model.csv`) to store information about available AI models, their costs, capabilities, and required API key names.\n\nWhen running commands locally, PDD determines which configuration file to use based on the following priority:\n\n1.  **User-specific:** `~\u002F.pdd\u002Fllm_model.csv` - If this file exists, it takes precedence over any project-level configuration. This allows users to maintain a personal, system-wide model configuration.\n2.  **Project-specific:** `\u003CPROJECT_ROOT>\u002F.pdd\u002Fllm_model.csv` - If the user-specific file is not found, PDD looks for the file within the `.pdd` directory of the determined project root (based on `PDD_PATH` or auto-detection).\n3.  
**Package default:** If neither of the above exist, PDD falls back to the default configuration bundled with the package installation.\n\nThis tiered approach allows for both shared project configurations and individual user overrides, while ensuring PDD works out-of-the-box without requiring manual configuration.\n\n**Note:** You can manually edit this CSV, but running `pdd setup` again is the recommended way to add providers and update models.\n\n*Note: This file-based configuration primarily affects local operations and utilities. Cloud execution modes likely rely on centrally managed configurations.*\n\n\nThese environment variables allow you to set default output locations for each command. If an environment variable is set and the corresponding `--output` option is not used in the command, PDD will use the path specified by the environment variable. This can help streamline your workflow by reducing the need to specify output paths for frequently used commands.\n\nFor example, if you set `PDD_GENERATE_OUTPUT_PATH=\u002Fpath\u002Fto\u002Fgenerated\u002Fcode\u002F`, all files created by the `generate` command will be saved in that directory by default, unless overridden by the `--output` option in the command line.\n\nTo set these environment variables, you can add them to your shell configuration file (e.g., `.bashrc` or `.zshrc`) or set them before running PDD commands:\n\n```bash\nexport PDD_GENERATE_OUTPUT_PATH=\u002Fpath\u002Fto\u002Fgenerated\u002Fcode\u002F\nexport PDD_TEST_OUTPUT_PATH=\u002Fpath\u002Fto\u002Ftests\u002F\n# ... 
set other variables as needed\n\npdd generate factorial_calculator_python.prompt  # Output will be saved in \u002Fpath\u002Fto\u002Fgenerated\u002Fcode\u002F\n```\n\nThis feature allows for more flexible and customized setups, especially in team environments or when working across multiple projects with different directory structures.\n\n## Error Handling\n\nPDD provides informative error messages when issues occur during command execution. Common error scenarios include:\n\n- Invalid input files or formats\n- Insufficient permissions to read\u002Fwrite files\n- AI model-related errors (e.g., API failures)\n- Prompt exceeding model context window — PDD validates prompt token count before sending to the LLM. If the prompt exceeds the model's context limit, PDD reports the token count, the model's limit, usage percentage, and which prompt caused the overflow. When multiple candidate models are configured, PDD automatically falls back to the next model.\n- Syntax errors in generated code\n\nWhen an error occurs, PDD will display a message describing the issue and, when possible, suggest steps to resolve it.\n\n## Cloud Features\n\nWhen running in cloud mode (default), PDD provides additional features:\n\n1. **Shared Examples**: Access to a growing database of community-contributed examples\n2. **Automatic Updates**: Latest improvements and bug fixes are automatically available\n3. **Cost Optimization**: Smart model selection and caching to minimize costs\n4. **Usage Analytics**: Track your team's usage and costs through the PDD Cloud dashboard\n5. **Collaboration**: Share prompts and generated code with team members\n\nTo access the PDD Cloud dashboard: https:\u002F\u002Fpromptdriven.ai\u002F\n\nHere you can:\n- View usage statistics\n- Manage team access\n- Configure default settings\n- Access shared examples\n- Track costs\n\n## Troubleshooting\n\nHere are some common issues and their solutions:\n\n1. 
**Command not found**: Ensure PDD is properly installed and added to your system's PATH.\n\n2. **Permission denied errors**: Check that you have the necessary permissions to read input files and write to output locations.\n\n3. **AI model not responding**: Verify your internet connection and check the status of the AI service.\n\n4. **Unexpected output**: Try adjusting the `--strength` and `--temperature` parameters to fine-tune the AI model's behavior.\n\n5. **High costs**: Use the `--output-cost` option to track usage and set appropriate budgets for the `fix` command's `--budget` option.\n\n6. **Dependency scanning issues**: If the `auto-deps` command fails to identify relevant dependencies:\n   - Check that the file paths and glob patterns are correct\n   - Use the `--force-scan` option to ensure all files are re-analyzed\n   - Verify the CSV file format and content\n   - Check file permissions for the dependency directory\n   - For documentation dependencies, ensure `.md`\u002F`.txt`\u002F`.rst` files are in the search path and `--include-docs` is set\n   - If redundant content removal is too aggressive, use `--no-dedup` to skip it\n\n7. **Command Timeout**:\n   - Check internet connection\n   - Try running with `--local` flag to compare\n   - Increase timeout with `export PDD_CLOUD_TIMEOUT=1800` (30 minutes) for long-running operations\n   - If persistent, check PDD Cloud status page\n\n8. 
**Context Window Overflow**: If you see \"Prompt exceeds context limit\" errors:\n   - The error message includes token count and model limit — use this to gauge how much to reduce\n   - Reduce the size of your prompt files or split into smaller modules\n   - Remove unnecessary `\u003Cinclude>` directives or use targeted excerpts instead of full files\n   - Use a model with a larger context window (e.g., Gemini with 1M tokens, or Claude which automatically uses the 1M beta header)\n   - Run with `--verbose` to see exact token counts and context usage percentages\n   - If using `auto-deps`, review included dependencies for unnecessary bulk\n\n9. **Sync-Specific Issues**:\n   - **\"Another sync is running\"**: Check for stale locks in `.pdd\u002Flocks\u002F` directory and remove if process no longer exists\n   - **Complex conflict resolution problems**: Use `pdd --verbose sync --dry-run basename` to see detailed LLM reasoning and decision analysis\n   - **State corruption or unexpected behavior**: Delete `.pdd\u002Fmeta\u002F{basename}_{language}.json` to reset fingerprint state\n   - **Animation display issues**: Sync operations work in background; animation is visual feedback only and doesn't affect functionality\n   - **Fingerprint mismatches**: Use `pdd sync --dry-run basename` to see what changes were detected and why operations were recommended\n\nIf you encounter persistent issues, consult the PDD documentation or post an issue on GitHub for assistance.\n\n## Security Considerations\n\nWhen using PDD, keep the following security considerations in mind:\n\n1. **Code Execution**: PDD generates and modifies code. Always review generated code before execution, especially in production environments.\n\n2. **Data Privacy**: Avoid using sensitive data in prompts or code files, as this information may be processed by the AI model.\n\n3. 
**API Keys**: If PDD requires API keys for AI model access, store these securely and never include them in version control systems.\n\n4. **Input Validation**: PDD assumes input files are trustworthy. Implement proper input validation if using PDD in a multi-user or networked environment.\n\n5. **Output Handling**: Treat output files with the same security considerations as you would any other code or configuration files in your project.\n\n6. **Dependency Analysis**: When using the `auto-deps` command, be cautious with untrusted dependency files and verify the generated summaries before including them in your prompts.\n\nWhen using PDD in cloud mode:\n\n1. **Authentication**: \n   - PDD uses GitHub SSO for secure authentication\n   - Tokens are stored securely in your system's credential manager\n   - No need to manage API keys manually\n\n2. **Data Privacy**:\n   - All data is encrypted in transit and at rest\n   - Prompts and generated code are associated only with your account\n   - You can delete your data at any time through the dashboard\n\n3. **Team Access**:\n   - Manage team member access through GitHub organizations\n   - Set up fine-grained permissions for different commands\n   - Track usage per team member\n\nAdditionally:\n- Consider disabling auto-updates in production environments using `PDD_AUTO_UPDATE=false`\n- Implement a controlled update process for production systems\n- Review changelogs before manually updating PDD in sensitive environments\n\n## Workflow Integration\n\nPDD can be integrated into various development workflows. Here are the conceptual models for key workflow patterns:\n\n### Initial Development\n\n**Conceptual Flow**: `auto-deps → generate → example → crash → verify → test → fix`\n\n**Purpose**: Create new functionality from scratch with proper testing and verification.\n\n**Process**:\n1. Identify and inject dependencies for your prompt (`auto-deps`).\n2. Generate full implementation code from the prompt (`generate`).\n3. 
Create reusable interface examples (`example`).\n4. Ensure the code runs without crashing and fix runtime errors (`crash`).\n5. Run the example and use an LLM to check if the output aligns with the prompt's intent, attempting iterative fixes if necessary (`verify`).\n6. Generate comprehensive unit tests for the implementation (`test`).\n7. Fix any issues revealed by the unit tests (`fix`).\n\n**Key Insight**: This workflow follows a progression from concept to verified implementation, ensuring the code runs (`crash`) before checking functional output (`verify`) and detailed unit testing (`test`).\n\n### Code-to-Prompt Update\n\n**Conceptual Flow**: `update → detect → change`\n\n**Purpose**: Maintain prompt as source of truth after code changes.\n\n**Process**:\n1. Synchronize direct code changes back to the original prompt\n2. Detect other prompts that might be affected by these changes\n3. Apply necessary changes to dependent prompts\n\n**Key Insight**: This bidirectional flow ensures prompts remain the source of truth even when code changes happen first.\n\n### Refactoring\n\n**Conceptual Flow**: `split → auto-deps → example`\n\n**Purpose**: Break large prompts into modular components.\n\n**Process**:\n1. Extract specific functionality from a large prompt into a separate prompt\n2. Ensure the new prompt has all dependencies it needs\n3. Create interface examples for the extracted functionality\n\n**Key Insight**: Just as code should be modular, prompts benefit from decomposition into focused, reusable components.\n\n### Debugging Workflows\n\n#### Prompt Context Issues\n**Conceptual Flow**: `preprocess → generate`\n\n**Purpose**: Resolve issues with prompt interpretation or preprocessing.\n\n**Process**:\n1. Examine how the prompt is being preprocessed\n2. Regenerate code with improved prompt clarity\n\n#### Runtime Crash Debugging\n**Conceptual Flow**: `generate → example → crash`\n\n**Purpose**: Fix code that fails to execute.\n\n**Process**:\n1. 
Generate initial code from prompt\n2. Create examples and test programs\n3. Fix runtime errors to make code executable\n\n#### Logical Bug Fixing\n**Conceptual Flow**: `bug → fix`\n\n**Purpose**: Correct code that runs but produces incorrect results.\n\n**Process**:\n1. Generate test cases that demonstrate the bug\n2. Fix the code to pass the tests\n\n#### Debugger-Guided Analysis\n**Conceptual Flow**: `trace → [edit prompt]`\n\n**Purpose**: Identify which prompt sections produce problematic code.\n\n**Process**:\n1. Locate the relationship between code lines and prompt sections\n2. Update relevant prompt sections\n\n### Multi-Prompt Architecture\n\n**Conceptual Flow**: `conflicts\u002Fdetect → change → generate → example → test`\n\n**Purpose**: Coordinate multiple prompts derived from higher-level requirements.\n\n**Process**:\n1. Identify conflicts or dependencies between prompts\n2. Harmonize the prompts to work together\n3. Regenerate code from updated prompts\n4. Update interface examples after changes\n5. Verify system integration with tests\n\n**Key Insight**: Complex systems require coordination between prompts, just as they do between code modules.\n\n### Feature Enhancement\n\n**Conceptual Flow**: `change → generate → example → test → fix`\n\n**Purpose**: Add new capabilities to existing functionality.\n\n**Process**:\n1. Modify prompts to describe new features\n2. Regenerate code with enhanced functionality\n3. Update examples to demonstrate new features\n4. Test to verify correct implementation\n5. Fix any issues that arise\n\n**Key Insight**: Feature additions should flow from prompt changes rather than direct code modifications.\n\n### CI Drift Detection & Auto-Heal\n\n**Conceptual Flow**: `detect drift → heal (update\u002Fsync) → commit → push`\n\n**Purpose**: Automatically detect and fix prompt\u002Fexample drift in CI pipelines.\n\n**Process**:\n1. Scan modules for drift using `sync_determine_operation` (no LLM calls)\n2. 
For stale prompts: run `pdd update` to sync code changes back to prompts\n3. For stale examples: run `pdd sync` with example+verify operations\n4. Stage and commit healed files with a descriptive message\n5. Push changes to the current branch\n\n**Usage:**\n```bash\n# Scan all modules (main branch trigger)\npython -m pdd.ci_drift_heal\n\n# Scan specific modules (PR trigger)\npython -m pdd.ci_drift_heal --modules module_a module_b\n\n# With budget cap and skip-ci flag\npython -m pdd.ci_drift_heal --budget-cap 5.00 --skip-ci\n```\n\n**Key Options:**\n- `--modules`: Limit detection to specific modules (for PR-scoped checks)\n- `--budget-cap FLOAT`: Maximum dollar amount for LLM healing calls\n- `--skip-ci`: Add `[skip ci]` to commit message (prevents CI re-trigger)\n\n**Key Insight**: This workflow automates the Code-to-Prompt Update pattern for CI, ensuring prompts stay in sync with code changes without manual intervention.\n\n### Critical Dependencies\n\nWhen using these workflows, remember these crucial tool dependencies:\n\n- 'generate' must be done before 'example' or 'test'\n- 'crash' is used to fix runtime errors and make code runnable\n- 'fix' requires runnable code created\u002Fverified by 'crash'\n- 'test' must be created before using 'fix'\n- Always update 'example' after major prompt interface changes\n- 'ci_drift_heal' requires modules to have existing prompts and code files\n\nFor detailed command examples for each workflow, see the respective command documentation sections.\n\n### CI Auto-Heal\n\n**Workflow File**: `.github\u002Fworkflows\u002Fauto-heal-drift.yml`\n\n**Purpose**: Automatically detect and fix prompt-code drift in CI.\n\n**Triggers**:\n- **Pull requests**: Heals only modules changed by the PR, commits fixes to the PR branch\n- **Push to main**: Heals all modules, commits fixes directly to main\n\n**Loop prevention**: Commits from auto-heal use `chore: auto-heal [skip ci]` message; the workflow skips runs triggered by this 
pattern.\n\n**Configuration**: Set `PDD_BUDGET_CAP` repository variable to control LLM spend per run (default: `5.00`).\n\nFor full details, see [docs\u002Fci-auto-heal.md](docs\u002Fci-auto-heal.md).\n\n## Integrations\n\nPDD offers integrations to streamline its use within your development environment:\n\n### VS Code Extension\n\nA dedicated VS Code extension (`utils\u002Fvscode_prompt`) provides syntax highlighting, snippets, and potentially other features for working with `.prompt` files directly within the editor. The extension is compatible with all OpenVSX-compatible IDEs including VS Code, Cursor, VSCodium, Gitpod, Kiro, Windsurf, and other editors that support the OpenVSX marketplace. Refer to the [extension's README](utils\u002Fvscode_prompt\u002FREADME.md) for installation and usage details.\n\n### MCP Server (for Agentic Clients)\n\nThe `pdd-mcp-server` (`utils\u002Fmcp`) acts as a bridge using the Model Context Protocol (MCP). This allows agentic clients like Cursor, Claude Desktop, Continue.dev, and others to invoke `pdd-cli` commands programmatically. See the [MCP Server README](utils\u002Fmcp\u002FREADME.md) for configuration and usage instructions.\n\n### CI Drift Detection\n\nPDD includes a CI-ready drift detection and auto-heal script (`pdd\u002Fci_drift_heal.py`) that can be integrated into GitHub Actions or other CI systems. It scans for prompt\u002Fexample drift, heals it using `pdd update` and `pdd sync`, and commits the results. See the [Workflow Integration](#ci-drift-detection--auto-heal) section for usage details.\n\n## Utilities\n\n### Update LLM Model Data (`pdd\u002Fupdate_model_costs.py`)\n\nThis script automatically updates the `llm_model.csv` file. 
**It prioritizes updating the user-specific configuration at `~\u002F.pdd\u002Fllm_model.csv` if it exists.** Otherwise, it targets the file specified by the `--csv-path` argument (defaulting to `\u003CPROJECT_ROOT>\u002F.pdd\u002Fllm_model.csv`).\n\nIt uses the `litellm` library to:\n*   Fetch and fill in missing input\u002Foutput costs for listed models (converting per-token costs to per-million-token costs).\n*   Compare existing costs against LiteLLM data and report mismatches (without overwriting).\n*   Check and update the `structured_output` flag (True\u002FFalse) based on `litellm.supports_response_schema`.\n*   Validate model identifiers using `litellm` before processing.\n\n**Usage:**\n\n```bash\nconda activate pdd\n# The script will automatically check ~\u002F.pdd\u002Fllm_model.csv first.\n# If not found, it will use the path given by --csv-path (or the default project path).\npython pdd\u002Fupdate_model_costs.py [--csv-path path\u002Fto\u002Fyour\u002Fproject\u002Fllm_model.csv]\n```\n\n*Note: The `max_reasoning_tokens` column requires manual maintenance.*\n\n## Patents\n\nOne or more patent applications covering aspects of the PDD workflows and systems are pending. This notice does not grant any patent rights; rights are as specified in the [LICENSE](LICENSE).\n\n## Conclusion\n\nPDD (Prompt-Driven Development) CLI provides a comprehensive set of tools for managing prompt files, generating code, creating examples, running tests, and handling various aspects of prompt-driven development. By leveraging the power of AI models and iterative processes, PDD aims to streamline the development workflow and improve code quality.\n\nThe various commands and options allow for flexible usage, from simple code generation to complex workflows involving multiple steps. 
The ability to track costs and manage output locations through environment variables further enhances the tool's utility in different development environments.\n\nWith the consistent argument order placing prompt files first, PDD emphasizes its prompt-driven nature and provides a more intuitive interface for users. This consistency across commands should make the tool easier to learn and use effectively.\n\nAs you become more familiar with PDD, you can compose richer workflows by chaining commands in shell scripts, task runners, or CI pipelines while leveraging the full range of options available. Always refer to the latest documentation and use the built-in help features to make the most of PDD in your development process.\n\nRemember to stay mindful of security considerations, especially when working with generated code or sensitive data. Regularly update PDD to access the latest features and improvements.\n\nHappy coding with PDD!\n","# PDD（提示驱动开发）命令行界面\n\n![PDD-CLI 版本](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Fpdd--cli-v0.0.179-blue) [![Discord](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FDiscord-join%20chat-7289DA.svg?logo=discord&logoColor=white)](https:\u002F\u002Fdiscord.gg\u002FYp4RTh8bG7)\n\n## 简介\n\nPDD（提示驱动开发）是一个用于 AI 驱动的代码生成与维护的工具集。\n\n**开始使用非常简单：**\n\n```bash\n# 安装并运行\nuv tool install pdd-cli\npdd setup\npdd connect\n```\n\n这将启动一个位于 `localhost:9876` 的 Web 界面，您可以在其中：\n- 自动实现 GitHub 问题\n- 根据提示生成和测试代码\n- 可视化地管理您的 PDD 项目\n\n\u003Cp align=\"center\">\n  \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fpromptdriven_pdd_readme_913020f6e87e.gif\" alt=\"PDD 手绘演示\" \u002F>\n\u003C\u002Fp>\n\n对于命令行用户，PDD 还提供了强大的 **代理式命令**，可自动实现 GitHub 问题：\n- `pdd change \u003Cissue-url>` - 实现功能请求（12 步流程）\n- `pdd bug \u003Cissue-url>` - 为 Bug 创建失败测试\n- `pdd fix \u003Cissue-url>` - 修复失败测试\n- `pdd generate \u003Cissue-url>` - 从 PRD 问题生成 architecture.json（11 步流程）\n- `pdd test \u003Cissue-url>` - 从问题描述生成 UI 测试（包含探索性测试、契约验证、无障碍审计的 18 
步流程）\n\n对于基于提示的工作流，**`sync`** 命令通过智能决策、实时可视化反馈和复杂的状态管理，自动化整个开发周期。\n\n## 白皮书\n\n如需详细了解提示驱动开发的概念、架构及优势，请参阅我们的完整白皮书。本文档深入探讨了 PDD 的理念、其相对于传统开发的优势，并包含基准测试和案例研究。\n\n[阅读包含基准测试的完整白皮书](docs\u002Fwhitepaper_with_benchmarks\u002Fwhitepaper_w_benchmarks.md)\n\n另请参阅《提示驱动开发教义》，了解核心原则和实践：[docs\u002Fprompt-driven-development-doctrine.md](docs\u002Fprompt-driven-development-doctrine.md)\n\n## 安装\n\n### macOS 前置条件\n\n在 macOS 上，安装 PDD 之前需要先安装一些前置条件：\n\n1. **安装 Xcode 命令行工具**（Python 编译所需）：\n   ```bash\n   xcode-select --install\n   ```\n\n2. **安装 Homebrew**（推荐的 macOS 包管理器）：\n   ```bash\n   \u002Fbin\u002Fbash -c \"$(curl -fsSL https:\u002F\u002Fraw.githubusercontent.com\u002FHomebrew\u002Finstall\u002FHEAD\u002Finstall.sh)\"\n   ```\n   \n   安装完成后，将 Homebrew 添加到您的 PATH 中：\n   ```bash\n   echo 'eval \"$(\u002Fopt\u002Fhomebrew\u002Fbin\u002Fbrew shellenv)\"' >> ~\u002F.zprofile && eval \"$(\u002Fopt\u002Fhomebrew\u002Fbin\u002Fbrew shellenv)\"\n   ```\n\n3. **安装 Python**（如果尚未安装）：\n   ```bash\n   # 检查是否已安装 Python\n   python3 --version\n   \n   # 如果未找到 Python，则通过 Homebrew 安装\n   brew install python\n   ```\n   \n   **注意**：较新的 macOS 版本不再预装 Python。PDD 需要 Python 3.8 或更高版本。`brew install python` 命令会安装最新的 Python 3 版本。\n\n### 推荐方法：uv\n\n我们建议使用 [uv 包管理器](https:\u002F\u002Fgithub.com\u002Fastral-sh\u002Fuv) 来安装 PDD，以获得更好的依赖项管理和自动环境配置：\n\n```bash\n# 如果尚未安装 uv\ncurl -LsSf https:\u002F\u002Fastral.sh\u002Fuv\u002Finstall.sh | sh\n\n# 使用 uv 工具安装 PDD\nuv tool install pdd-cli\n```\n\n此安装方法可确保：\n- 更快的安装速度和优化的依赖解析\n- 自动设置环境，无需手动配置\n- 正确处理 PDD_PATH 环境变量\n- 更好地与其他 Python 包隔离\n\n安装完成后，PDD CLI 将立即可用，无需任何额外的环境配置。\n\n验证安装：\n```bash\npdd --version\n```\n\n当 CLI 已添加到您的 `PATH` 后，继续执行：\n```bash\npdd setup\n```\n\n该命令会检测代理式 CLI 工具、扫描 API 密钥、配置模型，并初始化本地配置文件。如果您推迟此步骤，CLI 会在您首次运行其他命令时检测到缺少的设置文件，并显示提醒横幅，以便您稍后完成设置（一旦 `~\u002F.pdd\u002Fapi-env` 存在，或您的项目已通过 `.env` 或 `.pdd\u002F` 提供凭据，横幅将被隐藏）。\n\n### 替代方案：pip 安装\n\n如果您更喜欢使用 pip，可以使用以下命令安装 PDD：\n```bash\npip install pdd-cli\n```\n\n\n## 高级安装选项\n\n### 虚拟环境安装\n```bash\n# 
创建虚拟环境\npython -m venv pdd-env\n\n# 激活环境\n# 在 Windows 上：\npdd-env\\Scripts\\activate\n# 在 Unix\u002FMacOS 上：\nsource pdd-env\u002Fbin\u002Factivate\n\n# 安装 PDD\npip install pdd-cli\n```\n\n\n\n## 开始使用\n\n### 选项 1：Web 界面（推荐）\n\n使用 PDD 最简单的方式是通过 Web 界面：\n\n```bash\n# 1. 安装 PDD\ncurl -LsSf https:\u002F\u002Fastral.sh\u002Fuv\u002Finstall.sh | sh\nuv tool install pdd-cli\n\n# 2. 运行设置（API 密钥、Shell 补全）\npdd setup\n\n# 3. 启动 Web 界面\npdd connect\n```\n\n这将打开一个基于浏览器的界面，您可以在其中：\n- **运行命令**：直观地执行 `pdd change`、`pdd bug`、`pdd fix`、`pdd sync` 等命令\n- **浏览文件**：查看和编辑项目中的提示、代码和测试\n- **远程访问**：通过 PDD Cloud 从任何浏览器访问您的会话（使用 `--local-only` 可禁用）\n\n### 选项 2：基于问题的 CLI\n\n对于命令行爱好者，可以直接实现 GitHub 问题：\n\n**前置条件：**\n1. **GitHub CLI** - 访问问题所必需：\n   ```bash\n   brew install gh && gh auth login\n   ```\n\n2. **一种代理式 CLI** - 运行工作流所必需（至少安装一种）：\n   - **Claude Code**：`npm install -g @anthropic-ai\u002Fclaude-code`（需要 `ANTHROPIC_API_KEY`）\n   - **Gemini CLI**：`npm install -g @google\u002Fgemini-cli`（需要 `GOOGLE_API_KEY` 或 `GEMINI_API_KEY`）\n   - **Codex CLI**：`npm install -g @openai\u002Fcodex`（需要 `OPENAI_API_KEY`）\n\n**使用方法：**\n```bash\n# 实现功能请求\npdd change https:\u002F\u002Fgithub.com\u002Fowner\u002Frepo\u002Fissues\u002F123\n\n# 或修复 Bug\npdd bug https:\u002F\u002Fgithub.com\u002Fowner\u002Frepo\u002Fissues\u002F456\npdd fix https:\u002F\u002Fgithub.com\u002Fowner\u002Frepo\u002Fissues\u002F456\n```\n\n### 选项 3：手动提示工作流\n\n用于学习 PDD 基础知识或处理现有提示文件：\n\n```bash\ncd your-project\npdd sync module_name  # 完整自动化流程\n```\n\n请参阅下方的【快速入门 Hello 示例】，获取逐步介绍。\n\n---\n\n## 🚀 快速入门（Hello 示例）\n\n如果你想了解 PDD 的基本原理，请按照本手册示例来实际体验。\n\n1. **安装先决条件**（macOS\u002FLinux）：\n   ```bash\n   xcode-select --install      # 仅适用于 macOS\n   curl -LsSf https:\u002F\u002Fastral.sh\u002Fuv\u002Finstall.sh | sh\n   uv tool install pdd-cli\n   pdd --version\n   ```\n\n2. **克隆仓库**\n\n   ```bash\n     # 克隆仓库（如果尚未完成）\n    git clone https:\u002F\u002Fgithub.com\u002Fpromptdriven\u002Fpdd.git\n    cd pdd\u002Fexamples\u002Fhello\n   ```\n\n3. 
**设置一个 API 密钥**（选择你的提供商）：\n   ```bash\n   export GEMINI_API_KEY=\"your-gemini-key\"\n   # 或者\n   export OPENAI_API_KEY=\"your-openai-key\"\n   ```\n\n### 安装后设置（安装后的第一步必做）\n\n运行全面的设置向导：\n```bash\npdd setup\n```\n\n设置向导会执行以下步骤：\n  1. 检测代理型 CLI 工具（Claude、Gemini、Codex），并在需要时提供安装和 API 密钥配置\n  2. 在 `.env`、`~\u002F.pdd\u002Fapi-env.*` 和 shell 环境中扫描 API 密钥；如果没有找到任何密钥，则提示添加一个\n  3. 根据你可用的密钥，从参考 CSV 文件 `data\u002Fllm_model.csv` 中配置顶级模型（ELO ≥ 1400），涵盖所有 LiteLLM 支持的提供商\n  4. 可选地创建一个 `.pddrc` 项目配置文件\n  5. 使用第一个可用的模型进行一次真实的 LLM 调用测试\n  6. 打印一份结构化的摘要（CLI 工具、密钥、模型、测试结果）\n\n该向导可以随时重新运行，以更新密钥、添加提供商或重新配置设置。\n\n> **重要提示：** 设置完成后，请加载 API 环境文件，以便在当前终端会话中使您的密钥生效：\n> ```bash\n> source ~\u002F.pdd\u002Fapi-env.zsh   # 或 api-env.bash，取决于您的 shell\n> ```\n> 新的终端窗口将自动加载密钥。\n\n如果你跳过这一步，当你第一次运行常规的 pdd 命令时，系统会检测到缺少设置文件，并显示提醒横幅，以便你稍后完成初始设置。\n\n5. **运行 Hello**：\n   ```bash\n   cd ..\u002Fhello\n   pdd --force generate hello_python.prompt\n   python3 hello.py\n   ```\n\n    ✅ 预期输出：\n    ```\n    hello\n    ```\n\n\n\n## 云端与本地执行\n\nPDD 命令可以在云端或本地运行。默认情况下，所有命令都在云端模式下运行，这具有多项优势：\n\n- 无需在本地管理 API 密钥\n- 可访问更强大的模型\n- PDD 社区共享示例和改进\n- 自动更新和优化\n- 更好的成本优化\n\n### 云端认证\n\n在云端模式下运行时（默认），PDD 使用 GitHub 单点登录 (SSO) 进行认证。首次使用时，系统会提示您进行认证：\n\n1. PDD 将打开您的默认浏览器并跳转到 GitHub 登录页面\n2. 使用您的 GitHub 账户登录\n3. 授权 PDD Cloud 访问您的 GitHub 个人资料\n4. 认证成功后，您可以返回终端继续使用 PDD\n\n认证令牌会安全地存储在本地，并根据需要自动刷新。\n\n### 本地模式要求\n\n当使用 `--local` 标志在本地模式下运行时，您需要为语言模型设置 API 密钥：\n\n```bash\n# 对于 OpenAI\nexport OPENAI_API_KEY=your_api_key_here\n\n# 对于 Anthropic\nexport ANTHROPIC_API_KEY=your_api_key_here\n\n# 对于其他支持的提供商（LiteLLM 支持多个 LLM 提供商）\nexport PROVIDER_API_KEY=your_api_key_here\n```\n\n将这些添加到您的 `.bashrc`、`.zshrc` 或等效文件中，以确保持久性。\n\nPDD 的本地模式使用 LiteLLM（版本 1.75.5 或更高）与语言模型交互，提供：\n\n- 支持多个模型提供商（OpenAI、Anthropic、Google\u002FVertex AI 等）\n- 根据强度设置自动选择模型\n- 响应缓存以提高性能\n- 智能的 token 使用跟踪和成本估算\n- 当缺少密钥时，可交互式获取 API 密钥\n\n当密钥缺失时，PDD 会以交互方式提示您输入，并将其安全地存储在您的本地 `.env` 文件中。\n\n### 本地模型配置\n\nPDD 使用 CSV 文件来配置模型选择和功能。此配置从以下位置加载：\n\n1. 
用户特定配置：`~\u002F.pdd\u002Fllm_model.csv`（如果存在，则优先使用）\n2. 项目特定配置：`\u003CPROJECT_ROOT>\u002F.pdd\u002Fllm_model.csv`\n3. 包含的默认配置：随 PDD 安装一起提供（当没有本地配置时作为后备）\n\nCSV 文件包含以下列：\n- `provider`：LLM 提供商（例如，“openai”、“anthropic”、“google”）\n- `model`：LiteLLM 模型标识符（例如，“gpt-4”、“claude-3-opus-20240229”）\n- `input`\u002F`output`：每百万 tokens 的成本\n- `coding_arena_elo`：编码能力的 ELO 评分\n- `api_key`：所需 API 密钥的环境变量名称\n- `structured_output`：模型是否支持结构化 JSON 输出\n- `reasoning_type`：推理能力的支持程度（“无”、“预算”或“努力”）\n\n有关受支持的模型和示例行的具体且最新的参考信息，请参阅此仓库中捆绑的 CSV 文件：[pdd\u002Fdata\u002Fllm_model.csv](pdd\u002Fdata\u002Fllm_model.csv)。\n\n要获取用于自定义配置的正确模型标识符，请参考 [LiteLLM 模型列表](https:\u002F\u002Fdocs.litellm.ai\u002Fdocs\u002Fproviders) 文档。LiteLLM 通常使用 `provider\u002Fmodel_name` 格式的模型标识符（例如，“openai\u002Fgpt-4”、“anthropic\u002Fclaude-3-opus-20240229”）。\n\n## 常见安装问题排查\n\n1. **命令未找到**\n   ```bash\n   # 如有必要，添加到 PATH\n   export PATH=\"$HOME\u002F.local\u002Fbin:$PATH\"\n   ```\n\n2. **权限错误**\n   ```bash\n   # 使用用户权限安装\n   pip install --user pdd-cli\n   ```\n\n3. 
**macOS 特定问题**\n   - **未找到 Xcode 命令行工具**：运行 `xcode-select --install` 以安装所需的开发工具\n   - **未找到 Homebrew**：按照上述先决条件部分中的命令安装 Homebrew\n   - **未找到 Python 或版本不正确**：通过 Homebrew 安装 Python 3：`brew install python`\n   - **编译期间权限被拒绝**：确保 Xcode 命令行工具已正确安装，并且您对安装目录具有写入权限\n   - **uv 安装失败**：尝试通过 Homebrew 安装 uv：`brew install uv`\n   - **Python 版本冲突**：如果您有多个 Python 版本，请确保 `python3` 指向 Python 3.8+：`which python3 && python3 --version`\n\n## 版本\n\n当前版本：0.0.179\n\n要检查已安装的版本，请运行：\n```\npdd --version\n```\nPDD 包含自动更新功能，以确保您始终拥有最新的功能和安全补丁。您可以使用环境变量控制此行为（请参阅下方的“自动更新控制”部分）。\n\n## 支持的编程语言\n\nPDD 支持多种编程语言，包括但不限于：\n- Python\n- JavaScript\n- TypeScript\n- Java\n- C++\n- Ruby\n- Go\n\n具体使用的语言通常由提示文件的命名约定决定，或在命令选项中指定。\n\n## 提示文件命名规范\n\nPDD 中的提示文件遵循以下特定命名格式：\n```\n\u003Cbasename>_\u003Clanguage>.prompt\n```\n其中：\n- `\u003Cbasename>` 是文件或项目的基名\n- `\u003Clanguage>` 是提示文件所使用的编程语言或上下文\n\n示例：\n- `factorial_calculator_python.prompt`（基名：factorial_calculator，语言：python）\n- `responsive_layout_css.prompt`（基名：responsive_layout，语言：css）\n- `data_processing_pipeline_python.prompt`（基名：data_processing_pipeline，语言：python）\n\n## 提示驱动开发理念\n\n### 核心概念\n\n提示驱动开发（PDD）颠覆了传统的软件开发方式，将提示视为主要的工件——而非代码。这种范式转变具有深远的影响：\n\n1. **提示作为真理来源**：\n   在传统开发中，源代码是定义系统行为的根本依据。而在 PDD 中，提示才是权威性的，代码则是由提示生成的产物。\n\n2. **自然语言优先于代码**：\n   提示主要以自然语言编写，这使得非程序员也能更容易理解，并且更清晰地表达意图。\n\n3. **再生式开发**：\n   当需要进行更改时，只需修改提示并重新生成代码，而不是直接编辑代码。这样可以保持需求与实现之间的概念一致性。\n\n4. **意图保留**：\n   提示不仅捕捉代码的“是什么”，还记录了“为什么”这样做的原因——以一种注释通常难以做到的方式保留设计 rationale。\n\n### 思维模式\n\n为了有效地使用 PDD，需要做出以下思维转变：\n\n1. **提示优先思维**：\n   始终先用提示定义所需内容，然后再生成任何代码。\n\n2. **双向流动**：\n   - 提示 → 代码：主要方向（生成）\n   - 代码 → 提示：次要但关键的方向（保持提示与代码变更同步）\n\n3. **模块化提示**：\n   就像对代码进行模块化一样，也应将提示模块化为可组合的独立单元。\n\n4. **通过示例集成**：\n   模块之间通过其示例进行集成，这些示例充当接口，从而实现高效的标记引用。\n\n### PDD 工作流程：概念理解\n\nPDD 中的每个工作流程都针对一个基本的开发需求：\n\n1. **初始开发流程**\n   - **目的**：从零开始创建功能\n   - **概念流程**：定义依赖关系 → 生成实现 → 创建接口 → 确保运行时功能 → 验证正确性\n   \n   该流程体现了从提示到代码的完整流程，从概念到经过测试的实现。\n\n2. 
**代码到提示更新流程**\n   - **目的**：当代码发生变化时，保持提示作为真理来源\n   - **概念流程**：将代码变更同步到提示 → 识别影响 → 传播变更\n   \n   该流程确保信息从代码回流到提示，从而保持提示作为真理来源。\n\n3. **调试流程**\n   - **目的**：解决不同类型的问题\n   - **概念类型**：\n     - **上下文问题**：处理提示解释中的误解\n     - **运行时问题**：修复执行失败\n     - **逻辑问题**：纠正错误行为\n     - **可追溯性问题**：将代码问题追溯到提示部分\n   \n   这些流程认识到不同的错误需要不同的解决方法。\n\n4. **重构流程**\n   - **目的**：改善提示的组织结构和可重用性\n   - **概念流程**：提取功能 → 确保依赖关系 → 创建接口\n   \n   该流程与代码重构类似，但在提示层面进行操作。\n\n5. **多提示架构流程**\n   - **目的**：协调使用多个提示的系统\n   - **概念流程**：检测冲突 → 解决不兼容问题 → 重新生成代码 → 更新接口 → 验证系统\n   \n   该流程解决了管理多个相互依赖提示的复杂性。\n\n6. **增强阶段**：在现有模块中添加功能时，使用功能增强流程。\n\n### 工作流程选择原则\n\n工作流程的选择应根据当前的开发阶段来决定：\n\n1. **创建阶段**：构建新功能时，使用初始开发流程。\n\n2. **维护阶段**：现有代码发生变更时，使用代码到提示更新流程。\n\n3. **问题解决阶段**：根据问题类型选择合适的调试流程：\n   - 对于提示解释问题，使用预处理 → 生成流程。\n   - 对于运行时错误，使用崩溃流程。\n   - 对于逻辑错误，使用错误 → 修复流程。\n   - 对于定位问题提示部分，使用追踪流程。\n\n4. **重组阶段**：当提示变得过于庞大或复杂时，使用重构流程。\n\n5. **系统设计阶段**：在协调多个组件时，使用多提示架构流程。\n\n6. **增强阶段**：在现有模块中添加功能时，使用功能增强流程。\n\n### PDD 设计模式\n\n有效的 PDD 会采用以下常见模式：\n\n1. **通过自动依赖注入依赖项**：\n   自动将相关依赖项包含在提示中。\n\n2. **通过示例提取接口**：\n   创建最小化的参考实现以供重用。\n\n3. **双向可追溯性**：\n   维持提示部分与生成代码之间的联系。\n\n4. **测试驱动的提示修复**：\n   在修复问题时，使用测试来指导提示的改进。\n\n5. 
**分层提示组织**：\n   将提示从高层架构组织到详细的实现。\n\n## 基本用法\n\n```\npdd [全局选项] 命令 [选项] [参数]...\n```\n\n## 命令概览\n\n以下是 PDD 提供的主要命令的简要概述。单击命令名称即可跳转到其详细说明部分：\n\n### 命令关系\n\n下图展示了 PDD 命令之间的交互方式：\n\n```mermaid\ngraph TB\n    subgraph 入口点\n        connect[\"pdd connect（Web UI - 推荐）\"]\n        cli[\"直接 CLI\"]\n        ghapp[\"GitHub 应用\"]\n    end\n\n    gen_url[\"pdd generate \u003Curl>\"]\n\n    subgraph 同步工作流\n        sync[\"pdd sync\"]\n        s_deps[\"auto-deps\"]\n        s_gen[\"generate\"]\n        s_example[\"example\"]\n        s_crash[\"crash\"]\n        s_verify[\"verify\"]\n        s_test[\"test\"]\n        s_fix[\"fix\"]\n        s_update[\"update\"]\n    end\n\n    checkup[\"pdd checkup \u003Curl>\"]\n    test_url[\"pdd test \u003Curl>\"]\n    bug_url[\"pdd bug \u003Curl>\"]\n    fix_url[\"pdd fix \u003Curl>\"]\n    change[\"pdd change \u003Curl>\"]\n    sync_url[\"pdd sync \u003Curl>\"]\n\n    connect --> gen_url\n    cli --> gen_url\n    ghapp --> gen_url\n    gen_url --> sync\n    sync --> s_deps\n    s_deps --> s_gen\n    s_gen --> s_example\n    s_example --> s_crash\n    s_crash --> s_verify\n    s_verify --> s_test\n    s_test --> s_fix\n    s_fix --> s_update\n    sync --> checkup\n    checkup --> test_url\n    checkup --> bug_url\n    checkup --> change\n    test_url --> fix_url\n    bug_url --> fix_url\n    change --> sync_url\n    sync_url -.-> sync\n```\n\n**关键概念：**\n- **入口点**：`pdd connect`（Web 界面）、直接 CLI 或 GitHub 应用\n- **起点**：`pdd generate \u003Curl>` 会根据 PRD 的 GitHub 问题生成架构、提示和 `.pddrc` 文件。\n- **核心循环**：`pdd sync` 会对每个模块执行完整的自动依赖分析 → 生成 → 示例 → 故障重现 → 验证 → 测试 → 修复 → 更新 循环。\n- **健康检查**：`pdd checkup \u003Curl>` 可以识别接下来需要关注的内容。\n- **缺陷路径**：`test \u003Curl>` 或 `bug \u003Curl>` 会发现失败的测试 → `fix \u003Curl>` 则用于解决这些问题。\n- **功能路径**：`change \u003Curl>` 用于实现功能需求 → `sync \u003Curl>` 会在受影响的模块中重新运行同步。\n\n### 开始使用\n- **[`connect`](#18-connect)**：**[推荐]** 启动 Web 界面，进行可视化的 PDD 操作。\n- **[`setup`](#post-installation-setup-required-first-step-after-installation)**：配置 API 密钥和 
Shell 补全功能。\n\n### 代理式命令（基于问题驱动）\n- **[`change`](#8-change)**：根据 GitHub 问题实现功能请求（12 步流程）。\n- **[`bug`](#14-bug)**：分析 Bug 并从 GitHub 问题中创建失败测试。\n- **[`checkup`](#17-checkup)**：根据 GitHub 问题运行自动化项目健康检查（8 步流程）。\n- **[`fix`](#6-fix)**：修复失败测试（支持基于问题和手动模式）。\n- **[`sync`](#1-sync)**：根据 GitHub 问题进行多模块并行同步（当传入 URL 而不是基础名时）。\n- **[`test`](#4-test)**：根据 GitHub 问题生成 UI 测试（代理模式下为 18 步流程）。\n\n### 核心命令（基于提示）\n- **[`sync`](#1-sync)**：**[提示工作流的主要命令]** 自动化提示到代码的循环。\n- **[`generate`](#2-generate)**：根据提示文件生成可运行的代码；支持通过 `-e\u002F--env` 使用参数化提示。\n- **[`example`](#3-example)**：生成一个简洁示例，展示如何使用提示中定义的功能。\n- **[`test`](#4-test)**：为代码文件及其提示生成或增强单元测试。\n- **[`update`](#9-update)**：根据修改后的代码更新原始提示文件。\n- **[`verify`](#16-verify)**：通过运行程序并判断输出是否符合预期来验证功能正确性。\n- **[`crash`](#12-crash)**：修复导致崩溃的代码模块及其调用程序中的错误。\n\n### 提示管理\n- **[`preprocess`](#5-preprocess)**：预处理提示文件，处理包含、注释和其他指令。\n- **[`split`](#7-split)**：将大型提示文件拆分为更小、更易管理的部分。\n- **[`extracts prune`](#21-extracts)**：清理孤立的提取缓存条目。\n- **[`auto-deps`](#15-auto-deps)**：分析并插入提示文件所需的依赖项。\n- **[`detect`](#10-detect)**：分析提示，根据描述确定哪些提示需要更改。\n- **[`conflicts`](#11-conflicts)**：查找两个提示文件之间的冲突，并提出解决方案。\n- **[`trace`](#13-trace)**：根据给定的代码行号找到提示文件中对应的行数。\n\n### 实用命令\n- **[`auth`](#19-auth)**：管理与 PDD Cloud 的认证。\n- **[`sessions`](#20-pdd-sessions---manage-remote-sessions)**：管理 `connect` 的远程会话。\n\n### 用户故事提示测试\nPDD 可以根据存储为 Markdown 文件的用户故事来验证提示变更。这背后使用了 `detect` 功能：当 `detect` 返回无需更改提示时，该故事即**通过**。\n\n默认设置：\n- 故事文件位于 `user_stories\u002F` 目录下，文件名匹配 `story__*.md`。\n- 提示文件从 `prompts\u002F` 目录加载（默认排除 `*_llm.prompt` 文件）。\n\n可覆盖的设置：\n- `PDD_USER_STORIES_DIR` 用于设置故事目录。\n- `PDD_PROMPTS_DIR` 用于设置提示目录。\n\n相关命令：\n- `pdd detect --stories` 运行验证套件。\n- `pdd change` 在提示修改后运行故事验证，若有任何故事未通过，则操作失败。\n- `pdd fix user_stories\u002Fstory__*.md` 将单个故事应用到提示上并重新验证。\n- `pdd test \u003Cprompt_1.prompt> [prompt_2.prompt ...]` 会生成一个 `story__*.md` 文件，并将这些提示链接起来。\n- `pdd test user_stories\u002Fstory__*.md` 会更新现有故事文件的提示链接。\n\n故事提示关联：\n- 故事可以包含可选元数据，以限定验证范围到部分提示：\n  `\u003C!-- pdd-story-prompts: 
prompts\u002Fa_python.prompt, prompts\u002Fb_python.prompt -->`\n- 如果缺少元数据，`pdd detect --stories` 会针对全部提示集进行验证。\n- 在 `--stories` 模式下，当 `detect` 找到受影响的提示时，PDD 会将链接缓存回故事元数据中，以便未来进行确定性运行。\n\n模板：\n- 可参考 `user_stories\u002Fstory__template.md` 作为起始格式。\n\n## 全局选项\n\n这些选项可与任何命令一起使用：\n\n- `--force`：跳过所有交互式提示（文件覆盖、API密钥请求）。适用于 CI\u002F自动化。\n- `--strength FLOAT`：设置 AI 模型的强度（0.0 到 1.0，默认为 0.5）。\n  - 0.0：最经济实惠的模型\n  - 0.5：默认的基础模型\n  - 1.0：功能最强的模型（ELO 评分最高）\n- `--time FLOAT`：控制支持推理能力的 LLM 模型的推理资源分配（0.0 到 1.0，默认为 0.25）。\n  - 对于具有特定推理令牌限制的模型（如 64k），值为 `1.0` 时将使用最大可用令牌数。\n  - 对于具有离散努力级别的模型，`1.0` 对应最高努力级别。\n  - 值介于 0.0 和 1.0 之间时，按比例缩放资源分配。\n- `--temperature FLOAT`：设置 AI 模型的温度（默认为 0.0）。\n- `--verbose`：增加输出详细程度以获取更详尽的信息。包括每次 LLM 调用的 token 数量和上下文窗口使用情况。\n- `--quiet`：减少输出详细程度以获得最少信息。\n- `--output-cost PATH_TO_CSV_FILE`：启用成本跟踪，并输出包含使用详情的 CSV 文件。\n- `--review-examples`：在执行命令前查看并选择性排除少样本示例。\n- `--local`：在本地而非云端运行命令。\n- `--core-dump`：捕获本次运行的调试包，以便后续回放和分析。\n- `report-core`：通过创建带有核心转储文件的 GitHub 问题来报告错误。\n- `--context CONTEXT_NAME`：覆盖自动上下文检测，使用 `.pddrc` 中指定的上下文。\n- `--list-contexts`：列出 `.pddrc` 中定义的所有可用上下文并退出。\n\n### 核心转储调试包\n\n如果出现问题且希望 PDD 团队能够重现，您可以在启用核心转储的情况下运行任何命令：\n\n```bash\npdd --core-dump sync factorial_calculator\npdd --core-dump crash prompts\u002Fcalc_python.prompt src\u002Fcalc.py examples\u002Frun_calc.py crash_errors.log\n```\n\n当设置 `--core-dump` 时，PDD：\n\n- 捕获完整的 CLI 命令及其参数\n- 记录该次运行的相关日志和内部跟踪信息\n- 将提示、生成的代码以及重现问题所需的关键元数据打包在一起\n\n运行结束时，PDD 会打印核心转储包的路径。在打开 GitHub 问题或发送错误报告时，请附上该包，以便维护人员能够快速重现并诊断您的问题。\n\n#### `report-core` 命令\n\n`report-core` 命令可通过创建带有核心转储文件的 GitHub 问题来帮助您报告错误。它通过自动收集相关文件和信息简化了报告流程。\n\n**用法：**\n```bash\npdd report-core [OPTIONS] [CORE_FILE]\n```\n\n**参数：**\n- `CORE_FILE`：核心转储文件的路径（例如 `.pdd\u002Fcore_dumps\u002Fpdd-core-....json`）。若省略，则使用最近一次的核心转储。\n\n**选项：**\n- `--api`：直接通过 GitHub API 创建问题，而不是在浏览器中打开。这可以自动为附加文件创建 Gist。\n- `--repo OWNER\u002FREPO`：覆盖目标仓库（默认为 `promptdriven\u002Fpdd`）。\n- `--description`, `-d TEXT`：简要描述发生的问题。\n\n**身份验证：**\n\n要使用 `--api` 标志，您需要通过 
GitHub 身份验证。PDD 按照以下顺序检查凭据：\n\n1. **GitHub CLI**：`gh auth token`（推荐）\n2. **环境变量**：`GITHUB_TOKEN` 或 `GH_TOKEN`\n3. **旧版**：`PDD_GITHUB_TOKEN`\n\n**文件跟踪与 Gists：**\n\n使用 `--api` 时，PDD 将：\n1. 收集所有相关文件（提示、代码、测试、配置、元文件）。\n2. 创建一个包含这些文件的**私有 GitHub Gist**。\n3. 在创建的问题中链接该 Gist。\n\n这样可以确保调试时拥有所有必要上下文，同时保持问题正文简洁。如果您不使用 `--api`，文件将被截断以适应基于浏览器提交的 URL 长度限制。\n\n---\n\n### 上下文选择标志\n\n- `--list-contexts` 会读取最近的 `.pddrc`（从当前目录向上搜索），逐行打印可用上下文，并立即以状态码 0 退出。当此标志存在时，不会运行自动更新检查或子命令。\n- `--context CONTEXT_NAME` 会在同一 `.pddrc` 来源中提前验证。如果名称未知，CLI 将抛出 `UsageError` 并以代码 2 退出，然后再运行自动更新或子命令。\n- 配置优先级为：CLI 选项 > `.pddrc` 上下文 > 环境变量 > 默认值。详情请参阅“配置”部分。\n\n## 自动更新控制\n\nPDD 会自动更新自身，以确保您拥有最新的功能和安全补丁。不过，您可以通过 `PDD_AUTO_UPDATE` 环境变量控制此行为：\n\n```bash\n# 禁用自动更新\nexport PDD_AUTO_UPDATE=false\n\n# 启用自动更新（默认行为）\nexport PDD_AUTO_UPDATE=true\n```\n\n对于持久化设置，可将此环境变量添加到您的 shell 配置文件中（例如 `.bashrc` 或 `.zshrc`）。\n\n这在以下情况下特别有用：\n- 生产环境中，版本稳定性至关重要\n- CI\u002FCD 流水线中，需要一致的行为\n- 对版本敏感的项目，需要特定的 PDD 版本\n\n## AI 模型信息\n\nPDD 使用大型语言模型来生成和操作代码。`--strength` 和 `--temperature` 选项允许您控制模型的输出：\n\n- 强度：决定应使用多强大\u002F昂贵的模型。较高值（接近 1.0）会选择性能更好、功能更强的模型（根据 ELO 评分筛选），而较低值（接近 0.0）则会选择更具成本效益的模型。\n- 温度：控制输出的随机性。较高值会增加多样性，但可能导致结果不够连贯；较低值则会产生更专注、确定性的输出。\n- 时间：（可选，由 `--time FLOAT` 控制）对于支持推理的模型，此选项可在最小值（0.0）和最大值（1.0）之间缩放分配的推理资源（例如令牌或努力级别），默认为 0.25。\n\n在本地模式下运行时，PDD 使用 LiteLLM 根据包含以下内容的配置文件选择和与语言模型交互：\n- 每百万 tokens 的输入和输出成本\n- 编码能力的 ELO 评分\n- 所需的 API 密钥环境变量\n- 结构化输出能力标志\n- 推理能力（基于预算或努力）\n\n## 输出成本跟踪\n\nPDD 包含一项用于跟踪和报告操作成本的功能。启用后，它会为每次命令执行生成包含使用详情的 CSV 文件。\n\n### 使用方法\n\n要启用成本跟踪，请在任何命令中使用 `--output-cost` 选项：\n\n```\npdd --output-cost PATH_TO_CSV_FILE [COMMAND] [OPTIONS] [ARGS]...\n```\n\n`PATH_TO_CSV_FILE` 应为 CSV 输出的期望位置和文件名。\n\n### 成本计算与展示\n\nPDD 根据每次操作中 AI 模型的使用情况来计算成本。成本以美元（USD）为单位，并通过以下因素进行计算：\n\n1. 模型强度：较高的强度设置通常会导致更高的成本。\n2. 输入大小：较大的输入（例如较长的提示或代码文件）通常会产生更高的成本。\n3. 
操作复杂性：某些操作（如包含多次迭代的 `fix` 和 `crash`）可能比简单操作更昂贵。\n\n每项操作的具体成本由 LiteLLM 集成根据提供商当前的定价模型确定。PDD 使用内部定价表，该表会定期更新以反映最新的费率。\n\n### CSV 输出\n\n生成的 CSV 文件包含以下列：\n- timestamp：命令执行的日期和时间\n- model：用于该操作的 AI 模型\n- command：已执行的 PDD 命令\n- cost：以美元计的操作估算成本（例如 0.05 表示 5 美分）。对于本地模型或不使用 LLM 的操作，此值将为零。\n- input_files：参与该操作的输入文件列表\n- output_files：由该操作生成或修改的输出文件列表\n\n这一全面的输出不仅能够详细跟踪操作的成本和类型，还能记录每次 PDD 命令执行中涉及的具体文件。\n\n### 环境变量\n\n您可以通过环境变量设置成本输出 CSV 文件的默认位置：\n\n- **`PDD_OUTPUT_COST_PATH`**：成本跟踪 CSV 文件的默认路径。\n\n如果设置了此环境变量，则 CSV 文件将默认保存到指定路径，除非被 `--output-cost` 选项覆盖。例如，如果 `PDD_OUTPUT_COST_PATH=\u002Fpath\u002Fto\u002Fcost\u002Freports\u002F`，则 CSV 文件将保存到该目录，并使用默认文件名。\n\n### 成本预算\n\n对于支持该功能的命令（如 `fix` 命令），您可以使用 `--budget` 选项设置最大预算。这有助于防止意外的高额费用，尤其是在可能涉及多次 AI 模型调用的操作中。\n\n示例：\n```\npdd [GLOBAL OPTIONS] fix --budget 5.0 [OTHER OPTIONS] [ARGS]...\n```\n这将为 `fix` 操作设置 5.00 美元的最大预算。\n\n## 命令\n\n以下是 PDD 提供的主要命令：\n\n### 1. sync\n\n**[主要命令]** 自动为给定的 basename 执行完整的 PDD 工作流循环。该命令实现了白皮书中的整个同步周期，能够智能地确定所需的步骤，并按正确的顺序执行，同时提供实时可视化反馈和复杂的状态管理。\n\n```bash\n# 单模块同步\npdd [GLOBAL OPTIONS] sync [OPTIONS] BASENAME\n\n# 从 GitHub 问题同步代理式多模块\npdd [全局选项] sync [选项] GITHUB_ISSUE_URL\n```\n\n重要提示：同步操作会频繁覆盖生成的文件，以保持输出内容为最新状态。在大多数实际运行中，请使用全局 `--force` 标志来允许覆盖而无需交互式确认：\n\n```\npdd --force sync BASENAME\n```\n\n参数：\n- `BASENAME`：提示文件的基础名称（例如，“factorial_calculator_python.prompt”对应“factorial_calculator”）\n\n选项：\n- `--max-attempts INT`：任何迭代循环中的最大修复尝试次数（默认值为3）\n- `--budget FLOAT`：整个同步过程允许的最大总成本（默认值为20.0美元）\n- `--skip-verify`：跳过功能验证步骤\n- `--skip-tests`：跳过单元测试的生成和修复\n- `--target-coverage FLOAT`：期望的代码覆盖率百分比（默认值为90.0）\n- `--dry-run`：显示该基础名称的实时同步分析，而不执行同步操作。这会执行与正常同步运行相同的状况分析，但不会获取独占锁或执行任何操作，因此即使有其他同步进程正在运行，也可以进行检查。\n- `--no-steer`：禁用同步操作的交互式引导\n- `--steer-timeout FLOAT`：引导提示的超时时间（默认值为8.0秒）。\n\n**实时进度动画**：\nsync 命令提供实时可视化反馈，显示：\n- 当前正在执行的操作（auto-deps、generate、example、crash、verify、test、fix、update）\n- 文件状态指示器采用颜色编码：\n  - 绿色：文件存在且为最新\n  - 黄色：文件正在处理中\n  - 红色：文件存在错误或缺失\n  - 蓝色：文件正在分析中\n- 运行中的总成本和已用时间\n- 工作流步骤的进度\n\n**语言检测**：\nsync 
命令会自动检测编程语言，方法是扫描 prompts 目录中是否存在符合 `{basename}_{language}.prompt` 模式的现有提示文件。例如：\n- `factorial_calculator_python.prompt` → 生成 `factorial_calculator.py`\n- `factorial_calculator_typescript.prompt` → 生成 `factorial_calculator.ts`\n- `factorial_calculator_javascript.prompt` → 生成 `factorial_calculator.js`\n\n如果对于同一个 basename 存在多个开发语言的提示文件，sync 将会同时处理所有这些文件。\n\n**语言过滤**：sync 命令仅处理开发语言（python、javascript、typescript、java、cpp 等），并排除运行时语言（LLM）。以 `_llm.prompt` 结尾的文件仅供内部处理使用，不能构成有效的开发单元，因为它们缺少同步工作流所需的关联代码、示例和测试。\n\n**高级配置集成**：\n- **自动上下文检测**：检测项目结构并应用来自 `.pddrc` 的适当设置\n- **配置层级**：CLI 选项 > .pddrc 上下文 > 环境变量 > 默认值\n- **多语言支持**：自动处理一个 basename 的所有语言变体\n- **智能路径解析**：针对复杂的项目结构使用先进的目录管理技术\n- 上下文相关的设置包括输出路径、默认语言、模型参数、覆盖率目标和预算等。\n\n**工作流逻辑**：\n\nsync 命令会自动检测哪些文件已经存在，并执行相应的工作流程：\n\n1. **auto-deps**：查找并将相关依赖注入到提示中——包括代码示例和文档文件（schema 文档、API 文档等）。移除重复包含文档的冗余内联内容。\n2. **generate**：根据提示创建或更新代码模块\n3. **example**：如果使用示例不存在或已过时，则生成新的使用示例\n4. **crash**：修复任何运行时错误，使代码可执行\n5. **verify**：对照提示意图运行功能验证（除非使用了 --skip-verify 选项）\n6. **test**：如果单元测试不存在，则生成全面的单元测试（除非使用了 --skip-tests 选项）。认证模块会获得特定于认证的测试模式（模拟 OAuth 服务器、JWT 固定装置、令牌生命周期测试等）\n7. **fix**：解决单元测试中发现的所有缺陷\n8. 
**update**：将学习到的经验回传到提示文件中\n\n**高级决策机制**：\n- **基于指纹的变更检测**：使用内容哈希值和时间戳精确检测哪些内容发生了变化\n- **LLM 驱动的冲突解决**：对于涉及多个文件更改的复杂场景，使用 AI 来确定最佳处理方案\n- **持久化状态跟踪**：维护同步历史并从以往操作中学习\n- **智能锁管理**：防止并发同步操作，并自动清理过期锁\n- 检测哪些文件已经存在且为最新\n- 跳过不必要的步骤（例如，如果提示未发生变化，则不会重新生成代码）\n- 使用 git 集成来检测更改，并确定是增量再生还是完全再生\n- 长期累积测试结果，而不是替换原有测试（每个目标保留一个测试文件）\n- 自动处理各步骤之间的依赖关系\n\n**强大的状态管理**：\n- **指纹文件**：维护 `.pdd\u002Fmeta\u002F{basename}_{language}.json` 文件，记录操作历史\n- **运行报告**：跟踪测试结果、覆盖率和执行状态\n- **锁管理**：通过基于文件描述符的锁定机制防止竞态条件\n- **Git 集成**：利用版本控制进行变更检测和安全回滚\n\n**`.pdd` 目录**：\nPDD 在您的项目根目录下使用 `.pdd` 目录来存储各种元数据和配置文件：\n- `.pdd\u002Fmeta\u002F`：包含指纹文件、运行报告和同步日志\n- `.pdd\u002Flocks\u002F`：存储用于防止并发操作的锁文件\n- `.pdd\u002Fllm_model.csv`：项目专用的 LLM 模型配置文件（可选）\n\n通常应将此目录添加到版本控制系统中（锁文件除外），因为它包含了重要的项目状态信息。\n\n**环境变量**：\n所有现有的 PDD 输出路径环境变量都会被尊重，从而使 sync 命令能够将文件保存到适合您项目结构的位置。\n\n**同步状态分析**：\nsync 命令会维护详细的决策日志，您可以使用 `--dry-run` 选项查看这些日志：\n\n```bash\n# 查看当前同步状态分析（非阻塞）\npdd sync --dry-run calculator\n\n# 查看复杂场景下的详细 LLM 推理\npdd --verbose sync --dry-run calculator\n```\n\n**分析内容包括**：\n- 当前文件状态与指纹比较\n- 实时决策推理（基于启发式方法与 LLM 驱动的分析）\n- 带置信度的操作建议\n- 建议操作的预估成本\n- 锁定状态及潜在冲突\n- 状态管理详情\n\n`--dry-run` 选项会实时分析当前项目状态，因此即使其他同步操作正在进行中，也可以安全运行。这与查看历史日志不同——它显示的是根据当前文件状态，同步操作此刻将做出的决策。\n\n使用 `--verbose` 结合 `--dry-run` 可以查看复杂多文件变更场景及高级状态分析中的详细 LLM 推理。\n\n**何时使用**：这是大多数 PDD 工作流的推荐起点。当您希望确保所有工件（代码、示例、测试）都与提示文件保持最新和同步时，可以使用同步命令。该命令体现了 PDD 的理念，即将工作流视为一个批处理过程，开发者可以启动后稍后再回来，从而无需持续监控。\n\n示例：\n```bash\n# 完整工作流，带有进度动画和智能决策\npdd --force sync factorial_calculator\n\n# 高级同步，预算更高、自定义覆盖率，并提供完整可视化反馈\npdd --force sync --budget 15.0 --target-coverage 95.0 data_processor\n\n# 快速同步，带实时状态更新的动画\npdd --force sync --skip-verify --budget 5.0 web_scraper\n\n# 多语言同步，基于指纹的变更检测\npdd --force sync multi_language_module\n\n# 查看全面的同步分析及决策分析\npdd sync --dry-run factorial_calculator\n\n# 查看复杂冲突解决的详细同步分析及 LLM 推理\npdd --verbose sync --dry-run factorial_calculator\n\n# 监控同步在不执行的情况下会做什么（包含状态分析）\npdd sync --dry-run calculator\n\n# 上下文感知示例，自动检测配置\ncd backend && pdd --force sync calculator 
    # 使用后端上下文设置并显示动画\ncd frontend && pdd --force sync dashboard     # 使用前端上下文并提供实时反馈\npdd --context backend --force sync calculator # 显式覆盖上下文并显示进度\n```\n\n**代理式多模块同步（GitHub 问题模式）**：\n\n当传入 GitHub 问题 URL 而不是基础名时，同步会进入代理模式：\n1. **模块识别**：获取问题内容，并使用 LLM 识别需要同步的模块。\n2. **依赖验证**：验证 architecture.json 中的依赖关系，必要时进行修正。\n3. **并行执行**：通过 `AsyncSyncRunner` 派遣并行同步任务，采用依赖感知调度（最多 4 个并发工作者）。\n4. **实时进度**：发布并更新 GitHub 评论，显示模块同步的实时状态。\n\n```bash\n# 根据 GitHub 问题识别的模块进行同步（并行且依赖感知）\npdd sync https:\u002F\u002Fgithub.com\u002Fmyorg\u002Fmyrepo\u002Fissues\u002F100\n\n# 为大型模块增加额外超时时间\npdd sync --timeout-adder 60 https:\u002F\u002Fgithub.com\u002Fmyorg\u002Fmyrepo\u002Fissues\u002F100\n```\n\n选项（代理模式）：\n- `--timeout-adder FLOAT`：为每个模块的超时时间增加额外秒数（默认：0.0）。\n- `--no-github-state`：禁用 GitHub 状态持久化，仅使用本地状态。\n\n**跨机器恢复**：工作流状态存储在隐藏的 GitHub 评论中，因此可以在任何机器上恢复。使用 `--no-github-state` 可禁用此功能。\n\n### 2. generate\n\n从提示文件生成可运行代码。该命令会生成满足提示中所有要求的完整实现代码。当检测到当前提示与其上次提交版本之间存在变更时，它可以自动执行增量更新，而不是完全重新生成。\n\n```bash\n# 基本用法\npdd [GLOBAL OPTIONS] generate [OPTIONS] PROMPT_FILE\n```\n\n参数：\n- `PROMPT_FILE`：用于生成代码的提示文件名。\n\n选项：\n- `--output LOCATION`：指定生成代码的保存位置。支持从 `-e\u002F--env` 中展开 `${VAR}` 或 `$VAR`。默认文件名为 `\u003Cbasename>.\u003Clanguage_file_extension>`。如果设置了环境变量 `PDD_GENERATE_OUTPUT_PATH`，则文件将保存在该路径下，除非被此选项覆盖。\n- `--original-prompt FILENAME`：用于生成现有代码的原始提示文件。若未指定，命令将自动使用 git 中提示文件的最新提交版本。\n- `--incremental`：即使变更显著，也强制执行增量补丁。此选项仅在指定了输出位置且文件已存在时有效。\n- `--unit-test FILENAME`：单元测试文件的路径。如果提供，则会禁用自动测试发现功能，仅将该文件的内容纳入提示中，指示模型生成能够通过指定测试的代码。\n- `--exclude-tests`：不自动包含默认测试目录中的测试文件。\n\n**参数变量 (-e\u002F--env)**：\n传递 key=value 对来参数化提示，以便通过多次调用 `generate` 并使用不同值，使一个提示生成多个变体（例如多个文件）。\n\n- 语法：`-e KEY=VALUE` 或 `--env KEY=VALUE`（可重复）。\n- Docker 风格的环境回退：`-e KEY` 会从当前进程的环境变量 `KEY` 中读取 `VALUE`。\n- 作用范围：适用于 `generate`。\n- 优先级：通过 `-e\u002F--env` 传递的值在此命令的模板展开过程中会覆盖同名的 OS 环境变量。\n\n**模板化**：\n提示文件和 `--output` 值可能使用 `$VAR` 或 `${VAR}` 引用变量。只有通过 `-e\u002F--env`（或通过 `-e KEY` 的环境回退）显式提供的变量会被替换；其他所有以美元符号开头的文本都将保持原样。普通 `$` 的使用无需转义。\n\n- 
在提示内容中：只有当提供了 `VAR` 时，`$VAR` 和 `${VAR}` 才会被替换。\n- 在输出路径中：使用 `--output` 时，PDD 也会使用相同的变量集展开 `$VAR`\u002F`${VAR}`。\n- 未知项：未提供值的占位符将保持不变。如果您传递了 `-e KEY`（无值），而 OS 环境中存在 `KEY`，则会使用该环境值。\n\n示例：\n```\n# 基本参数化生成（Python 模块）\npdd generate -e MODULE=orders --output 'src\u002F${MODULE}.py' prompts\u002Fmodule_python.prompt\n\n# 从同一提示生成多个文件\npdd generate -e MODULE=orders   --output 'src\u002F${MODULE}.py' prompts\u002Fmodule_python.prompt\npdd generate -e MODULE=payments --output 'src\u002F${MODULE}.py' prompts\u002Fmodule_python.prompt\npdd generate -e MODULE=customers --output 'src\u002F${MODULE}.py' prompts\u002Fmodule_python.prompt\n\n# 多个变量\npdd generate -e MODULE=orders -e PACKAGE=core --output 'src\u002F${PACKAGE}\u002F${MODULE}.py' prompts\u002Fmodule_python.prompt\n\n# Docker 风格的环境变量回退（从你的 shell 环境中读取 MODULE）\nexport MODULE=orders\npdd generate -e MODULE --output 'src\u002F${MODULE}.py' prompts\u002Fmodule_python.prompt\n```\n\nShell 引号选项：\n- 如果值包含空格或 shell 特殊字符，请使用引号包裹 `KEY=VALUE`：`-e \"DISPLAY_NAME=Order Processor\"`。\n- PDD 侧展开（可移植）：防止 shell 展开，让 PDD 使用 `-e\u002F--env` 来展开——例如，`--output 'src\u002F${MODULE}.py'`。\n- Shell 侧展开（熟悉）：设置一个环境变量，让 shell 展开 `--output` 参数，同时仍然传递 `-e KEY`，以便提示词获取相同的值——例如：\n  - `export MODULE=orders && pdd generate -e MODULE --output \"src\u002F$MODULE.py\" prompts\u002Fmodule_python.prompt`\n  - 或者在 POSIX shell 中直接内联：`MODULE=orders pdd generate -e MODULE --output \"src\u002F$MODULE.py\" prompts\u002Fmodule_python.prompt`\n  - 注意：PowerShell\u002FWindows shell 不同；PDD 侧展开在不同 shell 之间更具可移植性。\n\n**Git 集成**：\n- 当命令检测到当前提示词与其上次提交版本之间存在变化时，如果输出文件已存在，则会自动考虑增量生成。\n- 如果执行增量生成，当前提示词和代码文件都会通过 `git add` 被暂存（如果尚未提交\u002F添加），以确保在需要时可以回滚。\n- 对于新文件（即不存在要更新的输出文件时）或现有输出文件被删除时，始终会进行完全再生。\n\n**何时使用**：当您从头开始实现新功能或根据提示词的变化更新现有代码时，可以选择此命令。该命令会自动检测变化，并根据变化的重要程度决定是采用增量补丁还是完全再生。\n\n示例：\n```\n# 基本生成，带有基于 Git 的自动变更检测\n# （如果输出文件存在则为增量生成，否则为完全生成）\npdd [全局选项] generate --output src\u002Fcalculator.py calculator_python.prompt \n\n# 强制增量补丁（要求输出文件已存在）\npdd [全局选项] generate 
--incremental --output src\u002Fcalculator.py calculator_python.prompt\n\n# 强制完全再生（只需先删除输出文件）\nrm src\u002Fcalculator.py  # 删除文件\npdd [全局选项] generate --output src\u002Fcalculator.py calculator_python.prompt\n\n# 指定不同的原始提示词（绕过 Git 检测）\npdd [全局选项] generate --output src\u002Fcalculator.py  --original-prompt old_calculator_python.prompt calculator_python.prompt\n```\n\n**代理架构模式**：\n\n当位置参数是一个 GitHub 问题 URL 而不是提示词文件时，`generate` 命令会进入代理架构模式。问题正文将作为 PRD（产品需求文档），并通过一个 11 步的代理工作流自动生成 `architecture.json`、`.pddrc` 文件以及提示词文件。\n\n```bash\npdd generate https:\u002F\u002Fgithub.com\u002Fowner\u002Frepo\u002Fissues\u002F42\n```\n\n11 步工作流：\n\n**分析与生成（步骤 1–8）**：\n1. **分析 PRD**：从问题内容中提取功能、技术栈和需求\n2. **深度分析**：功能分解、模块边界、共享关注点\n3. **研究**：针对技术栈的文档和最佳实践进行网络搜索\n4. **设计**：模块拆分，包括依赖关系图和优先级排序（认证模块会被分离出来，作为独立的关注点，赋予较低的优先级编号）\n5. **研究依赖项**：为每个模块查找相关的 API 文档和代码示例\n6. **生成**：生成完整的 `architecture.json` 和脚手架文件\n7. **生成 .pddrc**：创建包含上下文特定路径的项目配置文件\n8. **生成提示词**：为 `architecture.json` 中的每个模块创建提示词文件\n\n**验证（步骤 9–11）**：\n9. **完整性验证**：检查所有模块是否都有提示词和依赖关系\n10. **同步验证**：对每个模块运行 `pdd sync --dry-run` 以发现路径问题\n11. 
**依赖关系验证**：预处理提示词，以验证 `\u003Cinclude>` 标签是否能正确解析\n\n每个验证步骤最多会重试 3 次，并在继续下一步之前自动修复问题。\n\n**选项**：\n- `--skip-prompts`：跳过提示词文件生成（步骤 8–11），仅生成 `architecture.json` 和 `.pddrc`\n\n前提条件：\n- 必须安装并认证 `gh` CLI\n- 问题中必须包含描述项目范围的 PRD\n\n**工作流恢复**：再次运行 `pdd generate \u003Cissue-url>` 将从上一次完成的步骤继续。状态会持久化到 GitHub 问题评论中，以便在不同机器上恢复。\n\n**硬性停止**：如果 PRD 内容不足、技术栈不明确或需要进一步澄清，工作流将会停止。请解决相关问题后重新运行。\n\n示例：\n```bash\npdd generate https:\u002F\u002Fgithub.com\u002Fmyorg\u002Fmyrepo\u002Fissues\u002F42\n# 生成：architecture.json、architecture_diagram.html、.pddrc、prompts\u002F*.prompt\n\n# 跳过提示词生成（更快，仅生成架构）\npdd generate --skip-prompts https:\u002F\u002Fgithub.com\u002Fmyorg\u002Fmyrepo\u002Fissues\u002F42\n# 生成：architecture.json、architecture_diagram.html、.pddrc\n```\n\n#### 提示词模板\n\n模板是可重用的提示词文件，用于生成特定的工件（代码、JSON、测试等）。模板在 YAML 前言部分包含人类\u002FCLI 元数据（由 CLI 解析，不会发送给 LLM），而主体部分则保持简洁且专注于模型。\n\n- 前言（人类\u002FCLI）：\n  - 名称、描述、版本、标签、语言、输出\n  - 变量：`-e\u002F--env` 的模式（必填\u002F可选、类型、示例）\n  - 使用方法：可复制的 `pdd generate` 命令\n  - 发现（可选）：CLI 执行的文件发现（根目录、模式、排除、大小写）\n  - 输出模式（可选）：CLI 用于验证以及 `pdd templates show` 使用的 JSON 结构\n- 提示词主体（LLM）：\n  - 包含用于填充上下文的指令：`\u003Cinclude>${VAR}\u003C\u002Finclude>`、`\u003Cinclude-many>${LIST}\u003C\u002Finclude-many>`\n  - 清晰的指示和明确的输出契约；不含人类使用说明或发现逻辑\n\n快速示例（模板）\n\n```\n# 最简（需提供 PRD）\npdd generate -e PRD_FILE=docs\u002Fspecs.md --output architecture.json \\\n  pdd\u002Ftemplates\u002Farchitecture\u002Farchitecture_json.prompt\n\n# 带有额外上下文\npdd generate -e PRD_FILE=docs\u002Fspecs.md -e TECH_STACK_FILE=docs\u002Ftech_stack.md \\\n  -e DOC_FILES='docs\u002Fux.md,docs\u002Fcomponents.md' \\\n  -e INCLUDE_FILES='src\u002Fapp.py,src\u002Fapi.py,frontend\u002Fapp\u002Flayout.tsx' \\\n  --output architecture.json pdd\u002Ftemplates\u002Farchitecture\u002Farchitecture_json.prompt\n\n# 多个变体\npdd generate -e PRD_FILE=docs\u002Fspecs.md -e APP_NAME=Shop   --output apps\u002Fshop\u002Farchitecture.json   pdd\u002Ftemplates\u002Farchitecture\u002Farchitecture_json.prompt\npdd generate -e 
PRD_FILE=docs\u002Fspecs.md -e APP_NAME=Admin  --output apps\u002Fadmin\u002Farchitecture.json  pdd\u002Ftemplates\u002Farchitecture\u002Farchitecture_json.prompt\npdd generate -e PRD_FILE=docs\u002Fspecs.md -e APP_NAME=Public --output apps\u002Fpublic\u002Farchitecture.json pdd\u002Ftemplates\u002Farchitecture\u002Farchitecture_json.prompt\n\n# 4) 在输出路径中使用变量\n\n# 5) 为方便起见，使用 shell 环境变量回退\nexport APP=shop\npdd generate -e APP -e PRD_FILE=docs\u002Fspecs.md --output 'apps\u002F${APP}\u002Farchitecture.json' pdd\u002Ftemplates\u002Farchitecture\u002Farchitecture_json.prompt\n```\n\n模板编写提示\n\n- 将人类指导信息放在 YAML 前置元数据中（包含示例、用法和备注的变量）；保持提示主体以模型为中心。\n- 使用 `\u003Cinclude>`\u002F`\u003Cinclude-many>` 引入精选的上下文内容；优先使用规范或配置文件，而非大段代码。\n- 参数化包含：通过 `-e` 传递文件路径，例如 `\u003Cinclude>${PRD_FILE}\u003C\u002Finclude>`；引擎会在变量展开后解析包含内容。\n- 如果你的模板会输出特定文件名，请在示例命令中注明 `--output`。\n\n行为说明\n\n- 变量展开仅适用于通过 `-e\u002F--env` 显式传递的变量（或通过 `-e KEY` 的环境变量回退机制）。其他 `$NAME` 出现处将保持不变。\n- `--output` 也支持使用同一组变量中的 `$VAR` 或 `${VAR}`。\n- 如果省略 `--output`，PDD 会根据提示文件的基本名称和检测到的语言扩展名推导出文件名；可通过设置 `PDD_GENERATE_OUTPUT_PATH` 将输出定向到一个公共目录。\n\n模板相关命令\n\n- 前置元数据会被解析（不会发送给 LLM），并用于：\n  - 定义变量模式及验证逻辑\n  - 提供使用示例（由 `pdd templates show` 渲染）\n  - 支持可选的 `discover` 设置（由 CLI 大写执行）\n  - 提供可选的 `output_schema` 进行验证\n- 命令包括：\n  - `pdd templates list [--json] [--filter tag=...]`\n  - `pdd templates show \u003Cname>`\n  - `pdd templates copy \u003Cname> --to prompts\u002F`\n  - `pdd generate --template \u003Cname> [-e KEY=VALUE...] 
[--output PATH]`\n\n#### 内置模板\n\nPDD 可以随软件包分发一组精选的常用模板，帮助用户快速上手（例如前端\u002FNext.js、后端\u002FFlask、数据\u002FETL）。\n\n内置模板的位置（打包形式）\n\n- 在已安装的软件包目录下，路径为 `pdd\u002Ftemplates\u002F\u003Ccategory>\u002F**\u002F*.prompt`（并附有可选的 README 或 index 文件）。从 PyPI 安装时，这些文件会作为包数据一同包含。\n\n包含的入门模板\n\n- `architecture\u002Farchitecture_json.prompt`：通用架构生成器（需要 `-e PRD_FILE=...`；支持可选的 `TECH_STACK_FILE`、`DOC_FILES` 和 `INCLUDE_FILES`）。\n\n**LLM 切换功能：**\n\n所有模板都支持 `llm` 参数来控制是否运行 LLM 生成：\n\n- **`llm=true`**（默认）：完整流程，包括 LLM 生成和后处理\n- **`llm=false`**：跳过 LLM 生成，仅执行后处理\n\n**架构 JSON 模板特性：**\n\n`architecture\u002Farchitecture_json` 模板内置了自动 Mermaid 图表生成功能：\n\n- **后处理**：自动生成交互式的 HTML 格式的 Mermaid 架构图\n- **可视化效果**：生成 `architecture_diagram.html` 文件，其中模块按颜色区分（前端\u002F后端\u002F共享部分）\n- **交互性**：鼠标悬停时会显示模块详情、依赖关系及描述\n- **独立性**：HTML 文件自带嵌入的 Mermaid 库，可在离线状态下直接打开\n\n**示例命令：**\n\n```bash\n# 完整流程（LLM 生成 + 后处理 + Mermaid HTML）\npdd generate --template architecture\u002Farchitecture_json \\\n  -e PRD_FILE=docs\u002Fspecs.md \\\n  -e APP_NAME=\"MyApp\" \\\n  --output architecture.json\n# 输出结果：architecture.json + architecture_diagram.html\n\n# 仅后处理（跳过 LLM，基于已有 JSON 生成 HTML）\npdd generate --template architecture\u002Farchitecture_json \\\n  -e APP_NAME=\"MyApp\" \\\n  -e llm=false \\\n  --output architecture.json\n\n# 结果在：architecture_diagram.html（来自现有的 architecture.json）\n```\n\n**上下文 URL（可选字段）：**\n\n架构条目支持一个可选的 `context_urls` 数组，用于将每个模块的 Web 文档引用关联起来。当从架构生成提示时（通过 `generate_prompt`），这些 URL 会在依赖项部分以 `\u003Cweb>` 标签的形式输出，从而使 LLM 在代码生成过程中能够获取相关的 API 文档。\n\n```json\n{\n  \"filename\": \"orders_api_Python.prompt\",\n  \"dependencies\": [\"models_Python.prompt\"],\n  \"context_urls\": [\n    {\"url\": \"https:\u002F\u002Ffastapi.tiangolo.com\u002Ftutorial\u002Ffirst-steps\u002F\", \"purpose\": \"FastAPI 路由模式\"},\n    {\"url\": \"https:\u002F\u002Fdocs.pydantic.dev\u002Flatest\u002Fconcepts\u002Fmodels\u002F\", \"purpose\": \"Pydantic 模型验证\"}\n  ],\n  ...\n}\n```\n\n`context_urls` 字段由代理式架构工作流自动填充（步骤 
5：研究依赖项），但也可以手动添加到任何架构条目中。\n\nFront Matter（YAML）元数据\n\n- 模板包含 YAML front matter 元数据，便于人类阅读：\n  - `name`、`description`、`version`、`tags`：用于文档和发现\n  - `language`、`output`：`generate` 的默认设置\n  - `variables`：用于 `-e\u002F--env` 的参数模式（类型、是否必填、默认值）\n\n示例（架构模板）：\n\n```\n---\nname: architecture\u002Farchitecture_json\ndescription: 适用于多个技术栈的统一架构模板\nversion: 1.0.0\ntags: [架构, 模板, json]\nlanguage: json\noutput: architecture.json\nvariables:\n  TECH_STACK:\n    required: false\n    type: string\n    description: 目标技术栈，用于界面设计和规范。\n    examples: [nextjs, python, fastapi, flask, django, node, go]\n  API_STYLE:\n    required: false\n    type: string\n    description: 后端的 API 风格。\n    examples: [rest, graphql]\n  APP_NAME:\n    required: false\n    type: string\n    description: 可选的应用名称，用于提供上下文。\n    example: Shop\n  PRD_FILE:\n    required: true\n    type: path\n    description: 主要的产品需求文档 (PRD)，描述范围和目标。\n    example_paths: [PRD.md, docs\u002Fspecs.md, docs\u002Fproduct\u002Fprd.md]\n    example_content: |\n      标题：订单管理 MVP\n      目标：使客户能够端到端地创建和跟踪订单。\n      关键功能：\n        - 创建订单：id、user_id、items[]、total、status\n        - 查看订单：带有状态时间线的详情页\n        - 列出订单：可按状态、日期、用户筛选\n      非功能性要求：\n        - P95 延迟 \u003C 300ms 对于读取端点\n        - 错误率 \u003C 0.1%\n  TECH_STACK_FILE:\n    required: false\n    type: path\n    description: 技术栈概述（语言、框架、基础设施和工具）。\n    example_paths: [docs\u002Ftech_stack.md, docs\u002Farchitecture\u002Fstack.md]\n    example_content: |\n      后端：Python (FastAPI)、Postgres (SQLAlchemy)、PyTest\n      前端：Next.js (TypeScript)、shadcn\u002Fui、Tailwind CSS\n      API：REST\n      认证：Firebase Auth (GitHub 设备流)、用于 API 的 JWT\n      基础设施：Vercel（前端）、Cloud Run（后端）、Cloud SQL（Postgres）\n      可观测性：OpenTelemetry 跟踪、Cloud Logging\n  DOC_FILES:\n    required: false\n    type: list\n    description: 其他文档文件（用逗号或换行分隔）。\n    example_paths: [docs\u002Fux.md, docs\u002Fcomponents.md]\n    example_content: |\n      设计概述、模式和约束\n  INCLUDE_FILES:\n    required: false\n    type: list\n    description: 
特定的源文件列表（用逗号或换行分隔）。\n    example_paths: [src\u002Fapp.py, src\u002Fapi.py, frontend\u002Fapp\u002Flayout.tsx, frontend\u002Fapp\u002Fpage.tsx]\n  usage:\n    generate:\n      - name: 最小化（仅 PRD）\n        command: pdd generate -e PRD_FILE=docs\u002Fspecs.md --output architecture.json pdd\u002Ftemplates\u002Farchitecture\u002Farchitecture_json.prompt\n      - name: 包含技术栈概述\n        command: pdd generate -e PRD_FILE=docs\u002Fspecs.md -e TECH_STACK_FILE=docs\u002Ftech_stack.md --output architecture.json pdd\u002Ftemplates\u002Farchitecture\u002Farchitecture_json.prompt\n  discover:\n    enabled: false\n    max_per_pattern: 5\n    max_total: 10\n---\n```\n\n注释\n\n- YAML front matter 会被解析，但不会发送给 LLM。使用 `pdd templates show` 可以查看变量、使用方法、发现功能以及输出模式。在 CLI 中可以通过 `-e` 参数传递变量。\n\n模板变量（参考）\n\n- 架构（`architecture\u002Farchitecture_json.prompt`）\n  - `PRD_FILE`（路径，必填）：主要规格\u002FPRD 文件路径\n  - `TECH_STACK_FILE`（路径，可选）：技术栈概述文件（包括 API 风格；例如 docs\u002Ftech_stack.md）\n  - `APP_NAME`（字符串，可选）：应用名称，用于提供上下文\n  - `DOC_FILES`（列表，可选）：用逗号或换行分隔的其他文档路径列表\n  - `INCLUDE_FILES`（列表，可选）：需要包含的源文件列表，用逗号或换行分隔\n  - `SCAN_PATTERNS`（列表，可选）：在 front matter 的 `discover` 中定义，并由 CLI 执行的发现模式\n  - `SCAN_ROOT`（路径，可选）：在 front matter 的 `discover` 中定义的发现根目录\n\n注释\n\n- 这些变量在每个模板顶部的 YAML front matter 中声明，以便清晰明了，并方便未来的 CLI 发现。在 CLI 解析 front matter 之前，可以按照示例所示，通过 `-e` 参数传递值。\n\n复制并生成\n\n- 将所需的模板复制到项目的 `prompts\u002F` 文件夹中，然后像往常一样使用 `pdd generate`。这样可以使提示与您的仓库版本化，从而方便您编辑和改进它们。\n- 快速复制（Python 一行命令；在项目根目录运行）：\n\n```\npython - \u003C\u003C'PY'\nfrom importlib.resources import files\nimport shutil, os\n\ndst_dir = 'prompts\u002Farchitecture'\nsrc_dir = files('pdd').joinpath('templates\u002Farchitecture')\nos.makedirs(dst_dir, exist_ok=True)\n\nfor p in src_dir.rglob('*.prompt'):\n    shutil.copy(p, dst_dir)\nprint(f'已从 {src_dir} 复制内置模板到 {dst_dir}')\nPY\n\n# 然后从复制的提示中生成\npdd generate --output architecture.json prompts\u002Farchitecture\u002Farchitecture_json.prompt\n```\n\n统一模板示例\n\n```\n# 前端（Next.js）— interface.page.route 
和组件属性\npdd generate \\\n  -e APP_NAME=Shop \\\n  # （路由根据 PRD\u002F技术栈\u002F文件推断）\n  -e PRD_FILE=docs\u002Fspecs.md \\\n  -e DOC_FILES='docs\u002Fux.md,docs\u002Fcomponents.md' \\\n  -e TECH_STACK_FILE=docs\u002Ftech_stack.md \\\n  # 如果需要，发现功能已在模板 YAML 中配置，并由 CLI 执行\n  --output architecture.json \\\n  pdd\u002Ftemplates\u002Farchitecture\u002Farchitecture_json.prompt\n\n# 后端（Python）— interface.module.functions 或 interface.api.endpoints\npdd generate \\\n  -e PRD_FILE=docs\u002Fbackend-spec.md \\\n  -e TECH_STACK_FILE=docs\u002Ftech_stack.md \\\n  -e INCLUDE_FILES='src\u002Fapp.py,src\u002Fapi.py,pyproject.toml' \\\n  --output architecture.json \\\n  pdd\u002Ftemplates\u002Farchitecture\u002Farchitecture_json.prompt\n```\n\n**接口模式**\n\n- 核心键（每个条目）：\n  - `reason`、`description`、`dependencies`、`priority`、`filename`，以及可选的 `tags`。\n- 接口对象（类型化，仅包含适用的部分）：\n  - `type`: `component` | `page` | `module` | `api` | `graphql` | `cli` | `job` | `message` | `config`\n  - `component`: `props[]`，可选的 `emits[]` 和 `context[]`\n  - `page`: `route`，可选的 `params[]`、`layout` 和 `dataSources[]`，其中每项都包含必填的 `kind`（如 `api`、`query`）和 `source`（URL 或标识符），以及可选的 `method`、`description`、`auth`、`inputs[]`、`outputs[]`、`refreshInterval` 和 `notes`\n  - `module`: `functions[]`，包含 `name`、`signature`，以及可选的 `returns`、`errors` 和 `sideEffects`\n  - `api`: `endpoints[]`，包含 `method`、`path`，以及可选的 `auth`、`requestSchema`、`responseSchema` 和 `errors`\n  - `graphql`: 可选的 `sdl`，或包含 `queries[]`、`mutations[]`、`subscriptions[]` 的 `operations`\n  - `cli`: `commands[]`，包含 `name`、可选的 `args[]`、`flags[]` 和 `exitCodes[]`；还可选的 `io`（`stdin`、`stdout`）\n  - `job`: `trigger`（cron\u002Fevent），可选的 `inputs[]`、`outputs[]` 和 `retryPolicy`\n  - `message`: `topics[]`，包含 `name`、`direction`（`publish` 或 `subscribe`），以及可选的 `schema` 和 `qos`\n  - `config`: `keys[]`，包含 `name`、`type`，以及可选的 `default`、`required` 和 `source`（`env`、`file` 或 `secret`）\n  - 可选：`version`、`stability`（`experimental` 或 `stable`）\n\n示例：\n\n```json\n{\n  \"reason\": \"顶级产品页面\",\n  
\"description\": \"...\",\n  \"dependencies\": [\"layout_tsx.prompt\"],\n  \"priority\": 1,\n  \"filename\": \"page_tsx.prompt\",\n  \"tags\": [\"frontend\",\"nextjs\"],\n  \"interface\": {\n    \"type\": \"page\",\n    \"page\": {\"route\": \"\u002Fproducts\", \"params\": [{\"name\":\"id\",\"type\":\"string\"}]},\n    \"component\": {\"props\": [{\"name\":\"initialProducts\",\"type\":\"Product[]\",\"required\":true}]}\n  }\n}\n```\n\n```json\n{\n  \"reason\": \"订单服务模块\",\n  \"description\": \"...\",\n  \"dependencies\": [\"db_python.prompt\"],\n  \"priority\": 1,\n  \"filename\": \"orders_python.prompt\",\n  \"tags\": [\"backend\",\"python\"],\n  \"interface\": {\n    \"type\": \"module\",\n    \"module\": {\n      \"functions\": [\n        {\"name\": \"load_orders\", \"signature\": \"def load_orders(user_id: str) -> list[Order]\"},\n        {\"name\": \"create_order\", \"signature\": \"def create_order(dto: OrderIn) -> Order\"}\n      ]\n    }\n  }\n}\n```\n\n```json\n{\n  \"reason\": \"订单 HTTP API\",\n  \"description\": \"...\",\n  \"dependencies\": [\"orders_python.prompt\"],\n  \"priority\": 2,\n  \"filename\": \"api_python.prompt\",\n  \"tags\": [\"backend\",\"api\"],\n  \"interface\": {\n    \"type\": \"api\",\n    \"api\": {\n      \"endpoints\": [\n        {\n          \"method\": \"GET\",\n          \"path\": \"\u002Forders\u002F{id}\",\n          \"auth\": \"bearer\",\n          \"responseSchema\": {\"type\":\"object\",\"properties\":{\"id\":{\"type\":\"string\"}}},\n          \"errors\": [\"404 Not Found\",\"401 Unauthorized\"]\n        }\n      ]\n    }\n  }\n}\n```\n\n注意事项与建议\n\n- 复制的模板应作为起点；请根据您的技术栈和规范进行编辑。\n- 将模板与代码一起纳入版本控制，以保持“提示即真理”的模型。\n- 如果您维护自己的模板集，请将其存储在 `prompts\u002F\u003Corg_or_team>\u002F...` 中，并使用 `\u003Cinclude>` 进行组合，以最大化复用。\n\n模板：额外的用户体验\n\n- 目标：\n  - 无需手动指定文件路径即可发现、检查和复制模板。\n  - 验证所需变量，并从模板元数据中显示默认值。\n  - 支持搜索顺序，以便项目模板可以覆盖打包模板。\n\n- 命令：\n  - `pdd templates list [--json] [--filter tag=frontend]` 用于发现模板\n  - `pdd templates show 
\u003Cname> [--raw]` 用于查看元数据和变量\n  - `pdd templates copy \u003Cname> --to prompts\u002F` 用于将模板复制到您的仓库\n  - `pdd generate --template \u003Cname> [-e KEY=VALUE...] [--output PATH]`\n\n- 示例用法：\n```\n# 发现和检查\npdd templates list --filter tag=frontend\npdd templates show frontend\u002Fnextjs_architecture_json\n\n# 复制并定制\npdd templates copy frontend\u002Fnextjs_architecture_json --to prompts\u002Ffrontend\u002F\n\n# 不指定文件路径生成\npdd generate --template frontend\u002Fnextjs_architecture_json \\\n  -e APP_NAME=Shop \\\n  # 路由从 PRD\u002F技术栈\u002F文件中推断\n  --output architecture.json\n```\n\n- 搜索顺序：\n  - 项目：`.\u002Fprompts\u002F**`（允许团队覆盖）\n  - `.pddrc` 路径：任何配置的 `templates.paths`\n  - 打包模板：`pdd\u002Ftemplates\u002F**`（内置模板）\n  - 可选：`$PDD_PATH\u002Fprompts\u002F**`（组织级别的模板包）\n\n- 模板前言：\n  - 在 `.prompt` 文件顶部使用 YAML 元数据声明 `name`、`description`、`tags`、`version`、`language`、默认 `output` 以及 `variables`（包括 `required`、`default` 和 `type`，如 `string` 或 `json`）。\n  - CLI 优先级：`-e\u002F--env` 中的值会覆盖前言中的默认值；未知变量会被验证并提示用户输入。\n  - 示例：\n    ```\n    ---\n    name: frontend\u002Fnextjs_architecture_json\n    description: 从应用元数据生成 Next.js 的 architecture.json 文件\n    tags: [frontend, nextjs, json]\n    version: 1.0.0\n    language: json\n    output: architecture.json\n    variables:\n      APP_NAME: { required: true }\n      ROUTES:   { type: json, default: [] }\n    ---\n    ...提示内容...\n    ```\n\n### 3. 
示例\n\n创建一个紧凑的示例，演示如何使用在提示中定义的功能。类似于头文件或 API 文档，这会生成最小化的、token 效率高的代码，展示接口而不包含实现细节。\n\n```\npdd [全局选项] example [选项] 提示文件 代码文件\n```\n\n参数：\n- `提示文件`：生成代码的提示文件名。\n- `代码文件`：现有代码文件的名称。\n\n选项：\n- `--output LOCATION`：指定保存生成示例代码的位置。默认文件名为 `\u003Cbasename>_example.\u003Clanguage_file_extension>`。如果设置了环境变量 `PDD_EXAMPLE_OUTPUT_PATH`，则文件将保存在该路径下，除非被此选项覆盖。\n- `--format FORMAT`：生成示例的输出格式（默认为 `code`）。有效值：\n  - `code`：使用语言特定的文件扩展名（例如，Python 的 `.py`，JavaScript 的 `.js`）。\n  - `md`：生成 Markdown 格式，扩展名为 `.md`。\n  当指定了明确的 `--output` 路径时，`--format` 选项会相应地限制输出文件的扩展名。\n\n使用场景：\n- 依赖引用：示例可以作为其他提示的轻量级（token 效率高）接口参考，并可作为生成目标的依赖项。\n- 健全性检查：示例程序通常用作 `crash` 和 `verify` 的可运行程序，提供快速的端到端健全性检查，以确保生成的代码能够按预期运行和行为。\n- 自动依赖集成：`auto-deps` 命令可以扫描示例文件（例如 `examples\u002F**\u002F*.py`），并将相关引用插入到提示中。根据每个示例的内容（导入、API 使用、文件名），它会识别出有用的开发单元并将其作为依赖项。\n\n**何时使用**：当需要创建其他提示可以高效导入的可重用引用时，选择此命令。这样可以生成 token 效率高的示例，相比包含完整实现，更容易在多个提示之间重复使用。\n\n示例：\n```\npdd [全局选项] example --output examples\u002Ffactorial_calculator_example.py factorial_calculator_python.prompt src\u002Ffactorial_calculator.py\n```\n\n### 4. 测试\n\n为给定的代码文件及其对应的提示文件生成或增强单元测试。还支持从 GitHub 问题生成 UI 测试的 **代理模式**。\n\n#### 代理模式（UI 测试生成）\n\n从 GitHub 问题生成 UI 测试。该问题描述了需要测试的内容（网页、CLI 或桌面应用），代理工作流会分析目标、制定测试计划并生成全面的 UI 测试。\n\n```\npdd [全局选项] test \u003Cgithub-issue-url>\n```\n\n**工作流程（18 步骤，配合 GitHub 评论）：**\n\n1. **重复检查** - 搜索是否存在描述相同测试要求的现有问题。如果找到，则合并内容并关闭重复问题。\n\n2. **文档检查** - 审查仓库文档和代码库，以了解需要测试的内容。如果存在 OpenAPI\u002FSwagger 规范，则会识别出来。\n\n3. **分析与澄清** - 确定问题中是否已包含足够的信息来创建测试。如果需要，会发布评论请求进一步澄清。\n\n4. **检测前端类型** - 确定测试类型：Web UI、CLI、桌面应用或 API。从而决定合适的测试框架。\n\n5. **创建测试计划** - 设计一个全面的测试计划，并验证其可行性。\n\n5b. **增强测试计划** - 添加契约验证测试用例（基于 OpenAPI\u002FSwagger 规范）以及可访问性测试用例（针对 Web 应用，使用 `@axe-core\u002Fplaywright` 达到 WCAG 2.1 AA 级别）。\n\n6. **评估覆盖率** *(仅限 Web，需安装 `playwright-cli`)* - 将需求与增强后的测试计划进行比较，找出需要手动测试的差距。\n\n7. **创建手动测试清单** *(仅限 Web)* - 使用三种策略生成清单：逐页详尽测试、用户故事 walkthrough 以及可访问性抽查。\n\n8. 
**执行手动测试** *(仅限 Web)* - 通过 `playwright-cli` 命令执行清单中的项目。可在 CLI 模式下串行运行，或在 `PDD_CLOUD_RUN=true` 时通过 Cloud Batch 并行运行。\n\n9. **创建回归测试** *(仅限 Web)* - 生成能够重现第 8 步中发现的 bug 的自动化测试。\n\n10. **验证回归测试** *(仅限 Web)* - 确认回归测试会在当前代码上失败（证明确实存在 bug）。\n\n11. **循环检查** *(仅限 Web)* - 检查清单是否已完成。如果仍有未完成的项目，则返回第 8 步（最多循环 3 次）。\n\n12. **生成测试** - 根据增强后的计划，在工作树中创建测试，包括行为测试、契约测试和可访问性测试。\n\n13. **运行测试** - 对目标执行所有生成的测试。\n\n14. **修复并迭代** - 修复任何失败的测试，并重新运行直到通过为止。\n\n15. **验证测试与计划的一致性** - 将增强后的计划与生成的测试进行交叉核对。对于未实现的测试用例，生成缺失的测试。\n\n16. **运行新生成的测试** - 运行并修复第 15 步中生成的测试（如有）。\n\n17. **提交 PR** - 创建一个带有详细说明的草稿 PR，包括测试计划覆盖率、契约测试摘要、可访问性审计摘要以及手动测试摘要。\n\n**执行模式：**\n\n| 模式 | 第 6–11 步的行为 |\n|------|---------------------|\n| **CLI** (`pdd test \u003Curl>`) | 串行：每次只运行清单中的一个部分 |\n| **GitHub App** (`PDD_CLOUD_RUN=true`) | 并行：分配到 Cloud Batch 的虚拟机上运行 |\n\n**先决条件：**\n- 第 6–11 步（手动\u002F探索性测试）需要 PATH 中有 `playwright-cli`。如果未找到，则会跳过这些步骤并发出警告。\n- 第 6–11 步仅适用于 Web 测试类型（`TEST_TYPE: web`）。\n\n**代理选项：**\n- `--timeout-adder FLOAT`：为每一步的超时时间增加额外秒数（默认为 0.0）。\n- `--no-github-state`：禁用基于 GitHub 问题评论的状态持久化，仅使用本地状态。\n- `--manual`：使用传统的提示模式代替代理模式。\n\n**环境变量：**\n- `PDD_CLOUD_RUN=true`：启用手动测试的并行执行模式（第 6–11 步）。\n- `PDD_NO_GITHUB_STATE=1`：禁用 GitHub 状态持久化。\n\n**跨机器恢复**：默认情况下，工作流状态会存储在 GitHub 问题的隐藏评论中，从而允许从任何机器继续执行。使用 `--no-github-state` 可以禁用此功能。\n\n**示例（代理模式）：**\n```bash\n# 从 GitHub 问题生成 UI 测试\npdd test https:\u002F\u002Fgithub.com\u002Fmyorg\u002Fmyrepo\u002Fissues\u002F789\n\n# 在回答澄清问题后继续\npdd 测试 https:\u002F\u002Fgithub.com\u002Fmyorg\u002Fmyrepo\u002Fissues\u002F789\n```\n\n**下一步 - 修复测试问题：**\n\n如果生成的测试发现需要代码修复的问题，请使用 `pdd fix` 并提供相同的议题 URL：\n\n```bash\npdd fix https:\u002F\u002Fgithub.com\u002Fmyorg\u002Fmyrepo\u002Fissues\u002F789\n```\n\n---\n\n#### 手动模式（基于提示）\n\n为给定的代码文件及其对应的提示文件生成或增强单元测试。\n\n测试组织方式：\n- 对于每个目标 `\u003Cbasename>`，PDD 维护一个单独的测试文件（默认命名为 `test_\u003Cbasename>.\u003Clanguage_extension>`，通常放置在 tests 目录下）。\n- 新的测试会逐渐累积到该文件中，而不是从头开始重新生成。在扩充测试时，PDD 可以将新增内容合并到现有文件中（参见 `--merge`）。\n\n```\npdd [全局选项] test [选项] 提示文件 
代码文件或示例文件\npdd [全局选项] test --manual [选项] 提示文件 代码文件或示例文件\n```\n\n参数：\n- `PROMPT_FILE`：生成代码的提示文件名。\n- `CODE_OR_EXAMPLE_FILE`：代码实现或示例文件名。以 `_example` 结尾的文件被视为 TDD 风格测试生成的示例文件。\n\n选项：\n- `--output LOCATION`：指定生成的测试文件保存位置。默认文件名为 `test_\u003Cbasename>.\u003Clanguage_file_extension>`。如果指定名称的输出文件已存在，则会创建带有编号后缀的新文件（例如 `test_calculator_1.py`），而不会覆盖原有文件。\n- `--language`：指定编程语言。默认为提示文件名中指定的语言。\n- `--coverage-report PATH`：现有测试的覆盖率报告文件路径。提供此选项时，会生成额外的测试以提高覆盖率。\n- `--existing-tests PATH [PATH...]`：现有单元测试文件的路径。使用 `--coverage-report` 时必须提供此选项。可以提供多个路径。\n- `--target-coverage FLOAT`：期望达到的代码覆盖率百分比（默认为 90.0）。\n- `--merge`：与 `--existing-tests` 一起使用时，会将新测试合并到现有测试文件中，而不是创建单独的文件。\n\n#### 故事模式\n\n生成或更新用户故事，并将其链接到相关的提示文件。\n\n```\npdd [全局选项] test prompts\u002Fupload_python.prompt prompts\u002Fnotify_python.prompt\npdd [全局选项] test user_stories\u002Fstory__my_flow.md\n```\n\n行为：\n- 如果输入是一个或多个 `.prompt` 文件，PDD 会生成 `user_stories\u002Fstory__\u003Cname>.md`。\n- 在生成故事的过程中，PDD 会运行提示检测，并自动将相关提示链接到 `pdd-story-prompts` 元数据中。\n- 如果在生成时未检测到任何相关提示，元数据将回退到提供的提示文件输入。\n- 如果 `pdd-story-prompts` 元数据已经存在且解析正常，PDD 将保持其不变。\n- 如果元数据缺失（或过时），PDD 会运行提示检测并写入：\n  `\u003C!-- pdd-story-prompts: prompt_a_python.prompt, prompt_b_python.prompt -->`\n- 这使得在 `pdd detect --stories` 中能够对提示子集进行确定性验证。\n\n#### 提供命令特定的上下文\n\n虽然提示是主要的指令来源，但某些 PDD 命令（如 `test` 和 `example`）还可以通过项目特定的上下文文件进一步指导。这些命令在其内部提示预处理阶段，可能会自动在当前工作目录中查找约定文件（例如 `context\u002Ftest.prompt`, `context\u002Fexample.prompt`）。\n\n如果找到，这些上下文文件的内容会被包含进来（使用 `preprocess` 部分中描述的 `\u003Cinclude>` 机制），并整合到命令使用的内部提示中。这允许您为项目提供特定的指导，例如：\n\n- 指定所需的导入语句。\n- 推荐首选的测试框架或库。\n- 提供项目特定的编码规范或模式。\n\n**示例：** 创建一个名为 `context\u002Ftest.prompt` 的文件，内容如下：\n```\n请确保所有测试都使用 'unittest' 框架，并将主模块以 'from my_module import *' 的形式导入。\n```\n这可能会影响在同一目录下运行 `pdd test` 命令时的输出。\n\n**注意：** 此功能依赖于特定 PDD 命令的内部实现，这些命令需要包含用于这些约定上下文文件的必要 `\u003Cinclude>` 标签。它主要用于 `test` 和 `example` 命令，但未来也可能被其他命令采用。请查阅具体命令的文档或进行试验，以确认某个命令是否使用此模式。\n\n#### 基本示例：\n\n1. 
生成初始单元测试：\n```\npdd [全局选项] test --output tests\u002Ftest_factorial_calculator.py factorial_calculator_python.prompt src\u002Ffactorial_calculator.py\n```\n\n2. 从示例文件生成测试（TDD 风格）：\n```\npdd [全局选项] test --output tests\u002Ftest_calculator.py calculator_python.prompt examples\u002Fcalculator_example.py\n```\n\n3. 生成额外测试以提高覆盖率（使用多个现有测试文件）：\n```\npdd [全局选项] test --coverage-report coverage.xml --existing-tests tests\u002Ftest_calculator.py --existing-tests tests\u002Ftest_calculator_edge_cases.py --output tests\u002Ftest_calculator_enhanced.py calculator_python.prompt src\u002Fcalculator.py\n```\n\n4. 提高覆盖率并合并到现有测试中：\n```\npdd [全局选项] test --coverage-report coverage.xml --existing-tests tests\u002Ftest_calculator.py --merge --target-coverage 95.0 calculator_python.prompt src\u002Fcalculator.py\n```\n\n#### 覆盖率分析策略\n\n当提供覆盖率选项时，测试命令将执行以下操作：\n1. 分析覆盖率报告，识别：\n   - 未覆盖的行和分支\n   - 部分测试的条件\n   - 缺失的边界情况\n\n2. 优先生成以下类型的测试用例：\n   - 复杂的未覆盖代码路径\n   - 错误条件\n   - 边界值\n   - 集成点\n\n3. 保持与以下内容的一致性：\n   - 现有测试风格和模式\n   - 项目的测试规范\n   - 原始提示的意图\n\n### 5. 预处理\n\n预处理提示文件并保存结果。\n\n```\npdd [GLOBAL OPTIONS] preprocess [OPTIONS] PROMPT_FILE\n```\n\n参数：\n- `PROMPT_FILE`: 要预处理的提示文件的文件名。\n\n选项：\n- `--output LOCATION`: 指定保存预处理后提示文件的位置。默认文件名为 `\u003Cbasename>_\u003Clanguage>_preprocessed.prompt`。\n- `--xml`: 自动为较长且复杂的提示文件插入 XML 分隔符，以更好地组织内容。使用此选项时，提示仅会被预处理以插入 XML 分隔符，而不会进行其他预处理。\n- `--recursive`: 递归预处理提示文件中的所有提示文件。\n- `--double`: 将花括号加倍。\n- `--exclude`: 用于排除在花括号加倍之外的键列表。\n\n#### 类 XML 标签\n\nPDD 在提示文件中支持以下类 XML 标签。注意：XML 类标签（`\u003Cinclude>`、`\u003Cinclude-many>`、`\u003Cshell>`、`\u003Cweb>`）在 fenced code 块（``` 或 ~~~）或内联单反引号中会保持原样，以便文档示例保持字面意思。\n\n1. 
**`include`**: 将文件内容包含到提示中。文件路径始终是标签主体。可选属性可用于提取特定部分，而不是整个文件：\n   ```xml\n   \u003Cinclude>.\u002Fpath\u002Fto\u002Ffile.txt\u003C\u002Finclude>\n   \u003Cinclude select=\"def:foo,class:Bar\">src\u002Futils.py\u003C\u002Finclude>\n   \u003Cinclude select=\"class:Handler\" mode=\"interface\">src\u002Fapi.py\u003C\u002Finclude>\n   \u003Cinclude query=\"authentication flow\">docs\u002Fapi_reference.md\u003C\u002Finclude>\n   ```\n   - `select=` — 确定性结构化提取（函数、类、行范围、标题、正则表达式、JSON\u002FYAML 路径）。可通过逗号分隔组合使用。\n   - `mode=\"interface\"` — 仅适用于 Python。提取签名和文档字符串，并将函数体替换为 `...`。\n   - `query=` — 基于 LLM 的语义提取，缓存在 `.pdd\u002Fextracts\u002F` 中。\n   - `optional` — 当 `\u003Cinclude ...>` 标签上出现此属性时，如果文件不存在，在非递归预处理过程中会解析为空字符串 (`\"\"`)，同时仍会记录警告。\n   - 当 `select=` 和 `query=` 同时存在时，`select=` 优先（不产生 LLM 费用）。\n\n   此机制也被一些命令（如 `test` 和 `example`）内部使用，以自动引入项目特定的上下文文件，如果它们存在于常规位置（例如 `context\u002Ftest.prompt`）。有关详细信息，请参阅“提供命令特定的上下文”。完整的选择器参考，请参阅 [Prompting Guide](docs\u002Fprompting_guide.md#selective-includes)。\n\n2. **`pdd`**: 表示将在预处理后的提示中被移除的注释，包括标签本身。\n   ```xml\n   \u003Cpdd>This is a comment that won't appear in the preprocessed output\u003C\u002Fpdd>\n   ```\n\n3. **`shell`**: 执行 shell 命令并将输出包含到提示中，同时移除 shell 标签。\n   ```xml\n   \u003Cshell>ls -la\u003C\u002Fshell>\n   ```\n\n4. **`web`**: 抓取网页并将其中的 Markdown 内容包含到提示中，同时移除 web 标签。\n   ```xml\n   \u003Cweb>https:\u002F\u002Fexample.com\u003C\u002Fweb>\n   ```\n\n#### 三重反引号包含\n\nPDD 支持两种包含外部内容的方式：\n\n1. **三重反引号包含**：将三重反引号中的尖括号替换为指定文件的内容。\n   ````\n   ```\n   \u003C.\u002Fpath\u002Fto\u002Ffile.txt>\n   ```\n   这将被递归处理，直到三重反引号中不再有尖括号为止。\n\n2. 
**XML 包含标签**：如上所述。\n\n#### 花括号处理\n\n当使用 `--double` 选项时：\n\n- 如果单个花括号尚未加倍，则会加倍；\n- 已经加倍的花括号将保持不变；\n- 嵌套花括号会得到正确处理；\n- 对代码块（JSON、JavaScript、TypeScript、Python）有特殊处理；\n- 包含花括号的多行变量也会得到特殊处理。\n\n使用 `--exclude` 选项可以指定应排除在花括号加倍之外的键。此选项**仅在**一对单个花括号内的**整个字符串**与其中一个排除键**完全匹配**时才适用。\n\n例如，使用 `--exclude model`：\n- `{model}` 保持 `{model}` 不变（因完全匹配而被排除）。\n- `{model_name}` 会被加倍，因为 `'model_name'` 并不完全等于 `'model'`。\n- `{api_model}` 也会被加倍，因为它也不完全匹配。\n- 包含其他内容的花括号，即使与该键相关（例如 `var={key}_value`），通常仍会遵循加倍规则，除非内部的 `{key}` 本身被排除。\n\n示例命令：\n```\npdd [GLOBAL OPTIONS] preprocess --output preprocessed\u002Ffactorial_calculator_python_preprocessed.prompt --recursive --double --exclude model,temperature factorial_calculator_python.prompt\n```\n\n### 6. 修复\n\n修复代码和单元测试中的错误。支持两种模式：**代理式端到端修复**（给定 GitHub URL 时为默认模式），用于多开发者单元测试修复；以及**手动模式**，用于单个开发者单元测试修复，需明确指定文件参数。\n\n**代理式端到端修复模式（GitHub URL）：**\n```\npdd [GLOBAL OPTIONS] fix [OPTIONS] \u003CGITHUB_ISSUE_URL>\n```\n\n**手动模式（文件参数）：**\n```\npdd [GLOBAL OPTIONS] fix --manual [OPTIONS] PROMPT_FILE CODE_FILE UNIT_TEST_FILE ERROR_FILE\n```\n\n#### 手动模式参数\n- `PROMPT_FILE`: 生成待测试代码的提示文件名。\n- `CODE_FILE`: 要修复的代码文件名。\n- `UNIT_TEST_FILES`: 单元测试文件名。可以提供多个文件，每个文件将单独处理。\n- `ERROR_FILE`: 包含单元测试运行时错误信息的文件名。可选，与 `--loop` 命令一起使用时无需存在。\n\n#### 共享选项\n- `--manual`: 使用手动模式，需明确指定文件参数（用于传统或单个开发者单元测试修复）。\n- `--verbose`: 显示详细的处理输出。\n- `--quiet`: 屏蔽所有输出，仅显示错误。\n- `--protect-tests\u002F--no-protect-tests`: 启用时，防止 LLM 修改测试文件。LLM 会将测试视为只读规范，仅修复代码。这在已知由 `pdd bug` 创建的测试正确时尤为有用。默认：`--no-protect-tests`。\n\n#### 代理式端到端修复选项\n- `--timeout-adder FLOAT`: 每个步骤超时时间额外增加的秒数（默认：0.0）。\n- `--max-cycles INT`: 放弃前的最大外层循环次数（默认：5）。\n- `--resume\u002F--no-resume`: 如果有保存的状态，则从中恢复（默认：`--resume`）。\n- `--force`: 覆盖分支不匹配的安全检查。默认情况下，如果当前 Git 分支与问题中预期的分支不匹配，命令会中止（以防止意外修改错误的代码库）。\n\n#### 手动模式选项\n- `--output-test LOCATION`：指定保存修复后的单元测试文件的路径。默认文件名为 `test_\u003Cbasename>_fixed.\u003Clanguage_file_extension>`。**警告：如果同时提供了多个 `UNIT_TEST_FILES` 和此选项，只有最后一个处理的测试文件的修复内容会被保存到该位置，之前的结果将被覆盖。若需为每个文件单独保存修复结果，请省略此选项。**\n- 
`--output-code LOCATION`：指定保存修复后代码文件的路径。默认文件名为 `\u003Cbasename>_fixed.\u003Clanguage_file_extension>`。如果设置了环境变量 `PDD_FIX_CODE_OUTPUT_PATH`，则文件会保存到该路径，除非被此选项覆盖。\n- `--output-results LOCATION`：指定保存错误修复过程结果的路径。默认文件名为 `\u003Cbasename>_fix_results.log>。如果设置了环境变量 `PDD_FIX_RESULTS_OUTPUT_PATH`，则文件会保存到该路径，除非被此选项覆盖。\n- `--loop`：启用迭代修复流程。\n  - `--verification-program PATH`：指定用于验证代码是否仍能正确运行的 Python 程序路径。\n  - `--max-attempts INT`：设置放弃前的最大修复尝试次数（默认为 3 次）。\n  - `--budget FLOAT`：设置修复过程允许的最大成本（默认为 5.0 美元）。\n- `--auto-submit`：在修复循环中所有单元测试通过时，自动提交示例。\n\n当使用 `--loop` 选项时，修复命令将通过多次迭代尝试修复错误。每次修复尝试后，它都会使用指定的验证程序检查代码是否能够正确运行。该过程将持续进行，直到错误被修复、达到最大尝试次数或预算耗尽为止。\n\n输出：\n- 修复后的单元测试文件。\n- 修复后的代码文件。\n- 包含 LLM 模型输出及单元测试结果的记录文件。\n- 使用 `--loop` 时的输出打印，包含：\n  - 成功状态（布尔值）\n  - 总修复尝试次数\n  - 所有修复尝试的总成本\n此外，还会为不同迭代生成带有时间戳命名的单元测试和代码文件中间版本（例如：`basename_1_0_3_0_20250402_124442.py`, `standalone_test_1_0_3_0_20250402_124442.py`）。\n\n示例：\n```\npdd [全局选项] fix --output-code src\u002Ffactorial_calculator_fixed.py --output-results results\u002Ffactorial_fix_results.log factorial_calculator_python.prompt src\u002Ffactorial_calculator.py tests\u002Ftest_factorial_calculator.py tests\u002Ftest_factorial_calculator_edge_cases.py errors.log\n```\n在此示例中，`pdd fix` 将针对每个测试文件运行，并将修复后的测试文件分别保存为 `tests\u002Ftest_factorial_calculator_fixed.py` 和 `tests\u002Ftest_factorial_calculator_edge_cases_fixed.py`。\n\n\n#### 智能代理回退模式\n\n（此功能同样适用于 `crash` 和 `verify` 命令。）\n\n对于标准迭代修复流程无法解决的特别棘手的 bug，`pdd fix` 提供了一种强大的智能代理回退模式。启用后，它会调用一个具备项目上下文感知能力的 CLI 代理，以更广泛的背景信息尝试修复问题。\n\n**工作原理：**\n如果标准修复循环完成所有尝试仍未使测试通过，则智能代理回退模式将接管。它会构建一套详细的指令，并将修复任务委托给专门的 CLI 代理，如 Google 的 Gemini、Anthropic 的 Claude 或 OpenAI 的 Codex。\n\n**使用方法：**\n\n此功能仅在设置 `--loop` 时生效。\n\n当设置 `--loop` 标志时，智能代理回退模式默认启用：\n```bash\npdd [全局选项] fix --manual --loop [其他选项] PROMPT_FILE CODE_FILE UNIT_TEST_FILE\n```\n\n或者您也可以显式地启用它：\n```bash\npdd [全局选项] fix --manual --loop --agentic-fallback [其他选项] PROMPT_FILE CODE_FILE UNIT_TEST_FILE\n```\n\n若要在使用 `--loop` 时禁用此功能，可添加 
`--no-agentic-fallback` 来关闭它。\n```bash\npdd [全局选项] fix --manual --loop --no-agentic-fallback [其他选项] PROMPT_FILE CODE_FILE UNIT_TEST_FILE\n```\n\n**前提条件：**\n要使智能代理回退模式正常工作，您需要至少安装一个受支持的代理 CLI，并在环境中配置相应的 API 密钥。代理将按以下优先级顺序尝试：\n\n1. **Anthropic Claude：**\n    * 需要安装并添加到 `PATH` 中的 `claude` CLI。\n    * 需要设置 `ANTHROPIC_API_KEY` 环境变量。\n2. **Google Gemini：**\n    * 需要安装并添加到 `PATH` 中的 `gemini` CLI。\n    * 需要设置 `GOOGLE_API_KEY` 或 `GEMINI_API_KEY` 环境变量。\n3. **OpenAI Codex\u002FGPT：**\n    * 需要安装并添加到 `PATH` 中的 `codex` CLI。\n    * 需要设置 `OPENAI_API_KEY` 环境变量。\n\n您可以使用 `pdd setup` 命令或直接在 shell 环境中设置这些密钥。\n\n#### 智能代理端到端修复模式\n\n对于跨多个开发单元的端到端测试修复，可使用智能代理端到端修复模式，只需传递一个 GitHub 问题 URL（通常由 `pdd bug` 创建）。该模式会编排一个包含 11 个步骤的迭代工作流，以修复整个代码库中的单元测试和端到端测试，包括推送后的 CI 验证以及代码清理。\n\n**工作原理：**\n\n该工作流会分析 GitHub 问题以提取测试信息，然后迭代修复失败的测试：\n\n1. **运行单元测试**：执行问题中的单元测试，并依次对每个失败的测试运行 `pdd fix`。\n2. **运行端到端测试**：执行端到端测试以识别失败；若全部通过则停止。\n3. **根因分析**：根据文档分析失败原因，判断问题是出在代码、测试还是两者兼而有之。\n4. **修复端到端测试**：如果端到端测试本身存在问题，则修复它们并返回步骤 2。\n5. **确定开发单元**：查明哪些开发单元涉及失败。\n6. **创建单元测试**：对于代码缺陷，为受影响的开发单元创建或追加单元测试。\n7. **验证测试**：运行新的单元测试，确认它们能够检测到缺陷，并在修复后通过。\n8. **运行 PDD Fix**：依次对每个开发单元的失败单元测试运行 `pdd fix`。\n9. **全面验证**：最终验证本地所有测试是否通过。\n10. **CI 验证**：轮询外部 CI，获取失败日志，并运行 LLM 修复循环以解决 CI 特有的问题（代码风格检查、构建产物、编译错误等）。\n11. 
**代码清理**：审查工作流中的所有更改，清理代码质量问题（调试语句、未使用的导入、重复代码等）；若测试再次失败，则回滚更改。\n\n**可恢复操作：**\n\n系统会自动保存状态，以便您可以在中断后继续工作。若想从头开始，请使用 `--no-resume`。\n\n**跨机器恢复**：默认情况下，工作流状态会存储在 GitHub 问题的隐藏评论中，从而支持从任何机器继续执行。如果您在机器 A 上启动了工作流，只需在机器 B 上检出分支并再次运行 `pdd fix`，即可从中断处继续。使用 `--no-github-state` 可禁用此功能，改为仅使用本地状态持久化。您也可以设置环境变量 `PDD_NO_GITHUB_STATE=1`。\n\n**示例：**\n```bash\n\n\n# 修复 GitHub 问题中的测试（代理模式）\npdd fix https:\u002F\u002Fgithub.com\u002Fmyorg\u002Fmyrepo\u002Fissues\u002F42\n\n# 自定义超时时间和最大循环次数\npdd fix --timeout-adder 30 --max-cycles 10 https:\u002F\u002Fgithub.com\u002Fmyorg\u002Fmyrepo\u002Fissues\u002F42\n\n# 配置 CI 重试和验证\npdd fix --ci-retries 5 https:\u002F\u002Fgithub.com\u002Fmyorg\u002Fmyrepo\u002Fissues\u002F42\n\n# 完全跳过推送后的 CI 验证\npdd fix --skip-ci https:\u002F\u002Fgithub.com\u002Fmyorg\u002Fmyrepo\u002Fissues\u002F42\n\n# 从头开始（忽略已保存的状态）\npdd fix --no-resume https:\u002F\u002Fgithub.com\u002Fmyorg\u002Fmyrepo\u002Fissues\u002F42\n\n# 禁用 GitHub 状态持久化（仅本地）\npdd fix --no-github-state https:\u002F\u002Fgithub.com\u002Fmyorg\u002Fmyrepo\u002Fissues\u002F42\n\n# 保护测试不被修改（仅修复代码，不修改测试）\npdd fix --protect-tests https:\u002F\u002Fgithub.com\u002Fmyorg\u002Fmyrepo\u002Fissues\u002F42\n```\n\n**前提条件：**\n- 必须安装并认证 `gh` CLI\n- 至少配置了一个支持的代理 CLI（Claude、Gemini 或 Codex），并设置了 API 密钥\n- 对于 CI 验证，当前分支必须在 GitHub 上有一个打开的 PR\n\n**与 `pdd bug` 的关系：**\n\n此功能可与通过 `pdd bug` 处理的问题无缝协作。典型的工作流程如下：\n1. 使用 `pdd bug \u003Cissue_url>` 分析错误并生成失败的单元测试\n2. 使用 `pdd fix \u003Cissue_url>` 在所有受影响的开发单元中迭代修复这些失败的测试\n\n### 7. 
split\n\n将大型复杂提示文件拆分为更小、更易于管理的提示文件。\n\n```\npdd [全局选项] split [选项] 输入提示 文件 输入代码 文件 示例代码 文件\n```\n\n参数：\n- `INPUT_PROMPT`：要拆分的大型提示文件的文件名。\n- `INPUT_CODE`：由输入提示生成的代码文件名。\n- `EXAMPLE_CODE`：作为子模块提示文件接口的示例代码文件名。\n\n选项：\n- `--output-sub LOCATION`：指定生成的子提示文件的保存位置。默认文件名为 `sub_\u003Cbasename>.prompt`。如果设置了环境变量 `PDD_SPLIT_SUB_PROMPT_OUTPUT_PATH`，则文件将保存到该路径，除非被此选项覆盖。\n- `--output-modified LOCATION`：指定修改后的提示文件的保存位置。默认文件名为 `modified_\u003Cbasename>.prompt`。如果设置了环境变量 `PDD_SPLIT_MODIFIED_PROMPT_OUTPUT_PATH`，则文件将保存到该路径，除非被此选项覆盖。\n\n示例：\n```\npdd [全局选项] split --output-sub prompts\u002Fsub_data_processing.prompt --output-modified prompts\u002Fmodified_main_pipeline.prompt data_processing_pipeline_python.prompt src\u002Fdata_pipeline.py examples\u002Fpipeline_interface.py \n```\n\n### 8. 变更\n\n使用一个包含12个步骤的代理式工作流来实现来自GitHub议题的变更请求。该工作流会调研该功能，确保需求清晰（如有需要则提出澄清问题），审查架构（如有需要则请求决策），分析文档变更，识别受影响的开发单元，设计提示词修改方案，实施这些修改，运行评审循环以识别并修复问题，并创建一个拉取请求。\n\n**代理模式（默认）：**\n```\npdd [全局选项] change GITHUB_ISSUE_URL\n```\n\n参数：\n- `GITHUB_ISSUE_URL`：描述变更请求的GitHub议题的URL。\n\n12步工作流：\n1. **重复检查**：搜索是否有重复的议题\n2. **文档检查**：确认该功能是否已实现\n3. **调研**：通过网络搜索澄清规范并找到最佳实践\n4. **澄清**：确保需求清晰；若不清晰则提出带有选项的问题（工作流将暂停，直到得到回答）\n5. **文档变更**：分析需要哪些文档更新\n6. **识别开发单元**：找出受影响的提示词、代码、示例和测试\n7. **架构审查**：识别架构决策；如有需要则提出带有选项的问题（工作流将暂停，直到得到回答）\n8. **分析变更**：设计提示词修改方案\n9. **实施变更**：在隔离的Git工作树中修改提示词\n10. **识别问题**：审查变更是否存在问题（属于评审循环的一部分）\n11. **修复问题**：修复已识别的问题（属于评审循环的一部分，最多迭代5次）\n12. 
**创建PR**：创建一个链接到该议题的拉取请求\n\n**工作流恢复**：第4步和第7步可能会暂停工作流，以提出澄清或架构相关的问题。当这种情况发生时，请在GitHub议题中回答这些问题，然后再次运行`pdd change`。工作流将从上次中断的地方继续执行，跳过已完成的步骤以节省Token。\n\n**跨机器恢复**：默认情况下，工作流状态会存储在GitHub议题的一个隐藏评论中，从而允许从任何机器上恢复。如果您在机器A上启动了工作流，可以在机器B上检出分支并再次运行`pdd change`来继续。您可以使用`--no-github-state`选项来禁用此功能，而仅使用本地状态持久化。您也可以设置环境变量`PDD_NO_GITHUB_STATE=1`来全局禁用GitHub状态保存。\n\n**评审循环**：第10至11步构成了一个评审循环，用于迭代地识别和修复问题。该循环将持续进行，直到不再发现任何问题为止（最多5次迭代）。\n\n**工作树分支行为**：当运行`pdd change`或`pdd bug`时，会基于您当前的HEAD创建一个新的Git工作树：\n- **从main\u002Fmaster分支**：分支基于最新的main分支——创建独立的PR\n- **从特性分支**：分支继承该分支上的提交——适用于堆叠式或依赖性PR\n\n如果您希望进行独立的更改，请从main分支运行该命令。当您从非main分支运行时，系统会显示警告。\n\n代理模式示例：\n```bash\npdd change https:\u002F\u002Fgithub.com\u002Fmyorg\u002Fmyrepo\u002Fissues\u002F239\n```\n\n工作流完成后，会自动创建一个链接到该议题的PR。该PR包含一个`sync_order.sh`脚本，用于按依赖顺序运行`pdd sync`命令。请审查该PR，并在合并后运行`.\u002Fsync_order.sh`以重新生成代码。\n\n**手动模式（旧版）：**\n```\npdd [全局选项] change --manual [选项] CHANGE_PROMPT_FILE INPUT_CODE INPUT_PROMPT_FILE\n```\n\n参数：\n- `CHANGE_PROMPT_FILE`：包含如何修改输入提示词文件的说明的文件名。\n- `INPUT_CODE`：由输入提示词文件生成的代码文件名，或在使用`--csv`选项时，包含代码文件的目录。\n- `INPUT_PROMPT_FILE`：将被修改的提示词文件的文件名。标准模式下为必填项；使用`--csv`选项时则无需提供。\n\n选项：\n- `--budget FLOAT`：设置变更过程允许的最大成本（默认为5.0美元）。\n- `--output LOCATION`：指定保存修改后提示词文件的位置。默认文件名为`modified_\u003Cbasename>.prompt`。如果设置了环境变量`PDD_CHANGE_OUTPUT_PATH`，文件将保存在该路径下，除非在此处覆盖。\n- `--csv`：使用CSV文件来代替单个变更提示词文件。CSV文件应包含两列：`prompt_name`和`change_instructions`。使用此选项时，无需提供`INPUT_PROMPT_FILE`，而`INPUT_CODE`应为存放代码文件的目录。该命令期望CSV中的提示词名称遵循`\u003Cbasename>_\u003Clanguage>.prompt`的命名规则。对于CSV中的每个`prompt_name`，它将在指定的`INPUT_CODE`目录中查找对应的代码文件（例如`\u003Cbasename>.\u003Clanguage_extension>`）。输出文件将覆盖现有文件，除非指定了`--output LOCATION`。如果`LOCATION`是一个目录，则修改后的提示词文件将按照默认命名规则保存在该目录中；否则，如果指定了CSV文件名，则修改后的提示词将保存在该CSV文件中，并新增‘prompt_name’和‘modified_prompt’两列。\n\n手动单个提示词变更示例：\n```\npdd [全局选项] change --manual --output modified_factorial_calculator_python.prompt changes_factorial.prompt src\u002Ffactorial_calculator.py 
factorial_calculator_python.prompt\n```\n\n手动批量变更使用CSV示例：\n```\npdd [全局选项] change --manual --csv --output modified_prompts\u002F changes_batch.csv src\u002F\n```\n\n### 9. 更新\n\n根据代码变更更新提示。此命令主要有两种模式：\n\n**代理式提示优化（默认）**\n\n`update` 命令默认使用代理型 AI（Claude Code、Gemini 或 Codex）生成简洁、高质量的提示。该代理拥有完整的文件访问权限，并执行以下四步优化流程：\n\n1. **评估差异**：读取提示内容（包括所有 `\u003Cinclude>` 文件），并与修改后的代码进行比较。\n2. **依据指南和测试筛选**：参考 `docs\u002Fprompting_guide.md` 和现有测试，确定哪些内容应保留在提示中。\n3. **去除重复**：移除与已包含文件内容重复的部分。\n4. **验证**：确保提示对人类可读，并且能够可靠地重新生成代码。\n\n这样生成的提示更加简洁，同时保持对开发人员清晰易懂，并且在代码生成方面具有可靠性。\n\n**前提条件**：需要安装并配置以下 CLI 工具之一：\n- `claude`（Anthropic Claude Code）\n- `gemini`（Google Gemini CLI）\n- `codex`（OpenAI Codex CLI）\n\n如果未找到可用的代理型 CLI，该命令将自动回退到传统的两阶段 LLM 更新流程。\n\n**测试感知更新**：当某个模块存在测试文件时（例如 `test_my_module.py`、`test_my_module_1.py`），代理式更新会自动发现并考虑这些测试。通过测试验证的行为无需在提示中显式指定，从而生成更简洁的提示。\n\n**模式：**\n\n1. **整个仓库模式（默认）**：不带任何文件参数运行时，`pdd update` 会扫描整个仓库。它会查找所有代码\u002F提示对，创建缺失的提示文件，并基于最新的 Git 更改更新所有提示。这是使整个项目保持同步的最简单方式。\n\n2. **单文件模式**：当提供文件参数时，该命令将针对特定文件操作。此模式有三种不同的用法：\n\n    **A) 提示生成\u002F重新生成**\n    要从头为代码文件生成全新的提示，或重新生成现有提示，只需提供该代码文件的路径即可。这将创建一个新的提示文件，或覆盖现有的提示文件。\n    ```bash\n    pdd update \u003Cpath\u002Fto\u002Fyour_code_file.py>\n    ```\n\n    **B) 使用 Git 更新提示**\n    通过将修改后的代码与上次提交版本进行对比来更新现有提示。此操作需要提示文件和修改后的代码文件。\n    ```bash\n    pdd update --git \u003Cpath\u002Fto\u002Fprompt.prompt> \u003Cpath\u002Fto\u002Fmodified_code.py>\n    ```\n\n    **C) 手动更新提示**\n    通过手动提供原始代码、修改后的代码和提示来更新现有提示。适用于无法或不需要使用 Git 历史记录的场景。\n    ```bash\n    pdd update \u003Cpath\u002Fto\u002Fprompt.prompt> \u003Cpath\u002Fto\u002Fmodified_code.py> \u003Cpath\u002Fto\u002Foriginal_code.py>\n    ```\n\n```bash\n# 整个仓库模式（无参数）\npdd [全局选项] update\n\n# 单文件模式：示例\n# 为代码文件生成\u002F重新生成提示\npdd [全局选项] update src\u002Fmy_new_module.py\n\n# 使用 Git 历史更新现有提示\npdd [全局选项] update --git factorial_calculator_python.prompt src\u002Fmodified_factorial_calculator.py\n\n# 手动提供原始代码更新现有提示\npdd [全局选项] update factorial_calculator_python.prompt 
src\u002Fmodified_factorial_calculator.py src\u002Foriginal_factorial_calculator.py\n\n# 按文件扩展名过滤的仓库级更新\npdd [全局选项] update --extensions py,js\n```\n\n参数：\n- `MODIFIED_CODE_FILE`：被修改的代码文件名，或需生成\u002F重新生成提示的代码文件名。\n- `INPUT_PROMPT_FILE`：（可选）用于生成原始代码的提示文件名。在真正的更新场景（B 和 C）中为必填项。\n- `INPUT_CODE_FILE`：（可选）原始代码文件名。仅在手动更新（C）时为必填项；使用 `--git` 时（B）则无需提供；而在生成（A）时则不适用。\n\n**重要提示**：默认情况下，此命令会覆盖原始提示文件，以维持 PDD 的核心原则——“提示即事实来源”。\n\n选项：\n- `--output LOCATION`：指定保存更新后提示文件的位置。**若未指定，则会覆盖原始提示文件，以保持其作为权威事实来源的地位。** 如果设置了环境变量 `PDD_UPDATE_OUTPUT_PATH`，则仅在明确省略了 `--output` 参数且希望使用不同默认位置时才会生效。\n- `--git`：利用 Git 历史记录查找原始代码文件，从而无需提供 `INPUT_CODE_FILE` 参数。\n- `--extensions EXTENSIONS`：在仓库级模式下，仅对指定逗号分隔扩展名的文件进行更新（例如 `py,js,ts`）。\n- `--simple`：使用传统的两阶段 LLM 更新流程，而非默认的代理式模式。当无法使用代理型 CLI 或需要更快更新时非常有用。\n\n示例（覆盖原始提示——默认行为）：\n```\npdd [全局选项] update factorial_calculator_python.prompt src\u002Fmodified_factorial_calculator.py src\u002Foriginal_factorial_calculator.py\n# 这将原地覆盖 factorial_calculator_python.prompt\n```\n\n示例（代理式与简单模式对比）：\n```bash\n# 默认：代理式模式（使用 claude\u002Fgemini\u002Fcodex 进行智能优化）\npdd update --git my_module_python.prompt src\u002Fmy_module.py\n\n# 传统：简单的两阶段 LLM 更新（速度更快，无需代理型 CLI）\npdd update --simple --git my_module_python.prompt src\u002Fmy_module.py\n```\n\n### 10. detect\n\n分析提示文件列表和变更描述，以确定哪些提示需要更改。\n\n```\npdd [GLOBAL OPTIONS] detect [OPTIONS] PROMPT_FILES... CHANGE_FILE\n```\n\n参数：\n- `PROMPT_FILES`：可能需要更改的提示文件名列表。\n- `CHANGE_FILE`：其内容描述了需要分析并可能应用于提示的变更的文件名。\n\n选项：\n- `--output LOCATION`：指定保存包含分析结果的 CSV 文件的位置。默认文件名为 `\u003Cchange_file_basename>_detect.csv`。如果设置了环境变量 `PDD_DETECT_OUTPUT_PATH`，则文件将保存在该路径下，除非被此选项覆盖。\n- `--stories`：运行用户故事验证模式。设置后，不允许使用位置参数 `PROMPT_FILES... 
CHANGE_FILE`。\n- `--stories-dir DIR`：包含 `story__*.md` 文件的目录（仅限故事模式）。\n- `--prompts-dir DIR`：包含 `.prompt` 文件的目录（仅限故事模式）。\n- `--include-llm`：在故事模式中包含 `*_llm.prompt` 文件。\n- `--fail-fast\u002F--no-fail-fast`：在故事模式中遇到第一个失败的故事时停止（默认为 `--fail-fast`）。\n  - 在故事模式下，PDD 会从每个故事中读取可选的 `pdd-story-prompts` 元数据，以进行提示子集（多提示）验证。\n  - 如果缺少元数据，则验证将使用所有提示，并且可以自动将检测到的提示链接缓存在故事文件中。\n\n示例：\n```\npdd [GLOBAL OPTIONS] detect --output detect_results.csv factorial_calculator_python.prompt data_processing_python.prompt web_scraper_python.prompt changes_description.prompt\n```\n\n### 11. conflicts\n\n分析两个提示文件，找出它们之间的冲突，并提出解决这些冲突的建议。\n\n```\npdd [GLOBAL OPTIONS] conflicts [OPTIONS] PROMPT1 PROMPT2\n```\n\n参数：\n- `PROMPT1`：我们要比较的一对提示中的第一个提示。\n- `PROMPT2`：我们要比较的一对提示中的第二个提示。\n\n选项：\n- `--output LOCATION`：指定保存包含冲突分析结果的 CSV 文件的位置。默认文件名为 `\u003Cprompt1_basename>_\u003Cprompt2_basename>_conflict.csv`。如果设置了环境变量 `PDD_CONFLICTS_OUTPUT_PATH`，则文件将保存在该路径下，除非被此选项覆盖。\n\n示例：\n```\npdd [GLOBAL OPTIONS] conflicts --output conflicts_analysis.csv data_processing_module_python.prompt data_visualization_module_python.prompt \n```\n\n`detect` 和 `conflicts` 命令都会生成一个包含以下列的 CSV 文件：`prompt_name` 和 `change_instructions`。此 CSV 文件可用作 `change --csv` 命令的输入。\n\n### 12. 
crash\n\n修复导致程序崩溃的代码模块及其调用程序中的错误。\n\n```\npdd [GLOBAL OPTIONS] crash [OPTIONS] PROMPT_FILE CODE_FILE PROGRAM_FILE ERROR_FILE\n```\n\n参数：\n- `PROMPT_FILE`：生成代码模块的提示文件名。\n- `CODE_FILE`：导致崩溃并将会被修改以正常运行的代码模块文件名。\n- `PROGRAM_FILE`：正在运行代码模块的程序文件名。如果有必要修复崩溃问题，该文件也会被修改。\n- `ERROR_FILE`：包含程序运行时错误信息的文件名。\n\n选项：\n- `--output LOCATION`：指定保存修复后的代码文件的位置。默认文件名为 `\u003Cbasename>_fixed.\u003Clanguage_extension>`。如果设置了环境变量 `PDD_CRASH_OUTPUT_PATH`，则文件将保存在该路径下，除非被此选项覆盖。\n- `--output-program LOCATION`：指定保存修复后的程序文件的位置。默认文件名为 `\u003Cprogram_basename>_fixed.\u003Clanguage_extension>`。\n- `--loop`：启用迭代修复过程。\n  - `--max-attempts INT`：设置放弃前的最大修复尝试次数（默认为 3 次）。\n  - `--budget FLOAT`：设置修复过程允许的最大成本（默认为 $5.0）。\n- `--agentic-fallback \u002F --no-agentic-fallback`：启用或禁用代理回退模式（默认为启用）。\n\n当使用 `--loop` 选项时，crash 命令将通过多次迭代尝试修复错误。它会在每次修复尝试后使用程序检查代码是否能正确运行。该过程将持续进行，直到错误被修复、达到最大尝试次数或预算耗尽为止。\n\n如果迭代过程失败，将触发代理回退模式（除非使用 `--no-agentic-fallback` 禁用）。此模式会使用具备项目感知能力的 CLI 代理，在更广泛的上下文中尝试修复问题。为此，您需要至少安装一个受支持的代理 CLI（Claude、Gemini 或 Codex），并在您的环境中配置相应的 API 密钥。\n\n示例：\n```\npdd [GLOBAL OPTIONS] crash --output fixed_data_processor.py --output-program fixed_main_pipeline.py data_processing_module_python.prompt crashed_data_processor.py main_pipeline.py crash_errors.log\n```\n\n带有循环选项的示例：\n```\npdd [GLOBAL OPTIONS] crash --loop --max-attempts 5 --budget 10.0 --output fixed_data_processor.py --output-program fixed_main_pipeline.py data_processing_module_python.prompt crashed_data_processor.py main_pipeline.py crash_errors.log\n```\n\n### 13. trace\n\n找到提示文件与生成代码之间的对应行号。\n\n```\npdd [GLOBAL OPTIONS] trace [OPTIONS] PROMPT_FILE CODE_FILE CODE_LINE\n```\n\n参数：\n- `PROMPT_FILE`：生成代码的提示文件名。\n- `CODE_FILE`：要分析的代码文件名。\n- `CODE_LINE`：调试器跟踪所在的代码文件中的行号。\n\n选项：\n- `--output LOCATION`：指定保存跟踪分析结果的位置。默认文件名为 `\u003Cbasename>_trace_results.log`。\n\n示例：\n```\npdd [GLOBAL OPTIONS] trace --output trace_results.log factorial_calculator_python.prompt src\u002Ffactorial_calculator.py\n```\n\n这将打印出提示文件中与代码行对应的行号。\n\n### 14. 
bug\n\n从 GitHub 问题生成单元测试。该问题既是错误输出的权威来源，也是预期行为的依据。智能体工作流会分析该问题、复现 Bug，并创建一个失败的测试用例。\n\n```\npdd [全局选项] bug \u003CGitHub问题URL>\npdd [全局选项] bug --manual PROMPT_FILE CODE_FILE PROGRAM_FILE CURRENT_OUTPUT DESIRED_OUTPUT\n```\n\n**工作原理（分步说明，附带 GitHub 评论）：**\n\n1. **重复问题检查** - 搜索是否存在描述相同问题的现有问题。如果找到，则合并内容并关闭重复问题，同时发布包含检查结果的评论。\n\n2. **文档检查** - 审查仓库文档，以确定这是真正的 Bug 还是用户操作失误。发布包含检查结果的评论。\n\n3. **分类处理** - 评估是否已提供足够的信息以继续处理。如果问题中已包含详细的根因分析，包括文件路径、行号及因果解释，则直接进入根因分析步骤（跳过 API 研究和复现）。若信息不足，则发布请求补充信息的评论。\n\n4. **复现问题** - 尝试在本地复现该问题。发布确认复现成功（或无法复现）的评论。当步骤 3 直接进入根因分析时，此步骤将被跳过。\n\n5. **根因分析** - 通过实验确定根本原因。评估修复方案是局部性的还是跨模块的。进行变量引用审计，以查找并行代码路径中的类似 Bug；同时执行状态对称性检查，以发现保存与恢复之间的不对称问题。发布解释根因的评论。\n\n5.5. **提示分类** - 判断 Bug 是出在代码实现中，还是出在提示规范本身。如果提示存在缺陷，则自动修复提示文件。发布包含分类结果及任何提示更改的评论。若不确定，则默认为“代码 Bug”。\n\n6. **测试计划** - 设计用于检测该问题的测试方案。枚举所有受影响的输出通道以及所有不同的代码路径（首次运行、恢复、重试、错误恢复），以确保全面覆盖。优先选择将测试追加到现有测试文件中，而非新建测试文件。发布包含测试计划的评论。\n\n7. **生成测试** - 创建失败的单元测试。发布包含生成的测试代码的评论。\n\n8. **验证检测效果** - 确认单元测试能够成功检测到该 Bug。根据 Bug 的影响范围，判断是否需要端到端测试（`E2E_NEEDED: yes|no`）。发布确认验证成功的评论。\n\n9. **端到端测试** - 生成并运行端到端测试，以在集成级别验证该 Bug。当步骤 8 输出 `E2E_NEEDED: no` 时，此步骤将被确定性地跳过，从而避免对纯内部 Bug 进行不必要的 LLM 调用。发布包含 E2E 测试结果或跳过原因的评论。\n\n10. 
**创建草稿 PR** - 使用失败的测试创建拉取请求草稿，并将其链接到该问题。发布包含 PR 链接的评论。\n\n参数：\n- `ISSUE_URL`：GitHub 问题 URL（例如 https:\u002F\u002Fgithub.com\u002Fowner\u002Frepo\u002Fissues\u002F123）\n\n选项：\n- `--manual`：使用旧版模式，需显式指定文件参数（PROMPT_FILE、CODE_FILE、PROGRAM_FILE、CURRENT_OUTPUT、DESIRED_OUTPUT）\n- `--output LOCATION`：指定生成的单元测试保存位置。默认为 `test_\u003Cmodule>_bug.py`\n- `--language LANG`：指定单元测试的编程语言（默认为“Python”）。\n- `--timeout-adder FLOAT`：为每一步骤的超时时间增加额外秒数（默认为 0.0）\n- `--no-github-state`：禁用基于 GitHub 问题评论的状态持久化，仅使用本地存储\n\n**跨机器续跑**：默认情况下，工作流状态会存储在 GitHub 问题的隐藏评论中，从而支持从任意设备继续运行。使用 `--no-github-state` 可禁用此功能。您也可以设置环境变量 `PDD_NO_GITHUB_STATE=1`。\n\n示例：\n```bash\n# 智能体模式（推荐）\npdd bug https:\u002F\u002Fgithub.com\u002Fmyorg\u002Fmyrepo\u002Fissues\u002F42\n\n# 手动模式（旧版）\npdd bug --manual prompt.prompt code.py main.py current.txt desired.txt\n```\n\n**下一步——修复 Bug：**\n\n在 `pdd bug` 创建失败测试和拉取请求草稿后，可使用 `pdd fix` 并传入相同的 issue URL，以在所有受影响的开发单元中自动修复失败的测试：\n\n```bash\npdd fix https:\u002F\u002Fgithub.com\u002Fmyorg\u002Fmyrepo\u002Fissues\u002F42\n```\n\n**提示**：如果 `pdd bug` 正确识别了 Bug 并创建了有效的失败测试，则可使用 `--protect-tests` 参数，防止 `pdd fix` 修改这些测试。这样可以确保 LLM 只修复代码，使测试通过：\n\n```bash\npdd fix --protect-tests https:\u002F\u002Fgithub.com\u002Fmyorg\u002Fmyrepo\u002Fissues\u002F42\n```\n\n有关智能体端到端修复工作流的详细信息，请参阅 [fix 命令](#6-fix) 文档。\n\n### 15. 
auto-deps\n\n分析提示文件并搜索潜在的依赖项——包括代码示例和文档文件（模式文档、API 文档、PRD 部分）——以确定这些依赖项并将它们插入到提示中。Auto-deps 会自动确定每个依赖项中需要的部分，并在新的和现有的 `\u003Cinclude>` 标签上发出相应的选择器。它还会自动移除与被包含文档内容重复的冗余内联内容。\n\n```\npdd [全局选项] auto-deps [选项] 提示文件 目录路径\n```\n\n参数：\n- `PROMPT_FILE`：需要分析和插入依赖项的提示文件名。\n- `DIRECTORY_PATH`：用于搜索依赖项\u002F示例文件的目录路径或 glob 模式。支持通配符，如 `*.py` 和 `**\u002F*.py`。您可以传递一个普通目录（例如 `examples\u002F`）或一个 glob（例如 `examples\u002F**\u002F*.py`）。如果您传递一个普通目录（不含通配符），则默认会递归扫描该目录（相当于 `examples\u002F**\u002F*`）。\n\n选项：\n- `--output LOCATION`：指定保存插入了依赖项的修改后提示文件的位置。默认文件名为 `\u003Cbasename>_with_deps.prompt`。如果设置了环境变量 `PDD_AUTO_DEPS_OUTPUT_PATH`，则文件将保存在该路径下，除非被此选项覆盖。\n- `--csv FILENAME`：指定包含或即将包含依赖信息的 CSV 文件。默认为 “project_dependencies.csv”。如果设置了环境变量 `PDD_AUTO_DEPS_CSV_PATH`，则会使用该路径，除非被此选项覆盖。\n- `--force-scan`：强制重新扫描所有潜在的依赖文件，即使它们已存在于 CSV 文件中。\n- `--include-docs`：在依赖发现中包含文档文件（`.md`、`.txt`、`.rst`）。默认：禁用。\n- `--no-dedup`：跳过冗余内联内容的移除步骤。\n- `--concurrency N`：依赖分析的最大并行 LLM 调用数（默认：1）。\n\n当候选文件超过 50 个时，该命令会使用两阶段检索管道：\n1. **嵌入搜索**：对提示和候选文件进行嵌入处理，根据余弦相似度检索前 50 个候选文件。\n2. **LLM 重排序**：使用 LLM 作为评判者，从候选文件中选出最相关的依赖项。\n\n在插入 `\u003Cinclude>` 指令后，该命令会执行一次 **去重步骤**，识别并移除提示中语义上与被包含文档内容重复的内联内容。\n\n该命令维护一个 CSV 文件，包含以下列：\n- `full_path`：依赖文件的完整路径。\n- `file_summary`：对该文件内容和用途的一句话摘要。\n- `key_exports`：该文件中的关键导出内容（函数、类、常量）列表。\n- `dependencies`：该文件所依赖的模块\u002F包列表。\n- `date`：该文件上次被分析的时间戳。\n\n**注意**：使用旧版 3 列格式（不含 `key_exports` 和 `dependencies`）的现有 CSV 文件将在下次运行时自动重新摘要。\n\n示例：\n```\n# 搜索代码示例和文档文件\npdd auto-deps --include-docs my_module_python.prompt \"context\u002F\"\n\n# 仅搜索 Python 示例（跳过文档发现）\npdd auto-deps my_module_python.prompt \"context\u002F*_example.py\"\n\n# 强制重新扫描并自定义并发数\npdd auto-deps --force-scan --concurrency 30 my_module_python.prompt \"context\u002F\"\n\n# 跳过冗余内容移除\npdd auto-deps --no-dedup my_module_python.prompt \"docs\u002F\"\n```\n\n### 16. 
verify\n\n通过执行指定程序（通常是 `example` 命令的输出）并使用 LLM 来判断程序的输出是否符合原始提示的意图，从而验证生成代码的功能正确性。无需单独的预期输出文件；LLM 会自行判断行为是否符合提示要求。如果验证失败，它会根据判断出的差异迭代尝试修复代码，类似于 `fix` 和 `crash` 命令如何分别处理各自的错误信号。\n\n```bash\npdd [全局选项] verify [选项] 提示文件 代码文件 程序文件\n```\n\n参数：\n- `PROMPT_FILE`：生成待验证代码的提示文件名。\n- `CODE_FILE`：待验证并可能修复的代码文件名。\n- `PROGRAM_FILE`：用于验证的可执行程序文件名（例如由 `pdd example` 生成的示例脚本）。该程序运行后的输出将由 LLM 进行评判。\n\n选项：\n- `--output-results LOCATION`：指定保存验证和修复结果日志的位置。该日志通常包含最终状态（通过\u002F未通过）、尝试次数、总成本，以及可能的 LLM 推理过程或识别出的问题。默认：`\u003Cbasename>_verify_results.log`。如果设置了环境变量 `PDD_VERIFY_RESULTS_OUTPUT_PATH`，则文件将保存在该路径下，除非被此选项覆盖。\n- `--output-code LOCATION`：指定在验证尝试结束后保存最终代码文件的位置（即使验证并未完全成功）。默认：`\u003Cbasename>_verified.\u003Clanguage_extension>`。如果设置了环境变量 `PDD_VERIFY_CODE_OUTPUT_PATH`，则文件将保存在该路径下，除非被此选项覆盖。\n- `--output-program LOCATION`：指定在验证尝试结束后保存最终程序文件的位置（即使验证并未完全成功）。默认文件名为 `\u003Cprogram_basename>_verified.\u003Clanguage_extension>`。如果设置了环境变量 `PDD_VERIFY_PROGRAM_OUTPUT_PATH`，则文件将保存在该路径下，除非被此选项覆盖。\n- `--max-attempts INT`：设置验证循环中允许的最大修复尝试次数（默认为 3 次）。\n- `--budget FLOAT`：设置整个验证和迭代修复过程允许的最大成本（默认为 $5.0）。\n- `--agentic-fallback \u002F --no-agentic-fallback`：启用或禁用代理回退模式（默认：启用）。\n\n如果首次运行 `PROGRAM_FILE` 产生的输出被 LLM 根据 `PROMPT_FILE` 判断为不正确，该命令将进入迭代模式。每次对 `CODE_FILE` 进行修复后，都会重新运行 `PROGRAM_FILE` 并再次评估其输出。这一过程将持续进行，直到输出被判定为正确、达到 `--max-attempts` 的限制，或耗尽 `--budget`。在循环过程中可能会生成中间代码文件，类似于 `fix` 命令的行为。\n\n输出：\n- 最终代码文件保存在 `--output-code` 指定的位置（只要指定了该位置就会始终写入，即使验证未能完全成功也能进行检查）。\n- 最终程序文件保存在 `--output-program` 指定的位置（只要指定了该位置就会始终写入，即使验证未能完全成功也能进行检查）。\n- 结果日志文件保存在 `--output-results` 指定的位置，详细记录了整个过程及结果。\n- 可能会在修复循环中生成中间代码文件（按时间戳命名）。\n\n示例：\n```bash\n# 验证 calc.py，方法是运行 examples\u002Frun_calc.py，并根据 prompts\u002Fcalc_py.prompt 判断其输出\n\n# 如果验证失败，使用2.50美元的预算最多尝试修复5次。\npdd verify --max-attempts 5 --budget 2.5 --output-code src\u002Fcalc_verified.py --output-results results\u002Fcalc_verify.log prompts\u002Fcalc_py.prompt src\u002Fcalc.py examples\u002Frun_calc.py\n```\n\n**适用场景**：在 `generate` 和 `example` 之后使用 `verify` 
进行初始的功能验证，并根据 *LLM 对程序输出与提示之间一致性的判断* 自动修复代码。这有助于确保代码在进入更细粒度的单元测试（`test`）、修复特定运行时错误（`crash`）或单元测试失败（`fix`）之前，能够为关键场景生成符合提示意图的结果。\n\n### 17. checkup\n\n针对 GitHub 问题对项目进行自动化健康检查。checkup 工作流会探索项目、识别问题（缺失依赖、构建错误、接口不匹配、测试失败、孤立页面、API 模式不一致），并可选择性地修复这些问题，编写回归测试和端到端测试，最后创建一个 Pull Request。\n\n```\npdd [全局选项] checkup [选项] GITHUB_ISSUE_URL\n```\n\n参数：\n- `GITHUB_ISSUE_URL`：描述要检查内容的 GitHub 问题 URL（例如：“检查整个 CRM 应用”）\n\n选项：\n- `--no-fix`：仅报告模式 — 发现并报告问题，但不应用修复\n- `--timeout-adder FLOAT`：为每个步骤的超时时间增加额外秒数（默认：0.0）\n- `--no-github-state`：禁用 GitHub 状态持久化，仅使用本地状态\n\n**工作流程（8 步骤，包含迭代修复-验证循环）：**\n\n1. **发现** — 扫描项目结构、技术栈和模块清单\n2. **依赖审计** — 检查所有导入是否解析成功，无缺失包，无循环依赖\n3. **构建检查** — 运行构建\u002F编译命令，检查语法\u002F类型错误\n4. **接口检查** — 验证跨模块接口、前端导航可达性以及 API 调用一致性\n5. **测试执行** — 运行完整测试套件，识别失败项\n6. **修复问题**（3 个子步骤）：\n   - 6a. 修复发现的问题（缺失依赖、导入、接口、构建错误、孤立页面、API 模式）\n   - 6b. 为每次修复编写回归测试\n   - 6c. 为跨模块交互编写端到端\u002F集成测试\n7. **验证** — 重新运行构建和测试以确认所有修复有效\n8. **创建 PR** — 创建包含所有修复和测试的 Pull Request\n\n**迭代修复-验证循环**：步骤 3–7 会循环执行（最多 3 次）。如果步骤 7 发现仍有未解决的问题，工作流将回到步骤 3 再次执行。当步骤 7 报告“所有问题已修复”或达到最大迭代次数时，循环结束。\n\n**Git 工作树隔离**：所有修复步骤都在一个隔离的 Git 工作树中执行（`checkup\u002Fissue-{N}` 分支），从而保持用户的工作目录整洁。\n\n**跨机器恢复**：工作流状态存储在一个隐藏的 GitHub 评论中，允许从任何机器恢复。使用 `--no-github-state` 可以禁用此功能。\n\n**仅报告模式**：使用 `--no-fix` 可以只运行步骤 1–5 和 7，而不进行修复——这对于在不做出更改的情况下审计项目的健康状况非常有用。\n\n每一步都会将其发现结果作为评论发布到 GitHub 问题中，提供详细的审计记录。\n\n示例：\n```bash\n# 完整检查并修复\npdd checkup https:\u002F\u002Fgithub.com\u002Fmyorg\u002Fmyrepo\u002Fissues\u002F42\n\n# 仅报告模式（不应用修复）\npdd checkup --no-fix https:\u002F\u002Fgithub.com\u002Fmyorg\u002Fmyrepo\u002Fissues\u002F42\n\n# 为大型项目增加超时时间\npdd checkup --timeout-adder 120 https:\u002F\u002Fgithub.com\u002Fmyorg\u002Fmyrepo\u002Fissues\u002F42\n```\n\n### 18. 
connect\n\n**【推荐入口】** 在 `localhost:9876` 启动 PDD 的基于 Web 的界面。\n\nWeb 界面提供：\n- **命令执行**：运行任何 PDD 命令（`pdd change`、`pdd bug`、`pdd fix`、`pdd sync` 等），并提供可视化反馈\n- **文件浏览器**：查看和编辑项目中的提示、代码和测试\n- **远程访问**：通过 PDD Cloud，可在任何浏览器中访问您的会话\n- **会话管理**：运行多个具有自定义名称的会话\n\n```bash\npdd [全局选项] connect [选项]\n```\n\n选项：\n- `--port INT`：监听的端口（默认：9876）\n- `--host TEXT`：绑定的主机（默认：127.0.0.1）\n- `--allow-remote`：允许非 localhost 连接。启用后，服务器将绑定到 0.0.0.0，以接受外部连接\n- `--token TEXT`：用于身份验证的 Bearer 令牌。建议在使用 `--allow-remote` 时使用\n- `--no-browser`：启动服务器时不自动打开浏览器\n- `--frontend-url TEXT`：自定义前端 URL，代替默认 URL\n- `--local-only`：跳过云注册，仅在本地模式下运行。会话将无法通过 PDD Cloud 远程访问\n- `--session-name TEXT`：自定义会话名称，便于识别。在运行多个会话时很有用\n\n该命令会启动一个 FastAPI 服务器，并自动在您的默认浏览器中打开 Web 界面。服务器还提供：\n- 用于以编程方式访问 PDD 命令的 REST API\n- API 文档，地址为 `http:\u002F\u002Flocalhost:9876\u002Fdocs`\n\n**远程会话注册：**\n默认情况下，`pdd connect` 会向 PDD Cloud 注册，使您能够从任何浏览器远程访问您的会话。会话将在正常关闭（Ctrl+C）时自动注销。\n\n安全注意事项：\n- 默认情况下，服务器仅接受来自 localhost（127.0.0.1）的连接。\n- 使用 `--allow-remote` 而不使用 `--token` 会显示安全警告，并需要确认。\n- 对于远程访问，始终使用 `--token` 选项以要求身份验证。\n\n示例：\n```bash\n# 使用默认设置启动服务器（自动打开浏览器）\npdd connect\n\n# 在自定义端口上启动，不打开浏览器\npdd connect --port 8080 --no-browser\n\n# 允许带身份验证的远程连接\npdd connect --allow-remote --token \"your-secret-token\"\n\n# 仅在本地模式下运行（不注册云）\npdd connect --local-only\n\n# 使用自定义会话名称以便识别\npdd connect --session-name \"my-dev-server\"\n```\n\n**适用场景**：当您更喜欢使用图形界面来操作 PDD、向他人演示 PDD，或者将 PDD 与可通过 REST API 通信的其他工具集成时，可以使用 `connect`。\n\n### 19. 
auth\n\n管理与 PDD Cloud 的身份验证。`auth` 命令提供用于登录、登出、检查状态和获取身份验证令牌的子命令。\n\n```bash\npdd [全局选项] auth 子命令 [选项]\n```\n\n#### 子命令\n\n##### auth login\n\n登录 PDD Cloud。会打开一个网页浏览器，通过临时验证码完成身份验证过程。\n\n```bash\npdd auth login\n```\n\n##### auth status\n\n显示当前活动账户和身份验证状态。如果已认证，退出码为 0；否则为 1。\n\n```bash\npdd auth status [选项]\n```\n\n**选项：**\n- `--verify`：通过实际尝试刷新令牌来验证身份。如果不使用此标志，则仅检查缓存的凭据。\n\n**示例：**\n```bash\n# 快速检查（使用缓存凭据）\npdd auth status\n\n# 深度验证（尝试刷新令牌）\npdd auth status --verify\n```\n\n**注意**：如果仅存在刷新令牌而没有缓存的 JWT，状态将显示警告，提示令牌已过期，并在下次使用时刷新。使用 `--verify` 可以实际测试刷新是否成功，或者运行 `pdd auth login` 来立即刷新令牌。\n\n##### auth logout\n\n移除本地存储的 PDD Cloud 账户认证配置。\n\n```bash\npdd auth logout\n```\n\n##### auth token\n\n输出当前账户的认证令牌。对于脚本或程序化访问 PDD Cloud 非常有用。\n\n```bash\npdd auth token [OPTIONS]\n```\n\n**选项：**\n- `--format [raw|json]`：令牌的输出格式。使用 `raw` 只输出令牌字符串（默认），或使用 `json` 输出包含令牌和过期时间的结构化信息。\n\n**何时使用**：使用 `auth` 命令来管理你的 PDD Cloud 认证状态。在使用云功能之前使用 `auth login` 进行认证，使用 `auth status` 验证当前会话，并在需要将凭据传递给脚本或其他工具时使用 `auth token`。\n\n### 20. `pdd sessions` - 管理远程会话\n\n`sessions` 命令组允许你管理注册到 PDD Cloud 的远程会话。远程会话使你可以通过 Web 前端控制运行在其他机器上的 PDD 实例。\n\n#### 列出会话\n\n```bash\npdd sessions list\npdd sessions list --json\n```\n\n列出与你已认证账户关联的所有活动远程会话。使用 `--json` 可获得机器可读的输出。\n\n#### 会话信息\n\n```bash\npdd sessions info \u003Csession_id>\n```\n\n显示特定会话的详细信息，包括项目名称、云 URL、状态和最后的心跳时间。\n\n#### 清理会话\n\n```bash\npdd sessions cleanup --stale\npdd sessions cleanup --all\npdd sessions cleanup --all --force\n```\n\n**选项：**\n- `--stale`：仅移除过期会话（无最近心跳）\n- `--all`：移除当前用户的所有会话\n- `--force`：跳过确认提示\n\n**注意**：会话会在运行 `pdd connect` 时自动注册（除非指定 `--local-only`），并在正常关闭时注销。如果 `pdd connect` 实例被非正常终止，可以使用 `pdd sessions cleanup` 手动移除孤立会话。\n\n**何时使用**：使用 `sessions list` 发现可用的远程会话，使用 `sessions info` 查看会话详情，以及使用 `sessions cleanup` 移除过期或孤立会话。\n\n### 21. extracts\n\n提示中的 `\u003Cinclude query=\"...\">file\u003C\u002Finclude>` 标签会触发由 LLM 驱动的语义提取，并自动缓存在 `.pdd\u002Fextracts\u002F` 中。结果会 **自动刷新**：如果源文件发生变化，PDD 会在下次处理 `\u003Cinclude ... 
query>` 标签时自动重新提取并更新缓存。\n\n```bash\n# 移除未被任何提示引用的孤立缓存条目\npdd extracts prune\n```\n\n### 22. Firecrawl 网页抓取缓存\n\n通过提示中的 `\u003Cweb>` 标签抓取的网页内容会进行 **自动缓存**。默认情况下，缓存结果会保留 24 小时，从而减少 API 信用的使用。\n\n**工作原理：**\n- 透明且自动，无需手动管理\n- 缓存内容存储在 `PROJECT_ROOT\u002F.pdd\u002Fcache\u002Ffirecrawl.db`\n- 访问时会自动跳过已过期的条目\n- URL 规范化（移除跟踪参数，不区分大小写匹配）\n- 记录访问情况，以便在缓存满时采用 LRU 策略淘汰\n\n**配置（可选）：**\n```bash\nexport FIRECRAWL_CACHE_ENABLE=false          # 禁用缓存（默认：启用）\nexport FIRECRAWL_CACHE_TTL_HOURS=48          # 缓存有效期为 48 小时（默认：24 小时）\nexport FIRECRAWL_CACHE_MAX_SIZE_MB=200       # 最大缓存大小为 200 MB（默认：100 MB）\nexport FIRECRAWL_CACHE_MAX_ENTRIES=2000      # 最大缓存条目数为 2000（默认：1000）\nexport FIRECRAWL_CACHE_AUTO_CLEANUP=false    # 禁用自动清理（默认：启用）\n```\n\n**缓存管理命令：**\n```bash\npdd firecrawl-cache stats              # 查看缓存统计信息\npdd firecrawl-cache clear              # 清除所有缓存条目\npdd firecrawl-cache info               # 查看缓存配置\npdd firecrawl-cache check \u003Curl>        # 检查某个 URL 是否已被缓存\n```\n\n**何时使用**：缓存是自动进行的。使用 `stats` 查看缓存状态，使用 `info` 查看配置，使用 `check` 验证某个 URL 是否已被缓存，或使用 `clear` 强制重新抓取所有 URL。\n\n## 示例审查流程\n\n当全局 `--review-examples` 选项与任何命令一起使用时，PDD 会展示可能用于当前操作的少量示例。审查流程如下：\n\n1. PDD 显示潜在少量示例的输入（但不显示输出）。\n2. 对于每个示例，你可以选择：\n   - 接受该示例（它将被用于操作中）\n   - 排除该示例（它不会被用于本次或未来的操作中）\n   - 跳过该示例（它不会被用于本次操作，但未来可能会再次出现）\n3. 审查完所有示例后，PDD 将使用接受的示例继续执行命令。\n\n此功能使你能够更好地控制 PDD 操作中使用的示例，从而可能提高生成输出的质量和相关性。\n\n## 自动提交示例\n\n当使用带有 `--auto-submit` 选项的 `fix` 命令时，如果修复循环中的所有单元测试都通过，PDD 会自动将示例提交到 PDD Cloud 平台。此功能有助于通过成功的修复不断改进平台的示例数据库。\n\n## 输出位置指定\n\n对于所有生成或修改文件的命令，`--output` 选项（或其变体，如 `split` 命令的 `--output-sub` 或 `--output-modified`）允许灵活指定输出位置：\n\n1. **仅文件名**：如果你只提供文件名（例如 `--output result.py`），文件将在当前工作目录中创建。\n2. **完整路径**：如果你提供完整路径（例如 `--output \u002Fhome\u002Fuser\u002Fprojects\u002Fresult.py`），文件将被创建在该确切位置。\n3. **目录**：如果你提供目录名称（例如 `--output .\u002Fgenerated\u002F`），系统将自动生成文件名，并将其保存在该目录中。\n4. **环境变量**：如果未提供 `--output` 选项，且设置了与该命令相关的环境变量，PDD 将使用该变量指定的路径。否则，它将使用默认命名规则，并将文件保存在当前工作目录中。\n5. 
**无输出位置**：如果未指定输出位置且未设置环境变量，文件将以命令赋予的默认名称保存在当前工作目录中。\n\n## 获取帮助\n\nPDD 提供了全面的帮助功能：\n\n1. **通用帮助**：\n   ```\n   pdd --help\n   ```\n   显示可用的命令和选项列表。\n\n2. **特定命令的帮助**：\n   ```\n   pdd COMMAND --help\n   ```\n   提供特定命令的详细帮助，包括可用选项和使用示例。\n\n## 其他功能\n\n- **Tab 补全**：运行 `pdd setup` 会自动安装 Tab 补全功能。如果只需要刷新补全脚本，可以直接运行 `pdd install_completion`。\n- **彩色输出**：PDD 在兼容的终端中提供彩色输出，以提高可读性。\n\n\n## 配置\n\nPDD 支持多种配置方式，以便为不同的项目结构和上下文自定义其行为。\n\n### 项目配置文件 (.pddrc)\n\n**推荐用于多上下文项目**（例如包含后端和前端的 monorepo）\n\n在项目根目录下创建 `.pddrc` 文件，以定义具有各自设置的不同上下文：\n\n```yaml\n# .pddrc - 提交到版本控制系统\nversion: \"1.0\"\ncontexts:\n  backend:\n    paths: [\"backend\u002F**\", \"api\u002F**\", \"server\u002F**\"]\n    defaults:\n      generate_output_path: \"backend\u002Fsrc\u002F\"\n      test_output_path: \"backend\u002Ftests\u002F\"\n      example_output_path: \"backend\u002Fexamples\u002F\"\n      default_language: \"python\"\n      target_coverage: 90.0\n      strength: 0.8\n  \n  frontend:\n    paths: [\"frontend\u002F**\", \"web\u002F**\", \"ui\u002F**\"]\n    defaults:\n      generate_output_path: \"frontend\u002Fsrc\u002F\"\n      test_output_path: \"frontend\u002F__tests__\u002F\"\n      example_output_path: \"frontend\u002Fexamples\u002F\"\n      default_language: \"typescript\"\n      target_coverage: 85.0\n      strength: 0.7\n  \n  shared:\n    paths: [\"shared\u002F**\", \"common\u002F**\", \"lib\u002F**\"]\n    defaults:\n      generate_output_path: \"shared\u002Flib\u002F\"\n      test_output_path: \"shared\u002Ftests\u002F\"\n      default_language: \"python\"\n      target_coverage: 95.0\n  \n  # 匹配不到路径时的默认上下文\n  default:\n    defaults:\n      generate_output_path: \"src\u002F\"\n      test_output_path: \"tests\u002F\"\n      default_language: \"python\"\n      target_coverage: 90.0\n      strength: 0.5\n```\n\n**上下文检测**：\nPDD 会根据以下规则自动检测合适的上下文：\n1. **当前目录路径**：与各上下文中定义的 `paths` 模式进行匹配\n2. **手动覆盖**：使用 `--context CONTEXT_NAME` 参数显式指定\n3. 
**回退机制**：如果没有路径匹配，则使用 `default` 上下文\n\n**可用的上下文设置**：\n- `prompts_dir`：提示文件所在的目录（默认值为 “prompts”）\n- `generate_output_path`：生成的代码文件保存路径\n- `test_output_path`：测试文件保存路径\n- `example_output_path`：示例文件保存路径\n- `default_language`：该上下文的默认编程语言\n- `target_coverage`：默认的测试覆盖率目标\n- `strength`：默认的 AI 模型强度（0.0–1.0）\n- `temperature`：默认的 AI 模型温度\n- `budget`：迭代命令的默认预算\n- `max_attempts`：修复操作的默认最大尝试次数\n\n**路径行为**：\n- 以 `\u002F` 结尾的路径被视为明确的目录，且**不会保留**子目录的基名（例如，`commands\u002Fanalysis` 将生成 `pdd\u002Fanalysis.py`）。\n- 不带尾部 `\u002F` 的路径，在路径为现有目录时会保留子目录的基名（例如，`commands\u002Fanalysis` 将生成 `pdd\u002Fcommands\u002Fanalysis.py`）。\n\n**使用示例**：\n```bash\n# 根据当前目录自动检测上下文\ncd backend && pdd --force sync calculator     # 使用 backend 上下文\ncd frontend && pdd --force sync dashboard     # 使用 frontend 上下文\n\n# 显式覆盖上下文\npdd --context backend sync calculator\npdd --context frontend sync dashboard\n\n# 列出所有可用上下文\npdd --list-contexts\n```\n\n### 环境变量\n\nPDD 使用多个环境变量来定制其行为：\n\n#### 核心环境变量\n\n- **`PDD_PATH`**：指向 PDD 的根目录。此变量在通过 pip 安装时会自动设置为 PDD 的安装目录。通常无需手动设置。\n- **`PDD_AUTO_UPDATE`**：控制 PDD 是否自动更新自身（默认值：true）。\n- **`PDD_CONFIG_PATH`**：覆盖默认的 `.pddrc` 文件位置（默认：从当前目录向上搜索）。\n- **`PDD_DEFAULT_CONTEXT`**：当未检测到上下文时使用的默认上下文（默认值：`default`）。\n- **`PDD_DEFAULT_LANGUAGE`**：在上下文中未指定时的全局默认编程语言（默认值：`python`）。\n\n#### 代理工作流变量\n\n- **`CLAUDE_MODEL`**：覆盖代理工作流中 Claude CLI 使用的模型（例如 `claude-sonnet-4-5-20250929`）。设置后，会将 `--model` 参数传递给 Claude CLI 命令。无默认值；仅在显式设置时使用。\n- **`PDD_USER_FEEDBACK`**：将来自 GitHub 问题评论的用户反馈注入到代理任务指令中。由 GitHub App 执行者设置，用于传递之前执行尝试的反馈。无默认值。\n- **`PDD_GH_TOKEN_FILE`**：包含最新 GitHub App 安装令牌的文件路径。设置后，端到端修复编排器会在推送认证失败时从此文件读取新令牌并重试一次。该令牌文件由云作业运行程序（pdd_cloud）写入和刷新。无默认值；仅在云托管的作业环境中使用。\n\n#### 输出路径变量\n\n**注意**：使用 `.pddrc` 配置时，上下文特定的设置优先于这些全局环境变量。\n\n- **`PDD_PROMPTS_DIR`**：提示文件的默认存放目录（默认值：`prompts`）。\n- **`PDD_GENERATE_OUTPUT_PATH`**：`generate` 命令的默认输出路径。\n- **`PDD_EXAMPLE_OUTPUT_PATH`**：`example` 命令的默认输出路径。\n- **`PDD_TEST_OUTPUT_PATH`**：单元测试文件的默认输出路径。\n- **`PDD_TEST_COVERAGE_TARGET`**：默认的目标覆盖率百分比。\n- 
**`PDD_PREPROCESS_OUTPUT_PATH`**：`preprocess` 命令的默认输出路径。\n- **`PDD_FIX_TEST_OUTPUT_PATH`**：`fix` 命令中修复后的单元测试文件的默认输出路径。\n- **`PDD_FIX_CODE_OUTPUT_PATH`**：`fix` 命令中修复后的代码文件的默认输出路径。\n- **`PDD_FIX_RESULTS_OUTPUT_PATH`**：`fix` 命令生成的结果文件的默认输出路径。\n- **`PDD_SPLIT_SUB_PROMPT_OUTPUT_PATH`**：`split` 命令生成的子提示文件的默认输出路径。\n- **`PDD_SPLIT_MODIFIED_PROMPT_OUTPUT_PATH`**：`split` 命令生成的修改后提示文件的默认输出路径。\n- **`PDD_CHANGE_OUTPUT_PATH`**：`change` 命令生成的修改后提示文件的默认输出路径。\n- **`PDD_UPDATE_OUTPUT_PATH`**：`update` 命令生成的更新后提示文件的默认输出路径。\n- **`PDD_OUTPUT_COST_PATH`**：成本跟踪 CSV 文件的默认输出路径。\n- **`PDD_DETECT_OUTPUT_PATH`**：`detect` 命令生成的 CSV 文件的默认输出路径。\n- **`PDD_CONFLICTS_OUTPUT_PATH`**：`conflicts` 命令生成的 CSV 文件的默认输出路径。\n- **`PDD_CRASH_OUTPUT_PATH`**：`crash` 命令生成的修复后代码文件的默认输出路径。\n- **`PDD_CRASH_PROGRAM_OUTPUT_PATH`**：`crash` 命令生成的修复后程序文件的默认输出路径。\n- **`PDD_TRACE_OUTPUT_PATH`**：`trace` 命令生成的追踪分析结果的默认输出路径。\n- **`PDD_BUG_OUTPUT_PATH`**：`bug` 命令生成的单元测试文件的默认输出路径。\n- **`PDD_AUTO_DEPS_OUTPUT_PATH`**：`auto-deps` 命令生成的修改后提示文件的默认输出路径。\n- **`PDD_AUTO_DEPS_CSV_PATH`**：`auto-deps` 命令用于存储依赖信息的 CSV 文件的默认路径和文件名。如果设置，将覆盖默认的 `project_dependencies.csv` 文件名。\n- **`PDD_AUTO_DEPS_CONCURRENCY`**：`auto-deps` 依赖分析的默认最大并行 LLM 调用数（默认值：1）。\n- **`PDD_EMBEDDING_MODEL`**：`auto-deps` 中两阶段检索所使用的嵌入模型（默认值：`text-embedding-3-small`）。\n- **`PDD_VERIFY_RESULTS_OUTPUT_PATH`**：`verify` 命令生成的验证结果日志文件的默认输出路径。\n- **`PDD_VERIFY_CODE_OUTPUT_PATH`**：`verify` 命令生成的最终代码文件的默认输出路径。\n- **`PDD_VERIFY_PROGRAM_OUTPUT_PATH`**：`verify` 命令生成的最终程序文件的默认输出路径。\n- **`PDD_CLOUD_TIMEOUT`**：云请求超时时间，单位为秒。默认值为 900 秒（15 分钟）。如果长时间运行的云操作经常超时，请增加此值。\n\n### 配置优先级\n\nPDD 按以下顺序解析配置设置（优先级从高到低）：\n\n1. **命令行选项**（例如 `--output`、`--strength`）\n2. **上下文特定设置**（来自 `.pddrc` 文件）\n3. **全局环境变量**（例如 `PDD_GENERATE_OUTPUT_PATH`）\n4. 
**内置默认值**\n\n### 从环境变量迁移\n\n如果您目前使用环境变量，可以迁移到 `.pddrc` 配置：\n\n```bash\n# 之前：环境变量\nexport PDD_GENERATE_OUTPUT_PATH=backend\u002Fsrc\u002F\nexport PDD_TEST_OUTPUT_PATH=backend\u002Ftests\u002F\nexport PDD_DEFAULT_LANGUAGE=python\n\n# 之后：.pddrc 文件\ncontexts:\n  default:\n    defaults:\n      generate_output_path: \"backend\u002Fsrc\u002F\"\n      test_output_path: \"backend\u002Ftests\u002F\" \n      default_language: \"python\"\n```\n\n对于团队项目，建议使用 `.pddrc` 方法，因为它能够确保所有团队成员的配置一致，并且可以进行版本控制。\n\n### 模型配置（`llm_model.csv`）\n\nPDD 使用一个 CSV 文件（`llm_model.csv`）来存储可用 AI 模型的相关信息，包括其成本、功能以及所需的 API 密钥名称。\n\n在本地运行命令时，PDD 会根据以下优先级确定使用哪个配置文件：\n\n1.  **用户专属：** `~\u002F.pdd\u002Fllm_model.csv` - 如果该文件存在，则优先于任何项目级别的配置。这允许用户维护一套个人的、系统范围内的模型配置。\n2.  **项目专属：** `\u003CPROJECT_ROOT>\u002F.pdd\u002Fllm_model.csv` - 如果未找到用户专属文件，PDD 将在所确定的项目根目录下的 `.pdd` 目录中查找该文件（基于 `PDD_PATH` 或自动检测）。\n3.  **软件包默认：** 如果上述两种情况均不存在，PDD 将回退到随软件包安装提供的默认配置。\n\n这种分层方式既支持共享的项目配置，也允许用户进行个性化覆盖，同时确保 PDD 在无需手动配置的情况下即可开箱即用。\n\n**注意：** 您可以手动编辑此 CSV 文件，但建议通过再次运行 `pdd setup` 来添加提供商和更新模型。\n\n*注：这种基于文件的配置主要影响本地操作和工具。云端执行模式很可能依赖于集中管理的配置。*\n\n\n这些环境变量允许您为每个命令设置默认的输出位置。如果已设置了某个环境变量，但在命令中未使用相应的 `--output` 选项，则 PDD 将使用该环境变量指定的路径。这有助于简化工作流程，减少为常用命令指定输出路径的需求。\n\n例如，如果您设置 `PDD_GENERATE_OUTPUT_PATH=\u002Fpath\u002Fto\u002Fgenerated\u002Fcode\u002F`，那么所有由 `generate` 命令生成的文件都将默认保存到该目录中，除非在命令行中使用 `--output` 选项进行了覆盖。\n\n要设置这些环境变量，您可以将其添加到您的 shell 配置文件中（如 `.bashrc` 或 `.zshrc`），或者在运行 PDD 命令之前直接设置：\n\n```bash\nexport PDD_GENERATE_OUTPUT_PATH=\u002Fpath\u002Fto\u002Fgenerated\u002Fcode\u002F\nexport PDD_TEST_OUTPUT_PATH=\u002Fpath\u002Fto\u002Ftests\u002F\n# ... 
根据需要设置其他变量\n\npdd generate factorial_calculator_python.prompt  # 输出将保存到 \u002Fpath\u002Fto\u002Fgenerated\u002Fcode\u002F\n```\n\n此功能提供了更灵活和个性化的设置方式，尤其适用于团队环境或在具有不同目录结构的多个项目之间工作时。\n\n## 错误处理\n\nPDD 在命令执行过程中出现问题时会提供详细的错误信息。常见的错误场景包括：\n\n- 输入文件无效或格式不正确\n- 读取或写入文件权限不足\n- AI 模型相关错误（如 API 调用失败）\n- 提示超出模型上下文窗口 — PDD 会在发送提示给 LLM 之前验证提示的 token 数量。如果提示超过模型的上下文限制，PDD 会报告 token 数量、模型限制、使用百分比以及导致溢出的具体提示内容。当配置了多个候选模型时，PDD 会自动切换到下一个模型。\n- 生成代码中的语法错误\n\n当发生错误时，PDD 会显示一条描述问题的消息，并在可能的情况下提供解决步骤。\n\n## 云功能\n\n在云模式下运行时（默认），PDD 提供额外的功能：\n\n1. **共享示例**：访问不断增长的社区贡献示例数据库\n2. **自动更新**：最新的改进和错误修复会自动生效\n3. **成本优化**：智能选择模型并使用缓存以最大限度地降低费用\n4. **使用分析**：通过 PDD Cloud 控制面板跟踪团队的使用情况和成本\n5. **协作**：与团队成员共享提示和生成的代码\n\n访问 PDD Cloud 控制面板：https:\u002F\u002Fpromptdriven.ai\u002F\n\n在这里您可以：\n- 查看使用统计信息\n- 管理团队访问权限\n- 配置默认设置\n- 访问共享示例\n- 跟踪成本\n\n## 故障排除\n\n以下是一些常见问题及其解决方案：\n\n1. **命令未找到**：请确保 PDD 已正确安装，并已添加到系统的 PATH 中。\n\n2. **权限拒绝错误**：请检查您是否具有读取输入文件和写入输出位置所需的权限。\n\n3. **AI 模型无响应**：请验证您的互联网连接，并检查 AI 服务的状态。\n\n4. **意外输出**：尝试调整 `--strength` 和 `--temperature` 参数，以微调 AI 模型的行为。\n\n5. **费用过高**：使用 `--output-cost` 选项跟踪使用情况，并为 `fix` 命令的 `--budget` 选项设置适当的预算。\n\n6. **依赖项扫描问题**：如果 `auto-deps` 命令未能识别相关依赖项：\n   - 请检查文件路径和 glob 模式是否正确\n   - 使用 `--force-scan` 选项确保所有文件被重新分析\n   - 验证 CSV 文件的格式和内容\n   - 检查依赖项目录的文件权限\n   - 对于文档依赖项，请确保 `.md`\u002F`.txt`\u002F`.rst` 文件位于搜索路径中，并且已启用 `--include-docs`\n   - 如果冗余内容移除过于激进，可使用 `--no-dedup` 来跳过此步骤\n\n7. **命令超时**：\n   - 请检查您的互联网连接\n   - 尝试使用 `--local` 标志运行以进行比较\n   - 对于长时间运行的操作，可通过 `export PDD_CLOUD_TIMEOUT=1800`（30 分钟）增加超时时间\n   - 如果问题持续存在，请查看 PDD Cloud 的状态页面\n\n8. **上下文窗口溢出**：如果您看到“提示超出上下文限制”的错误：\n   - 错误消息中包含 token 数量和模型限制——请据此判断需要减少多少内容\n   - 缩小提示文件的大小，或将提示拆分为更小的模块\n   - 移除不必要的 `\u003Cinclude>` 指令，或使用有针对性的摘录代替完整文件\n   - 使用具有更大上下文窗口的模型（如拥有 100 万 token 的 Gemini，或自动使用 100 万 token 测试版头文件的 Claude）\n   - 使用 `--verbose` 选项查看确切的 token 数量和上下文使用百分比\n   - 如果使用 `auto-deps`，请审查包含的依赖项，去除不必要的冗余内容\n\n9. 
**同步相关问题**：\n   - **“另一个同步正在运行”**：请检查 `.pdd\u002Flocks\u002F` 目录中是否存在过期锁文件，若相应进程已不存在，则将其删除\n   - **复杂的冲突解决难题**：使用 `pdd --verbose sync --dry-run basename` 查看详细的 LLM 推理过程和决策分析\n   - **状态损坏或意外行为**：删除 `.pdd\u002Fmeta\u002F{basename}_{language}.json` 文件以重置指纹状态\n   - **动画显示问题**：同步操作在后台运行；动画仅为视觉反馈，并不影响功能\n   - **指纹不匹配**：使用 `pdd sync --dry-run basename` 查看检测到的更改内容以及为何推荐执行这些操作\n\n如果您遇到持续性问题，请查阅 PDD 文档或在 GitHub 上提交问题寻求帮助。\n\n## 安全注意事项\n\n使用 PDD 时，请注意以下安全事项：\n\n1. **代码执行**：PDD 会生成并修改代码。在执行之前，务必审查生成的代码，尤其是在生产环境中。\n\n2. **数据隐私**：避免在提示或代码文件中使用敏感数据，因为这些信息可能会被 AI 模型处理。\n\n3. **API 密钥**：如果 PDD 需要 API 密钥来访问 AI 模型，请将这些密钥安全存储，切勿将其包含在版本控制系统中。\n\n4. **输入验证**：PDD 假设输入文件是可信的。如果在多用户或网络环境中使用 PDD，请实施适当的输入验证。\n\n5. **输出处理**：对待输出文件时，应与项目中的其他代码或配置文件一样，采取相同的安全措施。\n\n6. **依赖分析**：使用 `auto-deps` 命令时，需谨慎处理不可信的依赖文件，并在将生成的摘要纳入提示之前进行验证。\n\n在云模式下使用 PDD 时：\n\n1. **身份验证**：\n   - PDD 使用 GitHub SSO 进行安全身份验证\n   - 令牌会安全地存储在您系统的凭据管理器中\n   - 无需手动管理 API 密钥\n\n2. **数据隐私**：\n   - 所有数据在传输和存储过程中均被加密\n   - 提示和生成的代码仅与您的账户相关联\n   - 您可以随时通过仪表板删除您的数据\n\n3. **团队访问权限**：\n   - 通过 GitHub 组织管理团队成员的访问权限\n   - 为不同命令设置细粒度的权限\n   - 跟踪每位团队成员的使用情况\n\n此外：\n- 在生产环境中，可考虑通过设置 `PDD_AUTO_UPDATE=false` 来禁用自动更新。\n- 为生产系统实施受控的更新流程。\n- 在敏感环境中手动更新 PDD 之前，请先查看变更日志。\n\n## 工作流集成\n\nPDD 可以集成到各种开发工作流中。以下是关键工作流模式的概念模型：\n\n### 初始开发\n\n**概念流程**：`auto-deps → generate → example → crash → verify → test → fix`\n\n**目的**：从零开始创建新功能，并进行充分的测试和验证。\n\n**流程**：\n1. 识别并注入提示所需的依赖项（`auto-deps`）。\n2. 根据提示生成完整的实现代码（`generate`）。\n3. 创建可重用的接口示例（`example`）。\n4. 确保代码能够正常运行，并修复运行时错误（`crash`）。\n5. 运行示例，并使用 LLM 检查输出是否符合提示意图；如有必要，尝试迭代修复（`verify`）。\n6. 为实现部分生成全面的单元测试（`test`）。\n7. 修复单元测试中发现的问题（`fix`）。\n\n**关键洞察**：此工作流遵循从概念到验证实现的流程，在检查功能输出（`verify`）和详细单元测试（`test`）之前，先确保代码能够正常运行（`crash`）。\n\n### 代码到提示的更新\n\n**概念流程**：`update → detect → change`\n\n**目的**：在代码更改后，保持提示作为事实来源。\n\n**流程**：\n1. 将直接的代码更改同步回原始提示。\n2. 检测可能受这些更改影响的其他提示。\n3. 
对依赖于这些提示的内容进行必要的调整。\n\n**关键洞察**：这种双向流程确保即使代码先发生变化，提示仍然能保持其作为事实来源的地位。\n\n### 重构\n\n**概念流程**：`split → auto-deps → example`\n\n**目的**：将大型提示分解为模块化组件。\n\n**流程**：\n1. 将大型提示中的特定功能提取出来，形成独立的提示。\n2. 确保新提示具备所有必要的依赖项。\n3. 为提取的功能创建接口示例。\n\n**关键洞察**：正如代码应模块化一样，提示也受益于分解为专注且可重用的组件。\n\n### 调试工作流\n\n#### 提示上下文问题\n**概念流程**：`preprocess → generate`\n\n**目的**：解决提示解释或预处理方面的问题。\n\n**流程**：\n1. 检查提示的预处理方式。\n2. 重新生成代码，以提高提示的清晰度。\n\n#### 运行时崩溃调试\n**概念流程**：`generate → example → crash`\n\n**目的**：修复无法执行的代码。\n\n**流程**：\n1. 根据提示生成初始代码。\n2. 创建示例和测试程序。\n3. 修复运行时错误，使代码可执行。\n\n#### 逻辑错误修复\n**概念流程**：`bug → fix`\n\n**目的**：修正虽然能运行但结果不正确的代码。\n\n**流程**：\n1. 生成能够展示该 bug 的测试用例。\n2. 修复代码，使其通过测试。\n\n#### 调试器引导的分析\n**概念流程**：`trace → [edit prompt]`\n\n**目的**：确定哪些提示部分生成了有问题的代码。\n\n**流程**：\n1. 找出代码行与提示部分之间的对应关系。\n2. 更新相关的提示部分。\n\n### 多提示架构\n\n**概念流程**：`conflicts\u002Fdetect → change → generate → example → test`\n\n**目的**：协调由更高层需求衍生出的多个提示。\n\n**流程**：\n1. 识别提示之间的冲突或依赖关系。\n2. 协调这些提示，使它们协同工作。\n3. 根据更新后的提示重新生成代码。\n4. 在更改后更新接口示例。\n5. 通过测试验证系统集成。\n\n**关键洞察**：复杂系统需要提示之间的协调，正如代码模块之间需要协调一样。\n\n### 功能增强\n\n**概念流程**：`change → generate → example → test → fix`\n\n**目的**：为现有功能添加新能力。\n\n**流程**：\n1. 修改提示以描述新功能。\n2. 重新生成具有增强功能的代码。\n3. 更新示例以展示新功能。\n4. 测试以验证正确实现。\n5. 修复可能出现的问题。\n\n**关键洞察**：功能的添加应基于提示的变化，而不是直接修改代码。\n\n### CI 漂移检测与自动修复\n\n**概念流程**：`detect drift → heal (update\u002Fsync) → commit → push`\n\n**目的**：在 CI 流水线中自动检测并修复提示\u002F示例的漂移。\n\n**流程**：\n1. 使用 `sync_determine_operation` 扫描模块是否存在漂移（无需调用 LLM）。\n2. 对于过时的提示，运行 `pdd update` 将代码更改同步回提示。\n3. 对于过时的示例，运行 `pdd sync` 并执行示例和验证操作。\n4. 将修复后的文件暂存并提交，附上描述性信息。\n5. 
将更改推送到当前分支。\n\n**使用方法**：\n```bash\n# 扫描所有模块（主分支触发）\npython -m pdd.ci_drift_heal\n\n# 扫描特定模块（PR 触发）\npython -m pdd.ci_drift_heal --modules module_a module_b\n\n# 使用预算上限和跳过 CI 标志\npython -m pdd.ci_drift_heal --budget-cap 5.00 --skip-ci\n```\n\n**关键选项：**\n- `--modules`：将检测范围限制在特定模块（用于 PR 范围内的检查）\n- `--budget-cap FLOAT`：LLM 修复调用的最大金额\n- `--skip-ci`：在提交信息中添加 `[skip ci]`，以防止 CI 重新触发\n\n**核心见解**：此工作流自动化了 CI 中的“代码到提示”更新模式，确保提示与代码变更保持同步，无需人工干预。\n\n### 关键依赖\n\n使用这些工作流时，请记住以下关键工具依赖：\n\n- 必须先执行 `generate`，再执行 `example` 或 `test`\n- `crash` 用于修复运行时错误，使代码可运行\n- `fix` 需要由 `crash` 创建或验证的可运行代码\n- 必须先创建 `test`，再使用 `fix`\n- 提示界面发生重大变化后，务必更新 `example`\n- `ci_drift_heal` 要求模块必须已有提示文件和代码文件\n\n有关每个工作流的详细命令示例，请参阅相应的命令文档部分。\n\n### CI 自动修复\n\n**工作流文件**：`.github\u002Fworkflows\u002Fauto-heal-drift.yml`\n\n**目的**：在 CI 中自动检测并修复提示与代码之间的偏差。\n\n**触发条件**：\n- **拉取请求**：仅修复 PR 所更改的模块，并将修复提交到 PR 分支\n- **推送到 main 分支**：修复所有模块，并将修复直接提交到 main 分支\n\n**防止循环**：自动修复生成的提交会使用 `chore: auto-heal [skip ci]` 提交信息；该工作流会跳过由此类模式触发的运行。\n\n**配置**：设置 `PDD_BUDGET_CAP` 仓库变量以控制每次运行的 LLM 开销（默认值为 `5.00`）。\n\n有关完整详情，请参阅 [docs\u002Fci-auto-heal.md](docs\u002Fci-auto-heal.md)。\n\n## 集成\n\nPDD 提供多种集成方式，以简化其在开发环境中的使用：\n\n### VS Code 扩展\n\n专用的 VS Code 扩展 (`utils\u002Fvscode_prompt`) 提供语法高亮、代码片段等功能，方便用户直接在编辑器中处理 `.prompt` 文件。该扩展兼容所有支持 OpenVSX 的 IDE，包括 VS Code、Cursor、VSCodium、Gitpod、Kiro、Windsurf 等。有关安装和使用方法，请参阅该扩展的 [README](utils\u002Fvscode_prompt\u002FREADME.md)。\n\n### MCP 服务器（适用于代理客户端）\n\n`pdd-mcp-server` (`utils\u002Fmcp`) 基于模型上下文协议 (MCP) 构建，充当桥梁。这使得 Cursor、Claude Desktop、Continue.dev 等代理客户端能够以编程方式调用 `pdd-cli` 命令。有关配置和使用说明，请参阅 [MCP 服务器 README](utils\u002Fmcp\u002FREADME.md)。\n\n### CI 偏差检测\n\nPDD 包含一个适用于 CI 的偏差检测与自动修复脚本 (`pdd\u002Fci_drift_heal.py`)，可集成到 GitHub Actions 或其他 CI 系统中。该脚本会扫描提示与示例之间的偏差，使用 `pdd update` 和 `pdd sync` 进行修复，并提交结果。有关使用详情，请参阅【CI 偏差检测与自动修复】部分。\n\n## 实用工具\n\n### 更新 LLM 模型数据 (`pdd\u002Fupdate_model_costs.py`)\n\n此脚本会自动更新 `llm_model.csv` 文件。**它会优先更新用户特定的配置文件 `~\u002F.pdd\u002Fllm_model.csv`（如果存在）。** 否则，它将使用 
`--csv-path` 参数指定的文件路径（默认为 `\u003CPROJECT_ROOT>\u002F.pdd\u002Fllm_model.csv`）。\n\n该脚本使用 `litellm` 库完成以下操作：\n* 获取并填充所列模型的缺失输入输出成本（将每 token 成本转换为每百万 token 成本）。\n* 将现有成本与 LiteLLM 数据进行比较，并报告不一致之处（但不覆盖）。\n* 根据 `litellm.supports_response_schema` 检查并更新 `structured_output` 标志（True\u002FFalse）。\n* 在处理前使用 `litellm` 验证模型标识符。\n\n**使用方法：**\n\n```bash\nconda activate pdd\n# 脚本会首先自动检查 ~\u002F.pdd\u002Fllm_model.csv。\n# 如果未找到，则会使用 --csv-path 指定的路径（或默认的项目路径）。\npython pdd\u002Fupdate_model_costs.py [--csv-path path\u002Fto\u002Fyour\u002Fproject\u002Fllm_model.csv]\n```\n\n*注意：`max_reasoning_tokens` 列需要手动维护。*\n\n## 专利\n\n关于 PDD 工作流和系统相关方面的专利申请正在审查中。本声明并不授予任何专利权利；具体权利以 [LICENSE](LICENSE) 文件为准。\n\n## 结论\n\nPDD（提示驱动开发）CLI 提供了一套全面的工具，用于管理提示文件、生成代码、创建示例、运行测试以及处理提示驱动开发的各个方面。通过利用 AI 模型的强大功能和迭代流程，PDD 旨在简化开发流程并提高代码质量。\n\n各种命令和选项提供了灵活的使用方式，从简单的代码生成到涉及多个步骤的复杂工作流。通过环境变量跟踪成本和管理输出位置的功能，进一步增强了该工具在不同开发环境中的实用性。\n\nPDD 采用统一的参数顺序，将提示文件置于首位，突出了其提示驱动的本质，并为用户提供更直观的界面。这种跨命令的一致性应使该工具更易于学习和有效使用。\n\n随着对 PDD 的熟悉程度加深，您可以通过在 Shell 脚本、任务调度器或 CI 流水线中串联命令来构建更丰富的工作流，并充分利用可用的选项。请始终参考最新文档，并使用内置的帮助功能，以便在开发过程中最大化 PDD 的效用。\n\n请务必注意安全问题，尤其是在处理生成的代码或敏感数据时。定期更新 PDD，以获取最新的功能和改进。\n\n祝您使用 PDD 编码愉快！","# PDD (Prompt-Driven Development) 快速上手指南\n\nPDD 是一个基于提示词驱动开发（Prompt-Driven Development）的 AI 工具包，旨在通过自然语言提示词自动生成、测试和维护代码。它支持通过 Web 界面可视化操作，也提供强大的命令行代理功能来自动实现 GitHub Issue。\n\n## 环境准备\n\n在开始之前，请确保您的系统满足以下要求：\n\n### 系统要求\n- **操作系统**: macOS, Linux 或 Windows\n- **Python 版本**: Python 3.8 或更高版本\n- **网络**: 需要访问 GitHub 和 LLM 提供商 API（或使用 PDD Cloud 模式）\n\n### 前置依赖 (macOS 用户必读)\n如果您使用的是 macOS，请先安装以下基础工具：\n\n1. **安装 Xcode Command Line Tools** (用于 Python 编译):\n   ```bash\n   xcode-select --install\n   ```\n\n2. 
**安装 Homebrew** (推荐的包管理器):\n   ```bash\n   \u002Fbin\u002Fbash -c \"$(curl -fsSL https:\u002F\u002Fraw.githubusercontent.com\u002FHomebrew\u002Finstall\u002FHEAD\u002Finstall.sh)\"\n   ```\n   安装完成后，将 Homebrew 添加到 PATH:\n   ```bash\n   echo 'eval \"$(\u002Fopt\u002Fhomebrew\u002Fbin\u002Fbrew shellenv)\"' >> ~\u002F.zprofile && eval \"$(\u002Fopt\u002Fhomebrew\u002Fbin\u002Fbrew shellenv)\"\n   ```\n\n3. **确认 Python 已安装**:\n   ```bash\n   python3 --version\n   # 如果未安装，使用 Homebrew 安装\n   brew install python\n   ```\n\n### 可选依赖 (针对 CLI 高级用法)\n如果您计划直接使用命令行处理 GitHub Issue，还需安装：\n- **GitHub CLI**: `brew install gh && gh auth login`\n- **至少一个 Agent CLI** (任选其一):\n  - Claude Code: `npm install -g @anthropic-ai\u002Fclaude-code`\n  - Gemini CLI: `npm install -g @google\u002Fgemini-cli`\n  - Codex CLI: `npm install -g @openai\u002Fcodex`\n\n## 安装步骤\n\n推荐使用 **uv** 包管理器进行安装，它能提供更快的依赖解析和自动环境配置。\n\n### 1. 安装 uv (如果尚未安装)\n```bash\ncurl -LsSf https:\u002F\u002Fastral.sh\u002Fuv\u002Finstall.sh | sh\n```\n\n### 2. 安装 PDD CLI\n```bash\nuv tool install pdd-cli\n```\n\n### 3. 验证安装\n```bash\npdd --version\n```\n\n### 4. 初始化配置\n运行设置向导，它将自动检测代理工具、扫描 API Key 并配置模型：\n```bash\npdd setup\n```\n*注意：运行完成后，根据终端提示执行 `source ~\u002F.pdd\u002Fapi-env.zsh` (或 `.bash`) 以在当前会话中生效 API 密钥。*\n\n## 基本使用\n\nPDD 提供两种主要使用方式：推荐的 **Web 界面** 和 **命令行模式**。\n\n### 方式一：Web 界面 (推荐新手)\n这是最简单的使用方式，提供可视化的项目管理、文件浏览和命令执行功能。\n\n1. **启动 Web 服务**:\n   ```bash\n   pdd connect\n   ```\n2. **访问界面**:\n   浏览器会自动打开 `http:\u002F\u002Flocalhost:9876`。\n   - 在此界面中，您可以可视化地运行 `change`, `bug`, `fix`, `sync` 等命令。\n   - 支持远程访问（通过 PDD Cloud），也可添加 `--local-only` 仅限本地访问。\n\n### 方式二：命令行快速示例 (Hello World)\n如果您想通过命令行体验核心功能，请跟随以下步骤：\n\n1. **获取示例项目**:\n   ```bash\n   git clone https:\u002F\u002Fgithub.com\u002Fpromptdriven\u002Fpdd.git\n   cd pdd\u002Fexamples\u002Fhello\n   ```\n\n2. **配置 API Key** (选择其中一个设置):\n   ```bash\n   export GEMINI_API_KEY=\"your-gemini-key\"\n   # 或者\n   export OPENAI_API_KEY=\"your-openai-key\"\n   ```\n\n3. 
**运行生成命令**:\n   该命令会根据提示词文件生成代码：\n   ```bash\n   pdd --force generate hello_python.prompt\n   ```\n\n4. **运行生成的代码**:\n   ```bash\n   python3 hello.py\n   ```\n   ✅ 预期输出: `hello`\n\n### 方式三：自动化处理 GitHub Issue (进阶)\n如果您已配置好 GitHub CLI 和 Agent CLI，可以直接通过 Issue 链接驱动开发：\n\n```bash\n# 自动实现功能需求\npdd change \u003Cissue-url>\n\n# 为 Bug 创建失败测试\npdd bug \u003Cissue-url>\n\n# 修复失败的测试\npdd fix \u003Cissue-url>\n```\n\n---\n*注：默认情况下，PDD 使用 Cloud 模式运行（通过 GitHub SSO 认证），无需本地配置复杂的 API Key 即可使用强大模型。如需完全本地运行，请使用 `--local` 标志并确保已导出相应的 `*_API_KEY` 环境变量。*","某初创团队的后端工程师正面临 GitHub 上堆积的 20 个功能需求与紧急 Bug 修复任务，需要在极短的版本周期内完成交付。\n\n### 没有 pdd 时\n- **上下文切换频繁**：开发者需手动在 GitHub 议题、本地 IDE 和测试终端间反复跳转，复制粘贴错误日志和需求描述，打断心流。\n- **测试驱动开发成本高**：为复现 Bug 或验证新功能，必须人工编写大量样板测试代码，耗时且容易遗漏边界条件。\n- **流程标准化难**：缺乏统一的执行标准，不同成员处理议题的步骤各异，导致代码风格不一致且审查（Code Review）效率低下。\n- **状态追踪滞后**：无法实时可视化开发进度，往往等到编码结束才发现理解偏差，返工成本巨大。\n\n### 使用 pdd 后\n- **一键自动执行**：直接运行 `pdd bug \u003Cissue-url>` 或 `pdd change \u003Cissue-url>`，pdd 自动拉取议题内容并在本地启动包含 12 至 18 步的智能工作流。\n- **智能生成与验证**：pdd 自动生成失败测试用例并修复代码，甚至通过探索性测试和可访问性审计，确保产出质量符合预期。\n- **流程高度统一**：内置的标准化代理命令强制遵循最佳实践，无论谁操作，输出的代码结构和测试覆盖率都保持一致。\n- **实时可视反馈**：通过 `localhost:9876` 界面实时监控 pdd 的思考路径与执行状态，随时干预决策，实现“所见即所得”的开发体验。\n\npdd 将原本碎片化的人工编码过程转化为自动化的智能闭环，让开发者从繁琐的执行者转变为高效的项目指挥官。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fpromptdriven_pdd_c03e8b1a.png","promptdriven","Prompt 
Driven","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002Fpromptdriven_394235ef.png","",null,"Prompt_Driven","www.promptdriven.ai","https:\u002F\u002Fgithub.com\u002Fpromptdriven",[85,89,93,97,101,105],{"name":86,"color":87,"percentage":88},"Python","#3572A5",90,{"name":90,"color":91,"percentage":92},"TypeScript","#3178c6",7.8,{"name":94,"color":95,"percentage":96},"Shell","#89e051",1.7,{"name":98,"color":99,"percentage":100},"Makefile","#427819",0.4,{"name":102,"color":103,"percentage":104},"HTML","#e34c26",0.1,{"name":106,"color":107,"percentage":108},"JavaScript","#f1e05a",0,638,57,"2026-04-05T20:33:05","MIT","macOS, Linux, Windows","未说明",{"notes":116,"python":117,"dependencies":118},"该工具主要作为命令行接口运行，依赖外部 AI 代理（如 Claude Code、Gemini CLI 或 Codex CLI）执行实际代码生成任务。推荐使用 'uv' 进行安装以自动管理环境。本地模式需配置 LLM API 密钥（支持 OpenAI、Anthropic、Google 等），云端模式则通过 GitHub SSO 认证。macOS 用户需先安装 Xcode Command Line Tools 和 Homebrew。","3.8+",[119,120,121,122],"uv","litellm>=1.75.5","GitHub CLI (gh)","Claude Code \u002F Gemini CLI \u002F Codex CLI (至少其一)",[15,61,13,26,14],[125,126,127,128,129,130,131,132,133,134,135],"ai","cli","code","developer-tools","development","methodology","prompt","prompt-engineering","prompt-toolkit","prompts","prompts-template","2026-03-27T02:49:30.150509","2026-04-06T14:01:43.030114",[],[]]