[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-ai-christianson--RA.Aid":3,"tool-ai-christianson--RA.Aid":64},[4,17,27,35,43,56],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":16},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,3,"2026-04-05T11:01:52",[13,14,15],"开发框架","图像","Agent","ready",{"id":18,"name":19,"github_repo":20,"description_zh":21,"stars":22,"difficulty_score":23,"last_commit_at":24,"category_tags":25,"status":16},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",138956,2,"2026-04-05T11:33:21",[13,15,26],"语言模型",{"id":28,"name":29,"github_repo":30,"description_zh":31,"stars":32,"difficulty_score":23,"last_commit_at":33,"category_tags":34,"status":16},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",107662,"2026-04-03T11:11:01",[13,14,15],{"id":36,"name":37,"github_repo":38,"description_zh":39,"stars":40,"difficulty_score":23,"last_commit_at":41,"category_tags":42,"status":16},3704,"NextChat","ChatGPTNextWeb\u002FNextChat","NextChat 是一款轻量且极速的 AI 助手，旨在为用户提供流畅、跨平台的大模型交互体验。它完美解决了用户在多设备间切换时难以保持对话连续性，以及面对众多 AI 模型不知如何统一管理的痛点。无论是日常办公、学习辅助还是创意激发，NextChat 都能让用户随时随地通过网页、iOS、Android、Windows、MacOS 或 Linux 端无缝接入智能服务。\n\n这款工具非常适合普通用户、学生、职场人士以及需要私有化部署的企业团队使用。对于开发者而言，它也提供了便捷的自托管方案，支持一键部署到 Vercel 或 Zeabur 等平台。\n\nNextChat 的核心亮点在于其广泛的模型兼容性，原生支持 Claude、DeepSeek、GPT-4 及 Gemini Pro 等主流大模型，让用户在一个界面即可自由切换不同 AI 能力。此外，它还率先支持 MCP（Model Context Protocol）协议，增强了上下文处理能力。针对企业用户，NextChat 提供专业版解决方案，具备品牌定制、细粒度权限控制、内部知识库整合及安全审计等功能，满足公司对数据隐私和个性化管理的高标准要求。",87618,"2026-04-05T07:20:52",[13,26],{"id":44,"name":45,"github_repo":46,"description_zh":47,"stars":48,"difficulty_score":23,"last_commit_at":49,"category_tags":50,"status":16},2268,"ML-For-Beginners","microsoft\u002FML-For-Beginners","ML-For-Beginners 是由微软推出的一套系统化机器学习入门课程，旨在帮助零基础用户轻松掌握经典机器学习知识。这套课程将学习路径规划为 12 周，包含 26 节精炼课程和 52 
道配套测验，内容涵盖从基础概念到实际应用的完整流程，有效解决了初学者面对庞大知识体系时无从下手、缺乏结构化指导的痛点。\n\n无论是希望转型的开发者、需要补充算法背景的研究人员，还是对人工智能充满好奇的普通爱好者，都能从中受益。课程不仅提供了清晰的理论讲解，还强调动手实践，让用户在循序渐进中建立扎实的技能基础。其独特的亮点在于强大的多语言支持，通过自动化机制提供了包括简体中文在内的 50 多种语言版本，极大地降低了全球不同背景用户的学习门槛。此外，项目采用开源协作模式，社区活跃且内容持续更新，确保学习者能获取前沿且准确的技术资讯。如果你正寻找一条清晰、友好且专业的机器学习入门之路，ML-For-Beginners 将是理想的起点。",84991,"2026-04-05T10:45:23",[14,51,52,53,15,54,26,13,55],"数据工具","视频","插件","其他","音频",{"id":57,"name":58,"github_repo":59,"description_zh":60,"stars":61,"difficulty_score":10,"last_commit_at":62,"category_tags":63,"status":16},3128,"ragflow","infiniflow\u002Fragflow","RAGFlow 是一款领先的开源检索增强生成（RAG）引擎，旨在为大语言模型构建更精准、可靠的上下文层。它巧妙地将前沿的 RAG 技术与智能体（Agent）能力相结合，不仅支持从各类文档中高效提取知识，还能让模型基于这些知识进行逻辑推理和任务执行。\n\n在大模型应用中，幻觉问题和知识滞后是常见痛点。RAGFlow 通过深度解析复杂文档结构（如表格、图表及混合排版），显著提升了信息检索的准确度，从而有效减少模型“胡编乱造”的现象，确保回答既有据可依又具备时效性。其内置的智能体机制更进一步，使系统不仅能回答问题，还能自主规划步骤解决复杂问题。\n\n这款工具特别适合开发者、企业技术团队以及 AI 研究人员使用。无论是希望快速搭建私有知识库问答系统，还是致力于探索大模型在垂直领域落地的创新者，都能从中受益。RAGFlow 提供了可视化的工作流编排界面和灵活的 API 接口，既降低了非算法背景用户的上手门槛，也满足了专业开发者对系统深度定制的需求。作为基于 Apache 2.0 协议开源的项目，它正成为连接通用大模型与行业专有知识之间的重要桥梁。",77062,"2026-04-04T04:44:48",[15,14,13,26,54],{"id":65,"github_repo":66,"name":67,"description_en":68,"description_zh":69,"ai_summary_zh":70,"readme_en":71,"readme_zh":72,"quickstart_zh":73,"use_case_zh":74,"hero_image_url":75,"owner_login":76,"owner_name":77,"owner_avatar_url":78,"owner_bio":79,"owner_company":80,"owner_location":81,"owner_email":81,"owner_twitter":82,"owner_website":81,"owner_url":83,"languages":84,"stars":114,"forks":115,"last_commit_at":116,"license":117,"difficulty_score":23,"env_os":118,"env_gpu":119,"env_ram":120,"env_deps":121,"category_tags":130,"github_topics":131,"view_count":10,"oss_zip_url":81,"oss_zip_packed_at":81,"status":16,"created_at":135,"updated_at":136,"faqs":137,"releases":166},1074,"ai-christianson\u002FRA.Aid","RA.Aid","Develop software autonomously.","RA.Aid 是一个能自主完成软件开发任务的智能助手，它通过多步骤规划与自动化执行，帮助开发者高效完成从需求分析到代码实现的全流程工作。这个工具特别擅长处理需要连续决策的复杂开发场景，例如自动规划功能实现步骤、编写并调试多文件代码、执行命令行操作等，能显著减少重复性人工操作。\n\n对于需要频繁处理需求变更、技术调研或代码重构的开发者来说，RA.Aid 提供了系统化的解决方案。它通过三阶段架构实现智能开发：先研究现有代码库和需求背景，再拆解任务为可执行步骤，最后自动编写代码并验证结果。这种设计解决了传统开发中任务规划碎片化、执行效率低等问题。\n\n该工具的核心价值在于其深度整合能力：既支持与专业代码编辑工具 aider 的联动，又能调用高级推理模型（如 OpenAI o1）解决复杂逻辑问题，配合网络搜索功能实现技术方案的实时验证。其自动化命令执行特性虽大幅提升效率，但也建议使用者保持代码审查习惯。\n\n主要面向有编程基础的开发者和技术研究人员，特别适合需要快速验证产品原型、维护复杂系统的场景。设计上采用模块化架构，用户可根据项目需求灵活配置模型与工具链。尽管处于 Beta 阶段，但其开源特性已吸引社区贡献，持续优化着自动化开发的边界。","RA.Aid 是一个能自主完成软件开发任务的智能助手，它通过多步骤规划与自动化执行，帮助开发者高效完成从需求分析到代码实现的全流程工作。这个工具特别擅长处理需要连续决策的复杂开发场景，例如自动规划功能实现步骤、编写并调试多文件代码、执行命令行操作等，能显著减少重复性人工操作。\n\n对于需要频繁处理需求变更、技术调研或代码重构的开发者来说，RA.Aid 提供了系统化的解决方案。它通过三阶段架构实现智能开发：先研究现有代码库和需求背景，再拆解任务为可执行步骤，最后自动编写代码并验证结果。这种设计解决了传统开发中任务规划碎片化、执行效率低等问题。\n\n该工具的核心价值在于其深度整合能力：既支持与专业代码编辑工具 aider 的联动，又能调用高级推理模型（如 OpenAI o1）解决复杂逻辑问题，配合网络搜索功能实现技术方案的实时验证。其自动化命令执行特性虽大幅提升效率，但也建议使用者保持代码审查习惯。\n\n主要面向有编程基础的开发者和技术研究人员，特别适合需要快速验证产品原型、维护复杂系统的场景。设计上采用模块化架构，用户可根据项目需求灵活配置模型与工具链。尽管处于 Beta 阶段，但其开源特性已吸引社区贡献，持续优化着自动化开发的边界。","\u003Cpicture>\n  \u003Csource media=\"(prefers-color-scheme: dark)\" srcset=\"assets\u002Flogo-white-transparent.gif\">\n  \u003Csource media=\"(prefers-color-scheme: light)\" srcset=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fai-christianson_RA.Aid_readme_f77cda8e7f51.png\">\n  \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fai-christianson_RA.Aid_readme_f77cda8e7f51.png\" alt=\"RA.Aid - Develop software autonomously.\" style=\"margin-bottom: 20px;\">\n\u003C\u002Fpicture>\n\n[![Python 
Versions](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Fpython-3.8%2B-blue)](https:\u002F\u002Fwww.python.org)\n[![License](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Flicense-Apache%202.0-blue)](LICENSE)\n[![Status](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Fstatus-Beta-yellow)]()\n\n**Develop software autonomously.**\n\nRA.Aid (pronounced \"raid\") helps you develop software autonomously. It is a standalone coding agent built on LangGraph's agent-based task execution framework. The tool provides an intelligent assistant that can help with research, planning, and implementation of multi-step development tasks. RA.Aid can optionally integrate with `aider` (https:\u002F\u002Faider.chat\u002F) via the `--use-aider` flag to leverage its specialized code editing capabilities.\n\nThe result is **near-fully-autonomous software development**.\n\n**Enjoying RA.Aid?** Show your support by giving us a star ⭐ on [GitHub](https:\u002F\u002Fgithub.com\u002Fai-christianson\u002FRA.Aid)!\n\nHere's a demo of RA.Aid adding a feature to itself:\n\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fai-christianson_RA.Aid_readme_089c094adf9c.gif\" alt=\"RA.Aid Demo\" autoplay loop style=\"width: 100%; max-width: 800px;\">\n\n## Documentation\n\nComplete documentation is available at https:\u002F\u002Fdocs.ra-aid.ai\n\nKey sections:\n- [Installation Guide](https:\u002F\u002Fdocs.ra-aid.ai\u002Fquickstart\u002Finstallation)\n- [Recommended Configuration](https:\u002F\u002Fdocs.ra-aid.ai\u002Fquickstart\u002Frecommended)\n- [Open Models Setup](https:\u002F\u002Fdocs.ra-aid.ai\u002Fquickstart\u002Fopen-models)\n- [Usage Examples](https:\u002F\u002Fdocs.ra-aid.ai\u002Fcategory\u002Fusage)\n- [Logging System](https:\u002F\u002Fdocs.ra-aid.ai\u002Fconfiguration\u002Flogging)\n- [Memory Management](https:\u002F\u002Fdocs.ra-aid.ai\u002Fconfiguration\u002Fmemory-management)\n- [Contributing Guide](https:\u002F\u002Fdocs.ra-aid.ai\u002Fcontributing)\n- [Getting Help](https:\u002F\u002Fdocs.ra-aid.ai\u002Fgetting-help)\n\n## Table of Contents\n\n- [Features](#features)\n- [Installation](#installation)\n- [Usage](#usage)\n- [Architecture](#architecture)\n- [Dependencies](#dependencies)\n- [Development Setup](#development-setup)\n- [Contributing](#contributing)\n- [License](#license)\n- [Contact](#contact)\n\n> 👋 **Pull requests are very welcome!** Have ideas for how to improve RA.Aid? Don't be shy - your help makes a real difference!\n>\n> 💬 **Join our Discord community:** [Click here to join](https:\u002F\u002Fdiscord.gg\u002Ff6wYbzHYxV)\n\n⚠️ **IMPORTANT: USE AT YOUR OWN RISK** ⚠️\n- This tool **can and will** automatically execute shell commands and make code changes\n- The --cowboy-mode flag can be enabled to skip shell command approval prompts\n- No warranty is provided, either express or implied\n- Always use in version-controlled repositories\n- Review proposed changes in your git diff before committing\n\n## Key Features\n\n- **Multi-Step Task Planning**: The agent breaks down complex tasks into discrete, manageable steps and executes them sequentially. This systematic approach ensures thorough implementation and reduces errors.\n\n- **Automated Command Execution**: The agent can run shell commands automatically to accomplish tasks. While this makes it powerful, it also means you should carefully review its actions.\n\n- **Ability to Leverage Expert Reasoning Models**: The agent can use advanced reasoning models such as OpenAI's o1 *just when needed*, e.g. 
to solve complex debugging problems or in planning for complex feature implementation.\n\n- **Web Research Capabilities**: Leverages Tavily API for intelligent web searches to enhance research and gather real-world context for development tasks\n\n- **Three-Stage Architecture**:\n  1. **Research**: Analyzes codebases and gathers context\n  2. **Planning**: Breaks down tasks into specific, actionable steps\n  3. **Implementation**: Executes each planned step sequentially\n\nWhat sets RA.Aid apart is its ability to handle complex programming tasks that extend beyond single-shot code edits. By combining research, strategic planning, and implementation into a cohesive workflow, RA.Aid can:\n\n- Break down and execute multi-step programming tasks\n- Research and analyze complex codebases to answer architectural questions\n- Plan and implement significant code changes across multiple files\n- Provide detailed explanations of existing code structure and functionality\n- Execute sophisticated refactoring operations with proper planning\n\n## Features\n\n- **Three-Stage Architecture**: The workflow consists of three powerful stages:\n  1. **Research** 🔍 - Gather and analyze information\n  2. **Planning** 📋 - Develop execution strategy\n  3. **Implementation** ⚡ - Execute the plan with AI assistance\n\n  Each stage is powered by dedicated AI agents and specialized toolsets.\n- **Advanced AI Integration**: Built on LangChain and leverages the latest LLMs for natural language understanding and generation.\n- **Human-in-the-Loop Interaction**: Optional mode that enables the agent to ask you questions during task execution, ensuring higher accuracy and better handling of complex tasks that may require your input or clarification\n- **Comprehensive Toolset**:\n  - Shell command execution\n  - Expert querying system\n  - File operations and management\n  - Memory management\n  - Research and planning tools\n  - Code analysis capabilities\n- **Interactive CLI Interface**: Simple yet powerful command-line interface for seamless interaction\n- **Modular Design**: Structured as a Python package with specialized modules for console output, processing, text utilities, and tools\n- **Git Integration**: Built-in support for Git operations and repository management\n\n## Installation\n\n### Windows Installation\n1. Install Python 3.8 or higher from [python.org](https:\u002F\u002Fwww.python.org\u002Fdownloads\u002F)\n2. Install required system dependencies:\n   ```powershell\n   # Install Chocolatey if not already installed (run in admin PowerShell)\n   Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https:\u002F\u002Fcommunity.chocolatey.org\u002Finstall.ps1'))\n   \n   # Install ripgrep using Chocolatey\n   choco install ripgrep\n   ```\n3. Install RA.Aid:\n   ```powershell\n   pip install ra-aid\n   ```\n4. Install Windows-specific dependencies:\n   ```powershell\n   pip install pywin32\n   ```\n5. 
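(Optional) Verify the installation with a quick sanity check (the documented `--version` flag prints the version number and exits):\n   ```powershell\n   ra-aid --version\n   ```\n6. 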
Set up your API keys in a `.env` file:\n   ```env\n   ANTHROPIC_API_KEY=your_anthropic_key\n   OPENAI_API_KEY=your_openai_key\n   ```\n\n### Unix\u002FLinux Installation\nRA.Aid can be installed directly using pip:\n\n```bash\npip install ra-aid\n```\n\n### macOS Installation with Homebrew\n\n```bash\nbrew tap ai-christianson\u002Fhomebrew-ra-aid\nbrew install ra-aid\n```\n\n**NOTE:** On macOS, RA.Aid may also be installed with pip as shown above.\n\n\n### Prerequisites\n\nBefore using RA.Aid, you'll need API keys for the required AI services:\n\n```bash\n# Set up API keys based on your preferred provider:\n\n# For Anthropic Claude models (recommended)\nexport ANTHROPIC_API_KEY=your_api_key_here\n\n# For OpenAI models (optional)\nexport OPENAI_API_KEY=your_api_key_here\n\n# For OpenRouter provider (optional)\nexport OPENROUTER_API_KEY=your_api_key_here\n\n# For Makehub provider (optional)\nexport MAKEHUB_API_KEY=your_api_key_here\n\n# For OpenAI-compatible providers (optional)\nexport OPENAI_API_BASE=your_api_base_url\n\n# For Gemini provider (optional)\nexport GEMINI_API_KEY=your_api_key_here\n\n# For web research capabilities\nexport TAVILY_API_KEY=your_api_key_here\n```\n\nNote: When using the `--use-aider` flag, the programmer tool (aider) will automatically select its model based on your available API keys:\n- If ANTHROPIC_API_KEY is set, it will use Claude models\n- If only OPENAI_API_KEY is set, it will use OpenAI models\n- You can set multiple API keys to enable different features\n\nYou can get your API keys from:\n- Anthropic API key: https:\u002F\u002Fconsole.anthropic.com\u002F\n- OpenAI API key: https:\u002F\u002Fplatform.openai.com\u002Fapi-keys\n- OpenRouter API key: https:\u002F\u002Fopenrouter.ai\u002Fkeys\n- Makehub API key: https:\u002F\u002Fmakehub.ai\u002F\n- Gemini API key: https:\u002F\u002Faistudio.google.com\u002Fapp\u002Fapikey\n\nNote: `aider` must be installed separately as it is not included in the RA.Aid package. See [aider-chat](https:\u002F\u002Fpypi.org\u002Fproject\u002Faider-chat\u002F) for more details.\n\nComplete installation documentation is available in our [Installation Guide](https:\u002F\u002Fdocs.ra-aid.ai\u002Fquickstart\u002Finstallation).\n\n## Usage\n\nRA.Aid is designed to be simple yet powerful. 
Here's how to use it:\n\n```bash\n# Basic usage\nra-aid -m \"Your task or query here\"\n\n# Research-only mode (no implementation)\nra-aid -m \"Explain the authentication flow\" --research-only\n\n# File logging with console warnings (default mode)\nra-aid -m \"Add new feature\" --log-mode file\n\n# Console-only logging with detailed output\nra-aid -m \"Add new feature\" --log-mode console --log-level debug\n```\n\nMore information is available in our [Usage Examples](https:\u002F\u002Fdocs.ra-aid.ai\u002Fcategory\u002Fusage), [Logging System](https:\u002F\u002Fdocs.ra-aid.ai\u002Fconfiguration\u002Flogging), and [Memory Management](https:\u002F\u002Fdocs.ra-aid.ai\u002Fconfiguration\u002Fmemory-management) documentation.\n\n### Command Line Options\n\n- `-m, --message`: The task or query to be executed (required except in chat mode, cannot be used with --msg-file)\n- `--msg-file`: Path to a text file containing the task\u002Fmessage (cannot be used with --message)\n- `--research-only`: Only perform research without implementation\n- `--provider`: The LLM provider to use (choices: anthropic, openai, openrouter, openai-compatible, makehub, gemini)\n- `--model`: The model name to use (required for non-Anthropic providers)\n- `--use-aider`: Enable aider integration for code editing. When enabled, RA.Aid uses aider's specialized code editing capabilities instead of its own native file modification tools. This option is useful when you need aider's specific editing features or prefer its approach to code modifications. This feature is optional and disabled by default.\n- `--research-provider`: Provider to use specifically for research tasks (falls back to --provider if not specified)\n- `--research-model`: Model to use specifically for research tasks (falls back to --model if not specified)\n- `--planner-provider`: Provider to use specifically for planning tasks (falls back to --provider if not specified)\n- `--planner-model`: Model to use specifically for planning tasks (falls back to --model if not specified)\n- `--cowboy-mode`: Skip interactive approval for shell commands\n- `--expert-provider`: The LLM provider to use for expert knowledge queries (choices: anthropic, openai, openrouter, openai-compatible, makehub, gemini)\n- `--expert-model`: The model name to use for expert knowledge queries (required for non-OpenAI providers)\n- `--hil, -H`: Enable human-in-the-loop mode for interactive assistance during task execution\n- `--chat`: Enable chat mode with direct human interaction (implies --hil)\n- `--log-mode`: Logging mode (choices: file, console)\n  - `file` (default): Logs to both file and console (only warnings and errors to console)\n  - `console`: Logs to console only at the specified log level with no file logging\n- `--log-level`: Set specific logging level (debug, info, warning, error, critical)\n  - With `--log-mode=file`: Controls the file logging level (console still shows only warnings+)\n  - With `--log-mode=console`: Controls the console logging level directly\n  - Default: warning\n- `--experimental-fallback-handler`: Enable experimental fallback handler to attempt to fix tool calls when the same tool fails 3 times consecutively. (OPENAI_API_KEY recommended, as OpenAI has the top 5 tool-calling models.) 
See `ra_aid\u002Ftool_leaderboard.py` for more info.\n- `--pretty-logger`: Enables colored panel-style formatted logging output for better readability.\n- `--temperature`: LLM temperature (0.0-2.0) to control randomness in responses\n- `--disable-limit-tokens`: Disable token limiting for Anthropic Claude react agents\n- `--recursion-limit`: Maximum recursion depth for agent operations (default: 100)\n- `--test-cmd`: Custom command to run tests. If set, the user will be asked if they want to run the test command\n- `--auto-test`: Automatically run tests after each code change\n- `--max-test-cmd-retries`: Maximum number of test command retry attempts (default: 3)\n- `--test-cmd-timeout`: Timeout in seconds for test command execution (default: 300)\n- `--show-cost`: Display cost information as the agent works - currently only supported on Claude model agents\n- `--track-cost`: Track token usage and costs (default: False)\n- `--no-track-cost`: Disable tracking of token usage and costs\n- `--max-cost`: Maximum cost threshold in USD (positive float)\n- `--max-tokens`: Maximum token threshold (positive integer)\n- `--exit-at-limit`: Exit immediately without prompting when --max-cost or --max-tokens limits are reached\n- `--price-performance-ratio`: Price-performance ratio for Makehub API (0.0-1.0, where 0.0 prioritizes speed and 1.0 prioritizes cost efficiency)\n- `--version`: Show program version number and exit\n- `--server`: Launch the server with web interface (alpha feature)\n- `--server-host`: Host to listen on for server (default: 0.0.0.0)  (alpha feature)\n- `--server-port`: Port to listen on for server (default: 1818) (alpha feature)\n\n### Example Tasks\n\n1. Code Analysis:\n   ```bash\n   ra-aid -m \"Explain how the authentication middleware works\" --research-only\n   ```\n\n2. Complex Changes:\n   ```bash\n   ra-aid -m \"Refactor the database connection code to use connection pooling\" --cowboy-mode\n   ```\n\n3. Automated Updates:\n   ```bash\n   ra-aid -m \"Update deprecated API calls across the entire codebase\" --cowboy-mode\n   ```\n\n4. Code Research:\n   ```bash\n   ra-aid -m \"Analyze the current error handling patterns\" --research-only\n   ```\n\n### Human-in-the-Loop Mode\n\nEnable interactive mode to allow the agent to ask you questions during task execution:\n\n```bash\nra-aid -m \"Implement a new feature\" --hil\n# or\nra-aid -m \"Implement a new feature\" -H\n```\n\nThis mode is particularly useful for:\n- Complex tasks requiring human judgment\n- Clarifying ambiguous requirements\n- Making architectural decisions\n- Validating critical changes\n- Providing domain-specific knowledge\n\n### Web Research\n\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fai-christianson_RA.Aid_readme_a2f241205ff6.gif\" alt=\"RA.Aid Demo\" autoplay loop style=\"width: 100%; max-width: 800px;\">\n\nThe agent features autonomous web research capabilities powered by the [Tavily](https:\u002F\u002Ftavily.com\u002F) API, seamlessly integrating real-world information into its problem-solving workflow. 
Web research is conducted automatically when the agent determines additional context would be valuable - no explicit configuration required.\n\nFor example, when researching modern authentication practices or investigating new API requirements, the agent will autonomously:\n- Search for current best practices and security recommendations\n- Find relevant documentation and technical specifications\n- Gather real-world implementation examples\n- Stay updated on latest industry standards\n\nWhile web research happens automatically as needed, you can also explicitly request research-focused tasks:\n\n```bash\n# Focused research task with web search capabilities\nra-aid -m \"Research current best practices for API rate limiting\" --research-only\n```\n\nMake sure to set your TAVILY_API_KEY environment variable to enable this feature.\n\n### Chat Mode\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fai-christianson_RA.Aid_readme_ae49978f83b4.gif\" alt=\"Chat Mode Demo\" autoplay loop style=\"display: block; margin: 0 auto; width: 100%; max-width: 800px;\">\n\nEnable with `--chat` to transform ra-aid into an interactive assistant that guides you through research and implementation tasks. Have a natural conversation about what you want to build, explore options together, and dispatch work - all while maintaining context of your discussion. Perfect for when you want to think through problems collaboratively rather than just executing commands.\n\n### Server with Web Interface\n\nRA.Aid includes a modern server with web interface that provides:\n\n- Beautiful dark-themed chat interface\n- Real-time streaming of agent trajectory\n- Responsive design that works on all devices\n\nTo launch the server with web interface:\n\n```bash\n# Start with default settings (0.0.0.0:1818)\nra-aid --server\n\n# Specify custom host and port\nra-aid --server --server-host 127.0.0.1 --server-port 3000\n```\n\nCommand line options for server with web interface:\n- `--server`: Launch the server with web interface\n- `--server-host`: Host to listen on (default: 0.0.0.0)\n- `--server-port`: Port to listen on (default: 1818)\n\nAfter starting the server, open your web browser to the displayed URL (e.g., http:\u002F\u002Flocalhost:1818).\n\n### Command Interruption and Feedback\n\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fai-christianson_RA.Aid_readme_4529107044ef.gif\" alt=\"Command Interrupt Demo\" autoplay loop style=\"display: block; margin: 0 auto; width: 100%; max-width: 800px;\">\n\nYou can interrupt the agent at any time by pressing `Ctrl-C`. This pauses the agent, allowing you to provide feedback, adjust your instructions, or steer the execution in a new direction. Press `Ctrl-C` again if you want to completely exit the program.\n\n\n### Shell Command Automation with Cowboy Mode 🏇\n\nThe `--cowboy-mode` flag enables automated shell command execution without confirmation prompts. This is useful for:\n\n- CI\u002FCD pipelines\n- Automated testing environments\n- Batch processing operations\n- Scripted workflows\n\n```bash\nra-aid -m \"Update all deprecated API calls\" --cowboy-mode\n```\n\n**⚠️ Important Safety Notes:**\n- Cowboy mode skips confirmation prompts for shell commands\n- Always use in version-controlled repositories\n- Ensure you have a clean working tree before running\n- Review changes in git diff before committing\n\n### Model Configuration\n\nRA.Aid supports multiple AI providers and models. 
The default model is Anthropic's Claude 3.7 Sonnet (`claude-3-7-sonnet-20250219`).\n\nWhen using the `--use-aider` flag, the programmer tool (aider) automatically selects its model based on your available API keys. It will use Claude models if ANTHROPIC_API_KEY is set, or fall back to OpenAI models if only OPENAI_API_KEY is available.\n\nNote: The expert tool can be configured to use different providers (OpenAI, Anthropic, OpenRouter, Gemini) using the --expert-provider flag along with the corresponding EXPERT_*API_KEY environment variables. Each provider requires its own API key set through the appropriate environment variable.\n\n#### Environment Variables\n\nRA.Aid supports multiple providers through environment variables:\n\n- `ANTHROPIC_API_KEY`: Required for the default Anthropic provider\n- `OPENAI_API_KEY`: Required for OpenAI provider\n- `OPENROUTER_API_KEY`: Required for OpenRouter provider\n- `MAKEHUB_API_KEY`: Required for Makehub provider\n- `DEEPSEEK_API_KEY`: Required for DeepSeek provider\n- `OPENAI_API_BASE`: Required for OpenAI-compatible providers along with `OPENAI_API_KEY`\n- `GEMINI_API_KEY`: Required for Gemini provider\n\nExpert Tool Environment Variables:\n- `EXPERT_OPENAI_API_KEY`: API key for expert tool using OpenAI provider\n- `EXPERT_ANTHROPIC_API_KEY`: API key for expert tool using Anthropic provider\n- `EXPERT_OPENROUTER_API_KEY`: API key for expert tool using OpenRouter provider\n- `EXPERT_MAKEHUB_API_KEY`: API key for expert tool using Makehub provider (automatically uses `MAKEHUB_API_KEY` if not set)\n- `EXPERT_OPENAI_API_BASE`: Base URL for expert tool using OpenAI-compatible provider\n- `EXPERT_GEMINI_API_KEY`: API key for expert tool using Gemini provider\n- `EXPERT_DEEPSEEK_API_KEY`: API key for expert tool using DeepSeek provider\n\nYou can set these permanently in your shell's configuration file (e.g., `~\u002F.bashrc` or `~\u002F.zshrc`):\n\n```bash\n# Default provider (Anthropic)\nexport ANTHROPIC_API_KEY=your_api_key_here\n\n# For OpenAI features and expert tool\nexport OPENAI_API_KEY=your_api_key_here\n\n# For OpenRouter provider\nexport OPENROUTER_API_KEY=your_api_key_here\n\n# For Makehub provider\nexport MAKEHUB_API_KEY=your_api_key_here\n\n# For OpenAI-compatible providers\nexport OPENAI_API_BASE=your_api_base_url\n\n# For Gemini provider\nexport GEMINI_API_KEY=your_api_key_here\n```\n\n### Custom Model Examples\n\n1. **Using Anthropic (Default)**\n   ```bash\n   # Uses default model (claude-3-7-sonnet-20250219)\n   ra-aid -m \"Your task\"\n\n   # Or explicitly specify:\n   ra-aid -m \"Your task\" --provider anthropic --model claude-3-5-sonnet-20241022\n   ```\n\n2. **Using OpenAI**\n   ```bash\n   ra-aid -m \"Your task\" --provider openai --model gpt-4o\n   ```\n\n3. **Using OpenRouter**\n   ```bash\n   ra-aid -m \"Your task\" --provider openrouter --model mistralai\u002Fmistral-large-2411\n   ```\n\n4. **Using Makehub**\n   ```bash\n   ra-aid -m \"Your task\" --provider makehub --model openai\u002Fgpt-4o\n   \n   # With price-performance optimization\n   ra-aid -m \"Your task\" --provider makehub --model anthropic\u002Fclaude-4-sonnet --price-performance-ratio 0.7\n   ```\n\n5. **Using DeepSeek**\n   ```bash\n   # Direct DeepSeek provider (requires DEEPSEEK_API_KEY)\n   ra-aid -m \"Your task\" --provider deepseek --model deepseek-reasoner\n   \n   # DeepSeek via OpenRouter\n   ra-aid -m \"Your task\" --provider openrouter --model deepseek\u002Fdeepseek-r1\n   ```\n\n6. 
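**Using Gemini** (a minimal sketch following the same pattern; `gemini` is among the documented `--provider` choices, and the model name mirrors the expert-tool example below)\n   ```bash\n   export GEMINI_API_KEY=your_api_key_here\n   ra-aid -m \"Your task\" --provider gemini --model gemini-2.0-flash-thinking-exp-1219\n   ```\n\n7. 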
**Configuring Expert Provider**\n\n   The expert tool is used by the agent for complex logic and debugging tasks. It can be configured to use different providers (OpenAI, Anthropic, OpenRouter, Gemini, openai-compatible) using the --expert-provider flag along with the corresponding EXPERT_*API_KEY environment variables.\n\n   ```bash\n   # Use Anthropic for expert tool\n   export EXPERT_ANTHROPIC_API_KEY=your_anthropic_api_key\n   ra-aid -m \"Your task\" --expert-provider anthropic --expert-model claude-3-5-sonnet-20241022\n\n   # Use OpenRouter for expert tool\n   export OPENROUTER_API_KEY=your_openrouter_api_key\n   ra-aid -m \"Your task\" --expert-provider openrouter --expert-model mistralai\u002Fmistral-large-2411\n\n   # Use DeepSeek for expert tool\n   export DEEPSEEK_API_KEY=your_deepseek_api_key\n   ra-aid -m \"Your task\" --expert-provider deepseek --expert-model deepseek-reasoner\n\n   # Use Makehub for expert tool (automatically uses MAKEHUB_API_KEY)\n   export MAKEHUB_API_KEY=your_makehub_api_key\n   ra-aid -m \"Your task\" --expert-provider makehub --expert-model anthropic\u002Fclaude-4-sonnet\n\n   # Use default OpenAI for expert tool\n   export EXPERT_OPENAI_API_KEY=your_openai_api_key\n   ra-aid -m \"Your task\" --expert-provider openai --expert-model o1\n\n   # Use Gemini for expert tool\n   export EXPERT_GEMINI_API_KEY=your_gemini_api_key\n   ra-aid -m \"Your task\" --expert-provider gemini --expert-model gemini-2.0-flash-thinking-exp-1219\n   ```\n\nAider-specific environment variables you can add:\n\n- `AIDER_FLAGS`: Optional comma-separated list of flags to pass to the underlying aider tool (e.g., \"yes-always,dark-mode\")\n\n```bash\n# Optional: Configure aider behavior\nexport AIDER_FLAGS=\"yes-always,dark-mode,no-auto-commits\"\n```\n\nNote: For `AIDER_FLAGS`, you can specify flags with or without the leading `--`. Multiple flags should be comma-separated, and spaces around flags are automatically handled. For example, both `\"yes-always,dark-mode\"` and `\"--yes-always, --dark-mode\"` are valid.\n\n**Important Notes:**\n- Performance varies between models. The default Claude 3.7 Sonnet model currently provides the best and most reliable results.\n- Model configuration is done via command line arguments: `--provider` and `--model`\n- The `--model` argument is required for all providers except Anthropic (which defaults to `claude-3-7-sonnet-20250219`)\n\nMore information is available in our [Open Models Setup](https:\u002F\u002Fdocs.ra-aid.ai\u002Fquickstart\u002Fopen-models) guide.\n\n## Architecture\n\nRA.Aid implements a three-stage architecture for handling development and research tasks:\n\n1. **Research Stage**:\n   - Gathers information and context\n   - Analyzes requirements\n   - Identifies key components and dependencies\n\n2. **Planning Stage**:\n   - Develops detailed implementation plans\n   - Breaks down tasks into manageable steps\n   - Identifies potential challenges and solutions\n\n3. 
**Implementation Stage**:\n   - Executes planned tasks\n   - Generates code or documentation\n   - Performs necessary system operations\n\n### Core Components\n\n- **Console Module** (`console\u002F`): Handles console output formatting and user interaction\n- **Processing Module** (`proc\u002F`): Manages interactive processing and workflow control\n- **Text Module** (`text\u002F`): Provides text processing and manipulation utilities\n- **Tools Module** (`tools\u002F`): Contains various utility tools for file operations, search, and more\n\n## Dependencies\n\n### Core Dependencies\n- `langchain-anthropic`: LangChain integration with Anthropic's Claude\n- `tavily-python`: Tavily API client for web research\n- `langgraph`: Graph-based workflow management\n- `rich>=13.0.0`: Terminal formatting and output\n- `GitPython==3.1.41`: Git repository management\n- `fuzzywuzzy==0.18.0`: Fuzzy string matching\n- `python-Levenshtein==0.23.0`: Fast string matching\n- `pathspec>=0.11.0`: Path specification utilities\n\n### Development Dependencies\n- `pytest>=7.0.0`: Testing framework\n- `pytest-timeout>=2.2.0`: Test timeout management\n\n## Development Setup\n\n1. Clone the repository:\n```bash\ngit clone https:\u002F\u002Fgithub.com\u002Fai-christianson\u002FRA.Aid.git\ncd RA.Aid\n```\n\n2. Create and activate a virtual environment:\n```bash\npython -m venv venv\nsource venv\u002Fbin\u002Factivate  # On Windows use `venv\\Scripts\\activate`\n```\n\n3. Install development dependencies:\n```bash\npip install -e \".[dev]\"\n```\n\n4. Run tests:\n```bash\npython -m pytest\n```\n\n## Contributing\n\nContributions are welcome! Please follow these steps:\n\n1. Fork the repository\n2. Create a feature branch:\n```bash\ngit checkout -b feature\u002Fyour-feature-name\n```\n\n3. Make your changes and commit:\n```bash\ngit commit -m 'Add some feature'\n```\n\n4. Push to your fork:\n```bash\ngit push origin feature\u002Fyour-feature-name\n```\n\n5. 
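Run the test suite and make sure everything passes (the same command as in Development Setup; see the Guidelines below):\n```bash\npython -m pytest\n```\n\n6. 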
Open a Pull Request\n\n### Guidelines\n\n- Follow PEP 8 style guidelines\n- Add tests for new features\n- Update documentation as needed\n- Keep commits focused and message clear\n- Ensure all tests pass before submitting PR\n\nMore information is available in our [Contributing Guide](https:\u002F\u002Fdocs.ra-aid.ai\u002Fcontributing).\n\n## License\n\nThis project is licensed under the Apache License 2.0 - see the [LICENSE](LICENSE) file for details.\n\nCopyright (c) 2024 AI Christianson\n\n## Contact\n\n- **Issues**: Please report bugs and feature requests on our [Issue Tracker](https:\u002F\u002Fgithub.com\u002Fai-christianson\u002FRA.Aid\u002Fissues)\n- **Repository**: [https:\u002F\u002Fgithub.com\u002Fai-christianson\u002FRA.Aid](https:\u002F\u002Fgithub.com\u002Fai-christianson\u002FRA.Aid)\n- **Documentation**: [https:\u002F\u002Fgithub.com\u002Fai-christianson\u002FRA.Aid#readme](https:\u002F\u002Fgithub.com\u002Fai-christianson\u002FRA.Aid#readme)\n","\u003Cpicture>\n  \u003Csource media=\"(prefers-color-scheme: dark)\" srcset=\"assets\u002Flogo-white-transparent.gif\">\n  \u003Csource media=\"(prefers-color-scheme: light)\" srcset=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fai-christianson_RA.Aid_readme_f77cda8e7f51.png\">\n  \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fai-christianson_RA.Aid_readme_f77cda8e7f51.png\" alt=\"RA.Aid - Develop software autonomously.\" style=\"margin-bottom: 20px;\">\n\u003C\u002Fpicture>\n\n[![Python Versions](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Fpython-3.8%2B-blue)](https:\u002F\u002Fwww.python.org)\n[![License](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Flicense-Apache%202.0-blue)](LICENSE)\n[![Status](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Fstatus-Beta-yellow)]()\n\n**自主开发软件。**\n\nRA.Aid（发音为\"raid\"）帮助您自主开发软件。它是一个基于 LangGraph（一种基于图的代理工作流框架）的代理任务执行框架构建的独立编码代理（coding agent）。该工具提供智能助手，可协助研究、规划以及实施多步开发任务。RA.Aid 可通过 `--use-aider` 标志选择性与 `aider`（https:\u002F\u002Faider.chat\u002F，一种代码编辑工具）集成，以利用其专门的代码编辑能力。\n\n结果是**近乎全自动的软件开发**。\n\n**喜欢 RA.Aid 吗？** 在 [GitHub](https:\u002F\u002Fgithub.com\u002Fai-christianson\u002FRA.Aid) 上给我们一颗星 ⭐ 以示支持！\n\n以下是 RA.Aid 为自己添加功能的演示：\n\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fai-christianson_RA.Aid_readme_089c094adf9c.gif\" alt=\"RA.Aid Demo\" autoplay loop style=\"width: 100%; max-width: 800px;\">\n\n## 文档\n\n完整文档位于 https:\u002F\u002Fdocs.ra-aid.ai\n\n主要章节：\n- [安装指南](https:\u002F\u002Fdocs.ra-aid.ai\u002Fquickstart\u002Finstallation)\n- [推荐配置](https:\u002F\u002Fdocs.ra-aid.ai\u002Fquickstart\u002Frecommended)\n- [开放模型设置](https:\u002F\u002Fdocs.ra-aid.ai\u002Fquickstart\u002Fopen-models)\n- [使用示例](https:\u002F\u002Fdocs.ra-aid.ai\u002Fcategory\u002Fusage)\n- [日志系统](https:\u002F\u002Fdocs.ra-aid.ai\u002Fconfiguration\u002Flogging)\n- [内存管理](https:\u002F\u002Fdocs.ra-aid.ai\u002Fconfiguration\u002Fmemory-management)\n- [贡献指南](https:\u002F\u002Fdocs.ra-aid.ai\u002Fcontributing)\n- [获取帮助](https:\u002F\u002Fdocs.ra-aid.ai\u002Fgetting-help)\n\n## 目录\n\n- [功能特性](#features)\n- [安装](#installation)\n- [用法](#usage)\n- [架构](#architecture)\n- [依赖项](#dependencies)\n- [开发设置](#development-setup)\n- [贡献](#contributing)\n- [许可证](#license)\n- [联系方式](#contact)\n\n> 👋 **非常欢迎提交 Pull requests！** 有关于如何改进 RA.Aid 的想法吗？不要害羞——您的帮助能带来真正的改变！\n>\n> 💬 **加入我们的 Discord 社区：** [点击此处加入](https:\u002F\u002Fdiscord.gg\u002Ff6wYbzHYxV)\n\n⚠️ **重要提示：风险自担** ⚠️\n- 此工具**能够且将会**自动执行 shell 命令（命令行指令）并进行代码更改\n- 可启用 --cowboy-mode 标志以跳过 shell 命令批准提示\n- 不提供任何明示或暗示的保证\n- 
始终在版本控制的仓库中使用\n- 在提交之前，请在 git diff（差异对比）中审查提议的更改\n\n## 主要功能\n\n- **多步任务规划**：代理将复杂任务分解为离散的、可管理的步骤，并按顺序执行它们。这种系统化的方法确保了彻底的实施并减少了错误。\n\n- **自动化命令执行**：代理可以自动运行 shell 命令以完成任务。虽然这使其功能强大，但也意味着您应该仔细审查其操作。\n\n- **利用专家推理模型的能力**：代理可以使用高级推理模型，例如 OpenAI 的 o1，*仅在需要时*，例如解决复杂的调试问题或规划复杂功能实施。\n\n- **网络研究能力**：利用 Tavily API（网络搜索 API）进行智能网络搜索，以增强研究并为开发任务收集现实世界背景\n\n- **三阶段架构**：\n  1. **研究**：分析代码库并收集上下文\n  2. **规划**：将任务分解为具体的、可操作的步骤\n  3. **实施**：按顺序执行每个计划的步骤\n\nRA.Aid 的独特之处在于它能够处理超出单次代码编辑的复杂编程任务。通过将研究、战略规划和实施结合到一个连贯的工作流中，RA.Aid 可以：\n\n- 分解并执行多步编程任务\n- 研究和分析复杂代码库以回答架构问题\n- 规划并实施跨多个文件的重大代码更改\n- 提供现有代码结构和功能的详细解释\n- 执行具有适当规划的复杂重构操作\n\n## 功能特性\n\n- **三阶段架构**：工作流由三个强大的阶段组成：\n  1. **研究** 🔍 - 收集和分析信息\n  2. **规划** 📋 - 制定执行策略\n  3. **实施** ⚡ - 在 AI 协助下执行计划\n\n  每个阶段都由专用的 AI 代理和专用工具集提供支持。\n- **高级 AI 集成**：基于 LangChain（语言模型应用开发框架）构建，并利用最新的 LLMs（大型语言模型）进行自然语言理解和生成。\n- **人机交互（Human-in-the-Loop）**：可选模式，使代理能够在任务执行期间向您提问，确保更高的准确性并更好地处理可能需要您输入或澄清的复杂任务\n- **综合工具集**：\n  - Shell 命令执行\n  - 专家查询系统\n  - 文件操作和管理\n  - 内存管理\n  - 研究和规划工具\n  - 代码分析能力\n- **交互式 CLI 接口**：简单而强大的 CLI（命令行界面）接口，用于无缝交互\n- **模块化设计**：构建为 Python 包，具有用于控制台输出、处理、文本实用程序和工具的专用模块\n- **Git 集成**：内置支持 Git（版本控制系统）操作和仓库管理\n\n## 安装\n\n### Windows 安装\n1. 从 [python.org](https:\u002F\u002Fwww.python.org\u002Fdownloads\u002F) 安装 Python 3.8 或更高版本\n2. 安装所需的系统依赖项：\n   ```powershell\n   # Install Chocolatey if not already installed (run in admin PowerShell)\n   Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https:\u002F\u002Fcommunity.chocolatey.org\u002Finstall.ps1'))\n   \n   # Install ripgrep using Chocolatey\n   choco install ripgrep\n   ```\n3. 安装 RA.Aid：\n   ```powershell\n   pip install ra-aid\n   ```\n4. 安装 Windows 特定依赖项：\n   ```powershell\n   pip install pywin32\n   ```\n5. 
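（可选）快速验证安装是否成功（`--version` 为文档中列出的标志 (flag)，用于显示版本号并退出）：\n   ```powershell\n   ra-aid --version\n   ```\n6. 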
在 `.env` 文件中设置您的 API（应用程序编程接口）密钥：\n   ```env\n   ANTHROPIC_API_KEY=your_anthropic_key\n   OPENAI_API_KEY=your_openai_key\n   ```\n\n### Unix\u002FLinux 安装\nRA.Aid 可以直接使用 pip 安装：\n\n```bash\npip install ra-aid\n```\n\n### 在 macOS 上使用 Homebrew 安装\n\n```bash\nbrew tap ai-christianson\u002Fhomebrew-ra-aid\nbrew install ra-aid\n```\n\n**注意：** 在 macOS 上也可以使用上述 pip 方式安装 RA.Aid。\n\n\n### 前提条件\n\n在使用 RA.Aid 之前，您需要为所需的 AI 服务准备 API 密钥：\n\n```bash\n# Set up API keys based on your preferred provider:\n\n# For Anthropic Claude models (recommended)\nexport ANTHROPIC_API_KEY=your_api_key_here\n\n# For OpenAI models (optional)\nexport OPENAI_API_KEY=your_api_key_here\n\n# For OpenRouter provider (optional)\nexport OPENROUTER_API_KEY=your_api_key_here\n\n# For Makehub provider (optional)\nexport MAKEHUB_API_KEY=your_api_key_here\n\n# For OpenAI-compatible providers (optional)\nexport OPENAI_API_BASE=your_api_base_url\n\n# For Gemini provider (optional)\nexport GEMINI_API_KEY=your_api_key_here\n\n# For web research capabilities\nexport TAVILY_API_KEY=your_api_key_here\n```\n\n**注意：** 当使用 `--use-aider` 标志 (flag) 时，编程工具 (aider) 将根据您可用的 API (应用程序接口) 密钥自动选择其模型：\n- 如果设置了 ANTHROPIC_API_KEY，它将使用 Claude 模型\n- 如果仅设置了 OPENAI_API_KEY，它将使用 OpenAI 模型\n- 您可以设置多个 API 密钥以启用不同功能\n\n您可以从以下位置获取 API 密钥：\n- Anthropic API 密钥：https:\u002F\u002Fconsole.anthropic.com\u002F\n- OpenAI API 密钥：https:\u002F\u002Fplatform.openai.com\u002Fapi-keys\n- OpenRouter API 密钥：https:\u002F\u002Fopenrouter.ai\u002Fkeys\n- Makehub API 密钥：https:\u002F\u002Fmakehub.ai\u002F\n- Gemini API 密钥：https:\u002F\u002Faistudio.google.com\u002Fapp\u002Fapikey\n\n**注意：** `aider` 必须单独安装，因为它不包含在 RA.Aid 包中。详见 [aider-chat](https:\u002F\u002Fpypi.org\u002Fproject\u002Faider-chat\u002F) 了解更多详情。\n\n完整的安装文档可在我们的 [安装指南](https:\u002F\u002Fdocs.ra-aid.ai\u002Fquickstart\u002Finstallation) 中找到。\n\n## 使用方法\n\nRA.Aid 旨在简单而强大。使用方法如下：\n\n```bash\n# Basic usage\nra-aid -m \"Your task or query here\"\n\n# Research-only mode (no implementation)\nra-aid -m \"Explain the authentication flow\" --research-only\n\n# File logging with console warnings (default mode)\nra-aid -m \"Add new feature\" --log-mode file\n\n# Console-only logging with detailed output\nra-aid -m \"Add new feature\" --log-mode console --log-level debug\n```\n\n更多信息请参阅我们的 [使用示例](https:\u002F\u002Fdocs.ra-aid.ai\u002Fcategory\u002Fusage)、[日志系统](https:\u002F\u002Fdocs.ra-aid.ai\u002Fconfiguration\u002Flogging) 和 [内存管理](https:\u002F\u002Fdocs.ra-aid.ai\u002Fconfiguration\u002Fmemory-management) 文档。\n\n### 命令行选项\n\n- `-m, --message`: 要执行的任务或查询（必需，聊天模式除外，不能与 --msg-file 一起使用）\n- `--msg-file`: 包含任务\u002F消息的文本文件路径（不能与 --message 一起使用）\n- `--research-only`: 仅执行研究而不进行实现\n- `--provider`: 要使用的 LLM (大型语言模型) 提供商（选项：anthropic, openai, openrouter, openai-compatible, makehub, gemini）\n- `--model`: 要使用的模型名称（非 Anthropic 提供商必需）\n- `--use-aider`: 启用 aider 集成以进行代码编辑。启用后，RA.Aid 将使用 aider 的专业代码编辑功能，而不是其原生的文件修改工具。当您需要 aider 的特定编辑功能或更喜欢其代码修改方法时，此选项很有用。此功能是可选的，默认禁用。\n- `--research-provider`: 专门用于研究任务的提供商（如果未指定，则 fallback 到 --provider）\n- `--research-model`: 专门用于研究任务的模型（如果未指定，则 fallback 到 --model）\n- `--planner-provider`: 专门用于规划任务的提供商（如果未指定，则 fallback 到 --provider）\n- `--planner-model`: 专门用于规划任务的模型（如果未指定，则 fallback 到 --model）\n- `--cowboy-mode`: 跳过 Shell 命令的交互式确认\n- `--expert-provider`: 用于专家知识查询的 LLM 提供商（选项：anthropic, openai, openrouter, openai-compatible, makehub, gemini）\n- `--expert-model`: 用于专家知识查询的模型名称（非 OpenAI 提供商必需）\n- `--hil, -H`: 启用人在回路 (Human-in-the-loop) 模式，以便在任务执行期间提供交互式协助\n- `--chat`: 启用具有直接人工交互的聊天模式（隐含 --hil）\n- `--log-mode`: 
日志模式（选项：file, console）\n  - `file`（默认）：记录到文件和控制台（控制台仅显示警告和错误）\n  - `console`: 仅以指定的日志级别记录到控制台，不进行文件日志记录\n- `--log-level`: 设置特定日志级别（debug, info, warning, error, critical）\n  - 使用 `--log-mode=file` 时：控制文件日志级别（控制台仍仅显示 warning+）\n  - 使用 `--log-mode=console` 时：直接控制控制台日志级别\n  - 默认：warning\n- `--experimental-fallback-handler`: 启用实验性回退处理器 (Experimental Fallback Handler)，当同一工具连续失败 3 次时尝试修复工具调用。（推荐使用 OPENAI_API_KEY，因为 OpenAI 拥有前 5 名的工具调用模型。）详见 `ra_aid\u002Ftool_leaderboard.py` 了解更多信息。\n- `--pretty-logger`: 启用彩色面板式格式化日志输出以提高可读性。\n- `--temperature`: LLM 温度 (Temperature)（0.0-2.0），用于控制响应中的随机性\n- `--disable-limit-tokens`: 禁用 Anthropic Claude ReAct 代理 (Agent) 的 Token (词元) 限制\n- `--recursion-limit`: 代理操作的最大递归深度（默认：100）\n- `--test-cmd`: 运行测试的自定义命令。如果设置，将询问用户是否要运行测试命令\n- `--auto-test`: 每次代码更改后自动运行测试\n- `--max-test-cmd-retries`: 测试命令重试的最大次数（默认：3）\n- `--test-cmd-timeout`: 测试命令执行的超时时间（秒）（默认：300）\n- `--show-cost`: 当代理工作时显示成本信息 - 目前仅支持 Claude 模型代理\n- `--track-cost`: 跟踪 Token (词元) 使用量和成本（默认：False）\n- `--no-track-cost`: 禁用跟踪 Token (词元) 使用量和成本\n- `--max-cost`: 最大成本阈值（美元）（正浮点数）\n- `--max-tokens`: 最大 Token (词元) 阈值（正整数）\n- `--exit-at-limit`: 当达到 --max-cost 或 --max-tokens 限制时立即退出而不提示\n- `--price-performance-ratio`: Makehub API 的性价比（0.0-1.0，其中 0.0 优先考虑速度，1.0 优先考虑成本效率）\n- `--version`: 显示程序版本号并退出\n- `--server`: 启动带有 Web 界面的服务器（Alpha (测试版) 功能）\n- `--server-host`: 服务器监听的主机（默认：0.0.0.0）（Alpha (测试版) 功能）\n- `--server-port`: 服务器监听的端口（默认：1818）（Alpha (测试版) 功能）\n\n### 示例任务\n\n1. 代码分析 (Code Analysis)：\n   ```bash\n   ra-aid -m \"Explain how the authentication middleware works\" --research-only\n   ```\n\n2. 复杂变更 (Complex Changes)：\n   ```bash\n   ra-aid -m \"Refactor the database connection code to use connection pooling\" --cowboy-mode\n   ```\n\n3. 自动更新 (Automated Updates)：\n   ```bash\n   ra-aid -m \"Update deprecated API calls across the entire codebase\" --cowboy-mode\n   ```\n\n4. 代码研究 (Code Research)：\n   ```bash\n   ra-aid -m \"Analyze the current error handling patterns\" --research-only\n   ```\n\n### 人机回环模式 (Human-in-the-Loop Mode)\n\n启用交互模式以允许代理 (agent) 在任务执行期间向您提问：\n\n```bash\nra-aid -m \"Implement a new feature\" --hil\n# or\nra-aid -m \"Implement a new feature\" -H\n```\n\n此模式特别适用于：\n- 需要人类判断的复杂任务\n- 澄清模糊的需求\n- 制定架构决策\n- 验证关键变更\n- 提供特定领域知识\n\n### 网络研究 (Web Research)\n\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fai-christianson_RA.Aid_readme_a2f241205ff6.gif\" alt=\"RA.Aid Demo\" autoplay loop style=\"width: 100%; max-width: 800px;\">\n\n该代理 (agent) 具备由 [Tavily](https:\u002F\u002Ftavily.com\u002F) API (应用程序接口) 驱动的自主网络研究能力，无缝地将现实世界信息集成到其问题解决工作流中。当代理确定需要额外上下文 (context) 时，会自动进行网络研究——无需显式配置。\n\n例如，在研究现代认证实践或调查新的 API 要求时，代理将自主：\n- 搜索当前的最佳实践和安全建议\n- 查找相关文档和技术规范\n- 收集现实世界的实现示例\n- 保持对最新行业标准的了解\n\n虽然网络研究会在需要时自动进行，但您也可以显式请求以研究为重点的任务：\n\n```bash\n# Focused research task with web search capabilities\nra-aid -m \"Research current best practices for API rate limiting\" --research-only\n```\n\n确保设置您的 TAVILY_API_KEY 环境变量 (Environment variable) 以启用此功能。\n\n### 聊天模式 (Chat Mode)\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fai-christianson_RA.Aid_readme_ae49978f83b4.gif\" alt=\"Chat Mode Demo\" autoplay loop style=\"display: block; margin: 0 auto; width: 100%; max-width: 800px;\">\n\n使用 `--chat` 启用，将 ra-aid 转变为交互式助手，指导您完成研究和实施任务。就您想要构建的内容进行自然对话，共同探索选项，并分发工作——同时保持讨论的上下文 (context)。当您希望协作思考问题而不仅仅是执行命令时，此模式非常完美。\n\n### 带 Web 界面 (Web Interface) 的服务器\n\nRA.Aid 包含一个带有 Web 界面的现代服务器，提供：\n\n- 美观的深色主题聊天界面\n- 代理 (agent) 轨迹的实时流式传输 (streaming)\n- 适用于所有设备的响应式设计 (Responsive design)\n\n启动带 Web 界面的服务器：\n\n```bash\n# Start with default settings (0.0.0.0:1818)\nra-aid --server\n\n# Specify custom host and port\nra-aid --server --server-host 127.0.0.1 --server-port 3000\n```\n\n带 Web 界面的服务器命令行选项：\n- `--server`：启动带 Web 界面的服务器\n- `--server-host`：监听的主机 (默认：0.0.0.0)\n- `--server-port`：监听的端口 (默认：1818)\n\n启动服务器后，在 Web 浏览器中打开显示的 URL（例如 http:\u002F\u002Flocalhost:1818）。\n\n### 命令中断和反馈\n\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fai-christianson_RA.Aid_readme_4529107044ef.gif\" alt=\"Command Interrupt Demo\" autoplay loop style=\"display: block; margin: 0 auto; width: 100%; max-width: 800px;\">\n\n您可以随时按 `Ctrl-C` 中断代理 (agent)。这将暂停代理，允许您提供反馈、调整指令或将执行引导至新方向。如果您想完全退出程序，请再次按 `Ctrl-C`。\n\n\n### 使用 Cowboy 模式自动化 Shell (命令行环境) 命令 🏇\n\n`--cowboy-mode` 标志启用自动化 Shell 命令执行，无需确认提示。这适用于：\n\n- CI\u002FCD (持续集成\u002F持续部署) 流水线\n- 自动化测试环境\n- 批量处理操作\n- 脚本化工作流\n\n```bash\nra-aid -m \"Update all deprecated API calls\" --cowboy-mode\n```\n\n**⚠️ 重要安全说明：**\n- Cowboy 模式跳过 Shell 命令的确认提示\n- 始终在版本控制仓库 (version-controlled repositories) 中使用\n- 确保运行前工作树 (working tree) 干净\n- 提交前在 git diff (Git 差异对比) 中审查变更\n\n### 模型 (Model) 配置\n\nRA.Aid 支持多个 AI 提供商 (Provider) 和模型。默认模型是 Anthropic 的 Claude 3.7 Sonnet (`claude-3-7-sonnet-20250219`)。\n\n使用 `--use-aider` 标志时，程序员工具 (aider) 会根据可用的 API 密钥自动选择模型。如果设置了 ANTHROPIC_API_KEY，它将使用 Claude 模型，或者如果只有 OPENAI_API_KEY 可用，则回退到 OpenAI 模型。\n\n注意：专家工具可以配置为使用不同的提供商（OpenAI, Anthropic, OpenRouter, Gemini），使用 --expert-provider 标志以及相应的 EXPERT_*API_KEY 环境变量 (Environment variable)。每个提供商都需要通过适当的环境变量设置其自己的 API 密钥。\n\n#### 环境变量 (Environment Variables)\n\nRA.Aid 通过环境变量支持多个提供商：\n\n- `ANTHROPIC_API_KEY`：默认 Anthropic 提供商所需\n- `OPENAI_API_KEY`：OpenAI 提供商所需\n- `OPENROUTER_API_KEY`：OpenRouter 提供商所需\n- `MAKEHUB_API_KEY`：Makehub 提供商所需\n- `DEEPSEEK_API_KEY`：DeepSeek 提供商所需\n- `OPENAI_API_BASE`：OpenAI 兼容提供商所需，连同 `OPENAI_API_KEY`\n- 
`GEMINI_API_KEY`：Gemini 提供商所需\n\n专家工具环境变量：\n- `EXPERT_OPENAI_API_KEY`：使用 OpenAI 提供商的专家工具 API 密钥\n- `EXPERT_ANTHROPIC_API_KEY`：使用 Anthropic 提供商的专家工具 API 密钥\n- `EXPERT_OPENROUTER_API_KEY`：使用 OpenRouter 提供商的专家工具 API 密钥\n- `EXPERT_MAKEHUB_API_KEY`：使用 Makehub 提供商的专家工具 API 密钥（如果未设置则自动使用 `MAKEHUB_API_KEY`）\n- `EXPERT_OPENAI_API_BASE`：使用 OpenAI 兼容提供商的专家工具基础 URL\n- `EXPERT_GEMINI_API_KEY`：使用 Gemini 提供商的专家工具 API 密钥\n- `EXPERT_DEEPSEEK_API_KEY`：使用 DeepSeek 提供商的专家工具 API 密钥\n\n您可以在 Shell (命令行环境) 的配置文件中永久设置这些变量（例如 `~\u002F.bashrc` 或 `~\u002F.zshrc`）：\n\n```bash\n# Default provider (Anthropic)\nexport ANTHROPIC_API_KEY=your_api_key_here\n\n# For OpenAI features and expert tool\nexport OPENAI_API_KEY=your_api_key_here\n\n# For OpenRouter provider\nexport OPENROUTER_API_KEY=your_api_key_here\n\n# For Makehub provider\nexport MAKEHUB_API_KEY=your_api_key_here\n\n# For OpenAI-compatible providers\nexport OPENAI_API_BASE=your_api_base_url\n\n# 针对 Gemini provider（提供商）\nexport GEMINI_API_KEY=your_api_key_here\n```\n\n### 自定义 model（模型）示例\n\n1. **使用 Anthropic（默认）**\n   ```bash\n   # Uses default model (claude-3-7-sonnet-20250219)\n   ra-aid -m \"Your task\"\n\n   # Or explicitly specify:\n   ra-aid -m \"Your task\" --provider anthropic --model claude-3-5-sonnet-20241022\n   ```\n\n2. **使用 OpenAI**\n   ```bash\n   ra-aid -m \"Your task\" --provider openai --model gpt-4o\n   ```\n\n3. **使用 OpenRouter**\n   ```bash\n   ra-aid -m \"Your task\" --provider openrouter --model mistralai\u002Fmistral-large-2411\n   ```\n\n4. **使用 Makehub**\n   ```bash\n   ra-aid -m \"Your task\" --provider makehub --model openai\u002Fgpt-4o\n   \n   # With price-performance optimization\n   ra-aid -m \"Your task\" --provider makehub --model anthropic\u002Fclaude-4-sonnet --price-performance-ratio 0.7\n   ```\n\n5. **使用 DeepSeek**\n   ```bash\n   # Direct DeepSeek provider (requires DEEPSEEK_API_KEY)\n   ra-aid -m \"Your task\" --provider deepseek --model deepseek-reasoner\n   \n   # DeepSeek via OpenRouter\n   ra-aid -m \"Your task\" --provider openrouter --model deepseek\u002Fdeepseek-r1\n   ```\n\n6. 
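**使用 Gemini**（沿用上述示例模式的最小示例；`gemini` 是文档列出的 `--provider` 选项之一，model（模型）名称沿用下文 expert tool（专家工具）示例）\n   ```bash\n   export GEMINI_API_KEY=your_api_key_here\n   ra-aid -m \"Your task\" --provider gemini --model gemini-2.0-flash-thinking-exp-1219\n   ```\n\n7. 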
**配置 Expert provider（提供商）**\n\n   expert tool（专家工具）由 agent（代理）用于复杂逻辑和调试任务。可以使用 --expert-provider flag（标志）以及相应的 EXPERT_*API_KEY environment variables（环境变量）将其配置为使用不同的 provider（提供商）（OpenAI, Anthropic, OpenRouter, Gemini, openai-compatible）。\n\n   ```bash\n   # Use Anthropic for expert tool\n   export EXPERT_ANTHROPIC_API_KEY=your_anthropic_api_key\n   ra-aid -m \"Your task\" --expert-provider anthropic --expert-model claude-3-5-sonnet-20241022\n\n   # Use OpenRouter for expert tool\n   export OPENROUTER_API_KEY=your_openrouter_api_key\n   ra-aid -m \"Your task\" --expert-provider openrouter --expert-model mistralai\u002Fmistral-large-2411\n\n   # Use DeepSeek for expert tool\n   export DEEPSEEK_API_KEY=your_deepseek_api_key\n   ra-aid -m \"Your task\" --expert-provider deepseek --expert-model deepseek-reasoner\n\n   # Use Makehub for expert tool (automatically uses MAKEHUB_API_KEY)\n   export MAKEHUB_API_KEY=your_makehub_api_key\n   ra-aid -m \"Your task\" --expert-provider makehub --expert-model anthropic\u002Fclaude-4-sonnet\n\n   # Use default OpenAI for expert tool\n   export EXPERT_OPENAI_API_KEY=your_openai_api_key\n   ra-aid -m \"Your task\" --expert-provider openai --expert-model o1\n\n   # Use Gemini for expert tool\n   export EXPERT_GEMINI_API_KEY=your_gemini_api_key\n   ra-aid -m \"Your task\" --expert-provider gemini --expert-model gemini-2.0-flash-thinking-exp-1219\n   ```\n\n您可以添加的 Aider 特定 environment variables（环境变量）：\n\n- `AIDER_FLAGS`：可选的逗号分隔 flags（标志）列表，用于传递给底层的 aider 工具（例如，\"yes-always,dark-mode\"）\n\n```bash\n# Optional: Configure aider behavior\nexport AIDER_FLAGS=\"yes-always,dark-mode,no-auto-commits\"\n```\n\n注意：对于 `AIDER_FLAGS`，您可以指定带有或不带有前导 `--` 的 flags（标志）。多个 flags（标志）应以逗号分隔，flag（标志）周围的空格会自动处理。例如，`\"yes-always,dark-mode\"` 和 `\"--yes-always, --dark-mode\"` 都是有效的。\n\n**重要注意事项：**\n- 性能因 model（模型）而异。默认的 Claude 3.7 Sonnet model（模型）目前提供最佳且最可靠的结果。\n- Model（模型）配置通过命令行参数完成：`--provider` 和 `--model`\n- 除了 Anthropic（默认为 `claude-3-7-sonnet-20250219`）之外，所有 provider（提供商）都需要 `--model` 参数\n\n更多信息请参阅我们的 [Open Models Setup（开放模型设置）](https:\u002F\u002Fdocs.ra-aid.ai\u002Fquickstart\u002Fopen-models) 指南。\n\n## Architecture（架构）\n\nRA.Aid 实现了一个三阶段 architecture（架构），用于处理开发和研究任务：\n\n1. **Research Stage（研究阶段）**：\n   - 收集信息和上下文\n   - 分析需求\n   - 识别关键 components（组件）和 dependencies（依赖项）\n\n2. **Planning Stage（规划阶段）**：\n   - 制定详细的实施计划\n   - 将任务分解为可管理的步骤\n   - 识别潜在挑战和解决方案\n\n3. **Implementation Stage（实施阶段）**：\n   - 执行计划任务\n   - 生成代码或文档\n   - 执行必要的系统操作\n\n### Core Components（核心组件）\n\n- **Console Module（控制台模块）** (`console\u002F`)：处理控制台输出格式化和用户交互\n- **Processing Module（处理模块）** (`proc\u002F`)：管理交互处理和 workflow（工作流）控制\n- **Text Module（文本模块）** (`text\u002F`)：提供文本处理和操作 utilities（实用工具）\n- **Tools Module（工具模块）** (`tools\u002F`)：包含用于文件操作、搜索等的各种 utility tools（实用工具）\n\n## Dependencies（依赖项）\n\n### Core Dependencies（核心依赖项）\n- `langchain-anthropic`：LangChain 与 Anthropic Claude 的集成\n- `tavily-python`：用于 web research（网络研究）的 Tavily API 客户端\n- `langgraph`：基于 Graph（图）的 workflow（工作流）管理\n- `rich>=13.0.0`：终端格式化和输出\n- `GitPython==3.1.41`：Git repository（仓库）管理\n- `fuzzywuzzy==0.18.0`：模糊字符串匹配\n- `python-Levenshtein==0.23.0`：快速字符串匹配\n- `pathspec>=0.11.0`：路径规范 utilities（实用工具）\n\n### Development Dependencies（开发依赖项）\n- `pytest>=7.0.0`：Testing framework（测试框架）\n- `pytest-timeout>=2.2.0`：测试超时管理\n\n## Development Setup（开发设置）\n\n1. Clone（克隆）repository（仓库）：\n```bash\ngit clone https:\u002F\u002Fgithub.com\u002Fai-christianson\u002FRA.Aid.git\ncd RA.Aid\n```\n\n2. 
Create（创建）并 activate（激活）一个 virtual environment（虚拟环境）：\n```bash\npython -m venv venv\nsource venv\u002Fbin\u002Factivate  # On Windows use `venv\\Scripts\\activate`\n```\n\n3. Install（安装）development dependencies（开发依赖项）：\n```bash\npip install -e \".[dev]\"\n```\n\n4. Run（运行）tests（测试）：\n```bash\npython -m pytest\n```\n\n## Contributing（贡献）\n\n欢迎贡献！请遵循以下步骤：\n\n1. Fork（复刻）repository（仓库）\n2. Create（创建）一个 feature branch（功能分支）：\n```bash\ngit checkout -b feature\u002Fyour-feature-name\n```\n\n3. 进行更改并 commit（提交）：\n```bash\ngit commit -m 'Add some feature'\n```\n\n4. Push（推送）到您的 fork（复刻）：\n```bash\ngit push origin feature\u002Fyour-feature-name\n```\n\n5. Open（开启）一个 Pull Request（拉取请求）\n\n### Guidelines（指南）\n\n- 遵循 PEP 8 风格指南\n- 为新功能添加 tests（测试）\n- 根据需要更新 documentation（文档）\n- 保持 commits（提交）专注且 message（消息）清晰\n- 在提交 PR 之前确保所有 tests（测试）通过\n\n更多信息请参阅我们的 [Contributing Guide（贡献指南）](https:\u002F\u002Fdocs.ra-aid.ai\u002Fcontributing)。\n\n## License（许可证）\n\n本项目根据 Apache License 2.0 许可 - 详见 [LICENSE](LICENSE) 文件。\n\nCopyright (c) 2024 AI Christianson\n\n## Contact（联系方式）\n\n- **Issues（问题）**：请在我们的 [Issue Tracker（问题追踪器）](https:\u002F\u002Fgithub.com\u002Fai-christianson\u002FRA.Aid\u002Fissues) 上报告 bugs（错误）和 feature requests（功能请求）\n- **Repository（仓库）**：[https:\u002F\u002Fgithub.com\u002Fai-christianson\u002FRA.Aid](https:\u002F\u002Fgithub.com\u002Fai-christianson\u002FRA.Aid)\n- **Documentation（文档）**：[https:\u002F\u002Fgithub.com\u002Fai-christianson\u002FRA.Aid#readme](https:\u002F\u002Fgithub.com\u002Fai-christianson\u002FRA.Aid#readme)","# RA.Aid 快速上手指南\n\nRA.Aid 是一个自主软件开发代理（Coding Agent），基于 LangGraph 构建。它能够自主进行研究、规划和实施多步骤开发任务，支持近乎全自动的软件开发流程。\n\n## 环境准备\n\n### 系统要求\n- **Python**: 3.8 或更高版本\n- **操作系统**: Windows, Linux, macOS\n- **版本控制**: 建议在 Git 仓库中使用，以便审查变更\n\n### 前置依赖与 API 密钥\n在使用前，需配置所需 AI 服务的 API 密钥。可以通过环境变量或 `.env` 文件设置。\n\n**必需\u002F推荐密钥：**\n```bash\n# Anthropic Claude 模型（推荐）\nexport ANTHROPIC_API_KEY=your_anthropic_key\n\n# 网络研究能力（可选但推荐）\nexport TAVILY_API_KEY=your_tavily_key\n\n# 其他可选提供商\nexport OPENAI_API_KEY=your_openai_key\nexport OPENROUTER_API_KEY=your_openrouter_key\n```\n\n> **注意**：若使用 `--use-aider` 功能，需单独安装 `aider`，RA.Aid 会根据可用的 API 密钥自动选择模型。\n\n## 安装步骤\n\n### Unix \u002F Linux \u002F macOS (pip)\n直接使用 pip 安装：\n```bash\npip install ra-aid\n```\n\n### macOS (Homebrew)\n```bash\nbrew tap ai-christianson\u002Fhomebrew-ra-aid\nbrew install ra-aid\n```\n\n### Windows\n1. 安装 Python 3.8+。\n2. 安装系统依赖 `ripgrep` (需管理员权限 PowerShell)：\n   ```powershell\n   # 安装 Chocolatey (如未安装)\n   Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https:\u002F\u002Fcommunity.chocolatey.org\u002Finstall.ps1'))\n   \n   # 安装 ripgrep\n   choco install ripgrep\n   ```\n3. 
\n\n### Core Components\n\n- **Console Module** (`console\u002F`): Handles console output formatting and user interaction\n- **Processing Module** (`proc\u002F`): Manages interactive processing and workflow control\n- **Text Module** (`text\u002F`): Provides text processing and manipulation utilities\n- **Tools Module** (`tools\u002F`): Contains various utility tools for file operations, search, and more\n\n## Dependencies\n\n### Core Dependencies\n- `langchain-anthropic`: LangChain integration with Anthropic's Claude\n- `tavily-python`: Tavily API client for web research\n- `langgraph`: Graph-based workflow management\n- `rich>=13.0.0`: Terminal formatting and output\n- `GitPython==3.1.41`: Git repository management\n- `fuzzywuzzy==0.18.0`: Fuzzy string matching\n- `python-Levenshtein==0.23.0`: Fast string matching\n- `pathspec>=0.11.0`: Path specification utilities\n\n### Development Dependencies\n- `pytest>=7.0.0`: Testing framework\n- `pytest-timeout>=2.2.0`: Test timeout management\n\n## Development Setup\n\n1. Clone the repository:\n```bash\ngit clone https:\u002F\u002Fgithub.com\u002Fai-christianson\u002FRA.Aid.git\ncd RA.Aid\n```\n\n2. Create and activate a virtual environment:\n```bash\npython -m venv venv\nsource venv\u002Fbin\u002Factivate  # On Windows use `venv\\Scripts\\activate`\n```\n\n3. Install the development dependencies:\n```bash\npip install -e \".[dev]\"\n```\n\n4. Run the tests:\n```bash\npython -m pytest\n```\n\n## Contributing\n\nContributions are welcome! Please follow these steps:\n\n1. Fork the repository\n2. Create a feature branch:\n```bash\ngit checkout -b feature\u002Fyour-feature-name\n```\n\n3. Make your changes and commit them:\n```bash\ngit commit -m 'Add some feature'\n```\n\n4. Push to your fork:\n```bash\ngit push origin feature\u002Fyour-feature-name\n```\n\n5. Open a Pull Request\n\n### Guidelines\n\n- Follow the PEP 8 style guide\n- Add tests for new features\n- Update documentation as needed\n- Keep commits focused and write clear commit messages\n- Make sure all tests pass before submitting a PR\n\nSee our [Contributing Guide](https:\u002F\u002Fdocs.ra-aid.ai\u002Fcontributing) for more information.\n\n## License\n\nThis project is licensed under the Apache License 2.0 - see the [LICENSE](LICENSE) file for details.\n\nCopyright (c) 2024 AI Christianson\n\n## Contact\n\n- **Issues**: Report bugs and feature requests on our [Issue Tracker](https:\u002F\u002Fgithub.com\u002Fai-christianson\u002FRA.Aid\u002Fissues)\n- **Repository**: [https:\u002F\u002Fgithub.com\u002Fai-christianson\u002FRA.Aid](https:\u002F\u002Fgithub.com\u002Fai-christianson\u002FRA.Aid)\n- **Documentation**: [https:\u002F\u002Fgithub.com\u002Fai-christianson\u002FRA.Aid#readme](https:\u002F\u002Fgithub.com\u002Fai-christianson\u002FRA.Aid#readme)","# RA.Aid Quickstart Guide\n\nRA.Aid is an autonomous software development agent (coding agent) built on LangGraph. It can research, plan, and implement multi-step development tasks on its own, supporting a near fully automated development workflow.\n\n## Prerequisites\n\n### System Requirements\n- **Python**: 3.8 or later\n- **Operating systems**: Windows, Linux, macOS\n- **Version control**: Use it inside a Git repository so changes can be reviewed\n\n### Dependencies and API Keys\nBefore first use, configure API keys for the AI services you need. They can be set as environment variables or in a `.env` file.\n\n**Required\u002Frecommended keys:**\n```bash\n# Anthropic Claude models (recommended)\nexport ANTHROPIC_API_KEY=your_anthropic_key\n\n# Web research capability (optional but recommended)\nexport TAVILY_API_KEY=your_tavily_key\n\n# Other optional providers\nexport OPENAI_API_KEY=your_openai_key\nexport OPENROUTER_API_KEY=your_openrouter_key\n```\n\n> **Note**: The `--use-aider` feature requires installing `aider` separately; RA.Aid picks a model automatically based on the available API keys.\n\n## Installation\n\n### Unix \u002F Linux \u002F macOS (pip)\nInstall directly with pip:\n```bash\npip install ra-aid\n```\n\n### macOS (Homebrew)\n```bash\nbrew tap ai-christianson\u002Fhomebrew-ra-aid\nbrew install ra-aid\n```\n\n### Windows\n1. Install Python 3.8+.\n2. Install the system dependency `ripgrep` (PowerShell with administrator privileges):\n   ```powershell\n   # Install Chocolatey (if not already installed)\n   Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https:\u002F\u002Fcommunity.chocolatey.org\u002Finstall.ps1'))\n   \n   # Install ripgrep\n   choco install ripgrep\n   ```\n3. Install RA.Aid and the Windows-specific dependency:\n   ```powershell\n   pip install ra-aid\n   pip install pywin32\n   ```\n\n## Basic Usage\n\n### Running a Task\nThe most basic usage is to describe the task with the `-m` argument:\n```bash\nra-aid -m \"Add user login functionality to the project\"\n```\n\n### Common Modes\n- **Research-only mode** (no code changes are made):\n  ```bash\n  ra-aid -m \"Explain the current authentication flow\" --research-only\n  ```\n- **Verbose logging**:\n  ```bash\n  ra-aid -m \"Fix the login bug\" --log-mode console --log-level debug\n  ```\n- **Human-in-the-loop mode** (asks the user questions during execution):\n  ```bash\n  ra-aid -m \"Refactor the database module\" --hil\n  ```\n\n### ⚠️ Safety Warnings\n- **Automatic execution**: RA.Aid runs shell commands and modifies code automatically.\n- **Version control**: Always work inside a Git repository and review changes with `git diff` before committing.\n- **Cowboy mode**: `--cowboy-mode` skips command confirmation prompts; use it with caution.\n- **Use at your own risk**: The tool is in beta and comes with no warranty.","A backend engineer needs to urgently integrate Stripe payments into an existing Python e-commerce project, which involves researching third-party documentation, modifying core code, and verifying the whole flow with tests.\n\n### Without RA.Aid\n- Reading Stripe's official docs by hand is slow, and it is easy to miss key configuration options or version differences.\n- Task steps must be decomposed manually; poor planning leads to coupled code or logic errors and a high rework rate.\n- Constant context switching between research, coding, and testing scatters attention, hurting efficiency and inviting mistakes.\n- Fear of breaking the existing payment logic creates a heavy mental burden and discourages bold refactoring.\n\n### With RA.Aid\n- RA.Aid automatically searches the web for the latest API documentation, quickly gathering accurate integration details and analyzing the codebase context.\n- RA.Aid breaks the task into concrete steps (install dependencies, configure keys, write the endpoints) and executes them autonomously in order.\n- RA.Aid runs shell commands and edits code on its own; the engineer only needs to review the git diff and confirm the changes.\n- RA.Aid calls a reasoning model at key points to keep the logic sound, reducing the risk of breaking existing features and building confidence.\n\nRA.Aid automates a tedious multi-step development process, turning the developer from an executor into a supervisor and noticeably improving delivery speed and code quality.","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fai-christianson_RA.Aid_3c81f1a5.png","ai-christianson","Andrew I. Christianson","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002Fai-christianson_429e96fb.jpg","building https:\u002F\u002Fgobii.ai and https:\u002F\u002Fra-aid.ai","@gobii-ai ",null,"ai_christianson","https:\u002F\u002Fgithub.com\u002Fai-christianson",[85,89,93,97,100,104,107,111],{"name":86,"color":87,"percentage":88},"Python","#3572A5",87.5,{"name":90,"color":91,"percentage":92},"TypeScript","#3178c6",11.7,{"name":94,"color":95,"percentage":96},"JavaScript","#f1e05a",0.3,{"name":98,"color":99,"percentage":96},"Makefile","#427819",{"name":101,"color":102,"percentage":103},"CSS","#663399",0.1,{"name":105,"color":106,"percentage":103},"HTML","#e34c26",{"name":108,"color":109,"percentage":110},"Nix","#7e7eff",0,{"name":112,"color":113,"percentage":110},"Shell","#89e051",2217,213,"2026-04-01T11:54:37","Apache-2.0","Windows, Linux, macOS","No local GPU required (relies on cloud API calls)","Not specified",{"notes":122,"python":123,"dependencies":124},"1. At least one LLM provider API key (e.g., Anthropic or OpenAI) plus a Tavily API key must be configured. 2. Windows users must first install the ripgrep system tool via Chocolatey. 3. The --use-aider flag requires installing the aider package separately. 4. The tool automatically executes shell commands and code changes; use it inside a version-controlled repository and review changes carefully. 5. Multiple model provider configurations are supported (Anthropic, OpenAI, OpenRouter, Gemini, etc.).","3.8+",[125,126,127,128,129],"langgraph","langchain","pywin32 (Windows)","ripgrep (Windows system tool)","aider (optional external tool)",[13,14,15],[132,133,134],"agents","ai","software-engineering","2026-03-27T02:49:30.150509","2026-04-06T07:14:50.719375",[138,143,147,152,157,161],{"id":139,"question_zh":140,"answer_zh":141,"source_url":142},4812,"How do I enable the persistent memory feature?","Starting with v0.16.0, RA.Aid ships a complete persistent memory system. Key facts, code snippets, and research notes are persisted automatically and associated with human input. See the documentation for details: https:\u002F\u002Fdocs.ra-aid.ai\u002Fconfiguration\u002Fmemory-management","https:\u002F\u002Fgithub.com\u002Fai-christianson\u002FRA.Aid\u002Fissues\u002F67",{"id":144,"question_zh":145,"answer_zh":146,"source_url":142},4813,"Where does RA.Aid store its run state and chat history?","State is stored in the `.raaid` folder at the root of the Git repository. Global memory is kept in `.raaid\u002Fruns\u002F[run-id]\u002Fmemory.json`, and the complete chat history in `.raaid\u002Fruns\u002F[run-id]\u002Fchat.history.md`; each run has its own isolated history.
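\n\nA minimal sketch for inspecting this layout from Python, assuming the paths described above (the snippet is illustrative, not part of RA.Aid):\n\n```python\n# Sketch: list per-run memory files under .raaid\u002Fruns\u002F, assuming the\n# layout described in this answer; adjust paths if your version differs.\nimport json\nfrom pathlib import Path\n\nfor memory_file in Path('.raaid\u002Fruns').glob('*\u002Fmemory.json'):\n    run_id = memory_file.parent.name\n    memory = json.loads(memory_file.read_text())\n    print(run_id, '->', len(memory), 'top-level entries')\n```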
",{"id":148,"question_zh":149,"answer_zh":150,"source_url":151},4814,"How do I configure different providers and models for the planner and the expert model?","They can be specified separately via command-line arguments. For example, use `--planner-provider openai --planner-model o3-mini` to configure the planner and `--expert-provider anthropic --expert-model claude-3-7-sonnet` to configure the expert model, so that different tasks use different models.","https:\u002F\u002Fgithub.com\u002Fai-christianson\u002FRA.Aid\u002Fissues\u002F168",{"id":153,"question_zh":154,"answer_zh":155,"source_url":156},4815,"How does RA.Aid objectively evaluate agent performance?","The project uses the SWE-bench evaluation framework to objectively assess SWE agent performance. Ideally this runs as a GitHub Action so that performance can be tracked over time, regressions can be detected, and multiple configurations can be tested (different models, web research on or off, and so on).","https:\u002F\u002Fgithub.com\u002Fai-christianson\u002FRA.Aid\u002Fissues\u002F12",{"id":158,"question_zh":159,"answer_zh":160,"source_url":156},4816,"How is the system's token estimation rate set?","To make prediction and evaluation runs more accurate, the token estimation rate was adjusted to 3 bytes per token (previously 4). This helps address inaccurate context-window estimates.",{"id":162,"question_zh":163,"answer_zh":164,"source_url":165},4817,"What should I do about infinite loops when using the openai-compatible provider?","This is a known issue, usually caused by the model generating prose instead of code. It was fixed in PR #37; update to a release that includes the fix, or see the related pull request for details.","https:\u002F\u002Fgithub.com\u002Fai-christianson\u002FRA.Aid\u002Fissues\u002F35",[167,172,177,182,187,192,197,202,207,212,217,222,227,232,237,242,247,252,257,262],{"id":168,"version":169,"summary_zh":170,"released_at":171},104317,"v0.30.2","## [0.30.2] - 2025-05-06\n\n- Handle list response from LLM API\n","2025-05-07T01:51:45",{"id":173,"version":174,"summary_zh":175,"released_at":176},104318,"v0.30.1","## [0.30.1] - 2025-05-06\n\n- Switch to CIAYN backend for `gemini-2.5-pro-preview-05-06`\n","2025-05-07T00:22:06",{"id":178,"version":179,"summary_zh":180,"released_at":181},104319,"v0.30.0","## [0.30.0] - 2025-05-06\n\n### Added\n- **Agent Thread Management:** Introduced a new system (`ra_aid\u002Futils\u002Fagent_thread_manager.py`) for managing the lifecycle of agent threads, allowing for better control and monitoring of running agents. 
Includes functions to register, unregister, stop, and check the status of agent threads.\n- **Session Deletion API:** Added a `DELETE \u002Fv1\u002Fsession\u002F{session_id}` endpoint to allow for stopping an active agent session and marking it as \"halting\" (`ra_aid\u002Fserver\u002Fapi_v1_sessions.py`).\n- **Session ID in Agent Creation:** The `create_agent` function and its callers now utilize a `session_id` for improved agent tracking and context management (`ra_aid\u002Fagent_utils.py`, `ra_aid\u002Fagents\u002Fresearch_agent.py`, `ra_aid\u002Fserver\u002Fapi_v1_spawn_agent.py`).\n- **User Query Trajectory in UI:** Added a new `UserQueryTrajectory.tsx` component to display the initial user query in the frontend timeline.\n- **Copy to Clipboard Button in UI:** Implemented a `CopyToClipboardButton.tsx` component and integrated it into various UI parts (e.g., `MarkdownCodeBlock.tsx`, Task and Expert Response trajectories) for easy content copying.\n- **Persistent CLI Configuration:** Users can now set and persist default LLM provider and model via CLI (`--set-default-provider`, `--set-default-model`), stored in `config.json` in the `.ra-aid` directory (`ra_aid\u002Fconfig.py`).\n- **Tests for Agent Thread Manager:** Added new unit tests for the agent thread management module (`tests\u002Fra_aid\u002Futils\u002Ftest_agent_thread_manager.py`).\n- **Tests for Session Deletion API:** Added new tests for the session deletion API endpoint (`tests\u002Fra_aid\u002Fserver\u002Ftest_api_v1_sessions.py`).\n\n### Changed\n- **Default Gemini Model:** Updated the default Google Gemini model to `gemini-2.5-pro-preview-05-06` (from `gemini-2.5-pro-preview-03-25`) in `ra_aid\u002F__main__.py`, `ra_aid\u002Fmodels_params.py`, `docs\u002Fdocs\u002Fquickstart\u002Frecommended.md`, and related tests.\n- **Async Tool Wrapper Optimization:** Refined the creation of synchronous wrappers for asynchronous tools to only pass necessary (non-default or required) arguments to the underlying coroutine, improving efficiency (`ra_aid\u002Ftool_configs.py`).\n- **Agent Creation Tests:** Updated tests for `create_agent` to reflect the new `session_id` parameter (`tests\u002Fra_aid\u002Ftest_agent_utils.py`).\n- **Session Statuses:** The `Session` model now includes 'halting' and 'halted' statuses to support the new session termination API.\n- **User Query Storage:** The initial `user_query` is now stored with session and trajectory data.\n- **`DEFAULT_SHOW_COST`:** Changed to `True` by default.\n\n### Fixed\n- **Tool Name Sanitization:** Corrected an issue where tool names with special characters (`.` or `-`) could cause errors during the creation of synchronous wrappers for async tools. 
These characters are now consistently replaced with `_` (`ra_aid\u002Ftool_configs.py`).\n- **Token Limiter Model Name Handling:** Improved `get_model_token_limit` in `ra_aid\u002Fanthropic_token_limiter.py` to better handle model name variations for token limit lookups.\n","2025-05-06T23:55:06",{"id":183,"version":184,"summary_zh":185,"released_at":186},104320,"v0.29.0","## [0.29.0] 2025-04-24\n\n### Changed\n- **Frontend Port Configuration:**\n    - Frontend development server port is now configurable via `VITE_FRONTEND_PORT` environment variable (defaults to 5173) (`frontend\u002Fweb\u002Fvite.config.js`).\n    - Frontend now dynamically determines the backend port using `VITE_BACKEND_PORT` in dev (default 1818) and `window.location.port` in production (`frontend\u002Fcommon\u002Fsrc\u002Fstore\u002FclientConfigStore.ts`).\n- **Expert Model Temperature Handling:** The backend (`ra_aid\u002Fllm.py`) now checks if an expert model supports the `temperature` parameter before passing it, preventing errors with models like newer OpenAI versions that don't. It continues to set `reasoning_effort` to `\"high\"` where supported.\n- **OpenAI Model Definitions:** Updated definitions for `o4-mini` and `o3` in `ra_aid\u002Fmodels_params.py` to set `supports_temperature=False` and `supports_reasoning_effort=True`.\n\n### Added\n- **Frontend Development Documentation:** Added instructions to `docs\u002Fdocs\u002Fcontributing.md` on running the frontend dev server and configuring ports using environment variables.\n- **New OpenAI Model Definitions:** Added definitions for `o4-mini-2025-04-16`, `o3-2025-04-16`, and `o3-mini-2025-01-31` to `ra_aid\u002Fmodels_params.py`.\n\n### Fixed\n- **Custom Tool Result Handling:** Ensured results from custom tools are always wrapped in a Langchain `BaseMessage` (`AIMessage`) to maintain consistency (`ra_aid\u002Fagent_backends\u002Fciayn_agent.py`).\n- **Custom Tool Console Output:** Corrected minor formatting issues (escaped newlines) in the console output message when executing custom tools (`ra_aid\u002Fagent_backends\u002Fciayn_agent.py`).\n","2025-04-24T16:03:43",{"id":188,"version":189,"summary_zh":190,"released_at":191},104321,"v0.28.1","## [0.28.1] 2025-04-17\n\n- Update web prebuilt assets\n","2025-04-17T15:43:59",{"id":193,"version":194,"summary_zh":195,"released_at":196},104322,"v0.28.0","## [0.28.0] 2025-04-17\n\n### Documentation\n- Updated expert model API key environment variables (`EXPERT_GEMINI_API_KEY`, `EXPERT_DEEPSEEK_API_KEY`) and clarified selection priority in `docs\u002Fdocs\u002Fconfiguration\u002Fexpert-model.md`.\n- Updated recommendation to Google Gemini 2.5 Pro as the primary default model in `docs\u002Fdocs\u002Fintro.md` & `docs\u002Fdocs\u002Fquickstart\u002Frecommended.md`, explaining automatic detection via `GEMINI_API_KEY`.\n\n### Frontend\n- Improved autoscroll logic in `frontend\u002Fcommon\u002Fsrc\u002Fcomponents\u002FDefaultAgentScreen.tsx`.\n- Added new trajectory visualization components for file modifications: `FileStrReplaceTrajectory.tsx` and `FileWriteTrajectory.tsx` in `frontend\u002Fcommon\u002Fsrc\u002Fcomponents\u002Ftrajectories\u002F`.\n- Integrated new trajectory components into `frontend\u002Fcommon\u002Fsrc\u002Fcomponents\u002FTrajectoryPanel.tsx` and `frontend\u002Fcommon\u002Fsrc\u002Fcomponents\u002Ftrajectories\u002Findex.ts`.\n\n### Backend Core & Configuration\n- Refined expert model provider selection logic in `ra_aid\u002F__main__.py` with updated priority order based on API keys.\n- Minor cleanup in 
`ra_aid\u002Fagent_backends\u002Fciayn_agent.py` (removed unused import, refined fallback warning).\n- Set default backend for `o4-mini` to `CIAYN` in `ra_aid\u002Fmodels_params.py`.\n\n### Tools & Prompts\n- Added `file_str_replace` tool (`ra_aid\u002Ftools\u002Ffile_str_replace.py`) for replacing strings in files.\n- Replaced `write_file_tool` with `put_complete_file_contents` tool (`ra_aid\u002Ftools\u002Fwrite_file.py`) for writing complete file content.\n- Updated `read_file_tool` (`ra_aid\u002Ftools\u002Fread_file.py`) to strip whitespace from filepaths.\n- Added `file_str_replace` and `put_complete_file_contents` to tool configurations and removed old `write_file_tool` (`ra_aid\u002Ftool_configs.py`).\n- Removed `ripgrep_search` tool from default CIAYN tools (use `run_shell_command` instead) (`ra_aid\u002Ftool_configs.py`).\n- Updated core agent prompts (Research, Planning, Implementation) to emphasize using `rg` via `run_shell_command`, mandate `emit_research_notes`, and refine instructions (`ra_aid\u002Fprompts\u002F`).\n\n### Testing\n- Added tests for fallback warning logic in `tests\u002Fra_aid\u002Fagent_backends\u002Ftest_ciayn_fallback_warning.py`.\n- Updated tests for `put_complete_file_contents` tool in `tests\u002Fra_aid\u002Ftools\u002Ftest_write_file.py`.\n","2025-04-17T15:12:14",{"id":198,"version":199,"summary_zh":200,"released_at":201},104323,"v0.27.0","## [0.27.0] 2025-04-16\n\n### Added\n- Support for `o4-mini` and `o3` models\n\n### Changed\n- **Default Model\u002FProvider Logic (`ra_aid\u002F__main__.py`):**\n    - Changed the default OpenAI model from `gpt-4o` to `o4-mini`.\n    - Updated the default LLM provider selection priority based on available API keys to: Gemini (`GEMINI_API_KEY`), then OpenAI (`OPENAI_API_KEY`), then Anthropic (`ANTHROPIC_API_KEY`).\n- **Expert Model Selection Logic (`ra_aid\u002F__main__.py`, `ra_aid\u002Fllm.py`):**\n    - Introduced dedicated environment variables for expert model API keys (e.g., `EXPERT_OPENAI_API_KEY`, `EXPERT_ANTHROPIC_API_KEY`).\n    - Updated the priority order for selecting the *expert* provider when none is explicitly set: `EXPERT_OPENAI_API_KEY` > `GEMINI_API_KEY` > `EXPERT_ANTHROPIC_API_KEY` > `DEEPSEEK_API_KEY`.\n    - Refined fallback logic: If no specific expert key is found, it uses the main provider configuration. A special case ensures that if the main provider is OpenAI and no expert model is specified, the expert model defaults to auto-selection (prioritizing `o3`).\n    - Updated the default OpenAI *expert* model selection to prioritize only `\"o3\"`. 
An error is now raised if `\"o3\"` is unavailable via the API key and no specific expert model was requested by the user.\n- **Model Parameters (`ra_aid\u002Fmodels_params.py`):**\n    - Added configuration parameters (token limits, capabilities) for the `o4-mini` and `o3` models.\n\n### Testing (`tests\u002Fra_aid\u002Ftest_default_provider.py`, `tests\u002Fra_aid\u002Ftest_llm.py`)\n- Added\u002Fupdated tests to verify the new default provider logic, ensuring correct prioritization.\n- Added\u002Fupdated tests for expert model selection to reflect the new prioritization and the default selection of `o3` for OpenAI expert.\n","2025-04-16T20:24:55",{"id":203,"version":204,"summary_zh":205,"released_at":206},104324,"v0.26.0","## [0.26.0] 2025-04-16\n\n### Frontend\n- Implement improved autoscroll logic with user scroll detection in `DefaultAgentScreen.tsx`.\n- Add `Ctrl+Space` shortcut for new session and completion message in `DefaultAgentScreen.tsx`.\n- Make session title header sticky in `DefaultAgentScreen.tsx`.\n- Add `Ctrl+Enter` (submit) and `Ctrl+Shift+Enter` (research-only) shortcuts with visual key indicators in `InputSection.tsx`.\n- Create new `EnterKeySvg.tsx` component for shortcut key visuals.\n- Add `updateSessionDetails` action to `sessionStore.ts` for faster session name updates via WebSocket.\n\n### Backend\n- Add `--cowboy-mode` flag with server warning confirmation in `__main__.py`.\n- Adjust console output padding in `console\u002Fformatting.py` and `console\u002Foutput.py`.\n- Refactor `research_notes_formatter.py` to return raw content.\n- Add model parameters for `gpt-4.1`, `gpt-4.1-mini`, `gpt-4.1-nano` in `models_params.py`.\n- Update CIAYN agent prompts to mandate triple quotes for all string tool arguments in `prompts\u002Fciayn_prompts.py`.\n- Broadcast full session details immediately after creation via WebSocket in `server\u002Fapi_v1_spawn_agent.py`.\n\n### Build\n- Update prebuilt frontend assets (`index-*.js`, `index-*.css`, `index.html`).\n","2025-04-16T15:06:29",{"id":208,"version":209,"summary_zh":210,"released_at":211},104325,"v0.25.0","## [0.25.0] 2025-04-09\n\n### Backend Changes\n- Refactored `ra_aid\u002Ftools\u002Fripgrep.py`:\n  - Removed old search parameter string construction.\n  - Introduced new variables: `final_output`, `final_return_code`, `final_success` for capturing command-line output and error handling.\n  - Updated trajectory recording logic using consolidated parameters (`tool_parameters` and `step_data`).\n  - Enhanced UTF-8 decoding with error replacement and improved error panel displays.\n- Updated backend modules:\n  - Modified `ra_aid\u002Fproject_info.py` and `ra_aid\u002Fserver\u002Fapi_v1_spawn_agent.py` for improved logging, error handling, and user feedback.\n  - Updated server-side prebuilt assets (JavaScript, CSS, and `index.html`) for better asset management.\n\n### Frontend Changes\n- Updated several UI components in `frontend\u002Fcommon` including:\n  - `DefaultAgentScreen.tsx`, `SessionList.tsx`, `SessionSidebar.tsx`, `TimelineStep.tsx`, and `TrajectoryPanel.tsx`.\n- Adjusted state management and utility\u002Fstore files to support updated UI displays for agent outputs, sessions, and trajectories.\n\n### Configuration & Minor Changes\n- Modified configuration files:\n  - Updated `.gitignore` and `.ra-prompt` with newer patterns.\n  - Revised `frontend\u002Fcommon\u002Fpackage.json` and `frontend\u002Fcommon\u002Ftailwind.preset.js` for improved dependency and styling management.\n  - Updated `package-lock.json` 
files and server-side asset references.\n","2025-04-09T22:51:17",{"id":213,"version":214,"summary_zh":215,"released_at":216},104326,"v0.24.0","## [0.24.0] 2025-04-08\n\n### Added\n- Web UI is now available at localhost:1818 when ra-aid is started with `--server`\n- Session status tracking (pending, running, completed, failed) in the database and API.\n- Robust WebSocket connection handling in the frontend with auto-reconnect and heartbeats (`frontend\u002Fcommon\u002Fsrc\u002Fwebsocket\u002Fconnection.ts`).\n- Serve prebuilt web UI static files directly from the backend server (`ra_aid\u002Fserver\u002Fserver.py`, `ra_aid\u002Fserver\u002Fprebuilt\u002F`).\n- `broadcast_sender.py` module for decoupled WebSocket message broadcasting via a queue (`ra_aid\u002Fserver\u002Fbroadcast_sender.py`).\n- `SessionNotFoundError` custom exception (`ra_aid\u002Fexceptions.py`).\n- `build:prebuilt` npm script to build frontend assets into the backend distribution (`frontend\u002Fpackage.json`).\n\n### Changed\n- Refactored backend WebSocket broadcasting to use the new `broadcast_sender` queue, improving reliability and decoupling (`ra_aid\u002Fserver\u002Fserver.py`, `ra_aid\u002Fserver\u002Fapi_v1_spawn_agent.py`).\n- Updated various frontend components and stores to integrate with the new WebSocket logic and session status (`frontend\u002Fcommon\u002F`).\n- Enhanced logging in `ra_aid\u002Fagents\u002Fresearch_agent.py` with thread IDs.\n\n### Fixed\n- Resolved WebSocket message serialization error for `session_update` payloads by ensuring proper JSON serialization (`mode='json'`) before queuing messages in the new broadcast sender mechanism (`ra_aid\u002Fserver\u002Fapi_v1_spawn_agent.py`, `ra_aid\u002Fserver\u002Fbroadcast_sender.py`).\n\n### Build\n- Added script (`frontend\u002Fpackage.json#build:prebuilt`) to build and copy frontend assets to `ra_aid\u002Fserver\u002Fprebuilt\u002F` for server distribution.\n\n### Internal\n- Added database migration for the new session `status` field (`ra_aid\u002Fmigrations\u002F015_20250408_140800_add_session_status.py`).\n- Updated `.gitignore`.\n","2025-04-08T20:33:26",{"id":218,"version":219,"summary_zh":220,"released_at":221},104327,"v0.23.0","## [0.23.0] 2025-04-07\n\n### Added\n- Added configuration parameters for the `gemini-2.5-pro-preview-03-25` model (`ra_aid\u002Fmodels_params.py`).\n\n### Changed\n- Updated default provider logic in `ra_aid\u002F__main__.py` to prioritize the Gemini provider (`gemini-2.5-pro-preview-03-25` model) if the `GEMINI_API_KEY` environment variable is set. 
The previous OpenAI\u002FAnthropic logic serves as a fallback.\n- Updated default *expert* provider logic in `ra_aid\u002F__main__.py` to prioritize Gemini (`gemini-2.5-pro-preview-03-25` model) if `GEMINI_API_KEY` is set, before falling back to OpenAI or DeepSeek.\n\n### Fixed\n- Improved robustness of triple-quoted string handling in tool calls generated by the CIAYN agent, particularly for `put_complete_file_contents`, ensuring the fix is applied only when necessary (`ra_aid\u002Fagent_backends\u002Fciayn_agent.py`).\n","2025-04-07T19:25:36",{"id":223,"version":224,"summary_zh":225,"released_at":226},104328,"v0.22.0","## [0.22.0] 2025-04-03\n\n### Added\n\n- Support for Anthropic's `claude-3.7` series models (`ra_aid\u002Fmodels_params.py`, `tests\u002Fra_aid\u002Ftest_anthropic_token_limiter.py`).\n- Support for Fireworks AI provider and models (`fireworks\u002Ffirefunction-v2`, `fireworks\u002Fdbrx-instruct`) (`ra_aid\u002Fmodels_params.py`, `ra_aid\u002Fllm.py`).\n- Implicit think tag detection: `process_thinking_content` now checks for `\u003Cthink>` tags even if `supports_think_tag` is not explicitly `True` in model config, provided it's not `False` and the content starts with the tag (`ra_aid\u002Ftext\u002Fprocessing.py`, `tests\u002Fra_aid\u002Ftext\u002Ftest_process_thinking.py`).\n- Command-line arguments `--project-dir` and `--db-path` added to `ra-aid usage latest` and `ra-aid usage all` subcommands for specifying database location (`ra_aid\u002Fscripts\u002Fcli.py`, `ra_aid\u002Fscripts\u002Fall_sessions_usage.py`, `ra_aid\u002Fscripts\u002Flast_session_usage.py`).\n- Reinitialization capability for Singleton classes via `_initialize` method (`ra_aid\u002Futils\u002Fsingleton.py`).\n- Metadata tracking (`model_name`, `provider`) added during LLM initialization (`ra_aid\u002Fllm.py`, `tests\u002Fra_aid\u002Ftest_llm.py`).\n\n### Changed\n\n- **Refactored Callback Handling:** Replaced `AnthropicCallbackHandler` with a generalized `DefaultCallbackHandler` located in `ra_aid\u002Fcallbacks\u002Fdefault_callback_handler.py`. This new handler supports multiple providers, improves cost\u002Ftoken tracking logic, standardizes initialization, enhances database interaction for trajectory logging, and provides better context management (`ra_aid\u002Fcallbacks\u002Fdefault_callback_handler.py`, `ra_aid\u002Fagent_utils.py`, `ra_aid\u002Fconsole\u002Foutput.py`, `tests\u002Fra_aid\u002Fcallbacks\u002Ftest_default_callback_handler.py`, `tests\u002Fra_aid\u002Ftest_token_usage_tracking.py`).\n- **Refactored Thinking Processing:** Significantly updated `process_thinking_content` in `ra_aid\u002Ftext\u002Fprocessing.py` for clearer logic flow. It now explicitly handles structured thinking (list format) separately from string-based `\u003Cthink>` tag extraction. 
The logic for tag extraction now depends on the `supports_think_tag` configuration value (True: always check, False: never check, None: check only if content starts with `\u003Cthink>`) (`ra_aid\u002Ftext\u002Fprocessing.py`, `tests\u002Fra_aid\u002Ftext\u002Ftest_process_thinking.py`, `tests\u002Fra_aid\u002Fagent_backends\u002Ftest_ciayn_agent_think_tag.py`).\n- **Refactored Token Limiting:** Renamed `sonnet_35_state_modifier` to `base_state_modifier` in `ra_aid\u002Fanthropic_token_limiter.py` for broader applicability and adjusted associated logic (`ra_aid\u002Fanthropic_token_limiter.py`, `ra_aid\u002Fagent_utils.py`, `tests\u002Fra_aid\u002Ftest_anthropic_token_limiter.py`).\n- Updated LLM initialization (`initialize_llm` in `ra_aid\u002Fllm.py`) to include provider\u002Fmodel metadata and refine DeepSeek provider logic for different DeepSeek models (`ra_aid\u002Fllm.py`, `tests\u002Fra_aid\u002Ftest_llm.py`).\n- Improved rate limit error handling and retry logic in `_handle_api_error` (`ra_aid\u002Fagent_utils.py`).\n- Removed redundant console output from `ripgrep_search` tool; results are now only in the returned dictionary (`ra_aid\u002Ftools\u002Fripgrep.py`).\n- Updated numerous unit tests across the codebase to reflect the extensive refactoring in callbacks, thinking processing, token limiting, and LLM initialization.\n- Updated project dependencies as recorded in `uv.lock` and `pyproject.toml`.\n\n### Fixed\n\n- Corrected an import path typo in `ra_aid\u002Fdependencies.py`.\n- Ensured the correct callback handler instance (`DefaultCallbackHandler`) is used for fetching cost information for display (`ra_aid\u002Fconsole\u002Foutput.py`).\n- Addressed various test failures arising from the refactoring of core components.\n\n### Removed\n\n- Removed the dedicated `ra_aid\u002Fcallbacks\u002Fanthropic_callback_handler.py` file. Its functionality has been merged and generalized into `ra_aid\u002Fcallbacks\u002Fdefault_callback_handler.py`.\n","2025-04-02T19:20:23",{"id":228,"version":229,"summary_zh":230,"released_at":231},104329,"v0.21.0","## [0.21.0] 2025-03-27\n\n### Added\n- Add `include_paths` argument to ripgrep tool (`881d4f0`).\n- Add trajectory hooks (`3160dcd`).\n- Add support for `--msg-file` argument to read task\u002Fmessage from a file (`a827742`).\n\n### Changed\n- Improve CIAYN agent's robust code handling (`ac5abb3`, `38340b2`).\n- Optimize LLM-based tool call extraction for specific models (`67b268f`).\n- Set max context for Fireworks models (`44cbe84`).\n- Update model parameters for R1 on Fireworks (`328d49d`).\n- Correct CIAYN capitalization (`d07ccc0`).\n- Remove duplicate example for ra-aid command in documentation (`3d119e7`).\n- Update README to clarify command line options for message and msg-file arguments (`a827742`).\n\n### Fixed\n- Fix tests (`ce5301d`, `feb9a11`).\n- Fix ripgrep tool functionality (`4d28d68`).\n- Add tests for `--msg-file` argument handling and exclusivity (`a827742`).\n","2025-03-27T19:13:34",{"id":233,"version":234,"summary_zh":235,"released_at":236},104330,"v0.20.0","## [0.20.0] 2025-03-26\n\n### Added\n- Added `mark_research_complete_no_implementation_required` tool to prevent infinite research loops when no implementation is needed after research. (83b03bf)\n\n### Changed\n- Improved messaging around API rate limits to be less alarming. (9baee8c)\n- Updated dependencies and optimized model parameters for Gemini. (4197822)\n- Improved support for the `gemini-2.5-pro-exp-03-25` model. (110efc6)\n- Updated model parameters. 
(83d2192)\n\n### Fixed\n- Fixed tool call validation logic, improving compatibility with models like `gemini-2.5-pro-exp-03-25`. (3e2d888)\n- Prevent console logs from showing when `log_mode` is set to \"file\". (2ca0da2)\n- Fixed a test related to the `mark_research_complete_no_implementation_required` tool. (065747b)\n","2025-03-26T17:52:27",{"id":238,"version":239,"summary_zh":240,"released_at":241},104331,"v0.19.1","## [0.19.1] 2025-03-25\n\n### Added\n- Support for Fireworks.ai LLM provider with error handling\n- Support for Groq provider\n- Cloudflare build scripts and logging\n\n### Changed\n- Updated model parameters and providers configuration\n- Multiple package-lock.json updates\n\n### Fixed\n- npm version specification in package.json\n","2025-03-25T11:37:17",{"id":243,"version":244,"summary_zh":245,"released_at":246},104332,"v0.18.4","## [0.18.4] 2025-03-24\n\n### Added\n- Custom Tools Feature\n  - Added support for custom tools with `--custom-tools \u003Cpath>` CLI flag\n  - Implemented MCP (Model Context Protocol) client for integrating external tool providers\n  - Created documentation on custom tools usage in `docs\u002Fdocs\u002Fusage\u002Fcustom-tools.md`\n  - Added example code in `examples\u002Fcustom-tools-mcp\u002F` directory\n- API Documentation\n  - Added comprehensive OpenAPI documentation for REST API endpoints\n  - Implemented API documentation in Docusaurus with new MDX files\n  - Added YAML OpenAPI specification file `docs\u002Fra-aid.openapi.yml`\n  - Created script to generate OpenAPI documentation automatically\n- Session Usage Statistics\n  - Added CLI commands for retrieving usage statistics for all sessions and the latest session\n  - Enhanced session and trajectory repositories with new methods\n  - Moved scripts into proper Python package structure (`ra_aid\u002Fscripts\u002F`)\n- Web UI Improvements\n  - Added new UI components including input box, session screen, and buttons\n  - Improved session management UI\n  - Enhanced styling and layout\n\n### Changed\n- WebSocket Endpoint Migration\n  - Migrated WebSocket endpoint from `\u002Fws` to `\u002Fv1\u002Fws` to align with REST API endpoint pattern\n  - Updated root HTML endpoint to reflect the new WebSocket path\n- Project Maintenance\n  - Refactored agent creation logic to use model capabilities for selecting agent type\n  - Improved model detection and normalization\n  - Updated dependencies via uv.lock\n  - Fixed various typos and improved prompts\n","2025-03-24T11:55:29",{"id":248,"version":249,"summary_zh":250,"released_at":251},104333,"v0.18.0","## [0.18.0] 2025-03-19\n\n### Added\n- Project State Directory Feature\n  - Added `--project-state-dir` parameter to allow customization of where project data is stored\n  - Modified database connection, logging, and memory wiping to support custom directories\n  - Created comprehensive documentation in docs\u002Fdocs\u002Fconfiguration\u002Fproject-state.md\n- Ollama Integration\n  - Added support for running models locally via Ollama\n  - Implemented configuration options including model selection and context window size\n  - Added documentation for Ollama in docs\u002Fdocs\u002Fconfiguration\u002Follama.md\n  - Updated open-models.md to include Ollama as a supported provider\n- Web UI and API Progress (partially implemented)\n  - Created API endpoints for session management (create, list, retrieve)\n  - Added trajectory tracking and visualization\n  - Implemented UI components for session management\n  - Added server infrastructure for web 
interface\n- Token Usage and Cost Tracking\n  - Enhanced trajectory tracking with token counting\n  - Added session-level token usage and cost tracking\n  - Improved cost calculation and logging\n","2025-03-19T20:21:24",{"id":253,"version":254,"summary_zh":255,"released_at":256},104334,"v0.17.1","## [0.17.1] 2025-03-13\n\n### Fixed\n- Fixed bug with `process_thinking_content` function by moving it from `agent_utils` to `ra_aid.text.processing` module\n- Fixed config parameter handling in research request functions\n- Updated development setup instructions in README to use `pip install -e \".[dev]\"` instead of `pip install -r requirements-dev.txt`\n","2025-03-13T11:20:22",{"id":258,"version":259,"summary_zh":260,"released_at":261},104335,"v0.17.0","## [0.17.0] 2025-03-12\n\n### Added\n- Added support for think tags in models with the new extract_think_tag function\n- Enhanced CiaynAgent and expert tool to extract and display thinking content from \u003Cthink>...\u003C\u002Fthink> tags\n- Added model parameters for think tag support\n- Added comprehensive testing for think tag functionality\n- Added `--show-thoughts` flag to show thoughts of thinking models\n- Added `--show-cost` flag to display cost information during agent operations\n- Enhanced cost tracking with AnthropicCallbackHandler for monitoring token usage and costs\n- Added Session and Trajectory models to track application state and agent actions\n- Added comprehensive environment inventory system for collecting and providing system information to agents\n- Added repository implementations for Session and Trajectory models\n- Added support for reasoning assistance in research phase\n- Added new config parameters for managing cost display and reasoning assistance\n\n### Changed\n- Updated langchain\u002Flanggraph deps\n- Improved trajectory tracking for better debugging and analysis\n- Enhanced prompts throughout the system for better performance\n- Improved token management with better handling of thinking tokens in Claude models\n- Updated project information inclusion in prompts\n- Reorganized agent code with better extraction of core functionality\n- Refactored anthropic token limiting for better control over token usage\n\n### Fixed\n- Fixed binary file detection\n- Fixed environment inventory sorting\n- Fixed token limiter functionality\n- Various test improvements and fixes\n","2025-03-12T21:19:01",{"id":263,"version":264,"summary_zh":265,"released_at":266},104336,"v0.16.1","## [0.16.1] 2025-03-07\n\n### Changed\n- Replaced thread-local storage with contextvars in agent_context.py for better context isolation\n- Improved React agent execution with LangGraph's interrupt mechanism\n- Enhanced _run_agent_stream function to properly handle agent state and continuation\n\n### Fixed\n- Fixed tests to work with the new implementation\n","2025-03-08T00:23:25"]