[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-hijohnnylin--neuronpedia":3,"tool-hijohnnylin--neuronpedia":64},[4,17,27,35,43,56],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":16},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,3,"2026-04-05T11:01:52",[13,14,15],"开发框架","图像","Agent","ready",{"id":18,"name":19,"github_repo":20,"description_zh":21,"stars":22,"difficulty_score":23,"last_commit_at":24,"category_tags":25,"status":16},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",138956,2,"2026-04-05T11:33:21",[13,15,26],"语言模型",{"id":28,"name":29,"github_repo":30,"description_zh":31,"stars":32,"difficulty_score":23,"last_commit_at":33,"category_tags":34,"status":16},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",107662,"2026-04-03T11:11:01",[13,14,15],{"id":36,"name":37,"github_repo":38,"description_zh":39,"stars":40,"difficulty_score":23,"last_commit_at":41,"category_tags":42,"status":16},3704,"NextChat","ChatGPTNextWeb\u002FNextChat","NextChat 是一款轻量且极速的 AI 助手，旨在为用户提供流畅、跨平台的大模型交互体验。它完美解决了用户在多设备间切换时难以保持对话连续性，以及面对众多 AI 模型不知如何统一管理的痛点。无论是日常办公、学习辅助还是创意激发，NextChat 都能让用户随时随地通过网页、iOS、Android、Windows、MacOS 或 Linux 端无缝接入智能服务。\n\n这款工具非常适合普通用户、学生、职场人士以及需要私有化部署的企业团队使用。对于开发者而言，它也提供了便捷的自托管方案，支持一键部署到 Vercel 或 Zeabur 等平台。\n\nNextChat 的核心亮点在于其广泛的模型兼容性，原生支持 Claude、DeepSeek、GPT-4 及 Gemini Pro 等主流大模型，让用户在一个界面即可自由切换不同 AI 能力。此外，它还率先支持 MCP（Model Context Protocol）协议，增强了上下文处理能力。针对企业用户，NextChat 提供专业版解决方案，具备品牌定制、细粒度权限控制、内部知识库整合及安全审计等功能，满足公司对数据隐私和个性化管理的高标准要求。",87618,"2026-04-05T07:20:52",[13,26],{"id":44,"name":45,"github_repo":46,"description_zh":47,"stars":48,"difficulty_score":23,"last_commit_at":49,"category_tags":50,"status":16},2268,"ML-For-Beginners","microsoft\u002FML-For-Beginners","ML-For-Beginners 是由微软推出的一套系统化机器学习入门课程，旨在帮助零基础用户轻松掌握经典机器学习知识。这套课程将学习路径规划为 12 周，包含 26 节精炼课程和 52 
道配套测验，内容涵盖从基础概念到实际应用的完整流程，有效解决了初学者面对庞大知识体系时无从下手、缺乏结构化指导的痛点。\n\n无论是希望转型的开发者、需要补充算法背景的研究人员，还是对人工智能充满好奇的普通爱好者，都能从中受益。课程不仅提供了清晰的理论讲解，还强调动手实践，让用户在循序渐进中建立扎实的技能基础。其独特的亮点在于强大的多语言支持，通过自动化机制提供了包括简体中文在内的 50 多种语言版本，极大地降低了全球不同背景用户的学习门槛。此外，项目采用开源协作模式，社区活跃且内容持续更新，确保学习者能获取前沿且准确的技术资讯。如果你正寻找一条清晰、友好且专业的机器学习入门之路，ML-For-Beginners 将是理想的起点。",84991,"2026-04-05T10:45:23",[14,51,52,53,15,54,26,13,55],"数据工具","视频","插件","其他","音频",{"id":57,"name":58,"github_repo":59,"description_zh":60,"stars":61,"difficulty_score":10,"last_commit_at":62,"category_tags":63,"status":16},3128,"ragflow","infiniflow\u002Fragflow","RAGFlow 是一款领先的开源检索增强生成（RAG）引擎，旨在为大语言模型构建更精准、可靠的上下文层。它巧妙地将前沿的 RAG 技术与智能体（Agent）能力相结合，不仅支持从各类文档中高效提取知识，还能让模型基于这些知识进行逻辑推理和任务执行。\n\n在大模型应用中，幻觉问题和知识滞后是常见痛点。RAGFlow 通过深度解析复杂文档结构（如表格、图表及混合排版），显著提升了信息检索的准确度，从而有效减少模型“胡编乱造”的现象，确保回答既有据可依又具备时效性。其内置的智能体机制更进一步，使系统不仅能回答问题，还能自主规划步骤解决复杂问题。\n\n这款工具特别适合开发者、企业技术团队以及 AI 研究人员使用。无论是希望快速搭建私有知识库问答系统，还是致力于探索大模型在垂直领域落地的创新者，都能从中受益。RAGFlow 提供了可视化的工作流编排界面和灵活的 API 接口，既降低了非算法背景用户的上手门槛，也满足了专业开发者对系统深度定制的需求。作为基于 Apache 2.0 协议开源的项目，它正成为连接通用大模型与行业专有知识之间的重要桥梁。",77062,"2026-04-04T04:44:48",[15,14,13,26,54],{"id":65,"github_repo":66,"name":67,"description_en":68,"description_zh":69,"ai_summary_zh":69,"readme_en":70,"readme_zh":71,"quickstart_zh":72,"use_case_zh":73,"hero_image_url":74,"owner_login":75,"owner_name":76,"owner_avatar_url":77,"owner_bio":78,"owner_company":79,"owner_location":78,"owner_email":78,"owner_twitter":78,"owner_website":80,"owner_url":81,"languages":82,"stars":122,"forks":123,"last_commit_at":124,"license":125,"difficulty_score":126,"env_os":127,"env_gpu":128,"env_ram":129,"env_deps":130,"category_tags":139,"github_topics":140,"view_count":23,"oss_zip_url":78,"oss_zip_packed_at":78,"status":16,"created_at":143,"updated_at":144,"faqs":145,"releases":175},3639,"hijohnnylin\u002Fneuronpedia","neuronpedia","open source interpretability platform 🧠","Neuronpedia 是一个开源的模型可解释性平台，旨在帮助人们深入理解大型语言模型内部的运作机制。它就像为 AI 模型打造的一座“透明博物馆”，让原本黑盒般的神经网络变得清晰可见。\n\n在 AI 研究中，理解模型为何做出特定决策一直是个难题。Neuronpedia 通过可视化神经元激活、自动生成功能解释、构建电路图谱等核心功能，让用户能够轻松探索模型内部结构，分析特定神经元的触发条件，甚至尝试对模型行为进行干预和引导。\n\n该平台特别适合 AI 研究人员、机器学习工程师以及对模型机理感兴趣的技术开发者使用。无论是需要调试模型行为的研究者，还是希望深入理解训练过程的工程师，都能从中获得直观的数据洞察。\n\n技术亮点方面，Neuronpedia 提供了丰富的工具链：支持自动解释生成（Autointerp）、神经元相似度搜索、多维数据降维可视化（UMAP）、实时激活监控仪表盘等功能。其模块化架构允许用户灵活部署本地环境，既可进行小规模实验，也能支撑大规模分析任务。作为开源项目，它还鼓励社区贡献数据和工具，共同推动模型可解释性研究的发展。","\u003Cp align=\"center\">\n  \u003Ca href=\"https:\u002F\u002Fgithub.com\u002Fhijohnnylin\u002Fneuronpedia\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fhijohnnylin_neuronpedia_readme_dd5e0dfd4fca.png\" alt=\"Splash GIF\"\u002F>\n  \u003C\u002Fa>\n\n\u003Ch3 align=\"center\">\u003Ca href=\"https:\u002F\u002Fneuronpedia.org\">neuronpedia.org 🧠🔍\u003C\u002Fa>\u003C\u002Fh3>\n\n  \u003Cp align=\"center\">\n    open source interpretability platform\n    \u003Cbr \u002F>\n    \u003Csub>\n    \u003Cstrong>api · steering · activations · circuits\u002Fgraphs · autointerp · scoring · inference · search · filter · dashboards · benchmarks · cossim · umap · embeds · probes · saes · lists · exports · uploads\u003C\u002Fstrong>\n    \u003C\u002Fsub>\n  \u003C\u002Fp>\n\u003C\u002Fp>\n\n\u003Cp align=\"center\" style=\"color: #cccccc;\">\n  \u003Ca href=\"https:\u002F\u002Fgithub.com\u002Fhijohnnylin\u002Fneuronpedia\u002Fblob\u002Fmain\u002FLICENSE\">\u003Cimg height=\"20px\" src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Flicense-MIT-yellow.svg\" alt=\"MIT\">\u003C\u002Fa>\n  \u003Ca href=\"https:\u002F\u002Fstatus.neuronpedia.org\">\u003Cimg 
height=\"20px\" src=\"https:\u002F\u002Fuptime.betterstack.com\u002Fstatus-badges\u002Fv2\u002Fmonitor\u002F1roih.svg\" alt=\"Uptime\">\u003C\u002Fa>\n  \u003Ca href=\"https:\u002F\u002Fjoin.slack.com\u002Ft\u002Fopensourcemechanistic\u002Fshared_invite\u002Fzt-3m2fulfeu-0LnVnF8yCrKJYQvWLuCQaQ\">\u003Cimg height=\"20px\" src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Fslack-purple?logo=slack&logoColor=white\" alt=\"Slack\">\u003C\u002Fa>\n  \u003Ca href=\"mailto:johnny@neuronpedia.org\">\u003Cimg height=\"20px\" src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Fcontact-blue.svg?logo=data:image\u002Fsvg%2bxml;base64,PHN2ZyB2aWV3Qm94PSIwIDAgMjQgMjQiIGZpbGw9Im5vbmUiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyI+PGcgaWQ9IlNWR1JlcG9fYmdDYXJyaWVyIiBzdHJva2Utd2lkdGg9IjAiPjwvZz48ZyBpZD0iU1ZHUmVwb190cmFjZXJDYXJyaWVyIiBzdHJva2UtbGluZWNhcD0icm91bmQiIHN0cm9rZS1saW5lam9pbj0icm91bmQiPjwvZz48ZyBpZD0iU1ZHUmVwb19pY29uQ2FycmllciI+IDxwYXRoIGQ9Ik00IDcuMDAwMDVMMTAuMiAxMS42NUMxMS4yNjY3IDEyLjQ1IDEyLjczMzMgMTIuNDUgMTMuOCAxMS42NUwyMCA3IiBzdHJva2U9IiNmZmZmZmYiIHN0cm9rZS13aWR0aD0iMiIgc3Ryb2tlLWxpbmVjYXA9InJvdW5kIiBzdHJva2UtbGluZWpvaW49InJvdW5kIj48L3BhdGg+IDxyZWN0IHg9IjMiIHk9IjUiIHdpZHRoPSIxOCIgaGVpZ2h0PSIxNCIgcng9IjIiIHN0cm9rZT0iI2ZmZmZmZiIgc3Ryb2tlLXdpZHRoPSIyIiBzdHJva2UtbGluZWNhcD0icm91bmQiPjwvcmVjdD4gPC9nPjwvc3ZnPg==\" alt=\"Email\">\u003C\u002Fa>\n  \u003Ca href=\"https:\u002F\u002Fneuronpedia.org\u002Fblog\">\u003Cimg height=\"20px\" src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Fblog-10b981.svg\" alt=\"blog\">\u003C\u002Fa>\n  \u003Ca href=\"https:\u002F\u002Fneuronpedia.org\">\u003Cimg height=\"20px\" src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Fwebsite-gray.svg\" alt=\"website\">\u003C\u002Fa>\n\n\u003C\u002Fp>\n\n- [About Neuronpedia](#about-neuronpedia)\n- [Setting Up Your Local Environment](#setting-up-your-local-environment)\n  - [\"I Want to Use a Local Database \u002F Import More Neuronpedia Data\"](#i-want-to-use-a-local-database--import-more-neuronpedia-data)\n  - [\"I Want to Do Webapp (Frontend + API) Development\"](#i-want-to-do-webapp-frontend--api-development)\n  - [\"I Want to Run\u002FDevelop Inference Locally\"](#i-want-to-rundevelop-inference-locally)\n  - ['I Want to Run\u002FDevelop the Graph Server Locally'](#i-want-to-rundevelop-the-graph-server-locally)\n  - ['I Want to Run\u002FDevelop Autointerp Locally'](#i-want-to-rundevelop-autointerp-locally)\n  - ['I Want to Do High Volume Autointerp Explanations'](#i-want-to-do-high-volume-autointerp-explanations)\n  - ['I Want to Generate My Own Dashboards\u002FData and Add It to Neuronpedia'](#i-want-to-generate-my-own-dashboardsdata-and-add-it-to-neuronpedia)\n- [Architecture](#architecture)\n  - [Requirements](#requirements)\n  - [Services](#services)\n    - [Services Are Standalone Apps](#services-are-standalone-apps)\n    - [Service-Specific Documentation](#service-specific-documentation)\n  - [OpenAPI Schema](#openapi-schema)\n  - [Monorepo Directory Structure](#monorepo-directory-structure)\n- [Security](#security)\n- [Contact \u002F Support](#contact--support)\n- [Contributing](#contributing)\n- [Appendix](#appendix)\n    - ['Make' Commands Reference](#make-commands-reference)\n    - [Import Data Into Your Local Database](#import-data-into-your-local-database)\n    - [Why an OpenAI API Key Is Needed for Search Explanations](#why-an-openai-api-key-is-needed-for-search-explanations)\n\n# About Neuronpedia\n\nCheck out our [blog 
post](https:\u002F\u002Fwww.neuronpedia.org\u002Fblog\u002Fneuronpedia-is-now-open-source) about Neuronpedia, why we're open sourcing it, and other details. There's also a [tweet thread](https:\u002F\u002Fx.com\u002Fneuronpedia\u002Fstatus\u002F1906793456879775745) with quick demos.\n\n**Feature Overview**\n\nA diagram showing the main features of Neuronpedia as of March 2025.\n![neuronpedia-features](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fhijohnnylin_neuronpedia_readme_fd67af1704cc.png)\n\n# Setting Up Your Local Environment\n\nStart by setting up your [local database](#i-want-to-use-a-local-database--import-more-neuronpedia-data).\n\n> 🔥 **pro-tip:** Neuronpedia is configured for AI agent development. Here's an example using a [single prompt](https:\u002F\u002Fgithub.com\u002Fhijohnnylin\u002Fneuronpedia\u002Fblob\u002Fmain\u002Fapps\u002Fexperiments\u002Fsteerify\u002FREADME.md#claude-code-prompt) to build a custom app (Steerify) using Neuronpedia's inference server as a backend:\n\nhttps:\u002F\u002Fgithub.com\u002Fuser-attachments\u002Fassets\u002Fbc82f88b-8155-4c1d-948a-ea5d987ae0f8\n\n## \"I Want to Use a Local Database \u002F Import More Neuronpedia Data\"\n\n#### What This Does + What You'll Get\n\nThese steps show you how to configure and connect to your own local database. You can then download sources\u002FSAEs of your choosing:\n\nhttps:\u002F\u002Fgithub.com\u002Fuser-attachments\u002Fassets\u002Fd7fbb46e-8522-4f98-aa08-21c6529424af\n\n> ⚠️ **warning:** your database will start out empty. you will need to use the admin panel to [import sources\u002Fdata](#import-data-into-your-local-database) (activations, explanations, etc).\n\n> ⚠️ **warning:** the local database environment does not have any inference servers connected, so you won't be able to do activation testing, steering, etc initially. you will need to [configure a local inference instance](#i-want-to-rundevelop-inference-locally).\n\n#### Steps\n\n1. Build the webapp\n   ```\n   make webapp-localhost-build\n   ```\n2. Bring up the webapp\n   ```\n   make webapp-localhost-run\n   ```\n3. Go to [localhost:3000](http:\u002F\u002Flocalhost:3000) to see your local webapp instance, which is now connected to your local database\n4. See the `warnings` above for caveats, and `next steps` to finish setting up\n\n#### Next Steps\n\n1. [click here](#import-data-into-your-local-database) for how to import data into your local database (activations, explanations, etc), because your local database will be empty to start\n2. [click here](#i-want-to-rundevelop-inference-locally) for how to bring up a local `inference` service for the model\u002Fsource\u002FSAE you're working with\n\n## \"I Want to Do Webapp (Frontend + API) Development\"\n\n#### What This Does\n\nThe webapp builds you've been doing so far are _production builds_, which are slow to build and fast to run. Since they rebuild slowly and don't include debug information, they are not ideal for development.\n\nThis subsection installs the development build on your local machine (not docker), then mounts the build inside your docker instance.\n\n#### What You'll Get\n\nOnce you do this section, you'll be able to do local development and quickly see your changes, as well as see more informative debug\u002Ferrors. If you are purely interested in doing frontend\u002Fapi development for Neuronpedia, you don't need to set up anything else!\n\n
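Once the dev server from the steps below is running, you can also sanity-check it from outside the browser. A minimal python sketch - the route shape is an assumption modeled on the hosted [api docs](https:\u002F\u002Fwww.neuronpedia.org\u002Fapi-doc), and it assumes you've already imported a source like `gpt2-small`@`0-res-jb`:\n\n```\nimport requests\n\n# assumption: feature routes look like \u002Fapi\u002Ffeature\u002F[model]\u002F[source]\u002F[index] -\n# check the api docs for the real routes in your build\nBASE = \"http:\u002F\u002Flocalhost:3000\"\n\nresp = requests.get(BASE + \"\u002Fapi\u002Ffeature\u002Fgpt2-small\u002F0-res-jb\u002F0\", timeout=30)\nresp.raise_for_status()\nfeature = resp.json()\n\n# a feature record should carry at least its model and index\nprint(feature.get(\"modelId\"), feature.get(\"index\"))\n```\n\n#### Steps\n\n1. 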
Install [nodejs](https:\u002F\u002Fnodejs.org) via [node version manager](https:\u002F\u002Fgithub.com\u002Fnvm-sh\u002Fnvm)\n   ```\n   make install-nodejs\n   ```\n2. Install the webapp's dependencies\n   ```\n   make webapp-localhost-install\n   ```\n3. Run the development instance\n   ```\n   make webapp-localhost-dev\n   ```\n4. go to [localhost:3000](http:\u002F\u002Flocalhost:3000) to see your local webapp instance\n\n#### Doing Local Webapp Development\n\n- **auto-reload**: when you change any files in the `apps\u002Fwebapp` subdirectory, the `localhost:3000` will automatically reload\n- **install commands**: you do not need to run `make install-nodejs` again, and you only need to run `make webapp-localhost-install` if dependencies change\n\n## \"I Want to Run\u002FDevelop Inference Locally\"\n\n#### What This Does + What You'll Get\n\nThis subsection shows you how to run an inference instance locally so you can do things like steering, activation testing, etc on the sources\u002FSAEs you've downloaded.\n\n> ⚠️ **warning:** for the local environment, we only support running one inference server at a time. this is because you are unlikely to be running multiple models simultaneously on one machine, as they are memory and compute intensive.\n\n#### Steps\n\n1. Ensure you have [installed poetry](https:\u002F\u002Fpython-poetry.org\u002Fdocs\u002F#installation)\n2. Install the inference server's dependencies\n   ```\n   make inference-localhost-install\n   ```\n3. Build the image, picking the correct command based on if the machine has CUDA or not:\n   ```\n   # CUDA\n   make inference-localhost-build-gpu USE_LOCAL_HF_CACHE=1\n   ```\n   ```\n   # no CUDA\n   make inference-localhost-build USE_LOCAL_HF_CACHE=1\n   ```\n   > ➡️ The [`USE_LOCAL_HF_CACHE=1` flag](https:\u002F\u002Fgithub.com\u002Fhijohnnylin\u002Fneuronpedia\u002Fpull\u002F89) mounts your local HuggingFace cache at `${HOME}\u002F.cache\u002Fhuggingface\u002Fhub:\u002Froot\u002F.cache\u002Fhuggingface\u002Fhub`. If you wish to create a new cache in your container instead, you can omit this flag here and in the next step.\n4. run the inference server, using the `MODEL_SOURCESET` argument to specify the `.env.inference.[model_sourceset]` file you're loading from. for this example, we will run `gpt2-small`, and load the `res-jb` sourceset\u002FSAE set, which is configured in the `.env.inference.gpt2-small.res-jb` file. you can see the other [pre-loaded inference configs](#pre-loaded-inference-server-configurations) or [create your own config](#making-your-own-inference-server-configurations) as well.\n\n   ```\n   # CUDA\n   make inference-localhost-dev-gpu \\\n        MODEL_SOURCESET=gpt2-small.res-jb \\\n        USE_LOCAL_HF_CACHE=1\n\n   # no CUDA\n   make inference-localhost-dev \\\n        MODEL_SOURCESET=gpt2-small.res-jb \\\n        USE_LOCAL_HF_CACHE=1\n   ```\n\n5. wait for it to load (first time will take longer). when you see `Initialized: True`, the local inference server is now ready on `localhost:5002`\n\n#### Using the Inference Server\n\nTo interact with the inference server, you have a few options - note that this will only work for the model \u002F selected source you have loaded:\n\n1.  Load the webapp with the [local database setup](#i-want-to-use-a-local-database--import-more-neuronpedia-data), then using the model \u002F selected source as you would normally do on Neuronpedia.\n2.  
Use the pre-generated inference python client at `packages\u002Fpython\u002Fneuronpedia-inference-client` (set environment variable `INFERENCE_SERVER_SECRET` to `public`, or whatever it's set to in `.env.localhost` if you've changed it)\n3.  Use the openapi spec, located at `schemas\u002Fopenapi\u002Finference-server.yaml` to make calls with any client of your choice. You can get a Swagger interactive spec at `\u002Fdocs` after the server starts up. See the `apps\u002Finference\u002FREADME.md` for details.\n\n#### Pre-Loaded Inference Server Configurations\n\nWe've provided some pre-loaded inference configs as examples of how to load a specific model and sourceset for inference. View them by running `make inference-list-configs`:\n\n```\n$ make inference-list-configs\n\nAvailable Inference Configurations (.env.inference.*)\n================================================\n\ndeepseek-r1-distill-llama-8b.llamascope-slimpj-res-32k\n    Model: meta-llama\u002FLlama-3.1-8B\n    Source\u002FSAE Sets: '[\"llamascope-slimpj-res-32k\"]'\n    make inference-localhost-dev MODEL_SOURCESET=deepseek-r1-distill-llama-8b.llamascope-slimpj-res-32k\n\ngemma-2-2b-it.gemmascope-res-16k\n    Model: gemma-2-2b-it\n    Source\u002FSAE Sets: '[\"gemmascope-res-16k\"]'\n    make inference-localhost-dev MODEL_SOURCESET=gemma-2-2b-it.gemmascope-res-16k\n\ngpt2-small.res-jb\n    Model: gpt2-small\n    Source\u002FSAE Sets: '[\"res-jb\"]'\n    make inference-localhost-dev MODEL_SOURCESET=gpt2-small.res-jb\n```\n\n#### Making Your Own Inference Server Configurations\n\nLook at the `.env.inference.*` files for examples on how to make these inference server configurations.\n\nThe `MODEL_ID` is the model id from the [transformerlens model table](https:\u002F\u002Ftransformerlensorg.github.io\u002FTransformerLens\u002Fgenerated\u002Fmodel_properties_table.html) and each of `SAE_SETS` is the text after the layer number and hyphen in a Neuronpedia source ID - for example, if you have a Neuronpedia feature at url `http:\u002F\u002Fneuronpedia.org\u002Fgpt2-small\u002F0-res-jb\u002F123`, the `0-res-jb` is the source ID, and the item in the `SAE_SETS` is `res-jb`. This example matches the `.env.inference.gpt2-small.res-jb` file exactly.\n\nYou can find Neuronpedia source IDs in the saelens [pretrained saes yaml file](https:\u002F\u002Fgithub.com\u002FjbloomAus\u002FSAELens\u002Fblob\u002Fmain\u002Fsae_lens\u002Fpretrained_saes.yaml) or by clicking into models in the [neuronpedia datasets exports](https:\u002F\u002Fneuronpedia-datasets.s3.us-east-1.amazonaws.com\u002Findex.html?prefix=v1\u002F) directory.\n\n**Using Models Not Officially Supported by TransformerLens**\nLook at the `.env.inference.deepseek-r1-distill-llama-8b.llamascope-slimpj-res-32k` to see an example of how to load a model not officially supported by transformerlens. This is mostly for swapping in weights of a distilled\u002Ffine-tuned model.\n\n**Loading Non-SAELens Sources\u002FSAEs**\n\n- [TODO #2](https:\u002F\u002Fgithub.com\u002Fhijohnnylin\u002Fneuronpedia\u002Fissues\u002F2) document how to load SAEs\u002Fsources that are not in saelens pretrained yaml\n\n#### Doing Local Inference Development\n\n- **schema-driven development**: to add new endpoints or change existing endpoints, you will need to start by updating the openapi schemas, then generating clients from that, then finally updating the actual inference and webapp code. 
for details on how to do this, see the [openapi readme: making changes to the inference server](schemas\u002FREADME.md#making-changes-to-the-inference-server)\n- **no auto-reload**: when you change any files in the `apps\u002Finference` subdirectory, the inference server will _NOT_ automatically reload, because server reloads are slow: they reload the model and all sources\u002FSAEs. if you want to enable autoreload, then append `AUTORELOAD=1` to the `make inference-localhost-dev` call, like so:\n  ```\n  make inference-localhost-dev \\\n       MODEL_SOURCESET=gpt2-small.res-jb \\\n       AUTORELOAD=1\n  ```\n\n## 'I Want to Run\u002FDevelop the Graph Server Locally'\n\n#### What This Does + What You'll Get\n\nThe graph server powers the attribution graph generation functionality, built on top of [circuit-tracer](https:\u002F\u002Fgithub.com\u002Fsafety-research\u002Fcircuit-tracer) by Piotrowski & Hanna. This service handles the backend processing when you create new graphs through the [Neuronpedia Circuit Tracer](https:\u002F\u002Fwww.neuronpedia.org\u002Fgemma-2-2b\u002Fgraph) interface.\n\n#### Steps\n\n1. Ensure you have [installed poetry](https:\u002F\u002Fpython-poetry.org\u002Fdocs\u002F#installation)\n2. Install the graph server's dependencies\n   ```\n   make graph-localhost-install\n   ```\n3. Within the `apps\u002Fgraph` directory, create a `.env` file with `SECRET` and `HF_TOKEN` (see `apps\u002Fgraph\u002F.env.example`)\n   - `SECRET` is the server secret that needs to be passed in the `x-secret-key` request header\n   - Make sure your `HF_TOKEN` has access to the [Gemma-2-2B model](https:\u002F\u002Fhuggingface.co\u002Fgoogle\u002Fgemma-2-2b) on Huggingface.\n\n4. Build the image, picking the correct command based on if the machine has CUDA or not:\n   ```\n   # CUDA\n   make graph-localhost-build-gpu USE_LOCAL_HF_CACHE=1\n   ```\n   ```\n   # no CUDA\n   make graph-localhost-build USE_LOCAL_HF_CACHE=1\n   ```\n   > ➡️ The [`USE_LOCAL_HF_CACHE=1` flag](https:\u002F\u002Fgithub.com\u002Fhijohnnylin\u002Fneuronpedia\u002Fpull\u002F89) mounts your local HuggingFace cache at `${HOME}\u002F.cache\u002Fhuggingface\u002Fhub:\u002Froot\u002F.cache\u002Fhuggingface\u002Fhub`. If you wish to create a new cache in your container instead, you can omit this flag here and in the next step.\n5. run the graph server:\n\n   ```\n   # CUDA\n   make graph-localhost-dev-gpu \\\n        USE_LOCAL_HF_CACHE=1\n\n   # no CUDA\n   make graph-localhost-dev \\\n        USE_LOCAL_HF_CACHE=1\n   ```\n\n6. Wait for the container to spin up\n\nFor example requests, see the [Graph Server README](apps\u002Fgraph\u002FREADME.md#example-request---output-graph-json-directly).\n\n## 'I Want to Run\u002FDevelop Autointerp Locally'\n\n#### What This Does + What You'll Get\n\nThe autointerp server provides automatic interpretation and scoring of neural network features. It uses eleutherAI's [delphi](https:\u002F\u002Fgithub.com\u002FEleutherAI\u002Fdelphi) for generating explanations and scoring.\n\n> ⚠️ **warning:** the eleuther embedding scorer uses an embedding model only supported on CUDA (it won't work on mac mps or cpu)\n\n#### Steps\n\n1. Ensure you have [installed poetry](https:\u002F\u002Fpython-poetry.org\u002Fdocs\u002F#installation)\n2. Install the autointerp server's dependencies\n   ```\n   make autointerp-localhost-install\n   ```\n3. 
Build the image, picking the correct command based on if the machine has CUDA or not:\n   ```\n   # CUDA\n   make autointerp-localhost-build-gpu USE_LOCAL_HF_CACHE=1\n   ```\n   ```\n   # no CUDA\n   make autointerp-localhost-build USE_LOCAL_HF_CACHE=1\n   ```\n   > ➡️ The [`USE_LOCAL_HF_CACHE=1` flag](https:\u002F\u002Fgithub.com\u002Fhijohnnylin\u002Fneuronpedia\u002Fpull\u002F89) mounts your local HuggingFace cache at `${HOME}\u002F.cache\u002Fhuggingface\u002Fhub:\u002Froot\u002F.cache\u002Fhuggingface\u002Fhub`. If you wish to create a new cache in your container instead, you can omit this flag here and in the next step.\n4. run the autointerp server:\n\n   ```\n   # CUDA\n   make autointerp-localhost-dev-gpu \\\n        USE_LOCAL_HF_CACHE=1\n\n   # no CUDA\n   make autointerp-localhost-dev \\\n        USE_LOCAL_HF_CACHE=1\n   ```\n\n5. wait for it to load\n\n#### Using the Autointerp Server\n\nTo interact with the autointerp server, you have a few options:\n\n1. Use the pre-generated autointerp python client at `packages\u002Fpython\u002Fneuronpedia-autointerp-client` (set environment variable `AUTOINTERP_SERVER_SECRET` to `public`, or whatever it's set to in `.env.localhost` if you've changed it)\n2. Use the openapi spec, located at `schemas\u002Fopenapi\u002Fautointerp-server.yaml` to make calls with any client of your choice. You can get a Swagger interactive spec at `\u002Fdocs` after the server starts up. See the `apps\u002Finference\u002FREADME.md` for details.\n\n#### Doing Local Autointerp Development\n\n- **schema-driven development**: to add new endpoints or change existing endpoints, you will need to start by updating the openapi schemas, then generating clients from that, then finally updating the actual autointerp and webapp code. for details on how to do this, see the [openapi readme: making changes to the autointerp server](schemas\u002FREADME.md#making-changes-to-the-autointerp-server)\n- **no auto-reload**: when you change any files in the `apps\u002Fautointerp` subdirectory, the autointerp server will _NOT_ automatically reload by default. if you want to enable autoreload, then append `AUTORELOAD=1` to the `make autointerp-localhost-dev` call, like so:\n  ```\n  make autointerp-localhost-dev \\\n       AUTORELOAD=1\n  ```\n\n## 'I Want to Do High Volume Autointerp Explanations'\n\nThis section is under construction.\n\n- use EleutherAI's [Delphi library](https:\u002F\u002Fgithub.com\u002FEleutherAI\u002Fdelphi)\n- for OpenAI's autointerp, use [utils\u002Fneuronpedia_utils\u002Fbatch-autointerp.py](utils\u002Fneuronpedia_utils\u002Fbatch-autointerp.py)\n\n## 'I Want to Generate My Own Dashboards\u002FData and Add It to Neuronpedia'\n\nThis section is under construction.\n\n[TODO: simplify generation + upload of data to neuronpedia](https:\u002F\u002Fgithub.com\u002Fhijohnnylin\u002Fneuronpedia\u002Fissues\u002F46)\n\n[TODO: neuronpedia-utils should use poetry](https:\u002F\u002Fgithub.com\u002Fhijohnnylin\u002Fneuronpedia\u002Fissues\u002F43)\n\nIn this example, we will generate dashboards\u002Fdata for an [SAELens](https:\u002F\u002Fgithub.com\u002FjbloomAus\u002FSAELens)-compatible SAE, and upload it to our own Neuronpedia instance.\n\n1. ensure you have [Poetry installed](https:\u002F\u002Fpython-poetry.org\u002Fdocs\u002F)\n2. 
[upload](https:\u002F\u002Fgithub.com\u002FjbloomAus\u002FSAELens\u002Fblob\u002Fmain\u002Ftutorials\u002Fuploading_saes_to_huggingface.ipynb) your SAELens-compatible source\u002FSAE to HuggingFace.\n   > Example\n   > ➡️ [https:\u002F\u002Fhuggingface.co\u002Fchanind\u002Fgemma-2-2b-batch-topk-matryoshka-saes-w-32k-l0-40](https:\u002F\u002Fhuggingface.co\u002Fchanind\u002Fgemma-2-2b-batch-topk-matryoshka-saes-w-32k-l0-40)\n3. clone SAELens locally.\n   ```\n   git clone https:\u002F\u002Fgithub.com\u002FjbloomAus\u002FSAELens.git\n   ```\n4. open your cloned SAELens and edit the file `sae_lens\u002Fpretrained_saes.yaml`. add a new entry at the bottom, based on the template below (see comments for how to fill it out):\n   > Example\n   > ➡️ [https:\u002F\u002Fgithub.com\u002FjbloomAus\u002FSAELens\u002Fpull\u002F455\u002Ffiles](https:\u002F\u002Fgithub.com\u002FjbloomAus\u002FSAELens\u002Fpull\u002F455\u002Ffiles)\n   ```\n   gemma-2-2b-res-matryoshka-dc:                 # a unique ID for your set of SAEs\n     conversion_func: null                       # null if your SAE config is already compatible with SAELens\n     links:                                      # optional links\n       model: https:\u002F\u002Fhuggingface.co\u002Fgoogle\u002Fgemma-2-2b\n     model: gemma-2-2b                           # transformerlens model id - https:\u002F\u002Ftransformerlensorg.github.io\u002FTransformerLens\u002Fgenerated\u002Fmodel_properties_table.html\n     repo_id: chanind\u002Fgemma-2-2b-batch-topk-matryoshka-saes-w-32k-l0-40  # the huggingface repo path\n     saes:\n     - id: blocks.0.hook_resid_post                 # an id for this SAE\n       path: standard\u002Fblocks.0.hook_resid_post      # the path in the repo_id to the SAE\n       l0: 40.0\n       neuronpedia: gemma-2-2b\u002F0-matryoshka-res-dc  # what you expect the Neuronpedia URI to be - neuronpedia.org\u002F[this_slug]. should be [model_id]\u002F[layer]-[identical_slug_for_this_sae_set]\n     - id: blocks.1.hook_resid_post                 # more SAEs in this SAE set\n       path: standard\u002Fblocks.1.hook_resid_post\n       l0: 40.0\n       neuronpedia: gemma-2-2b\u002F1-matryoshka-res-dc  # note that this is identical to the entry above, except 1 instead of 0 for the layer\n     - [...]\n   ```\n5. clone [SAEDashboard](https:\u002F\u002Fgithub.com\u002FjbloomAus\u002FSAEDashboard.git) locally.\n   ```\n   git clone https:\u002F\u002Fgithub.com\u002FjbloomAus\u002FSAEDashboard.git\n   ```\n6. configure your cloned `SAEDashboard` to use your cloned modified `SAELens`, instead of the one in production\n   ```\n   cd SAEDashboard                    # set directory\n   poetry lock && poetry install      # install dependencies\n   poetry remove sae-lens             # remove production dependency\n   poetry add PATH\u002FTO\u002FCLONED\u002FSAELENS  # set local dependency\n   ```\n7. generate dashboards for the SAE. this will take from 30 min to a few hours, depending on your hardware and size of model.\n\n   ```\n   cd SAEDashboard                    # set directory\n   rm -rf cached_activations          # clear old cached data\n\n   # start the generation. 
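this first caches activations from the dataset\n   # (the cached_activations directory cleared above), then computes the dashboard data.\n   # 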
details for each argument (full details: https:\u002F\u002Fgithub.com\u002FjbloomAus\u002FSAEDashboard\u002Fblob\u002Fmain\u002Fsae_dashboard\u002Fneuronpedia\u002Fneuronpedia_runner_config.py)\n   #     - sae-set = should match the unique ID for the set from pretrained_saes.yaml\n   #     - sae-path = should match the id for the sae in pretrained_saes.yaml\n   #     - np-set-name = should match the [identical_slug_for_this_sae_set] from the sae's neuronpedia field in pretrained_saes.yaml\n   #     - dataset-path = the huggingface dataset to use for generating activations. usually you want to use the same dataset the model was trained on.\n   #     - output-dir = the output directory of the dashboard data\n   #     - n-prompts = number of activation texts to test from the dataset\n   #     - n-tokens-in-prompt, n-features-per-batch, n-prompts-in-forward-pass = keep these at 128\n   poetry run neuronpedia-runner \\\n        --sae-set=\"gemma-2-2b-res-matryoshka-dc\" \\\n        --sae-path=\"blocks.12.hook_resid_post\" \\\n        --np-set-name=\"matryoshka-res-dc\" \\\n        --dataset-path=\"monology\u002Fpile-uncopyrighted\" \\\n        --output-dir=\"neuronpedia_outputs\u002F\" \\\n        --sae_dtype=\"float32\" \\\n        --model_dtype=\"bfloat16\" \\\n        --sparsity-threshold=1 \\\n        --n-prompts=24576 \\\n        --n-tokens-in-prompt=128 \\\n        --n-features-per-batch=128 \\\n        --n-prompts-in-forward-pass=128\n   ```\n\n8. Convert these dashboards for import into Neuronpedia\n   ```\n   cd neuronpedia\u002Futils\u002Fneuronpedia-utils          # get into this current repository's util directory\n   python convert-saedashboard-to-neuronpedia.py   # start guided conversion script. follow the steps.\n   ```\n9. Once dashboard files are generated for Neuronpedia, upload these to the global Neuronpedia S3 bucket - currently you need to [contact us](mailto:johnny@neuronpedia.org) to do this.\n10. From a localhost instance, [import your data](#i-want-to-use-a-local-database--import-more-neuronpedia-data)\n\n# Architecture\n\nHere's how the services\u002Fscripts connect in Neuronpedia. It's easiest to read this diagram by starting at the image of the laptop (\"User\").\n\n![architecture diagram](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fhijohnnylin_neuronpedia_readme_23f4c1ff0061.png)\n\n## Requirements\n\nYou can run Neuronpedia on any cloud and on any modern OS. Neuronpedia is designed to avoid vendor lock-in. These instructions were written for and tested on macos 15 (sequoia), so you may need to repurpose commands for windows\u002Fubuntu\u002Fetc. 
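\n\nBefore running any of the `make` targets, you can quickly confirm the core tools are present. A small python sketch (the tool list is inferred from the setup sections above, not an official requirements check):\n\n```\nimport shutil\n\n# tools the local setup leans on: docker for the service containers, make for\n# the command runner, node for the webapp, poetry for the python servers\nfor tool in [\"docker\", \"make\", \"node\", \"poetry\"]:\n    path = shutil.which(tool)\n    print(tool, \"->\", path if path else \"MISSING\")\n```\n\n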
At least 16GB ram is recommended.\n\n## Services\n\n| name       | description                                                                                                                                                  | powered by                            |\n| ---------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------- |\n| webapp     | serves the neuronpedia.org frontend and [the api](neuronpedia.org\u002Fapi-doc)                                                                                   | [next.js](https:\u002F\u002Fnextjs.org) \u002F react |\n| database   | stores features, activations, explanations, users, lists, etc                                                                                                | postgres                              |\n| inference  | [support server] steering, activation testing, search via inference, topk, etc. a separate instance is required for each model you want to run inference on. | python \u002F torch                        |\n| autointerp | [support server] auto-interp explanations and scoring, using eleutherAI's [delphi](https:\u002F\u002Fgithub.com\u002FEleutherAI\u002Fdelphi) (formerly `sae-auto-interp`)        | python                                |\n\n### Services Are Standalone Apps\n\nby design, each service can be run independently as a standalone app. this is to enable extensibility and forkability.\n\nFor example, if you like the Neuronpedia webapp frontend but want to use a different API for inference, you can do that! Just ensure your alternative inference server supports the `schema\u002Fopenapi\u002Finference-server.yaml` spec, and\u002For that you modify the Neuronpedia calls to inference under `apps\u002Fwebapp\u002Flib\u002Futils`.\n\n### Service-Specific Documentation\n\nthere are draft `README`s for each specific app\u002Fservice under `apps\u002F[service]`, but they are heavily WIP. you can also check out the `Dockerfile` under the same directory to build your own images.\n\n## OpenAPI Schema\n\nfor services to communicate with each other in a typed and consistent way, we use openapi schemas. there are some exceptions - for example, streaming is not officially supported by the openapi spec. however, even in that case, we still try our best to define a schema and use it.\n\nespecially for inference and autointerp server development, it is critical to understand and use the instructions under the [openapi readme](schemas\u002FREADME.md).\n\nopenapi schemas are located under `\u002Fschemas`. we use openapi generators to generate clients in both typescript and python.\n\n## Monorepo Directory Structure\n\n`apps` - the three Neuronpedia services: webapp, inference, and autointerp. most of the code is here.\n`schemas` - the openapi schemas. to make changes to inference and autointerp endpoints, first make changes to their schemas - see details in the [openapi readme](schemas\u002FREADME.md).\n`packages` - clients generated from the `schemas` using generator tools. 
you will mostly not need to manually modify these files.\n`utils` - various utilities that help do offline processing, like high volume autointerp, or generating dashboards, or exporting data.\n\n# Security\n\nPlease report vulnerabilities to [johnny@neuronpedia.org](mailto:johnny@neuronpedia.org).\n\nWe don't currently have an official bounty program, but we'll try our best to give compensation based on the severity of the vulnerability - though it's likely we will not be able to offer awards for any low-severity vulnerabilities.\n\n# Contact \u002F Support\n\n- slack: [join #neuronpedia](https:\u002F\u002Fjoin.slack.com\u002Ft\u002Fopensourcemechanistic\u002Fshared_invite\u002Fzt-3m2fulfeu-0LnVnF8yCrKJYQvWLuCQaQ)\n- email: [johnny@neuronpedia.org](mailto:johnny@neuronpedia.org)\n- issues: [github issues](https:\u002F\u002Fgithub.com\u002Fhijohnnylin\u002Fneuronpedia\u002Fissues)\n\n# Contributing\n\nSee [CONTRIBUTING.md](CONTRIBUTING.md).\n\n# Appendix\n\n### 'Make' Commands Reference\n\nyou can view all available `make` commands and brief descriptions of them by running `make help`\n\n### Import Data Into Your Local Database\n\nIf you set up your own database, it will start out empty - no features, explanations, activations, etc. To load this data, there's a built-in `admin panel` where you can download this data for SAEs (or \"sources\") of your choosing.\n\n> ⚠️ **warning:** the admin panel is finicky and does not currently support resuming imports. if an import is interrupted, you must manually click `re-sync`. the admin panel currently does not check if your download is complete or missing parts - it is up to you to check if the data is complete, and if not, to click `re-sync` to re-download the entire dataset.\n\n> ℹ️ **recommendation:** When importing data, start with just one source (like `gpt2-small`@`10-res-jb`) instead of downloading everything at once. This makes it easier to verify the data imported correctly and lets you start using Neuronpedia faster.\n\nThe instructions below demonstrate how to download the `gpt2-small`@`10-res-jb` SAE data.\n\n1. navigate to [localhost:3000\u002Fadmin](http:\u002F\u002Flocalhost:3000\u002Fadmin).\n2. scroll down to `gpt2-small`, and expand `res-jb` with the `▶`.\n3. click `Download` next to `10-res-jb`.\n4. wait patiently - this can be a _LOT_ of data, and depending on your connection\u002Fcpu speed it can take up to 30 minutes or an hour.\n5. once it's done, click `Browse` or use the navbar to try it out: `Jump To`\u002F`Search`\u002F`Steer`.\n6. repeat for other SAE\u002Fsource data you wish to download.\n\n### Why an OpenAI API Key Is Needed for Search Explanations\n\nIn the webapp, the `Search Explanations` feature requires you to set an `OPENAI_API_KEY`. Otherwise you will get no search results.\n\nThis is because the `search explanations` functionality searches for features by semantic similarity. If you search `cat`, it will also return `feline`, `tabby`, `animal`, etc. To do this, it needs to calculate the embedding for your input `cat`. We use openai's embedding api (specifically, `text-embedding-3-large` with `dimension: 256`) to calculate the embeddings.
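\n\nAs a rough, hand-rolled python sketch of that idea (an illustration, not the webapp's actual code path):\n\n```\nfrom openai import OpenAI\n\nclient = OpenAI()  # reads OPENAI_API_KEY from the environment\n\ndef embed(texts):\n    # the same model + dimension named above\n    resp = client.embeddings.create(\n        model=\"text-embedding-3-large\", input=texts, dimensions=256\n    )\n    return [d.embedding for d in resp.data]\n\nexplanations = [\"references to felines\", \"tabby cat patterns\", \"stock tickers\"]\n*vectors, query = embed(explanations + [\"cat\"])\n\ndef cossim(vec):\n    # these embeddings come back unit length, so a dot product is cosine similarity\n    return sum(a * b for a, b in zip(vec, query))\n\nfor text, vec in sorted(zip(explanations, vectors), key=lambda t: -cossim(t[1])):\n    print(round(cossim(vec), 3), text)\n```\n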
","\u003Cp align=\"center\">\n  \u003Ca href=\"https:\u002F\u002Fgithub.com\u002Fhijohnnylin\u002Fneuronpedia\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fhijohnnylin_neuronpedia_readme_dd5e0dfd4fca.png\" alt=\"Splash GIF\"\u002F>\n  \u003C\u002Fa>\n\n\u003Ch3 align=\"center\">\u003Ca href=\"https:\u002F\u002Fneuronpedia.org\">neuronpedia.org 🧠🔍\u003C\u002Fa>\u003C\u002Fh3>\n\n  \u003Cp align=\"center\">\n    开源可解释性平台\n    \u003Cbr \u002F>\n    \u003Csub>\n    \u003Cstrong>API · 引导 · 激活值 · 电路\u002F图谱 · 自动解释 · 评分 · 推理 · 搜索 · 过滤 · 仪表盘 · 基准测试 · 余弦相似度 · UMAP · 嵌入 · 探针 · SAE · 列表 · 导出 · 上传\u003C\u002Fstrong>\n    \u003C\u002Fsub>\n  \u003C\u002Fp>\n\u003C\u002Fp>\n\n\u003Cp align=\"center\" style=\"color: #cccccc;\">\n  \u003Ca href=\"https:\u002F\u002Fgithub.com\u002Fhijohnnylin\u002Fneuronpedia\u002Fblob\u002Fmain\u002FLICENSE\">\u003Cimg height=\"20px\" src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Flicense-MIT-yellow.svg\" alt=\"MIT\">\u003C\u002Fa>\n  \u003Ca href=\"https:\u002F\u002Fstatus.neuronpedia.org\">\u003Cimg height=\"20px\" src=\"https:\u002F\u002Fuptime.betterstack.com\u002Fstatus-badges\u002Fv2\u002Fmonitor\u002F1roih.svg\" alt=\"Uptime\">\u003C\u002Fa>\n  \u003Ca href=\"https:\u002F\u002Fjoin.slack.com\u002Ft\u002Fopensourcemechanistic\u002Fshared_invite\u002Fzt-3m2fulfeu-0LnVnF8yCrKJYQvWLuCQaQ\">\u003Cimg height=\"20px\" src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Fslack-purple?logo=slack&logoColor=white\" alt=\"Slack\">\u003C\u002Fa>\n  \u003Ca href=\"mailto:johnny@neuronpedia.org\">\u003Cimg height=\"20px\" src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Fcontact-blue.svg?logo=data:image\u002Fsvg%2bxml;base64,PHN2ZyB2aWV3Qm94PSIwIDAgMjQgMjQiIGZpbGw9Im5vbmUiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyI+PGcgaWQ9IlNWR1JlcG9fYmdDYXJyaWVyIiBzdHJva2Utd2lkdGg9IjAiPjwvZz48ZyBpZD0iU1ZHUmVwb190cmFjZXJDYXJyaWVyIiBzdHJva2UtbGluZWNhcD0icm91bmQiIHN0cm9rZS1saW5lam9pbj0icm91bmQiPjwvZz48ZyBpZD0iU1ZHUmVwb19pY29uQ2FycmllciI+IDxwYXRoIGQ9Ik00IDcuMDAwMDVMMTAuMiAxMS42NUMxMS4yNjY3IDEyLjQ1IDEyLjczMzMgMTIuNDUgMTMuOCAxMS42NUwyMCA3IiBzdHJva2U9IiNmZmZmZmYiIHN0cm9rZS13aWR0aD0iMiIgc3Ryb2tlLWxpbmVjYXA9InJvdW5kIiBzdHJva2UtbGluZWpvaW49InJvdW5kIj48L3BhdGg+IDxyZWN0IHg9IjMiIHk9IjUiIHdpZHRoPSIxOCIgaGVpZ2h0PSIxNCIgcng9IjIiIHN0cm9rZT0iI2ZmZmZmZiIgc3Ryb2tlLXdpZHRoPSIyIiBzdHJva2UtbGluZWNhcD0icm91bmQiPjwvcmVjdD4gPC9nPjwvc3ZnPg==\" alt=\"Email\">\u003C\u002Fa>\n  \u003Ca href=\"https:\u002F\u002Fneuronpedia.org\u002Fblog\">\u003Cimg height=\"20px\" src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Fblog-10b981.svg\" alt=\"blog\">\u003C\u002Fa>\n  \u003Ca href=\"https:\u002F\u002Fneuronpedia.org\">\u003Cimg height=\"20px\" src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Fwebsite-gray.svg\" alt=\"website\">\u003C\u002Fa>\n\n\u003C\u002Fp>\n\n- [关于 Neuronpedia](#about-neuronpedia)\n- [设置本地环境](#setting-up-your-local-environment)\n  - [\"我想使用本地数据库 \u002F 导入更多 Neuronpedia 数据\"](#i-want-to-use-a-local-database--import-more-neuronpedia-data)\n  - [\"我想进行 Web 应用开发（前端 + API）\"](#i-want-to-do-webapp-frontend--api-development)\n  - [\"我想在本地运行\u002F开发推理功能\"](#i-want-to-rundevelop-inference-locally)\n  - ['我想在本地运行\u002F开发图谱服务器'](#i-want-to-rundevelop-the-graph-server-locally)\n  - ['我想在本地运行\u002F开发自动解释功能'](#i-want-to-rundevelop-autointerp-locally)\n  - ['我想进行高吞吐量的自动解释'](#i-want-to-do-high-volume-autointerp-explanations)\n  - 
['我想生成自己的仪表盘\u002F数据并添加到 Neuronpedia 中'](#i-want-to-generate-my-own-dashboardsdata-and-add-it-to-neuronpedia)\n- [架构](#architecture)\n  - [要求](#requirements)\n  - [服务](#services)\n    - [服务是独立的应用程序](#services-are-standalone-apps)\n    - [服务特定文档](#service-specific-documentation)\n  - [OpenAPI 模式](#openapi-schema)\n  - [Monorepo 目录结构](#monorepo-directory-structure)\n- [安全](#security)\n- [联系 \u002F 支持](#contact--support)\n- [贡献](#contributing)\n- [附录](#appendix)\n    - ['Make' 命令参考](#make-commands-reference)\n    - [将数据导入本地数据库](#import-data-into-your-local-database)\n    - [为什么搜索解释需要 OpenAI API 密钥](#why-an-openai-api-key-is-needed-for-search-explanations)\n\n# 关于 Neuronpedia\n\n请查看我们的[博客文章](https:\u002F\u002Fwww.neuronpedia.org\u002Fblog\u002Fneuronpedia-is-now-open-source)，了解有关 Neuronpedia 的信息、我们开源它的原因以及其他细节。此外，还有一个包含快速演示的[Twitter 线程](https:\u002F\u002Fx.com\u002Fneuronpedia\u002Fstatus\u002F1906793456879775745)。\n\n**功能概览**\n\n一张展示截至 2025 年 3 月 Neuronpedia 主要功能的示意图。\n![neuronpedia-features](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fhijohnnylin_neuronpedia_readme_fd67af1704cc.png)\n\n# 设置本地环境\n\n首先设置你的[本地数据库](#i-want-to-use-a-local-database--import-more-neuronpedia-data)。\n\n> 🔥 **小贴士：** Neuronpedia 针对 AI 代理开发进行了配置。以下是一个使用[单个提示](https:\u002F\u002Fgithub.com\u002Fhijohnnylin\u002Fneuronpedia\u002Fblob\u002Fmain\u002Fapps\u002Fexperiments\u002Fsteerify\u002FREADME.md#claude-code-prompt)构建自定义应用（Steerify）的例子，该应用以 Neuronpedia 的推理服务器作为后端：\n\nhttps:\u002F\u002Fgithub.com\u002Fuser-attachments\u002Fassets\u002Fbc82f88b-8155-4c1d-948a-ea5d987ae0f8\n\n## “我想使用本地数据库 \u002F 导入更多 Neuronpedia 数据”\n\n#### 这些步骤的作用及你将获得的内容\n\n这些步骤将指导你如何配置并连接到自己的本地数据库。随后，你可以下载自己选择的数据源\u002FSAE：\n\nhttps:\u002F\u002Fgithub.com\u002Fuser-attachments\u002Fassets\u002Fd7fbb46e-8522-4f98-aa08-21c6529424af\n\n> ⚠️ **警告：** 你的数据库初始为空。你需要使用管理面板来[导入数据](#import-data-into-your-local-database)（激活值、解释等）。\n\n> ⚠️ **警告：** 本地数据库环境中没有连接任何推理服务器，因此你最初无法进行激活测试、引导等操作。你需要[配置一个本地推理实例](#i-want-to-rundevelop-inference-locally)。\n\n#### 步骤\n\n1. 构建 Web 应用\n   ```\n   make webapp-localhost-build\n   ```\n2. 启动 Web 应用\n   ```\n   make webapp-localhost-run\n   ```\n3. 访问 [localhost:3000](http:\u002F\u002Flocalhost:3000) 查看你的本地 Web 应用实例，它现已连接到你的本地数据库\n4. 参阅上述“警告”部分以及“后续步骤”，完成设置\n\n#### 后续步骤\n\n1. [点击此处](#import-data-into-your-local-database)了解如何将数据导入本地数据库（激活值、解释等），因为你的本地数据库初始为空\n2. [点击此处](#i-want-to-rundevelop-inference-locally)了解如何为你正在使用的模型\u002F数据源\u002FSAE 启动一个本地“推理”服务\n\n## “我想进行 Web 应用（前端 + API）开发”\n\n#### 这部分的作用\n\n到目前为止，你一直在构建的 Web 应用都是 _生产构建_，这种构建方式虽然运行速度快，但构建过程较慢。由于生产构建速度慢且不包含调试信息，因此并不适合开发使用。\n\n本小节会在你的本地机器上安装开发构建版本（不使用 Docker），然后将该构建挂载到你的 Docker 容器中。\n\n#### 你将获得什么\n\n完成本小节后，你就可以在本地进行开发，并能快速看到代码更改的效果，同时还能看到更详细的调试信息和错误提示。如果你只是想专注于 Neuronpedia 的前端或 API 开发，那么无需再进行其他设置！\n\n#### 操作步骤\n\n1. 通过 [Node 版本管理工具](https:\u002F\u002Fgithub.com\u002Fnvm-sh\u002Fnvm) 安装 [Node.js](https:\u002F\u002Fnodejs.org)：\n   ```\n   make install-nodejs\n   ```\n2. 安装 Web 应用的依赖项：\n   ```\n   make webapp-localhost-install\n   ```\n3. 启动开发实例：\n   ```\n   make webapp-localhost-dev\n   ```\n4. 打开 [localhost:3000](http:\u002F\u002Flocalhost:3000)，即可查看你的本地 Web 应用实例。\n\n#### 在本地进行 Web 应用开发\n\n- **自动刷新**：当你修改 `apps\u002Fwebapp` 子目录中的任何文件时，`localhost:3000` 会自动重新加载。\n- **安装命令**：你无需再次运行 `make install-nodejs`，并且只有在依赖项发生变化时才需要运行 `make webapp-localhost-install`。\n\n## “我想在本地运行\u002F开发推理”\n\n#### 这部分的作用及你将获得的内容\n\n本小节将向你展示如何在本地运行一个推理实例，以便你可以对你下载的源数据或 SAE 集合进行引导、激活测试等操作。\n\n> ⚠️ **警告：** 在本地环境中，我们仅支持同时运行一个推理服务器。这是因为你在同一台机器上不太可能同时运行多个模型，因为这些模型对内存和计算资源的需求都非常高。\n\n
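当完成下方步骤、推理服务器就绪后，可以先用一段小脚本确认它在响应。以下是一个假设性的 python 草图（`x-secret-key` 请求头名称是参照图服务器的假设，实际端点与鉴权方式请以 `schemas\u002Fopenapi\u002Finference-server.yaml` 为准）：\n\n```\nimport requests\n\n# 假设：密钥请求头与图服务器一致，使用 x-secret-key；其值对应\n# INFERENCE_SERVER_SECRET 的默认值 public\nheaders = {\"x-secret-key\": \"public\"}\n\n# \u002Fdocs 是服务器启动后提供的 swagger 交互式文档页面\nresp = requests.get(\"http:\u002F\u002Flocalhost:5002\u002Fdocs\", headers=headers, timeout=30)\nprint(resp.status_code)  # 200 表示本地推理服务器已就绪\n```\n\n#### 操作步骤\n\n1. 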
确保你已安装 [Poetry](https:\u002F\u002Fpython-poetry.org\u002Fdocs\u002F#installation)。\n2. 安装推理服务器的依赖项：\n   ```\n   make inference-localhost-install\n   ```\n3. 根据你的机器是否配备 CUDA 来构建镜像：\n   ```\n   # CUDA\n   make inference-localhost-build-gpu USE_LOCAL_HF_CACHE=1\n   ```\n   ```\n   # 无 CUDA\n   make inference-localhost-build USE_LOCAL_HF_CACHE=1\n   ```\n   > ➡️ [`USE_LOCAL_HF_CACHE=1` 标志](https:\u002F\u002Fgithub.com\u002Fhijohnnylin\u002Fneuronpedia\u002Fpull\u002F89)会将你本地的 HuggingFace 缓存挂载到 `${HOME}\u002F.cache\u002Fhuggingface\u002Fhub:\u002Froot\u002F.cache\u002Fhuggingface\u002Fhub`。如果你希望在容器中创建一个新的缓存，可以在此处以及下一步中省略该标志。\n4. 运行推理服务器，使用 `MODEL_SOURCESET` 参数指定你要加载的 `.env.inference.[model_sourceset]` 文件。以本示例为例，我们将运行 `gpt2-small`，并加载配置在 `.env.inference.gpt2-small.res-jb` 文件中的 `res-jb` 源数据集\u002FSAE 集合。你也可以查看其他[预加载的推理配置](#pre-loaded-inference-server-configurations)，或者[创建自己的配置](#making-your-own-inference-server-configurations)。\n\n   ```\n   # CUDA\n   make inference-localhost-dev-gpu \\\n        MODEL_SOURCESET=gpt2-small.res-jb \\\n        USE_LOCAL_HF_CACHE=1\n\n   # 无 CUDA\n   make inference-localhost-dev \\\n        MODEL_SOURCESET=gpt2-small.res-jb \\\n        USE_LOCAL_HF_CACHE=1\n   ```\n5. 等待加载完成（首次启动会较慢）。当看到 `Initialized: True` 时，本地推理服务器已在 `localhost:5002` 上准备就绪。\n\n#### 使用推理服务器\n\n要与推理服务器交互，你有几种选择——请注意，这仅适用于你已加载的模型和选定的源数据：\n\n1. 加载带有[本地数据库设置](#i-want-to-use-a-local-database--import-more-neuronpedia-data)的 Web 应用程序，然后像在 Neuronpedia 上一样正常使用模型和选定的源数据。\n2. 使用位于 `packages\u002Fpython\u002Fneuronpedia-inference-client` 的预生成推理 Python 客户端（将环境变量 `INFERENCE_SERVER_SECRET` 设置为 `public`，或者根据你在 `.env.localhost` 中的设置进行调整）。\n3. 使用位于 `schemas\u002Fopenapi\u002Finference-server.yaml` 的 OpenAPI 规范，通过任何你喜欢的客户端发起调用。服务器启动后，你可以在 `\u002Fdocs` 获得交互式的 Swagger 文档。详细信息请参阅 `apps\u002Finference\u002FREADME.md`。\n\n#### 预加载的推理服务器配置\n\n我们提供了一些预加载的推理配置，作为如何加载特定模型和源数据集进行推理的示例。可通过运行 `make inference-list-configs` 查看：\n\n```\n$ make inference-list-configs\n\n可用的推理配置 (.env.inference.*)\n================================================\n\ndeepseek-r1-distill-llama-8b.llamascope-slimpj-res-32k\n    模型：meta-llama\u002FLlama-3.1-8B\n    源\u002FSAE 集合：'[\"llamascope-slimpj-res-32k\"]'\n    make inference-localhost-dev MODEL_SOURCESET=deepseek-r1-distill-llama-8b.llamascope-slimpj-res-32k\n\ngemma-2-2b-it.gemmascope-res-16k\n    模型：gemma-2-2b-it\n    源\u002FSAE 集合：'[\"gemmascope-res-16k\"]'\n    make inference-localhost-dev MODEL_SOURCESET=gemma-2-2b-it.gemmascope-res-16k\n\ngpt2-small.res-jb\n    模型：gpt2-small\n    源\u002FSAE 集合：'[\"res-jb\"]'\n    make inference-localhost-dev MODEL_SOURCESET=gpt2-small.res-jb\n```\n\n#### 创建你自己的推理服务器配置\n\n可参考 `.env.inference.*` 文件，了解如何制作这些推理服务器配置。\n\n`MODEL_ID` 是来自 [transformerlens 模型表](https:\u002F\u002Ftransformerlensorg.github.io\u002FTransformerLens\u002Fgenerated\u002Fmodel_properties_table.html)的模型 ID，而每个 `SAE_SETS` 则是 Neuronpedia 源 ID 中层号和连字符之后的部分——例如，如果你有一个 Neuronpedia 特征的 URL 是 `http:\u002F\u002Fneuronpedia.org\u002Fgpt2-small\u002F0-res-jb\u002F123`，那么 `0-res-jb` 就是源 ID，而 `SAE_SETS` 中的条目就是 `res-jb`。此示例与 `.env.inference.gpt2-small.res-jb` 文件完全一致。\n\n你可以在 saelens 的[预训练 SAE YAML 文件](https:\u002F\u002Fgithub.com\u002FjbloomAus\u002FSAELens\u002Fblob\u002Fmain\u002Fsae_lens\u002Fpretrained_saes.yaml)中找到 Neuronpedia 源 ID，或者通过点击进入 [Neuronpedia 数据集导出](https:\u002F\u002Fneuronpedia-datasets.s3.us-east-1.amazonaws.com\u002Findex.html?prefix=v1\u002F)目录来查找。\n\n**使用 TransformerLens 官方不支持的模型**\n\n可参考 `.env.inference.deepseek-r1-distill-llama-8b.llamascope-slimpj-res-32k` 文件，了解如何加载 
TransformerLens 官方不支持的模型。这主要用于替换蒸馏或微调后的权重。\n\n**加载非 Saelens 的源数据\u002FSAE**\n\n- [待办事项 #2](https:\u002F\u002Fgithub.com\u002Fhijohnnylin\u002Fneuronpedia\u002Fissues\u002F2) 记录了如何加载不在 saelens 预训练 YAML 文件中的 SAE 或源数据。\n\n#### 进行本地推理开发\n\n- **基于 schema 的开发**：若要添加新端点或更改现有端点，你需要先更新 OpenAPI schema，然后从中生成客户端，最后再更新实际的推理和 Web 应用程序代码。有关具体操作方法，请参阅 [OpenAPI 说明文档：修改推理服务器](schemas\u002FREADME.md#making-changes-to-the-inference-server)。\n- **无自动重载**：当你更改 `apps\u002Finference` 子目录中的任何文件时，推理服务器不会自动重新加载，因为服务器重启非常耗时——它会重新加载模型以及所有源数据和 SAE。如果你想启用自动重载，可以在 `make inference-localhost-dev` 命令中添加 `AUTORELOAD=1`，如下所示：\n  ```\n  make inference-localhost-dev \\\n       MODEL_SOURCESET=gpt2-small.res-jb \\\n       AUTORELOAD=1\n  ```\n\n## ‘我想在本地运行\u002F开发图服务器’\n\n#### 这能做什么 + 你会得到什么\n\n图服务器为归因图生成功能提供支持，该功能基于 Piotrowski 和 Hanna 开发的 [circuit-tracer](https:\u002F\u002Fgithub.com\u002Fsafety-research\u002Fcircuit-tracer) 构建。当您通过 [Neuronpedia Circuit Tracer](https:\u002F\u002Fwww.neuronpedia.org\u002Fgemma-2-2b\u002Fgraph) 界面创建新图时，此服务会处理后端流程。\n\n#### 步骤\n\n1. 确保已安装 [Poetry](https:\u002F\u002Fpython-poetry.org\u002Fdocs\u002F#installation)\n2. 安装图服务器的依赖项：\n   ```\n   make graph-localhost-install\n   ```\n3. 在 `apps\u002Fgraph` 目录下，创建一个包含 `SECRET` 和 `HF_TOKEN` 的 `.env` 文件（参见 `apps\u002Fgraph\u002F.env.example`）：\n   - `SECRET` 是服务器密钥，需在 `x-secret-key` 请求头中传递。\n   - 确保您的 `HF_TOKEN` 具有访问 Hugging Face 上 [Gemma-2-2B 模型](https:\u002F\u002Fhuggingface.co\u002Fgoogle\u002Fgemma-2-2b)的权限。\n4. 根据机器是否配备 CUDA，选择正确的命令来构建镜像：\n   ```\n   # CUDA\n   make graph-localhost-build-gpu USE_LOCAL_HF_CACHE=1\n   ```\n   ```\n   # 无 CUDA\n   make graph-localhost-build USE_LOCAL_HF_CACHE=1\n   ```\n   > ➡️ [`USE_LOCAL_HF_CACHE=1` 标志](https:\u002F\u002Fgithub.com\u002Fhijohnnylin\u002Fneuronpedia\u002Fpull\u002F89)会将您本地的 Hugging Face 缓存挂载到 `${HOME}\u002F.cache\u002Fhuggingface\u002Fhub:\u002Froot\u002F.cache\u002Fhuggingface\u002Fhub`。如果您希望在容器内创建新的缓存，则可以在此处及下一步省略此标志。\n5. 运行图服务器：\n   ```\n   # CUDA\n   make graph-localhost-dev-gpu \\\n        USE_LOCAL_HF_CACHE=1\n\n   # 无 CUDA\n   make graph-localhost-dev \\\n        USE_LOCAL_HF_CACHE=1\n   ```\n6. 等待容器启动。\n\n有关示例请求，请参阅 [图服务器 README](apps\u002Fgraph\u002FREADME.md#example-request---output-graph-json-directly)。\n\n## ‘我想在本地运行\u002F开发自动解释器’\n\n#### 这能做什么 + 你会得到什么\n\n自动解释器服务器提供对神经网络特征的自动解释和评分功能。它使用 eleutherAI 的 [delphi](https:\u002F\u002Fgithub.com\u002FEleutherAI\u002Fdelphi) 来生成解释并进行评分。\n\n> ⚠️ **警告：** eleuther 嵌入评分器使用的嵌入模型仅支持 CUDA（无法在 Mac MPS 或 CPU 上运行）。\n\n#### 步骤\n\n1. 确保已安装 [Poetry](https:\u002F\u002Fpython-poetry.org\u002Fdocs\u002F#installation)\n2. 安装自动解释器服务器的依赖项：\n   ```\n   make autointerp-localhost-install\n   ```\n3. 根据机器是否配备 CUDA，选择正确的命令来构建镜像：\n   ```\n   # CUDA\n   make autointerp-localhost-build-gpu USE_LOCAL_HF_CACHE=1\n   ```\n   ```\n   # 无 CUDA\n   make autointerp-localhost-build USE_LOCAL_HF_CACHE=1\n   ```\n   > ➡️ [`USE_LOCAL_HF_CACHE=1` 标志](https:\u002F\u002Fgithub.com\u002Fhijohnnylin\u002Fneuronpedia\u002Fpull\u002F89)会将您本地的 Hugging Face 缓存挂载到 `${HOME}\u002F.cache\u002Fhuggingface\u002Fhub:\u002Froot\u002F.cache\u002Fhuggingface\u002Fhub`。如果您希望在容器内创建新的缓存，则可以在此处及下一步省略此标志。\n4. 运行自动解释器服务器：\n   ```\n   # CUDA\n   make autointerp-localhost-dev-gpu \\\n        USE_LOCAL_HF_CACHE=1\n\n   # 无 CUDA\n   make autointerp-localhost-dev \\\n        USE_LOCAL_HF_CACHE=1\n   ```\n5. 等待加载完成。\n\n#### 使用自动解释器服务器\n\n要与自动解释器服务器交互，您可以选择以下几种方式：\n\n1. 
使用预生成的自动解释器 Python 客户端，位于 `packages\u002Fpython\u002Fneuronpedia-autointerp-client`（将环境变量 `AUTOINTERP_SERVER_SECRET` 设置为 `public`，或根据您在 `.env.localhost` 中的设置进行更改）。\n2. 使用 OpenAPI 规范文件，位于 `schemas\u002Fopenapi\u002Fautointerp-server.yaml`，以便使用您选择的任何客户端发起调用。服务器启动后，您可以在 `\u002Fdocs` 获取 Swagger 交互式规范。详细信息请参阅 `apps\u002Finference\u002FREADME.md`。\n\n#### 进行本地自动解释器开发\n\n- **基于 schema 的开发**：要添加新端点或更改现有端点，您需要先更新 OpenAPI schema，然后从中生成客户端，最后再更新实际的自动解释器和 Web 应用程序代码。有关具体操作方法，请参阅 [OpenAPI README：修改自动解释器服务器](schemas\u002FREADME.md#making-changes-to-the-autointerp-server)。\n- **无自动重载**：当您更改 `apps\u002Fautointerp` 子目录中的任何文件时，默认情况下自动解释器服务器不会自动重新加载。如果您希望启用自动重载，可在 `make autointerp-localhost-dev` 命令中附加 `AUTORELOAD=1`，如下所示：\n  ```\n  make autointerp-localhost-dev \\\n       AUTORELOAD=1\n  ```\n\n## ‘我想进行高容量的自动解释说明’ \n\n本节正在建设中。\n\n- 使用 EleutherAI 的 [Delphi 库](https:\u002F\u002Fgithub.com\u002FEleutherAI\u002Fdelphi)。\n- 对于 OpenAI 的自动解释，可使用 [utils\u002Fneuronpedia_utils\u002Fbatch-autointerp.py](utils\u002Fneuronpedia_utils\u002Fbatch-autointerp.py)。\n\n## ‘我想生成自己的仪表板\u002F数据，并将其添加到 Neuronpedia’\n\n本节正在建设中。\n\n[待办事项：简化数据生成及上传至 Neuronpedia 的流程](https:\u002F\u002Fgithub.com\u002Fhijohnnylin\u002Fneuronpedia\u002Fissues\u002F46)\n\n[待办事项：neuronpedia-utils 应使用 Poetry](https:\u002F\u002Fgithub.com\u002Fhijohnnylin\u002Fneuronpedia\u002Fissues\u002F43)\n\n在本示例中，我们将为一个与 SAELens 兼容的 SAE 生成仪表板\u002F数据，并将其上传到我们自己的 Neuronpedia 实例。\n\n1. 确保已安装 [Poetry](https:\u002F\u002Fpython-poetry.org\u002Fdocs\u002F)。\n2. 将您与 SAELens 兼容的源代码\u002FSAE [上传](https:\u002F\u002Fgithub.com\u002FjbloomAus\u002FSAELens\u002Fblob\u002Fmain\u002Ftutorials\u002Fuploading_saes_to_huggingface.ipynb) 至 HuggingFace。\n   > 示例\n   > ➡️ [https:\u002F\u002Fhuggingface.co\u002Fchanind\u002Fgemma-2-2b-batch-topk-matryoshka-saes-w-32k-l0-40](https:\u002F\u002Fhuggingface.co\u002Fchanind\u002Fgemma-2-2b-batch-topk-matryoshka-saes-w-32k-l0-40)\n3. 在本地克隆 SAELens。\n   ```\n   git clone https:\u002F\u002Fgithub.com\u002FjbloomAus\u002FSAELens.git\n   ```\n4. 打开您克隆的 SAELens，并编辑文件 `sae_lens\u002Fpretrained_saes.yaml`。根据下方模板，在文件末尾添加一条新条目（请参阅注释以了解如何填写）：\n   > 示例\n   > ➡️ [https:\u002F\u002Fgithub.com\u002FjbloomAus\u002FSAELens\u002Fpull\u002F455\u002Ffiles](https:\u002F\u002Fgithub.com\u002FjbloomAus\u002FSAELens\u002Fpull\u002F455\u002Ffiles)\n   ```\n   gemma-2-2b-res-matryoshka-dc:                 # 您 SAE 集合的唯一标识符\n     conversion_func: null                       # 如果您的 SAE 配置已与 SAELens 兼容，则设为 null\n     links:                                      # 可选链接\n       model: https:\u002F\u002Fhuggingface.co\u002Fgoogle\u002Fgemma-2-2b\n     model: gemma-2-2b                           # TransformerLens 模型 ID - https:\u002F\u002Ftransformerlensorg.github.io\u002FTransformerLens\u002Fgenerated\u002Fmodel_properties_table.html\n     repo_id: chanind\u002Fgemma-2-2b-batch-topk-matryoshka-saes-w-32k-l0-40  # HuggingFace 仓库路径\n     saes:\n     - id: blocks.0.hook_resid_post                 # 此 SAE 的标识符\n       path: standard\u002Fblocks.0.hook_resid_post      # 仓库路径中指向该 SAE 的位置\n       l0: 40.0\n       neuronpedia: gemma-2-2b\u002F0-matryoshka-res-dc  # 您期望的 Neuronpedia URI - neuronpedia.org\u002F[此_slug]。应为 [模型ID]\u002F[层]-[与此 SAE 集相同的 slug]\n     - id: blocks.1.hook_resid_post                 # 此 SAE 集中的更多 SAE\n       path: standard\u002Fblocks.1.hook_resid_post\n       l0: 40.0\n       neuronpedia: gemma-2-2b\u002F1-matryoshka-res-dc  # 注意，此处与上方条目相同，只是层号从 0 改为 1\n     - [...]\n   ```\n5. 
在本地克隆 [SAEDashboard](https:\u002F\u002Fgithub.com\u002FjbloomAus\u002FSAEDashboard.git)。\n   ```\n   git clone https:\u002F\u002Fgithub.com\u002FjbloomAus\u002FSAEDashboard.git\n   ```\n6. 配置您克隆的 `SAEDashboard` 以使用您修改后的本地 `SAELens`，而非生产环境中的版本。\n   ```\n   cd SAEDashboard                    # 进入目录\n   poetry lock && poetry install      # 安装依赖\n   poetry remove sae-lens             # 移除生产依赖\n   poetry add PATH\u002FTO\u002FCLONED\u002FSAELENS  # 设置本地依赖\n   ```\n7. 为 SAE 生成仪表板。这将耗时 30 分钟至数小时不等，具体取决于您的硬件和模型规模。\n   ```\n   cd SAEDashboard                    # 进入目录\n   rm -rf cached_activations          # 清除旧的缓存数据\n\n   # 开始生成。各参数说明如下（详细信息：https:\u002F\u002Fgithub.com\u002FjbloomAus\u002FSAEDashboard\u002Fblob\u002Fmain\u002Fsae_dashboard\u002Fneuronpedia\u002Fneuronpedia_runner_config.py）\n   #     - sae-set = 应与 pretrained_saes.yaml 中该集合的唯一标识符匹配\n   #     - sae-path = 应与 pretrained_saes.yaml 中该 SAE 的标识符匹配\n   #     - np-set-name = 应与 pretrained_saes.yaml 中该 SAE 的 neuronpedia 字段所指定的 [与此 SAE 集相同的 slug] 匹配\n   #     - dataset-path = 用于生成激活值的 HuggingFace 数据集。通常建议使用模型训练时所用的数据集。\n   #     - output-dir = 仪表板数据的输出目录\n   #     - n-prompts = 从数据集中测试的激活文本数量\n   #     - n-tokens-in-prompt、n-features-per-batch、n-prompts-in-forward-pass = 保持为 128\n   poetry run neuronpedia-runner \\\n        --sae-set=\"gemma-2-2b-res-matryoshka-dc\" \\\n        --sae-path=\"blocks.12.hook_resid_post\" \\\n        --np-set-name=\"matryoshka-res-dc\" \\\n        --dataset-path=\"monology\u002Fpile-uncopyrighted\" \\\n        --output-dir=\"neuronpedia_outputs\u002F\" \\\n        --sae_dtype=\"float32\" \\\n        --model_dtype=\"bfloat16\" \\\n        --sparsity-threshold=1 \\\n        --n-prompts=24576 \\\n        --n-tokens-in-prompt=128 \\\n        --n-features-per-batch=128 \\\n        --n-prompts-in-forward-pass=128\n   ```\n\n8. 将这些仪表板转换为可导入 Neuronpedia 的格式。\n   ```\n   cd neuronpedia\u002Futils\u002Fneuronpedia-utils          # 进入当前仓库的工具目录\n   python convert-saedashboard-to-neuronpedia.py   # 启动引导式转换脚本，按照步骤操作。\n   ```\n9. 一旦为 Neuronpedia 生成了仪表板文件，便将其上传至全球 Neuronpedia S3 存储桶——目前您需要通过 [联系我们](mailto:johnny@neuronpedia.org) 来完成此操作。\n10. 
\n# Architecture\n\nHere's how the services\u002Fscripts in Neuronpedia connect to each other. When reading this diagram, it's best to start from the laptop image (the 'User').\n\n![Architecture diagram](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fhijohnnylin_neuronpedia_readme_23f4c1ff0061.png)\n\n## Requirements\n\nYou can run Neuronpedia on any cloud platform and any modern operating system; it's designed to avoid vendor lock-in. This guide was written and tested on macOS 15 (Sequoia), so you may need to adapt commands for Windows, Ubuntu, etc. At least 16GB of RAM is recommended.\n\n## Services\n\n| Name       | Description                                                                                                                                                  | Stack                            |\n| ---------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------- |\n| webapp     | Serves the neuronpedia.org frontend and the [API](neuronpedia.org\u002Fapi-doc)                                                                                   | [next.js](https:\u002F\u002Fnextjs.org) \u002F react |\n| database   | Stores features, activations, explanations, users, lists, etc.                                                                                                                | postgres                              |\n| inference  | [Support server] Steering, activation testing, search via inference, topk, etc. A separate instance is required for each model running inference.                                         | python \u002F torch                        |\n| autointerp | [Support server] Auto-generates explanations and scores them, using EleutherAI's [delphi](https:\u002F\u002Fgithub.com\u002FEleutherAI\u002Fdelphi) (formerly `sae-auto-interp`).                             | python                                |\n\n### Services are standalone apps\n\nBy design, each service can run on its own as a standalone application. The point of this is scalability and forkability.\n\nFor example, if you like Neuronpedia's webapp frontend but want to use a different API for inference, you can absolutely do that! Just make sure your replacement inference server supports the `schema\u002Fopenapi\u002Finference-server.yaml` spec, or modify the parts of `apps\u002Fwebapp\u002Flib\u002Futils` that call inference.\n\n### Service-specific documentation\n\nThere are currently draft `README`s for each app\u002Fservice under `apps\u002F[service]`, but they are still heavily WIP. You can also look at the `Dockerfile` in the same directory to build your own images.\n\n## OpenAPI schemas\n\nWe use OpenAPI schemas so that the services can talk to each other in a type-safe, consistent way. There are some exceptions; streaming, for instance, isn't officially supported by the OpenAPI standard. Even then, we do our best to define and use schemas.\n\nFor inference and autointerp server development in particular, it's essential to understand and follow the instructions in the [OpenAPI README](schemas\u002FREADME.md).\n\nThe OpenAPI schema files live under `\u002Fschemas`. We use OpenAPI generator tools to generate the TypeScript and Python clients from them.\n\n## Monorepo directory structure\n\n`apps` - Neuronpedia's three services: webapp, inference, and autointerp. Most of the code lives here.\n`schemas` - OpenAPI schemas. To change the inference or autointerp interfaces, you modify their schemas first - see the [OpenAPI README](schemas\u002FREADME.md).\n`packages` - clients generated from `schemas` by the generator tools. You usually shouldn't need to edit these by hand.\n`utils` - assorted utilities for offline processing tasks, like large-batch autointerp, generating dashboards, or exporting data.\n\n# Security\n\nPlease send vulnerability reports to [johnny@neuronpedia.org](mailto:johnny@neuronpedia.org).\n\nWe don't have a formal bug bounty program yet, but we'll do our best to compensate based on severity, though we may not be able to offer rewards for low-severity issues.\n\n# Contact \u002F Support\n\n- Slack: [join #neuronpedia](https:\u002F\u002Fjoin.slack.com\u002Ft\u002Fopensourcemechanistic\u002Fshared_invite\u002Fzt-3m2fulfeu-0LnVnF8yCrKJYQvWLuCQaQ)\n- Email: [johnny@neuronpedia.org](mailto:johnny@neuronpedia.org)\n- Issues: [GitHub issues](https:\u002F\u002Fgithub.com\u002Fhijohnnylin\u002Fneuronpedia\u002Fissues)\n\n# Contributing\n\nSee [CONTRIBUTING.md](CONTRIBUTING.md).\n\n# Appendix\n\n### `make` command reference\n\nYou can see all available `make` commands and brief descriptions by running `make help`.\n\n### Importing data into your local database\n\nIf you set up your own database, it starts out empty - no features, explanations, activations, etc. To load this data, use the built-in 'Admin Panel' to download the data for the SAEs (or 'sources') of your choice.\n\n> ⚠️ **Warning:** The admin panel is finicky and does not currently support resuming interrupted imports. If an import is interrupted, you must manually click 'Re-Sync'. The admin panel currently doesn't check downloads for completeness or missing parts - you'll need to verify the data yourself and, if it's incomplete, click 'Re-Sync' to re-download the whole dataset.\n\n> ℹ️ **Tip:** When importing, start with a single source (say, `gpt2-small`@`10-res-jb`) instead of downloading everything at once. It's easier to verify that the data imported correctly, and you can start using Neuronpedia sooner.\n\nThe following steps show how to download the SAE data for `gpt2-small`@`10-res-jb`.\n\n1. Open [localhost:3000\u002Fadmin](http:\u002F\u002Flocalhost:3000\u002Fadmin).\n2. Scroll down to `gpt2-small`, then expand `res-jb`.\n3. Click the 'Download' button next to `10-res-jb`.\n4. Be patient - this can be a lot of data, and depending on your connection and CPU speed it can take 30 minutes or even an hour.\n5. Once the download completes, click 'Browse' or use the navbar to try things out: jump to features, search, steer.\n6. Repeat for any other SAEs\u002Fsources you'd like to download.\n\n### Why the search explanations feature requires an OpenAI API key\n\nIn the webapp, the 'Search Explanations' feature requires you to set an `OPENAI_API_KEY`. Without it, you won't get any search results.\n\nThis is because 'Search Explanations' finds features by semantic similarity. For example, a search for 'cat' will also return 'feline', 'tabby', 'animal', and so on. To do this, the system needs to compute an embedding of your query 'cat'. We use OpenAI's embeddings API (specifically `text-embedding-3-large` at 256 dimensions) to compute these embeddings.
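\n\nFor reference, here's a minimal sketch of the embedding call this feature relies on, assuming the `openai` Python SDK (v1+) and `OPENAI_API_KEY` set in your environment:\n\n```python\n# Embed a search query the way 'Search Explanations' does:\n# text-embedding-3-large, truncated to 256 dimensions.\nfrom openai import OpenAI\n\nclient = OpenAI()  # reads OPENAI_API_KEY from the environment\nresponse = client.embeddings.create(\n    model=\"text-embedding-3-large\",\n    input=\"cat\",\n    dimensions=256,\n)\nprint(len(response.data[0].embedding))  # 256\n```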
","# Neuronpedia Quick Start Guide\n\nNeuronpedia is an open-source model interpretability platform supporting neuron activation analysis, circuit visualization, automated explanations (autointerp), steering (interventions), and more. This guide helps developers in China quickly stand up a basic local environment and run the core services.\n\n## Environment preparation\n\n### System requirements\n- **Operating system**: Linux (Ubuntu 20.04+ recommended) or macOS\n- **GPU (optional but recommended)**: Running local inference or large models requires an NVIDIA GPU with CUDA set up. Without a GPU you can still run the webapp and database, but not real-time activation testing.\n- **RAM**: 16GB+ recommended (more for large-model inference)\n\n### Prerequisites\nMake sure the following tools are installed:\n- **Docker & Docker Compose**: orchestrate the database and service containers.\n- **Make**: runs the project's automation commands.\n- **Node.js & NVM**: for frontend development (optional; only needed if you modify frontend code).\n- **Poetry**: Python package manager, used for the inference service's dependencies.\n- **Git**: for cloning the repository.\n\n> **Acceleration tips for users in China**:\n> - When pulling Docker images, configure a domestic registry mirror (Aliyun, Tencent Cloud, etc.).\n> - When installing Python\u002FNode dependencies, you can temporarily use a domestic index (e.g. `pip install -i https:\u002F\u002Fpypi.tuna.tsinghua.edu.cn\u002Fsimple`), but when running this project's `make` commands, stick with the project defaults to ensure compatibility.\n\n## Installation\n\n### 1. Clone the project\n```bash\ngit clone https:\u002F\u002Fgithub.com\u002Fhijohnnylin\u002Fneuronpedia.git\ncd neuronpedia\n```\n\n### 2. Start the local database and webapp\nThis is the most basic way to run, useful for browsing data and importing custom sources.\n\n```bash\n# Build the webapp\nmake webapp-localhost-build\n\n# Run the webapp (connected to an empty local database)\nmake webapp-localhost-run\n```\nOnce it's up, visit `http:\u002F\u002Flocalhost:3000` to see your local instance.\n> **Note**: The database is empty on first launch; import data through the admin panel (see 'Basic usage').\n\n### 3. (Optional) Configure the local inference service\nTo do activation testing, steering, or autointerp, start the inference server.\n\n**Install dependencies:**\n```bash\n# Make sure Poetry is installed first\nmake inference-localhost-install\n```\n\n**Build and run (pick the command based on whether you have a GPU):**\n\n*   **With CUDA:**\n    ```bash\n    make inference-localhost-dev-gpu \\\n         MODEL_SOURCESET=gpt2-small.res-jb \\\n         USE_LOCAL_HF_CACHE=1\n    ```\n*   **Without CUDA (CPU mode, slower):**\n    ```bash\n    make inference-localhost-dev \\\n         MODEL_SOURCESET=gpt2-small.res-jb \\\n         USE_LOCAL_HF_CACHE=1\n    ```\n\n> **Flags**:\n> - `MODEL_SOURCESET`: the model-and-source combination to load (e.g. `gpt2-small.res-jb`). Run `make inference-list-configs` to see the supported configs.\n> - `USE_LOCAL_HF_CACHE=1`: mounts your local HuggingFace cache so model weights aren't re-downloaded.\n\nOnce the terminal prints `Initialized: True`, the inference service is ready at `localhost:5002`.\n\n## Basic usage\n\n### 1. Import data into the local database\nSince the local database starts out empty, you need to import neuron activations, explanations, and so on.\n\n1.  Visit `http:\u002F\u002Flocalhost:3000` and log into the admin panel (on first use you may need to register an admin account; follow the prompts in the UI).\n2.  In the Admin Panel, find the **Import Sources\u002FData** option.\n3.  Upload or select the SAE (Sparse Autoencoder) source files and activation data you want to analyze.\n    *   You can use data exported from the official Neuronpedia, or data you generated yourself.\n
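\nOnce an import finishes, you can optionally verify it from the command line before moving on. The sketch below is illustrative only: it assumes the feature-lookup route documented for the public API (`\u002Fapi\u002Ffeature\u002F[model]\u002F[source]\u002F[index]`, see the webapp's `\u002Fapi-doc` page) is also served by your local instance, and it uses example IDs from this guide; adjust the path and IDs to whatever you imported.\n\n```python\n# Fetch one feature from the local webapp to confirm the source imported.\nimport requests\n\nresp = requests.get(\"http:\u002F\u002Flocalhost:3000\u002Fapi\u002Ffeature\u002Fgpt2-small\u002F10-res-jb\u002F0\")\nresp.raise_for_status()\nprint(resp.json().get(\"explanations\"))\n```\n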
\n### 2. Explore neurons and visualizations\nOnce the data has imported successfully:\n1.  Search for a specific neuron or feature in the web UI.\n2.  View that neuron's activation distribution, top activating examples, and auto-generated natural-language explanation.\n3.  If the **local inference service** is running, you can:\n    *   Run **activation testing**: enter text and see how specific neurons respond.\n    *   Run **steering**: manually intervene on neuron activation values and watch how the model's output changes.\n\n### 3. Developer mode (frontend changes)\nIf you need to modify frontend code or API logic, build in development mode to get hot reload:\n\n```bash\n# Install the Node environment\nmake install-nodejs\n\n# Install frontend dependencies\nmake webapp-localhost-install\n\n# Start the dev server\nmake webapp-localhost-dev\n```\nAfter this, edits to files under `apps\u002Fwebapp` will refresh automatically in the browser.\n\n---\n*Note: for more advanced features (such as large-scale autointerp and custom dashboard generation), see the detailed docs in the project root.*","An AI safety team is doing mechanistic interpretability research on an open-source LLM, trying to locate and repair the specific neuron circuits that produce racially discriminatory output.\n\n### Without neuronpedia\n- **Black-box guesswork**: Researchers face raw matrices of hundreds of millions of weights, with no direct view of which neurons a given harmful concept activates; the search is a needle in a haystack.\n- **Hard to reproduce by hand**: Verifying a hypothesis about a neuron means hand-writing complex inference code and activation-extraction scripts; environment setup is slow and error-prone.\n- **Inefficient collaboration**: The candidate 'malicious neurons' that team members find are scattered across private notes and logs, with no shared platform for annotating, scoring, and cross-validating results.\n- **Slow explanation generation**: Producing a human-readable functional description (AutoInterp) for each candidate neuron means separate API calls and data wrangling, tedious and hard to scale.\n\n### With neuronpedia\n- **Visual transparency**: Using the built-in activation heatmaps and UMAP projections, the team quickly pins down several sparse autoencoder (SAE) features that respond strongly to discriminatory vocabulary.\n- **One-click intervention tests**: With the platform's steering feature, they suppress these neurons directly in the browser and watch in real time whether the model's output becomes neutral, without rewriting any inference code.\n- **Collaborative annotation**: Team members add the suspicious features to a shared dashboard, using scoring and comments to align quickly and confirm the key 'discrimination circuit'.\n- **Automated explanations**: The integrated AutoInterp service batch-generates natural-language explanations for these neurons (e.g. 'this feature specifically encodes negative stereotypes about a particular group'), greatly speeding up the analysis write-up.\n\nneuronpedia turns what used to be weeks of black-box reverse engineering into a visual diagnose-and-repair workflow that finishes in days, making LLM internals genuinely transparent and controllable.","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fhijohnnylin_neuronpedia_1fc33a79.png","hijohnnylin","johnny","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002Fhijohnnylin_7e24bdd4.jpg",null,"neuronpedia.org","johnnylin.co","https:\u002F\u002Fgithub.com\u002Fhijohnnylin",[83,87,91,95,99,103,107,110,114,118],{"name":84,"color":85,"percentage":86},"TypeScript","#3178c6",68.5,{"name":88,"color":89,"percentage":90},"Python","#3572A5",27.8,{"name":92,"color":93,"percentage":94},"MDX","#fcb32c",1.5,{"name":96,"color":97,"percentage":98},"Jupyter Notebook","#DA5B0B",0.8,{"name":100,"color":101,"percentage":102},"CSS","#663399",0.4,{"name":104,"color":105,"percentage":106},"Makefile","#427819",0.3,{"name":108,"color":109,"percentage":106},"Dockerfile","#384d54",{"name":111,"color":112,"percentage":113},"JavaScript","#f1e05a",0.2,{"name":115,"color":116,"percentage":117},"Shell","#89e051",0.1,{"name":119,"color":120,"percentage":121},"HTML","#e34c26",0,781,96,"2026-04-04T21:00:59","MIT",4,"Not specified","Optional. CUDA (NVIDIA GPU) is supported to accelerate inference; CPU mode works without a GPU. Exact VRAM and CUDA version are not stated in the snippets, but the models are noted to be compute-intensive with high memory use.","Not specified (but running the models is noted to demand substantial memory and compute)",{"notes":131,"python":132,"dependencies":133},"The project is a monorepo containing several standalone services, including the web frontend, API, inference service, and graph server. Local development is managed with Make commands. The inference service can load different models (e.g. GPT-2, Llama, Gemma) and sparse autoencoders (SAEs) via environment variables. The first run downloads model files; configuring a local HuggingFace cache is recommended to speed up loading. The local environment supports only one inference instance at a time by default.","Not specified (the Poetry package manager is required)",[134,135,136,137,138],"poetry","nodejs","docker","huggingface_hub","transformerlens",[14,15,13],[141,142],"ai","interpretability","2026-03-27T02:49:30.150509","2026-04-06T07:13:19.211231",[146,151,156,161,166,171],{"id":147,"question_zh":148,"answer_zh":149,"source_url":150},16683,"How do I get the logprobs of steered generated tokens in the API response?","This feature is deployed to the production server. When calling the `\u002Fapi\u002Fsteer` endpoint, the response includes a `steeredLogProbs` field (or, per the latest design, `logprobs` and `top_logprobs`). Each token comes back with its logprob value and a list of candidate tokens. The request should include parameters such as prompt, modelId, and features; set `stream: false` to see the detailed probability data in the response.","https:\u002F\u002Fgithub.com\u002Fhijohnnylin\u002Fneuronpedia\u002Fissues\u002F141",{"id":152,"question_zh":153,"answer_zh":154,"source_url":155},16684,"How do I maximize or expand the Attribution Graph and subgraph panels to see more detail?","The project has been updated to add an 'expand' button to each panel. Specifically: the Link Graph (top left), Node Connections (top right), and Subgraph (bottom left) each have their own expand button. The feature-detail panel at the bottom right needs no extra button; just click a feature layer and index to expand its details.","https:\u002F\u002Fgithub.com\u002Fhijohnnylin\u002Fneuronpedia\u002Fissues\u002F176",{"id":157,"question_zh":158,"answer_zh":159,"source_url":160},16685,"Is there an API 
端点？","是的，可以通过新增的端点实现。请求时需指定 `modelId`（字符串）、`text`（字符串）和 `includeBos`（布尔值）。如果提供的 `modelId` 与当前加载的模型不匹配，系统将报错。这要求用户在请求中明确指定模型，以防止在本地运行推理服务器时误用模型配置。","https:\u002F\u002Fgithub.com\u002Fhijohnnylin\u002Fneuronpedia\u002Fissues\u002F40",{"id":162,"question_zh":163,"answer_zh":164,"source_url":165},16686,"为什么我在本地复现的推理搜索结果（如特征激活总和）与 Neuronpedia 网站上的不一致？","这通常是由于注意力掩码（attention mask）的处理差异导致的。在本地使用 SAELens 或其他工具复现时，请确保正确设置了 `get_attention_mask`（可参考 transformerlens 库），并检查是否忽略了 BOS（开始符）token。网站结果可能默认忽略 BOS 或在掩码处理上有特定逻辑，需保持本地代码与网站逻辑一致才能复现相同的激活值总和。","https:\u002F\u002Fgithub.com\u002Fhijohnnylin\u002Fneuronpedia\u002Fissues\u002F32",{"id":167,"question_zh":168,"answer_zh":169,"source_url":170},16687,"如何设置 CI\u002FCD 以实现基于语义化版本控制的自动发布？","可以通过配置 CI\u002FCD 流程自动切割发布版本。维护者曾尝试通过 PR #34 提供一种不需要密钥的修复方案。但在实施过程中需注意避免配置错误导致的无限循环问题（infinite loop），确保版本递增逻辑正确触发且仅执行一次。","https:\u002F\u002Fgithub.com\u002Fhijohnnylin\u002Fneuronpedia\u002Fissues\u002F11",{"id":172,"question_zh":173,"answer_zh":174,"source_url":160},16688,"在发送推理请求时，如果传入了无效的 modelId，系统会如何处理？","系统应当进行验证并报错，但在某些旧版本中存在 Bug，即传入无效的 `model` 参数（如 \"bogus-model\"）时，请求仍会成功返回结果而未报错。维护者已确认这是一个需要修复的问题，建议在使用时确保传入有效的模型 ID，并关注后续版本中对此类非法输入的校验增强。",[176,181,186,191,196,201,206,211,216,221,226,231,236,241,246,251,256,261,266,271],{"id":177,"version":178,"summary_zh":179,"released_at":180},98954,"v1.0.688","## 本次发布中的提交\n* 00571bf - 标签更新（Johnny Lin）","2026-04-04T21:01:17",{"id":182,"version":183,"summary_zh":184,"released_at":185},98955,"v1.0.687","## 本版本中的提交\n* 8b7c850 - 修复高度（Johnny Lin）","2026-04-04T20:53:13",{"id":187,"version":188,"summary_zh":189,"released_at":190},98956,"v1.0.686","## 本版本中的提交\n* dc75a40 - 扩展行为更新（Johnny Lin）","2026-04-04T20:43:58",{"id":192,"version":193,"summary_zh":194,"released_at":195},98957,"v1.0.685","## 本次发布中的提交\n* b82b941 - 更新布局（Johnny Lin）","2026-04-04T20:35:12",{"id":197,"version":198,"summary_zh":199,"released_at":200},98958,"v1.0.684","## 本版本中的提交\n* 97c29ee - 格式化（Johnny Lin）","2026-04-04T05:04:55",{"id":202,"version":203,"summary_zh":204,"released_at":205},98959,"v1.0.683","## 本版本中的提交\n* aebbb82 - 布局（Johnny Lin）","2026-04-04T04:53:19",{"id":207,"version":208,"summary_zh":209,"released_at":210},98960,"v1.0.682","## 本版本中的提交\n* a75f6e1 - 图表更新（Johnny Lin）","2026-04-04T04:22:42",{"id":212,"version":213,"summary_zh":214,"released_at":215},98961,"v1.0.681","## 本版本中的提交\n* 2a614bc - 更新界面图表工具栏（Johnny Lin）","2026-04-04T02:30:28",{"id":217,"version":218,"summary_zh":219,"released_at":220},98962,"v1.0.680","## 本次发布中的提交\n* b521e8b - 图表参数调整（Johnny Lin）","2026-04-04T01:54:37",{"id":222,"version":223,"summary_zh":224,"released_at":225},98963,"v1.0.679","## 本版本中的提交\n* ea2dd91 - 合并 https:\u002F\u002Fgithub.com\u002Fhijohnnylin\u002Fneuronpedia 的 main 分支 (Johnny Lin)\n* 36b9b4d - 调整图表设置 (Johnny Lin)\n* 0992e04 - 合并 https:\u002F\u002Fgithub.com\u002Fhijohnnylin\u002Fneuronpedia 的 main 分支 (Johnny Lin)\n* a1deeab - 使用 main lmsaes 分支，自动添加版本信息 (Johnny Lin)","2026-04-04T01:03:56",{"id":227,"version":228,"summary_zh":229,"released_at":230},98964,"v1.0.678","## Commits in this release\n* 0992e04 - Merge branch 'main' of https:\u002F\u002Fgithub.com\u002Fhijohnnylin\u002Fneuronpedia (Johnny Lin)\n* a1deeab - use main lmsaes branch, auto version info (Johnny Lin)\n* 77763bf - remove zoom on double click (Johnny Lin)\n* 670e4ee - graph updates (Johnny Lin)\n* 2b4aeb1 - package updates (Johnny Lin)\n* c4321c1 - improved gpt-oss streaming handling (Johnny Lin)\n* ca4dca0 - fix temp = 0 nnsight edge case (Johnny Lin)\n* e59d6d4 - fix build (Johnny 
Lin)","2026-04-04T00:19:10",{"id":232,"version":233,"summary_zh":234,"released_at":235},98965,"v1.0.677","## Commits in this release\n* 77763bf - remove zoom on double click (Johnny Lin)","2026-04-03T23:42:58",{"id":237,"version":238,"summary_zh":239,"released_at":240},98966,"v1.0.676","## Commits in this release\n* 670e4ee - graph updates (Johnny Lin)","2026-04-03T22:07:36",{"id":242,"version":243,"summary_zh":244,"released_at":245},98967,"v1.0.675","## Commits in this release\n* 2b4aeb1 - package updates (Johnny Lin)","2026-04-03T03:07:00",{"id":247,"version":248,"summary_zh":249,"released_at":250},98968,"v1.0.674","## Commits in this release\n* c4321c1 - improved gpt-oss streaming handling (Johnny Lin)","2026-04-03T02:46:28",{"id":252,"version":253,"summary_zh":254,"released_at":255},98969,"v1.0.673","## Commits in this release\n* ca4dca0 - fix temp = 0 nnsight edge case (Johnny Lin)","2026-04-03T02:41:40",{"id":257,"version":258,"summary_zh":259,"released_at":260},98970,"v1.0.672","## Commits in this release\n* e59d6d4 - fix build (Johnny Lin)","2026-04-03T01:57:39",{"id":262,"version":263,"summary_zh":264,"released_at":265},98971,"v1.0.671","## Commits in this release\n* b8efe84 - initial crm support (Johnny Lin)","2026-04-03T01:48:26",{"id":267,"version":268,"summary_zh":269,"released_at":270},98972,"v1.0.670","## Commits in this release\n* 8b079f5 - fix view permission for own items (Johnny Lin)","2026-04-01T07:47:15",{"id":272,"version":273,"summary_zh":274,"released_at":275},98973,"v1.0.669","## Commits in this release\n* 74ce854 - Update copy on sign in (Johnny Lin)","2026-04-01T07:18:05"]