[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-datawranglerai--self-host-n8n-on-gcr":3,"tool-datawranglerai--self-host-n8n-on-gcr":61},[4,18,26,36,44,53],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":17},4358,"openclaw","openclaw\u002Fopenclaw","OpenClaw 是一款专为个人打造的本地化 AI 助手，旨在让你在自己的设备上拥有完全可控的智能伙伴。它打破了传统 AI 助手局限于特定网页或应用的束缚，能够直接接入你日常使用的各类通讯渠道，包括微信、WhatsApp、Telegram、Discord、iMessage 等数十种平台。无论你在哪个聊天软件中发送消息，OpenClaw 都能即时响应，甚至支持在 macOS、iOS 和 Android 设备上进行语音交互，并提供实时的画布渲染功能供你操控。\n\n这款工具主要解决了用户对数据隐私、响应速度以及“始终在线”体验的需求。通过将 AI 部署在本地，用户无需依赖云端服务即可享受快速、私密的智能辅助，真正实现了“你的数据，你做主”。其独特的技术亮点在于强大的网关架构，将控制平面与核心助手分离，确保跨平台通信的流畅性与扩展性。\n\nOpenClaw 非常适合希望构建个性化工作流的技术爱好者、开发者，以及注重隐私保护且不愿被单一生态绑定的普通用户。只要具备基础的终端操作能力（支持 macOS、Linux 及 Windows WSL2），即可通过简单的命令行引导完成部署。如果你渴望拥有一个懂你",349277,3,"2026-04-06T06:32:30",[13,14,15,16],"Agent","开发框架","图像","数据工具","ready",{"id":19,"name":20,"github_repo":21,"description_zh":22,"stars":23,"difficulty_score":10,"last_commit_at":24,"category_tags":25,"status":17},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,"2026-04-05T11:01:52",[14,15,13],{"id":27,"name":28,"github_repo":29,"description_zh":30,"stars":31,"difficulty_score":32,"last_commit_at":33,"category_tags":34,"status":17},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 
代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",147882,2,"2026-04-09T11:32:47",[14,13,35],"语言模型",{"id":37,"name":38,"github_repo":39,"description_zh":40,"stars":41,"difficulty_score":32,"last_commit_at":42,"category_tags":43,"status":17},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",108111,"2026-04-08T11:23:26",[14,15,13],{"id":45,"name":46,"github_repo":47,"description_zh":48,"stars":49,"difficulty_score":32,"last_commit_at":50,"category_tags":51,"status":17},4721,"markitdown","microsoft\u002Fmarkitdown","MarkItDown 是一款由微软 AutoGen 团队打造的轻量级 Python 工具，专为将各类文件高效转换为 Markdown 格式而设计。它支持 PDF、Word、Excel、PPT、图片（含 OCR）、音频（含语音转录）、HTML 乃至 YouTube 链接等多种格式的解析，能够精准提取文档中的标题、列表、表格和链接等关键结构信息。\n\n在人工智能应用日益普及的今天，大语言模型（LLM）虽擅长处理文本，却难以直接读取复杂的二进制办公文档。MarkItDown 恰好解决了这一痛点，它将非结构化或半结构化的文件转化为模型“原生理解”且 Token 效率极高的 Markdown 格式，成为连接本地文件与 AI 分析 pipeline 的理想桥梁。此外，它还提供了 MCP（模型上下文协议）服务器，可无缝集成到 Claude Desktop 等 LLM 应用中。\n\n这款工具特别适合开发者、数据科学家及 AI 研究人员使用，尤其是那些需要构建文档检索增强生成（RAG）系统、进行批量文本分析或希望让 AI 
助手直接“阅读”本地文件的用户。虽然生成的内容也具备一定可读性，但其核心优势在于为机器",93400,"2026-04-06T19:52:38",[52,14],"插件",{"id":54,"name":55,"github_repo":56,"description_zh":57,"stars":58,"difficulty_score":10,"last_commit_at":59,"category_tags":60,"status":17},4487,"LLMs-from-scratch","rasbt\u002FLLMs-from-scratch","LLMs-from-scratch 是一个基于 PyTorch 的开源教育项目，旨在引导用户从零开始一步步构建一个类似 ChatGPT 的大型语言模型（LLM）。它不仅是同名技术著作的官方代码库，更提供了一套完整的实践方案，涵盖模型开发、预训练及微调的全过程。\n\n该项目主要解决了大模型领域“黑盒化”的学习痛点。许多开发者虽能调用现成模型，却难以深入理解其内部架构与训练机制。通过亲手编写每一行核心代码，用户能够透彻掌握 Transformer 架构、注意力机制等关键原理，从而真正理解大模型是如何“思考”的。此外，项目还包含了加载大型预训练权重进行微调的代码，帮助用户将理论知识延伸至实际应用。\n\nLLMs-from-scratch 特别适合希望深入底层原理的 AI 开发者、研究人员以及计算机专业的学生。对于不满足于仅使用 API，而是渴望探究模型构建细节的技术人员而言，这是极佳的学习资源。其独特的技术亮点在于“循序渐进”的教学设计：将复杂的系统工程拆解为清晰的步骤，配合详细的图表与示例，让构建一个虽小但功能完备的大模型变得触手可及。无论你是想夯实理论基础，还是为未来研发更大规模的模型做准备",90106,"2026-04-06T11:19:32",[35,15,13,14],{"id":62,"github_repo":63,"name":64,"description_en":65,"description_zh":66,"ai_summary_zh":67,"readme_en":68,"readme_zh":69,"quickstart_zh":70,"use_case_zh":71,"hero_image_url":72,"owner_login":73,"owner_name":74,"owner_avatar_url":75,"owner_bio":76,"owner_company":76,"owner_location":77,"owner_email":76,"owner_twitter":76,"owner_website":76,"owner_url":78,"languages":79,"stars":92,"forks":93,"last_commit_at":94,"license":95,"difficulty_score":96,"env_os":97,"env_gpu":98,"env_ram":99,"env_deps":100,"category_tags":108,"github_topics":109,"view_count":32,"oss_zip_url":76,"oss_zip_packed_at":76,"status":17,"created_at":124,"updated_at":125,"faqs":126,"releases":155},5889,"datawranglerai\u002Fself-host-n8n-on-gcr","self-host-n8n-on-gcr","Self-host n8n on Google Cloud without the subscription fees or server headaches - because your automation workflows shouldn't cost more than your coffee budget","self-host-n8n-on-gcr 是一份详尽的实战指南，旨在帮助用户将强大的工作流自动化平台 n8n 部署在 Google Cloud Run 上。它核心解决了两大痛点：一是让用户彻底摆脱 n8n 官方云服务的月度订阅费用，实现按需付费，闲置时几乎零成本；二是避免了传统自建服务器所需的复杂运维工作，如系统更新、安全补丁和容量规划，真正实现了“无服务器”托管。\n\n这套方案非常适合希望掌控数据主权、拥有定制化自动化需求的技术爱好者、开发者以及中小团队。通过结合 Google Cloud Run 
的弹性伸缩能力与 Cloud SQL PostgreSQL 的数据持久化存储，用户既能享受云端部署的便捷，又能确保工作流数据的安全与完整。此外，指南还深度集成了 Google OAuth 认证，方便无缝连接谷歌生态服务。\n\n其独特的技术亮点在于提供了两种部署路径：既包含适合学习原理的手动分步教程，也支持利用 Terraform 进行一键式自动化部署，极大降低了上手门槛。无论你是否熟悉命令行操作，都能借此搭建起一个高可用、低成本且完全由自己掌控的自动化中枢，让复杂的业务流程不再受限于预算或执行","self-host-n8n-on-gcr 是一份详尽的实战指南，旨在帮助用户将强大的工作流自动化平台 n8n 部署在 Google Cloud Run 上。它核心解决了两大痛点：一是让用户彻底摆脱 n8n 官方云服务的月度订阅费用，实现按需付费，闲置时几乎零成本；二是避免了传统自建服务器所需的复杂运维工作，如系统更新、安全补丁和容量规划，真正实现了“无服务器”托管。\n\n这套方案非常适合希望掌控数据主权、拥有定制化自动化需求的技术爱好者、开发者以及中小团队。通过结合 Google Cloud Run 的弹性伸缩能力与 Cloud SQL PostgreSQL 的数据持久化存储，用户既能享受云端部署的便捷，又能确保工作流数据的安全与完整。此外，指南还深度集成了 Google OAuth 认证，方便无缝连接谷歌生态服务。\n\n其独特的技术亮点在于提供了两种部署路径：既包含适合学习原理的手动分步教程，也支持利用 Terraform 进行一键式自动化部署，极大降低了上手门槛。无论你是否熟悉命令行操作，都能借此搭建起一个高可用、低成本且完全由自己掌控的自动化中枢，让复杂的业务流程不再受限于预算或执行次数。","# Self-Hosting n8n on Google Cloud Run: Complete Guide #\n\nSo you want to run n8n without the monthly subscription fees, keep your data under your own control, and avoid the headache of server maintenance? Google Cloud Run offers exactly that sweet spot - serverless deployment with per-use pricing. Let's build this thing properly.\n\nThis guide walks you through deploying n8n (that powerful workflow automation platform) on Google Cloud Run with PostgreSQL persistence. You'll end up with a fully functional system that scales automatically, connects to Google services via OAuth, and won't drain your wallet when idle.\n\n> **🚀 Quick Start Option**: Want to skip the manual setup? Jump to the [Terraform Deployment Option](#terraform-deployment-option) section for a streamlined, automated deployment. 
The step-by-step guide below is valuable for understanding what's happening under the hood, but Terraform will handle all the heavy lifting for you!\n\n## Table of Contents ##\n- [Quick Start with Terraform](#terraform-deployment-option)\n- [Manual Step-by-Step Guide](#step-1-set-up-your-google-cloud-project)\n- [Configuration](#step-8-configure-n8n-for-oauth-with-google-services)\n- [Queue Mode Deployment](#queue-mode-deployment-scaling-n8n-for-production)\n- [Updates & Maintenance](#keeping-n8n-updated-dont-be-that-person-running-year-old-software)\n- [Cost Estimates](#cost-estimates-yes-it-really-can-be-that-cheap)\n- [Troubleshooting](#troubleshooting)\n\n## Overview ##\n\nn8n is brilliant for automating all those tedious tasks you'd rather not do manually. This setup uses:\n\n* Google Cloud Run for hosting the application (pay only when it runs)\n\n* Cloud SQL PostgreSQL for database persistence (because your workflows should survive restarts)\n\n* Google Auth Platform for connecting to Google services (sheets, drive, etc.)\n\nWhy self-host? Complete control over your automation workflows and data. No arbitrary execution limits. No wondering where your sensitive data is being stored. And with Cloud Run, you get the best of both worlds - the control of self-hosting with the convenience of not having to manage actual servers.\n\n## Prerequisites ##\n\nBefore diving in, make sure you've got:\n\n* A Google Cloud account (they offer a generous free tier for new accounts)\n\n* gcloud CLI installed and configured (trust me, it's worth not clicking through web consoles)\n\n* Basic familiarity with Docker and command line\n\n* **Docker** (only needed if using custom image - Option B)\n\n* A domain name (optional, but recommended for production use)\n\nThe command line approach might seem intimidating at first, but it means we can script the entire deployment process. 
And when you need to update or recreate your instance, you'll thank yourself for having everything in a reusable format.\n\n## Community Video Walkthrough ##\n\nMassive thanks to Terra Femme for creating this brilliant step-by-step video walkthrough of the entire deployment process! If you're more of a visual learner or want to see someone actually troubleshoot the common gotchas in real-time, this is gold.\n\n▶️ [Watch the deployment video](https:\u002F\u002Fyoutu.be\u002FbLDv07BR9Hw \"Host n8n on Google Cloud RUN Free in Under 20 - @TerraFemme-Tech\") by [Terra Femme (@terra.femme)](https:\u002F\u002Fgithub.com\u002Fterra-femme \"terra-femme (Terra.Femme)\").\n\nThe video covers the full Terraform deployment using Google Cloud Shell Editor, including the port configuration fix that trips up most people. It's basically a live demo of everything in this guide, which is pretty awesome.\n\n## Step 1: Set Up Your Google Cloud Project ##\n\nFirst, let's get our Google Cloud environment sorted:\n\n```bash\n# Set your Google Cloud project ID\nexport PROJECT_ID=\"your-project-id\"\nexport REGION=\"europe-west2\"  # Choose your preferred region\n\n# Log in to gcloud\ngcloud auth login\n\n# Set your active project\ngcloud config set project $PROJECT_ID\n\n# Enable required APIs\ngcloud services enable artifactregistry.googleapis.com\ngcloud services enable run.googleapis.com\ngcloud services enable sqladmin.googleapis.com\ngcloud services enable secretmanager.googleapis.com\n```\n\nThese commands establish your project environment and enable the necessary Google Cloud APIs. We're turning on all the services we'll need upfront to avoid those annoying \"please enable this API first\" errors later.\n\n## Step 2: Prepare n8n for Cloud Run Deployment ##\n\nn8n needs a small startup delay when connecting to external databases to avoid a race condition during initialisation. 
There are two ways to handle this:\n\n### Option A: Using the Official Image (Recommended)\n\nThis is the simplest approach - use [n8n's official Docker image](https:\u002F\u002Fhub.docker.com\u002Fr\u002Fn8nio\u002Fn8n) with a command override to add a 5-second startup delay. This pattern comes from [n8n's own Kubernetes deployments](https:\u002F\u002Fgithub.com\u002Fn8n-io\u002Fn8n-hosting\u002Fblob\u002Fmain\u002Fkubernetes\u002Fn8n-deployment.yaml#L32) and works perfectly on Cloud Run.\n\n**No additional files needed!** You'll use command overrides when deploying (covered in Step 7).\n\n### Option B: Custom Docker Image (Advanced)\n\nIf you need custom startup logic or want detailed debugging output, use a custom Docker image. This approach gives you more control but requires building and maintaining your own image.\n\nCreate these two files in your working directory:\n\n**startup.sh:**\n\n```bash\n#!\u002Fbin\u002Fsh\n\n# Add startup delay for database initialization\nsleep 5\n\n# Map Cloud Run's PORT to N8N_PORT if it exists\n# Otherwise fall back to explicitly set N8N_PORT or default to 5678\nif [ -n \"$PORT\" ]; then\n  export N8N_PORT=$PORT\nelif [ -z \"$N8N_PORT\" ]; then\n  export N8N_PORT=5678\nfi\n\n# Print environment variables for debugging\necho \"Database settings:\"\necho \"DB_TYPE: $DB_TYPE\"\necho \"DB_POSTGRESDB_HOST: $DB_POSTGRESDB_HOST\"\necho \"DB_POSTGRESDB_PORT: $DB_POSTGRESDB_PORT\"\necho \"N8N_PORT: $N8N_PORT\"\n\n# Start n8n with its original entrypoint\nexec \u002Fdocker-entrypoint.sh\n```\n\nThe port mapping script gives you flexibility - you can let Cloud Run assign the port dynamically OR set it explicitly. This is useful because:\n\n1. Cloud Run auto-assigns ports - if someone deploys without setting --port=5678, Cloud Run will inject a PORT variable\n\n2. Future-proofing - if Cloud Run changes port handling, the script adapts\n\n3. 
Works in multiple environments - the same image works on Cloud Run, Cloud Run Jobs, or other container platforms\n\nOption A doesn't need this because it explicitly sets everything via command-line flags. \n\n**Dockerfile:**\n\n```Dockerfile\nFROM docker.n8n.io\u002Fn8nio\u002Fn8n:latest\n\n# Copy the script and ensure it has proper permissions\nCOPY startup.sh \u002F\nUSER root\nRUN chmod +x \u002Fstartup.sh\nUSER node\nEXPOSE 5678\n\n# Use shell form to help avoid exec format issues\nENTRYPOINT [\"\u002Fbin\u002Fsh\", \"\u002Fstartup.sh\"]\n```\n\nThis custom setup solves the port mismatch problem and helps with debugging. Without it, you'd just see a failed container with no helpful error messages. And yes, that's exactly as frustrating as it sounds.\n\n**If you run into problems with Option B:**\n\n* Check that your `startup.sh` file has Unix-style line endings (LF, not CRLF)\n\n* Verify the file has proper execute permissions\n\n### Which option should you choose?\n\n* **Go with Option A** if you just want n8n working reliably with minimal fuss\n\n* **Use Option B** if you need debugging output or custom startup scripts\n\nThe rest of this guide will show commands for both approaches where they differ.\n\n## Step 3: Set Up a Container Repository (Optional - Custom Image Only) ##\n\n**If you're using Option A (official image)**, skip this step entirely and go straight to Step 4.\n\n**If you're using Option B (custom image)**, you'll need a place to store your custom container image:\n\n```bash\n# Create a repository in Artifact Registry\ngcloud artifacts repositories create n8n-repo \\\n    --repository-format=docker \\\n    --location=$REGION \\\n    --description=\"Repository for n8n workflow images\"\n\n# Configure Docker to use gcloud as a credential helper\ngcloud auth configure-docker $REGION-docker.pkg.dev\n\n# Build and push your image\ndocker build --platform linux\u002Famd64 -t $REGION-docker.pkg.dev\u002F$PROJECT_ID\u002Fn8n-repo\u002Fn8n:latest 
.\ndocker push $REGION-docker.pkg.dev\u002F$PROJECT_ID\u002Fn8n-repo\u002Fn8n:latest\n```\n\nWe're explicitly building for linux\u002Famd64 because Cloud Run doesn't support ARM architecture. This is particularly important if you're developing on an M1\u002FM2 Mac - Docker will happily build an ARM image by default, which then fails mysteriously when deployed. Ask me how I know.\n\n## Step 4: Set Up Cloud SQL PostgreSQL Instance ##\n\nNow for the database. We'll use the smallest instance type to keep costs reasonable:\n\n```bash\n# Create a Cloud SQL instance (lowest cost tier)\ngcloud sql instances create n8n-db \\\n    --database-version=POSTGRES_13 \\\n    --tier=db-f1-micro \\\n    --region=$REGION \\\n    --root-password=\"supersecure-rootpassword\" \\\n    --storage-size=10GB \\\n    --availability-type=ZONAL \\\n    --no-backup \\\n    --storage-type=HDD\n\n# Create a database\ngcloud sql databases create n8n --instance=n8n-db\n\n# Create a user for n8n\ngcloud sql users create n8n-user \\\n    --instance=n8n-db \\\n    --password=\"supersecure-userpassword\"\n```\n\nThe db-f1-micro tier is perfect for most personal n8n deployments. I've run hundreds of workflows on this setup without issue. And you can always upgrade later if needed.\n\n## Step 5: Create Secrets for Sensitive Data ##\n\nNever put passwords in your deployment configuration. Let's use Secret Manager instead:\n\n```bash\n# Create a secret for the database password\necho -n \"supersecure-userpassword\" | \\\n    gcloud secrets create n8n-db-password \\\n    --data-file=- \\\n    --replication-policy=\"automatic\"\n\n# Create a secret for n8n encryption key\necho -n \"your-random-encryption-key\" | \\\n    gcloud secrets create n8n-encryption-key \\\n    --data-file=- \\\n    --replication-policy=\"automatic\"\n```\n\nThat encryption key is particularly important - it protects all the credentials stored in your n8n instance. Make it long, random, and keep it safe. 
If you lose it, you'll need to reconfigure all your connected services.\n\n## Step 6: Create a Service Account for Cloud Run ##\n\nTime to set up the identity your n8n instance will use:\n\n```bash\n# Create a service account\ngcloud iam service-accounts create n8n-service-account \\\n    --display-name=\"n8n Service Account\"\n\n# Grant access to secrets\ngcloud secrets add-iam-policy-binding n8n-db-password \\\n    --member=\"serviceAccount:n8n-service-account@$PROJECT_ID.iam.gserviceaccount.com\" \\\n    --role=\"roles\u002Fsecretmanager.secretAccessor\"\n\ngcloud secrets add-iam-policy-binding n8n-encryption-key \\\n    --member=\"serviceAccount:n8n-service-account@$PROJECT_ID.iam.gserviceaccount.com\" \\\n    --role=\"roles\u002Fsecretmanager.secretAccessor\"\n\n# Grant Cloud SQL Client role\ngcloud projects add-iam-policy-binding $PROJECT_ID \\\n    --member=\"serviceAccount:n8n-service-account@$PROJECT_ID.iam.gserviceaccount.com\" \\\n    --role=\"roles\u002Fcloudsql.client\"\n```\n\nFollowing the principle of least privilege here means your n8n service can access exactly what it needs and nothing more. It's a small thing that makes a big difference to your security posture.\n\n## Step 7: Deploy to Cloud Run ##\n\nThe moment of truth - let's deploy n8n. 
The command differs slightly depending on which approach you chose in Step 2.\n\nFirst, get your Cloud SQL connection name:\n\n```bash\nexport SQL_CONNECTION=$(gcloud sql instances describe n8n-db --format=\"value(connectionName)\")\n```\n\n### Option A: Deploy Using Official Image (Recommended)\n\n```bash\ngcloud run deploy n8n \\\n    --image=docker.io\u002Fn8nio\u002Fn8n:latest \\\n    --command=\"\u002Fbin\u002Fsh\" \\\n    --args=\"-c,sleep 5; n8n start\" \\\n    --platform=managed \\\n    --region=$REGION \\\n    --allow-unauthenticated \\\n    --port=5678 \\\n    --cpu=1 \\\n    --memory=2Gi \\\n    --min-instances=0 \\\n    --max-instances=1 \\\n    --no-cpu-throttling \\\n    --set-env-vars=\"N8N_PORT=5678,N8N_PROTOCOL=https,DB_TYPE=postgresdb,DB_POSTGRESDB_DATABASE=n8n,DB_POSTGRESDB_USER=n8n-user,DB_POSTGRESDB_HOST=\u002Fcloudsql\u002F$SQL_CONNECTION,DB_POSTGRESDB_PORT=5432,DB_POSTGRESDB_SCHEMA=public,N8N_USER_FOLDER=\u002Fhome\u002Fnode\u002F.n8n,GENERIC_TIMEZONE=UTC,QUEUE_HEALTH_CHECK_ACTIVE=true\" \\\n    --set-secrets=\"DB_POSTGRESDB_PASSWORD=n8n-db-password:latest,N8N_ENCRYPTION_KEY=n8n-encryption-key:latest\" \\\n    --add-cloudsql-instances=$SQL_CONNECTION \\\n    --service-account=n8n-service-account@$PROJECT_ID.iam.gserviceaccount.com\n```\n\n### Option B: Deploy Using Custom Image ###\n\n```bash\n# Deploy to Cloud Run\ngcloud run deploy n8n \\\n    --image=$REGION-docker.pkg.dev\u002F$PROJECT_ID\u002Fn8n-repo\u002Fn8n:latest \\\n    --platform=managed \\\n    --region=$REGION \\\n    --allow-unauthenticated \\\n    --port=5678 \\\n    --cpu=1 \\\n    --memory=2Gi \\\n    --min-instances=0 \\\n    --max-instances=1 \\\n    --no-cpu-throttling \\\n    
--set-env-vars=\"N8N_PATH=\u002F,N8N_PORT=443,N8N_PROTOCOL=https,DB_TYPE=postgresdb,DB_POSTGRESDB_DATABASE=n8n,DB_POSTGRESDB_USER=n8n-user,DB_POSTGRESDB_HOST=\u002Fcloudsql\u002F$SQL_CONNECTION,DB_POSTGRESDB_PORT=5432,DB_POSTGRESDB_SCHEMA=public,N8N_USER_FOLDER=\u002Fhome\u002Fnode,EXECUTIONS_PROCESS=main,EXECUTIONS_MODE=regular,GENERIC_TIMEZONE=UTC,QUEUE_HEALTH_CHECK_ACTIVE=true\" \\\n    --set-secrets=\"DB_POSTGRESDB_PASSWORD=n8n-db-password:latest,N8N_ENCRYPTION_KEY=n8n-encryption-key:latest\" \\\n    --add-cloudsql-instances=$SQL_CONNECTION \\\n    --service-account=n8n-service-account@$PROJECT_ID.iam.gserviceaccount.com\n```\n\nAfter deployment, Cloud Run will provide a URL for your n8n instance. Note it down - you'll need it for the next steps.\n\n### Key Configuration Notes ###\n\n**Why `--no-cpu-throttling`?**\nn8n does background processing (database connections, scheduled checks) that happens outside HTTP requests. With CPU throttling enabled, these background tasks get starved and can cause startup issues. This flag ensures n8n gets continuous CPU access, which actually works out cheaper due to eliminated per-request fees and lower CPU\u002Fmemory rates. Thanks to the Google Cloud Run team for this insight.\n\n**Other important settings:**\n- `min-instances=0` and `max-instances=1` means your service scales to zero when idle (saving money) but won't run multiple instances simultaneously (which can cause database conflicts with n8n)\n- CPU and memory allocation is sufficient for most workflows without excessive costs\n- The `sleep 5` in Option A handles database initialization timing\n\n### Key Differences Between Options ###\n\n| Setting | Option A (Official) | Option B (Custom) | Why Different? 
|\n|---------|-------------------|-------------------|----------------|\n| Image | `docker.io\u002Fn8nio\u002Fn8n:latest` | Your custom image | Direct from n8n vs your registry |\n| Command | `--command=\"\u002Fbin\u002Fsh\" --args=\"-c,sleep 5; n8n start\"` | Uses custom entrypoint | Sleep added via command vs built into script |\n| N8N_PORT | `5678` | `443` | Direct port vs mapped through startup script |\n| N8N_PATH | Not needed | `\u002F` | Custom image can handle path prefixing |\n\n### n8n Google Cloud Run Environment Variables ###\n\nHere's what all those environment variables do:\n\n|    Environment Variable   |   Option A Value    |   Option B Value    |                                  Description                                 |\n|:-------------------------:|:-------------------:|:-------------------:|:----------------------------------------------------------------------------:|\n| N8N_PATH                  | Not needed          | \u002F                   | Base path where n8n will be accessible (custom image only)                   |\n| N8N_PORT                  | 5678                | 443                 | Port configuration (direct vs mapped)                                        |\n| N8N_PROTOCOL              | https               | https               | Protocol used for external access                                            |\n| DB_TYPE                   | postgresdb          | postgresdb          | Must be exactly \"postgresdb\" (not postgresql) for proper database connection |\n| N8N_USER_FOLDER           | \u002Fhome\u002Fnode\u002F.n8n     | \u002Fhome\u002Fnode          | Location for n8n data                                                        |\n| EXECUTIONS_PROCESS        | Not needed          | main (deprecated)   | Deprecated - remove in newer versions                                        |\n| EXECUTIONS_MODE           | Not needed          | regular (deprecated)| Deprecated - remove in newer versions                           
             |\n| QUEUE_HEALTH_CHECK_ACTIVE | true                | true                | Critical for Cloud Run to verify container health                            |\n\n\nPay special attention to DB_TYPE. It must be \"postgresdb\" not \"postgresql\" - a quirk that's caused many deployment headaches. And don't explicitly set the PORT variable as Cloud Run injects this automatically.\n\n## Step 8: Configure n8n for OAuth with Google Services ##\n\nNow we need to update the deployment with environment variables that tell n8n how to properly generate URLs for OAuth callbacks:\n\n```bash\n# Get your service URL (replace with your actual URL)\nexport SERVICE_URL=\"https:\u002F\u002Fn8n-YOUR_ID.REGION.run.app\"\n\n# Update the deployment with proper URL configuration\ngcloud run services update n8n \\\n    --region=$REGION \\\n    --update-env-vars=\"N8N_HOST=$(echo $SERVICE_URL | sed 's\u002Fhttps:\\\u002F\\\u002F\u002F\u002F'),N8N_WEBHOOK_URL=$SERVICE_URL,N8N_EDITOR_BASE_URL=$SERVICE_URL\"\n```\n\nWithout these variables, OAuth would fail with utterly unhelpful \"redirect_uri_mismatch\" errors that make you question your life choices. Setting them correctly means n8n can construct proper callback URLs during authentication flows.\n\nFor newer versions of n8n use `WEBHOOK_URL` instead of `N8N_WEBHOOK_URL`.\n\n## Step 9: Set Up Google OAuth Credentials ##\n\nFinally, to connect n8n with Google services like Sheets:\n\n1. Access the Google Cloud Console:\n    * Navigate to the Google Cloud Console\n    * Select your project\n2. Enable Required APIs:\n    * Go to \"APIs & Services\" > \"Library\"\n    * Search for and enable the APIs you need (e.g., \"Google Sheets API\", \"Google Drive API\")\n3. 
Configure OAuth Consent Screen:\n    * Go to \"APIs & Services\" > \"OAuth consent screen\"\n    * Select \"External\" user type (or \"Internal\" if using Google Workspace)\n    * Fill in the required information (App name, user support email, etc.)\n    * Add test users if using External type\n    * For scopes, for now add the following:\n        * `https:\u002F\u002Fwww.googleapis.com\u002Fauth\u002Fdrive.file`\n        * `https:\u002F\u002Fwww.googleapis.com\u002Fauth\u002Fspreadsheets`\n\n    > Note: The OAuth consent screen configuration determines how your application appears to users during authentication. Using 'External' type is necessary for personal projects, but requires adding test users during development. The scopes requested determine what level of access n8n will have to Google services - we request only the minimum necessary for working with Google Sheets.\n\n4. Create OAuth Client ID:\n    * Go to \"APIs & Services\" > \"Credentials\"\n    * Click \"CREATE CREDENTIALS\" and select \"OAuth client ID\"\n    * Select \"Web application\" as the application type\n    * Add your n8n URL to \"Authorized JavaScript origins\":\n\n        ```bash\n        https:\u002F\u002Fn8n-YOUR_ID.REGION.run.app\n        ```\n\n    * When creating credentials in n8n, it will show you the required redirect URL. Add this to \"Authorized redirect URIs\":\n\n        ```bash\n        https:\u002F\u002Fn8n-YOUR_ID.REGION.run.app\u002Frest\u002Foauth2-credential\u002Fcallback\n        ```\n\n    * Click \"CREATE\" to generate your client ID and client secret\n\n5. Add Credentials to n8n:\n    * In your n8n instance, create a new credential for Google Sheets\n    * Select \"OAuth2\" as the authentication type\n    * Copy your OAuth client ID and client secret from Google Cloud Console\n    * Complete the authentication flow\n\n---\n\n## Queue Mode Deployment: Scaling n8n for Production ##\n\nThe steps above give you a solid single-process n8n deployment. 
For most personal and small-team setups, that's all you'll ever need. But if you start running many concurrent heavy workflows, or if you want to keep the editor responsive while long-running workflows execute in the background, **Queue Mode** is the answer.\n\n### What is Queue Mode? ###\n\nIn regular mode, the n8n process handles everything: serving the UI, processing API calls, receiving webhooks, *and* executing every workflow. Queue Mode splits those responsibilities:\n\n```\nInternet\n    │\n    ▼\nCloud Run — n8n main       ← Serves the editor UI, REST API, and webhooks\n    │  enqueues jobs into\n    ▼\nCloud Memorystore (Redis)  ← Job queue \u002F message broker\n    │  workers poll\n    ▼\nCloud Run — n8n worker × N ← Executes workflow jobs\n    │  reads\u002Fwrites\n    ▼\nCloud SQL (PostgreSQL)     ← Shared database for both main and workers\n```\n\nThe main process no longer runs workflows directly. It puts them onto a Redis queue and immediately returns to handling new requests. Workers continuously poll that queue and run executions to completion. The result: a responsive editor even while CPU-intensive workflows are running, and independent horizontal scaling of execution capacity.\n\n### When Should You Use Queue Mode? 
###\n\n**Stick with regular mode if:**\n- You're running n8n for personal automation on a light-to-moderate workload\n- Cost is a priority (Queue Mode adds ~£50–£70\u002Fmonth for Redis + always-on workers)\n- Your workflows are mostly quick webhook-triggered tasks\n\n**Switch to Queue Mode if:**\n- You have many concurrent long-running or CPU-intensive workflows\n- The n8n editor becomes unresponsive during heavy workflow runs\n- You want to scale execution capacity independently of the UI\n- You're running n8n for a team and need reliable throughput\n\n### Additional Prerequisites ###\n\nQueue Mode requires:\n- The `redis.googleapis.com` and `compute.googleapis.com` APIs enabled\n- A VPC network in your project (the default auto-mode VPC is fine)\n- The VPC's **Private Service Access** peering range available (used by Cloud Memorystore)\n\nCloud Memorystore Redis instances are only reachable via a **private VPC IP address** — they have no public endpoint. Cloud Run connects to them through **Direct VPC Egress**, which routes private-range traffic (`10.x.x.x`, `172.16.x.x`, `192.168.x.x`) through the VPC while leaving public internet traffic on the normal path.\n\n### Step QM-1: Enable Additional APIs ###\n\n```bash\ngcloud services enable redis.googleapis.com\ngcloud services enable compute.googleapis.com\n```\n\n### Step QM-2: Create a Cloud Memorystore Redis Instance ###\n\n```bash\nexport REDIS_NAME=\"n8n-redis\"\n\n# Create a Redis instance — BASIC tier, 1 GB, Redis 7.2 with AUTH enabled\ngcloud redis instances create $REDIS_NAME \\\n    --region=$REGION \\\n    --tier=BASIC \\\n    --size=1 \\\n    --redis-version=redis_7_2 \\\n    --network=default \\\n    --enable-auth\n\n# Retrieve the private IP, port, and AUTH string\nexport REDIS_HOST=$(gcloud redis instances describe $REDIS_NAME \\\n    --region=$REGION --format=\"value(host)\")\nexport REDIS_PORT=$(gcloud redis instances describe $REDIS_NAME \\\n    --region=$REGION --format=\"value(port)\")\nexport 
REDIS_AUTH=$(gcloud redis instances get-auth-string $REDIS_NAME \\\n    --region=$REGION --format=\"value(authString)\")\n\necho \"Redis host: $REDIS_HOST\"\necho \"Redis port: $REDIS_PORT\"\n```\n\n> **Tier guidance:**\n> - `BASIC` — single node, no replication. Cheapest (~£35\u002Fmonth for 1 GB). Fine for personal use; if Redis restarts you'll lose any in-flight execution jobs (they'll need to be re-triggered).\n> - `STANDARD_HA` — primary + replica with automatic failover. Higher availability for production workloads (~£70\u002Fmonth for 1 GB).\n\n> **Networking note:** `--network=default` peers the Memorystore instance into the default VPC. Replace with your custom VPC name if you're not using the default network.\n\n### Step QM-3: Store the Redis AUTH String in Secret Manager ###\n\n```bash\n# Store the Redis AUTH string\necho -n \"$REDIS_AUTH\" | \\\n    gcloud secrets create n8n-redis-auth \\\n    --data-file=- \\\n    --replication-policy=\"automatic\"\n\n# Grant the n8n service account access to the secret\ngcloud secrets add-iam-policy-binding n8n-redis-auth \\\n    --member=\"serviceAccount:n8n-service-account@$PROJECT_ID.iam.gserviceaccount.com\" \\\n    --role=\"roles\u002Fsecretmanager.secretAccessor\"\n```\n\n### Step QM-4: Update the Main n8n Service for Queue Mode ###\n\nThe main service needs three additions: `EXECUTIONS_MODE=queue` to activate queue mode, Redis connection details to know where the queue is, and Direct VPC Egress so it can actually reach the Memorystore private IP.\n\n```bash\ngcloud run services update n8n \\\n    --region=$REGION \\\n    --vpc-egress=private-ranges-only \\\n    --network=default \\\n    --subnet=default \\\n    --update-env-vars=\"EXECUTIONS_MODE=queue,QUEUE_BULL_REDIS_HOST=$REDIS_HOST,QUEUE_BULL_REDIS_PORT=$REDIS_PORT\" \\\n    --update-secrets=\"QUEUE_BULL_REDIS_PASSWORD=n8n-redis-auth:latest\"\n```\n\n> **Subnet note:** For the default auto-mode VPC, `--subnet=default` selects the regional subnet 
automatically. If you use a custom VPC specify the exact subnet name for your region (e.g., `--subnet=my-subnet`).\n\n### Step QM-5: Deploy the n8n Worker Service ###\n\nWorkers run the `n8n worker` command instead of `n8n start`. They do not serve public internet traffic — they poll Redis and connect to Cloud SQL.\n\n```bash\ngcloud run deploy n8n-worker \\\n    --image=docker.io\u002Fn8nio\u002Fn8n:latest \\\n    --command=\"\u002Fbin\u002Fsh\" \\\n    --args=\"-c,sleep 5; n8n worker\" \\\n    --platform=managed \\\n    --region=$REGION \\\n    --no-allow-unauthenticated \\\n    --ingress=internal \\\n    --port=5678 \\\n    --cpu=1 \\\n    --memory=2Gi \\\n    --min-instances=1 \\\n    --max-instances=3 \\\n    --no-cpu-throttling \\\n    --vpc-egress=private-ranges-only \\\n    --network=default \\\n    --subnet=default \\\n    --set-env-vars=\"EXECUTIONS_MODE=queue,DB_TYPE=postgresdb,DB_POSTGRESDB_DATABASE=n8n,DB_POSTGRESDB_USER=n8n-user,DB_POSTGRESDB_HOST=\u002Fcloudsql\u002F$SQL_CONNECTION,DB_POSTGRESDB_PORT=5432,DB_POSTGRESDB_SCHEMA=public,N8N_USER_FOLDER=\u002Fhome\u002Fnode\u002F.n8n,GENERIC_TIMEZONE=UTC,QUEUE_HEALTH_CHECK_ACTIVE=true,N8N_RUNNERS_ENABLED=true,QUEUE_BULL_REDIS_HOST=$REDIS_HOST,QUEUE_BULL_REDIS_PORT=$REDIS_PORT\" \\\n    --set-secrets=\"DB_POSTGRESDB_PASSWORD=n8n-db-password:latest,N8N_ENCRYPTION_KEY=n8n-encryption-key:latest,QUEUE_BULL_REDIS_PASSWORD=n8n-redis-auth:latest\" \\\n    --add-cloudsql-instances=$SQL_CONNECTION \\\n    --service-account=n8n-service-account@$PROJECT_ID.iam.gserviceaccount.com\n```\n\n**Key worker settings explained:**\n\n| Setting | Value | Why |\n|---|---|---|\n| `--ingress=internal` | internal | Workers receive no public HTTP traffic — internal only |\n| `--no-allow-unauthenticated` | — | Workers should not be publicly invokeable |\n| `--min-instances=1` | 1 | At least one worker must always be running or queued jobs never start |\n| `--no-cpu-throttling` | — | Workers poll Redis continuously and need CPU 
between \"requests\" |\n| `EXECUTIONS_MODE=queue` | queue | Tells the worker process to pick up jobs from Redis |\n\n### Queue Mode Environment Variables Reference ###\n\n| Variable | Main Service | Worker | Description |\n|---|---|---|---|\n| `EXECUTIONS_MODE` | `queue` | `queue` | Activates queue mode. Workers won't run without this. |\n| `QUEUE_BULL_REDIS_HOST` | Redis private IP | Redis private IP | Host of the Memorystore instance |\n| `QUEUE_BULL_REDIS_PORT` | `6379` | `6379` | Redis port (Memorystore default) |\n| `QUEUE_BULL_REDIS_PASSWORD` | (from secret) | (from secret) | AUTH string stored in Secret Manager |\n| `QUEUE_HEALTH_CHECK_ACTIVE` | `true` | `true` | Exposes `\u002Fhealthz` — required by Cloud Run health checks |\n| `N8N_RUNNERS_ENABLED` | not set | `true` | Enables the task runner subsystem on workers. **Do not set this on the main service in queue mode** — the main process doesn't execute workflows, and setting it here causes n8n to crash at startup before the HTTP server can come up. |\n\n### Scaling Workers ###\n\nOne of the main benefits of Queue Mode is being able to add execution capacity without touching the main service. If workflows are backing up in the queue:\n\n```bash\n# Increase the maximum number of worker instances\ngcloud run services update n8n-worker \\\n    --region=$REGION \\\n    --max-instances=10\n```\n\nCloud Run scales workers up toward `max-instances` as load increases. For lower cold-start latency on the worker tier, consider bumping `min-instances` to 2.\n\n### Verifying Queue Mode is Working ###\n\nAfter deploying, open the n8n editor and navigate to **Settings → Workers** (n8n ≥ 1.x). You should see your worker instances listed there. 
If the list is empty or workers show as disconnected, check the [Queue Mode troubleshooting](#queue-mode-issues) section below.\n\n---\n\n## Keeping n8n Updated: Don't Be That Person Running Year-Old Software ##\n\nUpdating your n8n deployment is surprisingly straightforward, and it's something you should do regularly to get new features and security patches. Here's how to update when new versions are released:\n\n### For Option A (Official Image)\n\nThe simplest approach - just update the image tag:\n\n**Update to latest version**:\n\n```bash\ngcloud run services update n8n \\\n    --image=docker.io\u002Fn8nio\u002Fn8n:latest \\\n    --region=$REGION\n```\n\n**Or specify a version**:\n\n```bash\ngcloud run services update n8n \\\n    --image=docker.io\u002Fn8nio\u002Fn8n:1.115.2 \\\n    --region=$REGION\n```\n\nCloud Run will pull the new image and deploy it automatically. Takes about 1-2 minutes.\n\n**If you're using Queue Mode**, update the worker service too:\n\n```bash\ngcloud run services update n8n-worker \\\n    --image=docker.io\u002Fn8nio\u002Fn8n:latest \\\n    --region=$REGION\n```\n\nIt's important that the main service and all workers run the **same n8n version**. 
Mixed versions in queue mode can cause queue protocol mismatches.\n\n### For Option B (Custom Image)\n\n### Method 1: Rebuild and Redeploy (The Clean Way) ###\n\n```bash\n# Pull the latest n8n image\ndocker pull n8nio\u002Fn8n:latest\n\n# Rebuild your custom image\ndocker build --platform linux\u002Famd64 -t $REGION-docker.pkg.dev\u002F$PROJECT_ID\u002Fn8n-repo\u002Fn8n:latest .\n\n# Push to your artifact registry\ndocker push $REGION-docker.pkg.dev\u002F$PROJECT_ID\u002Fn8n-repo\u002Fn8n:latest\n\n# Redeploy your Cloud Run service\ngcloud run services update n8n \\\n    --image=$REGION-docker.pkg.dev\u002F$PROJECT_ID\u002Fn8n-repo\u002Fn8n:latest \\\n    --region=$REGION\n\n# If using Queue Mode, update workers too\ngcloud run services update n8n-worker \\\n    --image=$REGION-docker.pkg.dev\u002F$PROJECT_ID\u002Fn8n-repo\u002Fn8n:latest \\\n    --region=$REGION\n```\n\nThis process typically takes about 2-3 minutes and your n8n instance will experience a brief downtime as Cloud Run swaps over to the new container. Any running workflows will be interrupted, but scheduled triggers will resume once the service is back up.\n\n### Method 2: Specify Versions (The Controlled Way) ###\n\nIf you prefer to manage version upgrades more deliberately (recommended for production use), specify the exact n8n version:\n\n```bash\n# Pull a specific version\ndocker pull n8nio\u002Fn8n:0.230.0  # Replace with your target version\n\n# Update your Dockerfile to use this specific version\n# FROM n8nio\u002Fn8n:0.230.0\n\n# Then rebuild and redeploy as above\n```\n\nThis approach lets you test new versions before committing to them. Check the n8n GitHub releases page to see what's new before upgrading.\n\n### Database Migrations ###\n\nOne of the benefits of using n8n with PostgreSQL is automatic database migrations. When you deploy a new version, n8n will:\n\n1. Detect that the database schema needs updating\n\n2. Run migrations automatically on startup\n\n3. 
Proceed only when the database is compatible\n\nThis means you generally don't need to worry about database changes. However, it's always wise to:\n\n* Back up your database before major version upgrades\n\n* Check the release notes for any breaking changes\n\n* Test upgrades on a staging environment if you have critical workflows\n\nI typically take a snapshot of my Cloud SQL instance before significant version jumps, just in case.\n\n### Updating Environment Variables ###\n\nSometimes you'll need to update environment variables rather than the container itself:\n\n```bash\ngcloud run services update n8n \\\n    --region=$REGION \\\n    --update-env-vars=\"NEW_VARIABLE=value,UPDATED_VARIABLE=new_value\"\n```\n\nThis is useful when you need to change configuration without rebuilding the container.\n\n### Automation Tip\n\nCreate a simple shell script that combines these commands, and you can update your n8n instance with a single command. I keep mine in a private GitHub repo alongside my Dockerfile and startup script, making it easy to maintain and update from anywhere.\n\nRegular updates keep your instance secure and give you access to new nodes and features as they're released. The n8n team typically releases updates every couple of weeks, so checking monthly is a good cadence for most deployments.\n\n---\n\n## Cost Estimates: Yes, It Really Can Be That Cheap ##\n\nLet's talk money. One of the main reasons to use this setup is cost efficiency. This way of self-hosting is cheaper than the much more documented Kubernetes approach, and at most, half the price of n8n's lowest paid tier. Here's what you can expect to pay monthly:\n\n### Regular Mode (Default) ###\n\n**Google Cloud SQL (db-f1-micro)**: About £8.00 if running constantly. 
This is your main cost driver - a basic PostgreSQL instance that's plenty powerful for personal use.\n\n**Google Cloud Run**: Practically free for light usage thanks to the generous free tier:\n\n* 2 million requests per month\n\n* 360,000 GB-seconds of memory\n\n* 180,000 vCPU-seconds\n\nWith our configuration setting min-instances=0, your n8n container shuts down completely when not in use, costing literally nothing during idle periods. When it runs, it only burns through your free tier allocation.\n\n**Secret Manager, Artifact Registry**, etc.: These additional services all have free tiers that you'll likely stay within.\n\n**Total expected monthly cost (regular mode)**: £2–£12\n\n### Queue Mode Additional Costs ###\n\nQueue Mode adds persistent infrastructure that does **not** scale to zero:\n\n**Cloud Memorystore Redis (BASIC, 1 GB)**: ~£35–£45\u002Fmonth. Redis runs continuously and is your largest Queue Mode cost.\n\n**n8n Worker (Cloud Run Service, min 1 instance)**: Workers are kept alive with `min-instances=1` and `--no-cpu-throttling`, so they bill continuously. At 1 vCPU + 2 GiB in most regions: ~£15–£25\u002Fmonth.\n\n**Total expected monthly cost (queue mode)**: ~£55–£80\n\nQueue Mode is worth it when you're running n8n heavily enough that the extra capacity and responsiveness justify the cost. 
For light personal use, regular mode is the better choice by a wide margin.\n\n#### How to Keep Costs Down ####\n\n* **Schedule maintenance during off-hours**: If you have workflows that process data in batches, schedule them during times you're not actively using the system.\n\n* **Use webhooks efficiently**: Design workflows that trigger via webhooks rather than constant polling where possible.\n\n* **Monitor your usage**: Google Cloud provides excellent usage dashboards - check them regularly during your first month to understand your consumption patterns.\n\n* **Set budget alerts**: Configure budget alerts in Google Cloud to notify you if spending exceeds your threshold.\n\n* **Queue Mode sizing**: In Queue Mode, start with 1 worker instance and only scale `max-instances` up if you actually see execution backpressure. Over-provisioning workers is the quickest way to inflate your bill.\n\nWhat could push costs higher? Running CPU-intensive workflows frequently, storing large amounts of data in PostgreSQL, or configuring your instance with more resources than necessary.\n\nBut for most personal automation needs, this setup offers enterprise-level capabilities at coffee-money prices. I've been running this exact configuration for months, and my bill consistently stays under £5 even with dozens of active workflows.\n\n---\n\n## Troubleshooting ##\n\n> **Note:** If you used Terraform for deployment, see the [Terraform Troubleshooting](#terraform-troubleshooting) section for deployment-specific issues.\n\nWhen things inevitably go sideways, here are the most common issues and how to fix them:\n\n1. Container Fails to Start:\n\n    * Check Cloud Run logs for specific error messages\n\n    * Verify `DB_TYPE` is set to \"postgresdb\" (not \"postgresql\")\n\n    * Ensure `QUEUE_HEALTH_CHECK_ACTIVE` is set to \"true\"\n  \n    * Remove the `EXECUTIONS_PROCESS=main` and `EXECUTIONS_MODE=regular` environment variables as these are now deprecated in newer versions\n\n2. 
OAuth Redirect Issues:\n\n    * Ensure `N8N_HOST`, `N8N_PORT`, and `N8N_EDITOR_BASE_URL` are correctly set\n\n    * Verify redirect URIs in Google Cloud Console match exactly what n8n generates\n\n    * Confirm `N8N_PORT` is set to 443 (not 5678) for external URL formatting\n\n3. Database Connection Problems:\n\n    * Check `DB_POSTGRESDB_HOST` format for Cloud SQL connections\n\n    * Ensure service account has Cloud SQL Client role\n\n4. Node Trigger Issues:\n\n    * Use `WEBHOOK_URL` instead of `N8N_WEBHOOK_URL` for newer n8n versions\n  \n    * Add proxy hop configurations by including `N8N_PROXY_HOPS=1` as Cloud Run acts as a reverse proxy\n\n### Queue Mode Issues ###\n\n5. Workers Not Appearing in n8n Settings → Workers:\n\n    * Confirm both the main service and workers have `EXECUTIONS_MODE=queue` set\n    \n    * Verify `QUEUE_BULL_REDIS_HOST` and `QUEUE_BULL_REDIS_PORT` are set correctly on both services — the host must be the private IP of the Memorystore instance, not a hostname or public address\n    \n    * Check that `QUEUE_BULL_REDIS_PASSWORD` is being injected correctly from Secret Manager — a missing or wrong AUTH string will cause silent connection failures\n    \n    * Confirm both Cloud Run services have Direct VPC Egress enabled (`--vpc-egress=private-ranges-only`) and are using the same VPC network as the Memorystore instance\n\n6. Executions Stuck in \"Waiting\" State:\n\n    * This means jobs are being enqueued but no worker is picking them up\n    \n    * Check worker Cloud Run service logs for Redis connection errors\n    \n    * Ensure `--min-instances=1` is set on the worker service — if workers have scaled to zero, jobs will wait indefinitely\n    \n    * Verify the worker is running `n8n worker` (not `n8n start`) — check the command override\n\n7. 
\"Could not connect to Redis\" Errors in Logs:\n\n    * Confirm the Memorystore instance is in the same VPC network as the Cloud Run services\n    \n    * Verify Direct VPC Egress is configured on the failing Cloud Run service\n    \n    * Check that the `redis.googleapis.com` and `compute.googleapis.com` APIs are enabled\n    \n    * Try fetching the Redis host IP directly: `gcloud redis instances describe n8n-redis --region=$REGION --format=\"value(host)\"` and confirm it matches what's in the environment variable\n\n8. VPC Egress Causing Outbound Connectivity Issues:\n\n    * The `private-ranges-only` egress setting routes only `10.x.x.x`, `172.16.x.x`, and `192.168.x.x` traffic through the VPC — all other traffic (including external API calls from your workflows) still uses the normal internet path, so this should not affect most workflows\n\n    * If you do see connectivity problems with external services, double-check that you used `private-ranges-only` and not `all-traffic`\n\n9. Main n8n Service Container Crashes at Startup (`exit(1)`) in Queue Mode:\n\n    * This happens when `N8N_RUNNERS_ENABLED=true` is set on the main service (`n8n start`) in queue mode. The main process is responsible only for the UI, API, and webhooks — it doesn't execute workflows. With that flag set, n8n eagerly starts a task runner launcher process on boot, which crashes before the HTTP server is ready. Cloud Run reports a health check failure, but the real problem is `exit(1)` in the application logs\n\n    * The fix: do not set `N8N_RUNNERS_ENABLED` on the main service. Workers need it (they run code). 
The main service does not\n\n    * To confirm the root cause, filter Cloud Logging for `run.googleapis.com\u002Fstdout` and `run.googleapis.com\u002Fstderr` on the failed revision — the exact crash message from n8n will be there, which is more informative than the Cloud Run system event\n\n---\n\n## Terraform Deployment Option\n\nThanks to a generous contribution from the community, there is now a Terraform configuration available to automate the entire deployment process described in this guide. This Terraform setup provisions all necessary Google Cloud resources including Cloud Run, Cloud SQL, Secret Manager, IAM roles, and Artifact Registry.\n\nUsing Terraform can simplify and speed up deployment, especially for those familiar with infrastructure as code. The Terraform files and a deployment script are included in the repository.\n\n### Quick Terraform Deployment\n\nClone the repository and navigate to terraform directory\n\n```bash\ngit clone \u003Cyour-repo-url>\ncd \u003Crepo-name>\u002Fterraform\n```\n\nInitialize Terraform\n\n```tf\nterraform init\n```\n\nReview the planned changes\n\n```tf\nterraform plan\n```\n[use with flag `terraform plan -var-file=terraform.tfvars` if you created the terraform.tfvars file]\n\nDeploy the infrastructure.\n\n**For Option A (recommended - official image):**\n\n\n```tf\nterraform apply\n```\n[use with flag `terraform apply -var-file=terraform.tfvars` if you created the terraform.tfvars file]\n\n**For Option B (custom image):**\n\n\n```tf\nterraform apply -var=\"use_custom_image=true\"\n```\n\nOr in `terraform.tfvars`:\n\n```hcl\nuse_custom_image = true  # Only if you want custom image\n```\n\n### Terraform Queue Mode Deployment ###\n\nThe Terraform configuration supports Queue Mode through a single feature flag. 
When `enable_queue_mode = true`, Terraform will additionally provision:\n\n- A **Cloud Memorystore Redis** instance (private, VPC-peered)\n- A **Cloud Run worker service** (`n8n-worker`) with internal ingress and min 1 instance\n- A **Redis AUTH secret** in Secret Manager\n- **Direct VPC Egress** on both Cloud Run services\n- The `redis.googleapis.com` and `compute.googleapis.com` APIs\n\n#### Enable Queue Mode via CLI flag:\n\n```bash\nterraform apply -var=\"enable_queue_mode=true\"\n```\n\n#### Or in `terraform.tfvars`:\n\n```hcl\ngcp_project_id    = \"your-project-id\"\nenable_queue_mode = true\n\n# Optional: tune Redis and worker sizing\nredis_tier           = \"BASIC\"       # or \"STANDARD_HA\" for production\nredis_memory_size_gb = 1\nworker_min_instances = 1\nworker_max_instances = 3\nworker_cpu           = \"1\"\nworker_memory        = \"2Gi\"\n\n# Optional: specify VPC network\u002Fsubnet if not using the default VPC\n# vpc_network    = \"default\"\n# vpc_subnetwork = \"\"   # Leave empty for auto-selection\n```\n\nThen run:\n\n```bash\nterraform plan -var-file=terraform.tfvars\nterraform apply -var-file=terraform.tfvars\n```\n\nTerraform will output:\n- `cloud_run_service_url` — the public n8n editor URL\n- `cloud_run_worker_service_url` — the worker service's internal URL (not publicly accessible)\n- `redis_host` — the private IP of the Memorystore instance\n- `redis_port` — the Redis port\n- `cloud_sql_connection_name` — the Cloud SQL connection name\n\n> **Note on provisioning time:** Cloud Memorystore instances take 5–10 minutes to provision. Terraform will wait for the instance to be ready before creating the worker service. 
The overall `terraform apply` for a Queue Mode deployment typically takes 15–20 minutes.\n\n#### Upgrading an Existing Deployment to Queue Mode ####\n\nIf you've already deployed the standard (non-queue) setup with Terraform and want to add Queue Mode, simply add `enable_queue_mode = true` to your `terraform.tfvars` and run `terraform apply` again. Terraform will add the new resources incrementally without touching the existing ones.\n\n```bash\n# Add to existing terraform.tfvars\necho 'enable_queue_mode = true' >> terraform.tfvars\n\nterraform plan   # Review what will be added\nterraform apply  # Apply the changes\n```\n\n### Terraform Troubleshooting ###\n\nIf you're encountering issues with Terraform deployment, especially after a previous manual installation attempt or a failed Terraform run, you may need to clean up existing resources first.\n\n**Common scenarios requiring cleanup:**\n- You followed the manual steps before discovering the Terraform option\n- A previous Terraform deployment timed out or lost connectivity mid-build\n- You're seeing \"resource already exists\" errors\n\n**Clean up steps:**\n\n1. **Remove Terraform state files** (if you have a corrupted state):\n\n```bash\ncd terraform\u002F\nrm -rf terraform.tfstate*\nrm -rf .terraform\u002F\n```\n\n2. 
**Delete existing Google Cloud resources** via Console or CLI:\n\n**Artifact Registry:**\n\n```bash\ngcloud artifacts repositories delete n8n-repo --location=$REGION\n```\n\n**Cloud SQL Instance:**\n\n```bash\ngcloud sql instances delete n8n-db\n```\n\n**Secrets:**\n\n```bash\ngcloud secrets delete n8n-db-password\ngcloud secrets delete n8n-encryption-key\ngcloud secrets delete n8n-redis-auth  # Queue Mode only\n```\n\n**Service Account:**\n\n```bash\ngcloud iam service-accounts delete n8n-service-account@$PROJECT_ID.iam.gserviceaccount.com\n```\n\n**Cloud Run Services:**\n\n```bash\ngcloud run services delete n8n --region=$REGION\ngcloud run services delete n8n-worker --region=$REGION  # Queue Mode only\n```\n\n**Cloud Memorystore Redis (Queue Mode only):**\n\n```bash\ngcloud redis instances delete n8n-redis --region=$REGION\n```\n\n3. **Alternative: Use Google Cloud Console**\n- Navigate to each service (Cloud Run, Cloud SQL, Memorystore, Secret Manager, IAM, Artifact Registry)\n- Identify and delete resources with names matching the Terraform configuration\n- This visual approach can be easier for identifying partially-created resources\n\n4. **Re-run Terraform:**\n\n```tf\nterraform init\nterraform plan # Verify no conflicts remain\nterraform apply\n```\n\n**Pro tip:** If you're unsure which resources were created, check the Terraform configuration files to see the exact resource names and types that will be provisioned.\n\nHuge thanks to [@alliecatowo](https:\u002F\u002Fgithub.com\u002Falliecatowo) for this valuable addition!\n\nFor more details and usage instructions, please see the `terraform\u002F` directory in this repository.\n\n---\n\nAnd there you have it - a fully functional n8n instance running on Google Cloud Run. You get all the benefits of self-hosting without the headache of managing servers. 
Your workflows run reliably, your data stays under your control, and you only pay for what you use.\n","# 在 Google Cloud Run 上自托管 n8n：完整指南 #\n\n那么，您是否希望在无需支付月度订阅费用的情况下运行 n8n，将数据完全掌握在自己手中，并避免服务器维护的繁琐工作呢？Google Cloud Run 正好提供了这样一个理想的解决方案——无服务器部署，按使用量计费。让我们来正确地搭建这个系统吧。\n\n本指南将引导您在 Google Cloud Run 上部署 n8n（这款强大的工作流自动化平台），并使用 PostgreSQL 数据库进行持久化存储。最终，您将拥有一个功能齐全、可自动扩展的系统，能够通过 OAuth 与 Google 服务集成，并且在空闲时不会产生高额费用。\n\n> **🚀 快速入门选项**：想跳过手动设置吗？请直接前往 [Terraform 部署选项](#terraform-deployment-option) 部分，享受简化、自动化的部署流程。下面的逐步指南有助于您理解背后的实现原理，但 Terraform 将为您完成所有繁重的工作！\n\n## 目录 ##\n- [使用 Terraform 快速部署](#terraform-deployment-option)\n- [手动逐步指南](#step-1-set-up-your-google-cloud-project)\n- [配置](#step-8-configure-n8n-for-oauth-with-google-services)\n- [队列模式部署](#queue-mode-deployment-scaling-n8n-for-production)\n- [更新与维护](#keeping-n8n-updated-dont-be-that-person-running-year-old-software)\n- [成本估算](#cost-estimates-yes-it-really-can-be-that-cheap)\n- [故障排除](#troubleshooting)\n\n## 概述 ##\n\nn8n 非常适合自动化那些您不想手动执行的繁琐任务。在此方案中：\n\n* 使用 Google Cloud Run 托管应用程序（仅在运行时付费）\n\n* 使用 Cloud SQL PostgreSQL 数据库进行持久化存储（确保您的工作流在重启后仍能继续运行）\n\n* 利用 Google 身份验证平台连接到 Google 服务（如 Sheets、Drive 等）\n\n为什么要选择自托管呢？因为您可以完全掌控自己的自动化工作流和数据，不受任何执行限制，也无需担心敏感数据的存储位置。而借助 Cloud Run，您既能享受自托管的控制权，又能体验无需管理物理服务器的便利性。\n\n## 前提条件 ##\n\n在开始之前，请确保您已具备以下条件：\n\n* 一个 Google Cloud 账号（新账号通常享有丰厚的免费套餐）\n\n* 已安装并配置好 gcloud CLI（相信我，比起在网页控制台中点击操作，命令行更高效）\n\n* 对 Docker 和命令行的基本熟悉程度\n\n* **Docker**（仅在使用自定义镜像时需要 - 选项 B）\n\n* 一个域名（可选，但建议用于生产环境）\n\n虽然一开始可能会觉得命令行方式有些复杂，但它使我们能够编写脚本来自动化整个部署过程。当您需要更新或重新创建实例时，会感谢自己将所有内容都以可复用的形式组织起来。\n\n## 社区视频教程 ##\n\n非常感谢 Terra Femme 制作的这份精彩的逐步视频教程，详细展示了整个部署过程！如果您是视觉型学习者，或者希望看到有人实时解决常见问题，那么这段视频绝对不容错过。\n\n▶️ [观看部署视频](https:\u002F\u002Fyoutu.be\u002FbLDv07BR9Hw \"在 Google Cloud RUN 上免费托管 n8n，不到 20 分钟搞定 - @TerraFemme-Tech\")，由 [Terra Femme (@terra.femme)](https:\u002F\u002Fgithub.com\u002Fterra-femme \"terra-femme (Terra.Femme)\") 录制。\n\n该视频涵盖了使用 Google Cloud Shell Editor 完成的完整 Terraform 
部署过程，包括修复大多数用户都会遇到的端口配置问题。它基本上就是本指南内容的现场演示，非常棒！\n\n## 第一步：设置您的 Google Cloud 项目 ##\n\n首先，让我们准备好 Google Cloud 环境：\n\n```bash\n# 设置您的 Google Cloud 项目 ID\nexport PROJECT_ID=\"your-project-id\"\nexport REGION=\"europe-west2\"  # 选择您偏好的区域\n\n# 登录 gcloud\ngcloud auth login\n\n# 设置当前活动项目\ngcloud config set project $PROJECT_ID\n\n# 启用所需 API\ngcloud services enable artifactregistry.googleapis.com\ngcloud services enable run.googleapis.com\ngcloud services enable sqladmin.googleapis.com\ngcloud services enable secretmanager.googleapis.com\n```\n\n这些命令将为您建立项目环境，并启用所需的 Google Cloud API。我们提前开启所有必要的服务，以避免后续出现“请先启用此 API”的错误提示。\n\n## 第二步：为 n8n 的 Cloud Run 部署做准备 ##\n\nn8n 在连接外部数据库时需要短暂的启动延迟，以避免初始化过程中发生竞态条件。有两种方法可以处理这个问题：\n\n### 选项 A：使用官方镜像（推荐）  \n\n这是最简单的方法——使用 [n8n 的官方 Docker 镜像](https:\u002F\u002Fhub.docker.com\u002Fr\u002Fn8nio\u002Fn8n)，并通过覆盖启动命令添加 5 秒的延迟。这种做法源自 n8n 自己的 Kubernetes 部署配置文件[链接](https:\u002F\u002Fgithub.com\u002Fn8n-io\u002Fn8n-hosting\u002Fblob\u002Fmain\u002Fkubernetes\u002Fn8n-deployment.yaml#L32)，并且在 Cloud Run 上同样适用。\n\n**无需额外文件！** 您将在第 7 步部署时使用命令覆盖参数。\n\n### 选项 B：自定义 Docker 镜像（进阶）  \n\n如果您需要自定义启动逻辑或希望获取详细的调试信息，可以使用自定义 Docker 镜像。这种方法提供了更高的灵活性，但也需要您自行构建和维护镜像。\n\n在您的工作目录中创建以下两个文件：\n\n**startup.sh:**\n\n```bash\n#!\u002Fbin\u002Fsh\n\n# 添加启动延迟，以便数据库初始化\nsleep 5\n\n# 如果存在 Cloud Run 的 PORT 变量，则将其映射到 N8N_PORT；\n# 否则，使用显式设置的 N8N_PORT，或默认为 5678\nif [ -n \"$PORT\" ]; then\n  export N8N_PORT=$PORT\nelif [ -z \"$N8N_PORT\" ]; then\n  export N8N_PORT=5678\nfi\n\n# 打印环境变量以供调试\necho \"数据库设置:\"\necho \"DB_TYPE: $DB_TYPE\"\necho \"DB_POSTGRESDB_HOST: $DB_POSTGRESDB_HOST\"\necho \"DB_POSTGRESDB_PORT: $DB_POSTGRESDB_PORT\"\necho \"N8N_PORT: $N8N_PORT\"\n\n# 使用 n8n 的原始入口点启动程序\nexec \u002Fdocker-entrypoint.sh\n```\n\n端口映射脚本为您提供更大的灵活性：您可以允许 Cloud Run 动态分配端口，也可以显式指定端口。这样做有以下几个好处：\n\n1. Cloud Run 会自动分配端口——如果部署时未设置 `--port=5678`，Cloud Run 会注入 `PORT` 变量。\n2. 具备未来兼容性——即使 Cloud Run 改变端口处理方式，脚本也能适应。\n3. 
适用于多种环境——同一镜像既可在 Cloud Run 上运行，也可用于 Cloud Run Jobs 或其他容器平台。\n\n相比之下，选项 A 不需要此脚本，因为它通过命令行参数明确设置了所有内容。\n\n**Dockerfile:**\n\n```Dockerfile\nFROM docker.n8n.io\u002Fn8nio\u002Fn8n:latest\n\n# 复制脚本并确保其具有正确的权限\nCOPY startup.sh \u002F\nUSER root\nRUN chmod +x \u002Fstartup.sh\nUSER node\nEXPOSE 5678\n\n# 使用 shell 形式以避免 exec 格式问题\nENTRYPOINT [\"\u002Fbin\u002Fsh\", \"\u002Fstartup.sh\"]\n```\n\n这种自定义设置解决了端口不匹配的问题，并有助于调试。如果没有它，你只会看到一个失败的容器，而没有任何有用的错误信息。是的，这确实像听起来那样令人沮丧。\n\n**如果你在使用选项 B 时遇到问题：**\n\n* 检查你的 `startup.sh` 文件是否使用 Unix 风格的换行符（LF，而不是 CRLF）\n\n* 确保该文件具有正确的执行权限\n\n### 你应该选择哪个选项？\n\n* **选择选项 A** 如果你只想让 n8n 在尽量少操心的情况下可靠运行\n\n* **使用选项 B** 如果你需要调试输出或自定义启动脚本\n\n本指南的其余部分将针对两种方法的不同之处分别展示相应的命令。\n\n## 第 3 步：设置容器仓库（可选——仅适用于自定义镜像） ##\n\n**如果你使用的是选项 A（官方镜像）**，请完全跳过此步骤，直接进入第 4 步。\n\n**如果你使用的是选项 B（自定义镜像）**，你需要一个地方来存储你的自定义容器镜像：\n\n```bash\n# 在 Artifact Registry 中创建一个仓库\ngcloud artifacts repositories create n8n-repo \\\n    --repository-format=docker \\\n    --location=$REGION \\\n    --description=\"用于 n8n 工作流镜像的仓库\"\n\n# 配置 Docker 以使用 gcloud 作为凭据助手\ngcloud auth configure-docker $REGION-docker.pkg.dev\n\n# 构建并推送你的镜像\ndocker build --platform linux\u002Famd64 -t $REGION-docker.pkg.dev\u002F$PROJECT_ID\u002Fn8n-repo\u002Fn8n:latest .\ndocker push $REGION-docker.pkg.dev\u002F$PROJECT_ID\u002Fn8n-repo\u002Fn8n:latest\n```\n\n我们明确地为 linux\u002Famd64 架构构建镜像，因为 Cloud Run 不支持 ARM 架构。这一点尤其重要，如果你是在 M1\u002FM2 Mac 上开发——Docker 默认会愉快地构建一个 ARM 镜像，但部署时却会莫名其妙地失败。就问我是怎么知道的吧。\n\n## 第 4 步：设置 Cloud SQL PostgreSQL 实例 ##\n\n现在该设置数据库了。我们将使用最小的实例类型以保持合理的成本：\n\n```bash\n# 创建一个 Cloud SQL 实例（最低成本层级）\ngcloud sql instances create n8n-db \\\n    --database-version=POSTGRES_13 \\\n    --tier=db-f1-micro \\\n    --region=$REGION \\\n    --root-password=\"supersecure-rootpassword\" \\\n    --storage-size=10GB \\\n    --availability-type=ZONAL \\\n    --no-backup \\\n    --storage-type=HDD\n\n# 创建数据库\ngcloud sql databases create n8n --instance=n8n-db\n\n# 为 n8n 创建用户\ngcloud sql users create n8n-user \\\n    
--instance=n8n-db \\\n    --password=\"supersecure-userpassword\"\n```\n\ndb-f1-micro 层级非常适合大多数个人 n8n 部署。我在这个配置上运行过数百个工作流，从未出现问题。如果需要，你随时可以升级。\n\n## 第 5 步：为敏感数据创建 Secret ##\n\n切勿将密码放入部署配置中。让我们改用 Secret Manager：\n\n```bash\n# 为数据库密码创建一个 Secret\necho -n \"supersecure-userpassword\" | \\\n    gcloud secrets create n8n-db-password \\\n    --data-file=- \\\n    --replication-policy=\"automatic\"\n\n# 为 n8n 加密密钥创建一个 Secret\necho -n \"your-random-encryption-key\" | \\\n    gcloud secrets create n8n-encryption-key \\\n    --data-file=- \\\n    --replication-policy=\"automatic\"\n```\n\n这个加密密钥尤为重要——它保护了你 n8n 实例中存储的所有凭据。请确保密钥足够长、随机且妥善保管。一旦丢失，你将需要重新配置所有连接的服务。\n\n## 第 6 步：为 Cloud Run 创建服务账户 ##\n\n现在该设置 n8n 实例将使用的身份了：\n\n```bash\n# 创建服务账户\ngcloud iam service-accounts create n8n-service-account \\\n    --display-name=\"n8n Service Account\"\n\n# 授予访问 Secrets 的权限\ngcloud secrets add-iam-policy-binding n8n-db-password \\\n    --member=\"serviceAccount:n8n-service-account@$PROJECT_ID.iam.gserviceaccount.com\" \\\n    --role=\"roles\u002Fsecretmanager.secretAccessor\"\n\ngcloud secrets add-iam-policy-binding n8n-encryption-key \\\n    --member=\"serviceAccount:n8n-service-account@$PROJECT_ID.iam.gserviceaccount.com\" \\\n    --role=\"roles\u002Fsecretmanager.secretAccessor\"\n\n# 授予 Cloud SQL Client 角色\ngcloud projects add-iam-policy-binding $PROJECT_ID \\\n    --member=\"serviceAccount:n8n-service-account@$PROJECT_ID.iam.gserviceaccount.com\" \\\n    --role=\"roles\u002Fcloudsql.client\"\n```\n\n在这里遵循最小权限原则意味着你的 n8n 服务只能访问它真正需要的内容，而不会越权。这虽是小事，却对你的安全态势有着重大影响。\n\n## 第 7 步：部署到 Cloud Run ##\n\n关键时刻到了——让我们部署 n8n。根据你在第 2 步中选择的方法，部署命令略有不同。\n\n首先获取你的 Cloud SQL 连接名称：\n\n```bash\nexport SQL_CONNECTION=$(gcloud sql instances describe n8n-db --format=\"value(connectionName)\")\n```\n\n### 选项 A：使用官方镜像部署（推荐） ###\n\n```bash\ngcloud run deploy n8n \\\n    --image=docker.io\u002Fn8nio\u002Fn8n:latest \\\n    --command=\"\u002Fbin\u002Fsh\" \\\n    --args=\"-c,sleep 5; n8n start\" \\\n    
--platform=managed \\\n    --region=$REGION \\\n    --allow-unauthenticated \\\n    --port=5678 \\\n    --cpu=1 \\\n    --memory=2Gi \\\n    --min-instances=0 \\\n    --max-instances=1 \\\n    --no-cpu-throttling \\\n    --set-env-vars=\"N8N_PORT=5678,N8N_PROTOCOL=https,DB_TYPE=postgresdb,DB_POSTGRESDB_DATABASE=n8n,DB_POSTGRESDB_USER=n8n-user,DB_POSTGRESDB_HOST=\u002Fcloudsql\u002F$SQL_CONNECTION,DB_POSTGRESDB_PORT=5432,DB_POSTGRESDB_SCHEMA=public,N8N_USER_FOLDER=\u002Fhome\u002Fnode\u002F.n8n,GENERIC_TIMEZONE=UTC,QUEUE_HEALTH_CHECK_ACTIVE=true\" \\\n    --set-secrets=\"DB_POSTGRESDB_PASSWORD=n8n-db-password:latest,N8N_ENCRYPTION_KEY=n8n-encryption-key:latest\" \\\n    --add-cloudsql-instances=$SQL_CONNECTION \\\n    --service-account=n8n-service-account@$PROJECT_ID.iam.gserviceaccount.com\n```\n\n### 选项 B：使用自定义镜像部署 ###\n\n```bash\n# 部署到 Cloud Run\ngcloud run deploy n8n \\\n    --image=$REGION-docker.pkg.dev\u002F$PROJECT_ID\u002Fn8n-repo\u002Fn8n:latest \\\n    --platform=managed \\\n    --region=$REGION \\\n    --allow-unauthenticated \\\n    --port=5678 \\\n    --cpu=1 \\\n    --memory=2Gi \\\n    --min-instances=0 \\\n    --max-instances=1 \\\n    --no-cpu-throttling \\\n    --set-env-vars=\"N8N_PATH=\u002F,N8N_PORT=443,N8N_PROTOCOL=https,DB_TYPE=postgresdb,DB_POSTGRESDB_DATABASE=n8n,DB_POSTGRESDB_USER=n8n-user,DB_POSTGRESDB_HOST=\u002Fcloudsql\u002F$SQL_CONNECTION,DB_POSTGRESDB_PORT=5432,DB_POSTGRESDB_SCHEMA=public,N8N_USER_FOLDER=\u002Fhome\u002Fnode,EXECUTIONS_PROCESS=main,EXECUTIONS_MODE=regular,GENERIC_TIMEZONE=UTC,QUEUE_HEALTH_CHECK_ACTIVE=true\" \\\n    --set-secrets=\"DB_POSTGRESDB_PASSWORD=n8n-db-password:latest,N8N_ENCRYPTION_KEY=n8n-encryption-key:latest\" \\\n    --add-cloudsql-instances=$SQL_CONNECTION \\\n    --service-account=n8n-service-account@$PROJECT_ID.iam.gserviceaccount.com\n```\n\n部署完成后，Cloud Run 会提供一个指向你的 n8n 实例的 URL。请记下它——你将在后续步骤中需要用到。\n\n### 关键配置说明 ###\n\n**为什么使用 `--no-cpu-throttling`？**\nn8n 会执行一些后台处理任务（如数据库连接、定时检查等），这些任务并不依赖于 HTTP 
请求。如果启用了 CPU 节流功能，这些后台任务就会被限制资源，从而导致启动问题。通过添加该标志，可以确保 n8n 获得持续的 CPU 访问权限，实际上还能节省成本，因为消除了按请求计费的费用，并且降低了 CPU 和内存的费率。感谢 Google Cloud Run 团队提供的这一见解。\n\n**其他重要设置：**\n- `min-instances=0` 和 `max-instances=1` 表示服务在空闲时会缩放至零实例（节省费用），但不会同时运行多个实例（这可能会导致 n8n 的数据库冲突）。\n- CPU 和内存的分配对于大多数工作流来说已经足够，同时不会产生过高的成本。\n- 选项 A 中的 `sleep 5` 用于处理数据库初始化的时序问题。\n\n### 选项之间的关键区别 ###\n\n| 设置                | 选项 A（官方）         | 选项 B（自定义）       | 为何不同？                     |\n|---------------------|-----------------------|-----------------------|--------------------------------|\n| 镜像                | `docker.io\u002Fn8nio\u002Fn8n:latest` | 您的自定义镜像        | 直接从 n8n 官方仓库拉取 vs 您的私有仓库 |\n| 命令                | `--command=\"\u002Fbin\u002Fsh\" --args=\"-c,sleep 5; n8n start\"` | 使用自定义入口点      | 通过命令添加延迟 vs 在脚本中内置延迟 |\n| N8N_PORT            | `5678`               | `443`                 | 直接指定端口 vs 通过启动脚本映射 |\n| N8N_PATH            | 不需要               | `\u002F`                   | 自定义镜像可以处理路径前缀 |\n\n### n8n Google Cloud Run 环境变量 ###\n\n以下是所有环境变量的作用：\n\n|    环境变量   |   选项 A 值    |   选项 B 值    |                                  描述                                 |\n|:-------------:|:---------------:|:---------------:|:--------------------------------------------------------------------:|\n| N8N_PATH      | 不需要          | \u002F               | n8n 可访问的基础路径（仅适用于自定义镜像）                           |\n| N8N_PORT      | 5678            | 443             | 端口配置（直接指定 vs 映射）                                         |\n| N8N_PROTOCOL  | https           | https           | 用于外部访问的协议                                                   |\n| DB_TYPE       | postgresdb      | postgresdb      | 必须精确为 “postgresdb”（而非 “postgresql”），以正确连接数据库     |\n| N8N_USER_FOLDER | \u002Fhome\u002Fnode\u002F.n8n | \u002Fhome\u002Fnode      | n8n 数据存储位置                                                     |\n| EXECUTIONS_PROCESS | 不需要        | main（已弃用）  | 已弃用——在较新版本中应移除                                           |\n| EXECUTIONS_MODE | 不需要      
  | regular（已弃用） | 已弃用——在较新版本中应移除                                           |\n| QUEUE_HEALTH_CHECK_ACTIVE | true        | true            | 对于 Cloud Run 验证容器健康状态至关重要                              |\n\n\n请特别注意 `DB_TYPE`。它必须是 “postgresdb”，而不是 “postgresql”——这是一个常见的部署陷阱。此外，不要显式设置 `PORT` 变量，因为 Cloud Run 会自动注入该值。\n\n## 第 8 步：为 n8n 配置与 Google 服务的 OAuth ##\n\n现在我们需要更新部署，添加环境变量，以告知 n8n 如何正确生成 OAuth 回调 URL：\n\n```bash\n# 获取您的服务 URL（替换为您实际的 URL）\nexport SERVICE_URL=\"https:\u002F\u002Fn8n-YOUR_ID.REGION.run.app\"\n\n# 更新部署，配置正确的 URL\ngcloud run services update n8n \\\n    --region=$REGION \\\n    --update-env-vars=\"N8N_HOST=$(echo $SERVICE_URL | sed 's\u002Fhttps:\\\u002F\\\u002F\u002F\u002F'),N8N_WEBHOOK_URL=$SERVICE_URL,N8N_EDITOR_BASE_URL=$SERVICE_URL\"\n```\n\n如果没有这些变量，OAuth 将会失败，并抛出毫无帮助的 “redirect_uri_mismatch” 错误，让人怀疑人生。正确设置这些变量后，n8n 就能在认证流程中构建正确的回调 URL。\n\n对于较新的 n8n 版本，请使用 `WEBHOOK_URL` 而不是 `N8N_WEBHOOK_URL`。\n\n## 第 9 步：设置 Google OAuth 凭证 ##\n\n最后，为了将 n8n 与 Google 服务（如 Sheets）连接起来：\n\n1. 访问 Google Cloud 控制台：\n    * 导航到 Google Cloud 控制台\n    * 选择您的项目\n2. 启用所需 API：\n    * 转到“APIs & Services” > “Library”\n    * 搜索并启用您需要的 API（例如，“Google Sheets API”、“Google Drive API”）\n3. 配置 OAuth 同意屏幕：\n    * 转到“APIs & Services” > “OAuth consent screen”\n    * 选择“External”用户类型（或“Internal”类型，如果您使用的是 Google Workspace）\n    * 填写必要信息（应用名称、用户支持邮箱等）\n    * 如果是 External 类型，需添加测试用户\n    * 对于范围，目前请添加以下内容：\n        * `https:\u002F\u002Fgoogleapis.com\u002Fauth\u002Fdrive.file`\n        * `https:\u002F\u002Fgoogleapis.com\u002Fauth\u002Fspreadsheets`\n\n    > 注意：OAuth 同意屏幕的配置决定了您的应用在用户认证时的显示方式。使用 “External” 类型对于个人项目是必要的，但在开发过程中需要添加测试用户。请求的范围决定了 n8n 对 Google 服务的访问权限级别——我们只请求操作 Google Sheets 所必需的最低权限。\n    \n4. 
创建 OAuth 客户端 ID：\n    * 转到“APIs & Services” > “Credentials”\n    * 点击“CREATE CREDENTIALS”，选择“OAuth client ID”\n    * 选择“Web application”作为应用类型\n    * 将您的 n8n URL 添加到“Authorized JavaScript origins”：\n\n        ```bash\n        https:\u002F\u002Fn8n-YOUR_ID.REGION.run.app\n        ```\n\n    * 在 n8n 中创建凭据时，系统会显示所需的重定向 URL。将其添加到“Authorized redirect URIs”：\n\n        ```bash\n        https:\u002F\u002Fn8n-YOUR_ID.REGION.run.app\u002Frest\u002Foauth2-credential\u002Fcallback\n        ```\n\n    * 点击“CREATE”以生成您的客户端 ID 和客户端密钥。\n    \n5. 将凭据添加到 n8n：\n    * 在您的 n8n 实例中，创建一个新的 Google Sheets 凭据\n    * 选择“OAuth2”作为认证类型\n    * 将您的 OAuth 客户端 ID 和客户端密钥从 Google Cloud 控制台复制过来\n    * 完成认证流程\n\n---\n\n## 队列模式部署：扩展 n8n 以适应生产环境 ##\n\n以上步骤为您提供了一个稳定的单进程 n8n 部署方案。对于大多数个人和小型团队来说，这已经足够了。然而，如果您开始运行大量并发的高负载工作流，或者希望在长时间运行的工作流在后台执行时保持编辑器的响应速度，那么 **队列模式** 就是解决方案。\n\n### 什么是队列模式？ ###\n\n在常规模式下，n8n 进程负责所有任务：提供 UI、处理 API 请求、接收 Webhook，*以及* 执行每个工作流。而队列模式则将这些职责分开：\n\n```\n互联网\n    │\n    ▼\nCloud Run — n8n 主进程       ← 提供编辑器 UI、REST API 和 Webhook\n    │  将任务加入队列\n    ▼\nCloud Memorystore (Redis)  ← 任务队列 \u002F 消息代理\n    │  工作进程轮询\n    ▼\nCloud Run — n8n 工作进程 × N ← 执行工作流任务\n    │  读取\u002F写入\n    ▼\nCloud SQL (PostgreSQL)     ← 主进程与工作进程共享的数据库\n```\n\n主进程不再直接运行工作流。它会将工作流放入 Redis 队列，并立即返回以处理新的请求。工作进程会持续轮询该队列，直到执行完成。这样做的结果是：即使在 CPU 密集型工作流运行时，编辑器依然保持响应；同时，执行能力可以独立进行水平扩展。\n\n### 何时应使用队列模式？ ###\n\n**如果满足以下条件，请继续使用常规模式：**\n- 您在轻度至中度负载下将 n8n 用于个人自动化。\n- 成本是首要考虑因素（队列模式会增加约 £50–£70\u002F月 的 Redis 和常驻工作进程费用）。\n- 您的工作流大多是快速的、由 Webhook 触发的任务。\n\n**如果满足以下条件，请切换到队列模式：**\n- 您有许多并发的长时间运行或 CPU 密集型工作流。\n- 在大量工作流运行期间，n8n 编辑器变得无响应。\n- 您希望独立于 UI 扩展执行能力。\n- 您正在为团队使用 n8n，并需要可靠的吞吐量。\n\n### 其他先决条件 ###\n\n队列模式需要：\n- 启用 `redis.googleapis.com` 和 `compute.googleapis.com` API。\n- 在您的项目中有一个 VPC 网络（默认的自动模式 VPC 即可）。\n- VPC 的 **Private Service Access** 对等范围可用（用于 Cloud Memorystore）。\n\nCloud Memorystore Redis 实例只能通过 **私有 VPC IP 地址** 访问——它们没有公共端点。Cloud Run 通过 **Direct VPC Egress** 
连接到这些实例，该功能会将私有范围的流量（`10.x.x.x`、`172.16.x.x`、`192.168.x.x`）路由到 VPC，而公共互联网流量则走正常路径。\n\n### 步骤 QM-1：启用额外的 API ###\n\n```bash\ngcloud services enable redis.googleapis.com\ngcloud services enable compute.googleapis.com\n```\n\n### 步骤 QM-2：创建 Cloud Memorystore Redis 实例 ###\n\n```bash\nexport REDIS_NAME=\"n8n-redis\"\n\n# 创建一个 Redis 实例 — BASIC 层级，1 GB，Redis 7.2 并启用 AUTH\ngcloud redis instances create $REDIS_NAME \\\n    --region=$REGION \\\n    --tier=BASIC \\\n    --size=1 \\\n    --redis-version=redis_7_2 \\\n    --network=default \\\n    --enable-auth\n\n# 获取私有 IP、端口和 AUTH 字符串\nexport REDIS_HOST=$(gcloud redis instances describe $REDIS_NAME \\\n    --region=$REGION --format=\"value(host)\")\nexport REDIS_PORT=$(gcloud redis instances describe $REDIS_NAME \\\n    --region=$REGION --format=\"value(port)\")\nexport REDIS_AUTH=$(gcloud redis instances get-auth-string $REDIS_NAME \\\n    --region=$REGION --format=\"value(authString)\")\n\necho \"Redis 主机: $REDIS_HOST\"\necho \"Redis 端口: $REDIS_PORT\"\n```\n\n> **层级建议：**\n> - `BASIC` — 单节点，无复制。最便宜（1 GB 大概每月 £35）。适合个人使用；如果 Redis 重启，任何正在进行的执行任务都会丢失（需要重新触发）。\n> - `STANDARD_HA` — 主节点 + 副本，具备自动故障转移功能。适用于生产环境，可用性更高（1 GB 大概每月 £70）。\n\n> **网络说明：** `--network=default` 会将 Memorystore 实例对等接入默认 VPC。如果您不使用默认网络，请替换为您自定义的 VPC 名称。\n\n### 步骤 QM-3：将 Redis AUTH 字符串存储到 Secret Manager 中 ###\n\n```bash\n# 存储 Redis AUTH 字符串\necho -n \"$REDIS_AUTH\" | \\\n    gcloud secrets create n8n-redis-auth \\\n    --data-file=- \\\n    --replication-policy=\"automatic\"\n\n# 授予 n8n 服务账号访问该密钥的权限\ngcloud secrets add-iam-policy-binding n8n-redis-auth \\\n    --member=\"serviceAccount:n8n-service-account@$PROJECT_ID.iam.gserviceaccount.com\" \\\n    --role=\"roles\u002Fsecretmanager.secretAccessor\"\n```\n\n### 步骤 QM-4：更新主 n8n 服务以支持队列模式 ###\n\n主服务需要添加三项内容：`EXECUTIONS_MODE=queue` 以激活队列模式，Redis 连接信息以告知队列位置，以及 Direct VPC Egress 以便能够实际连接到 Memorystore 的私有 IP。\n\n```bash\ngcloud run services update n8n \\\n    --region=$REGION \\\n    --vpc-egress=private-ranges-only 
\\\n    --network=default \\\n    --subnet=default \\\n    --update-env-vars=\"EXECUTIONS_MODE=queue,QUEUE_BULL_REDIS_HOST=$REDIS_HOST,QUEUE_BULL_REDIS_PORT=$REDIS_PORT\" \\\n    --update-secrets=\"QUEUE_BULL_REDIS_PASSWORD=n8n-redis-auth:latest\"\n```\n\n> **子网说明：** 对于默认的自动模式 VPC，`--subnet=default` 会自动选择区域子网。如果您使用自定义 VPC，请指定您所在区域的确切子网名称（例如 `--subnet=my-subnet`）。\n\n### 步骤 QM-5：部署 n8n 工作进程服务 ###\n\n工作进程运行的是 `n8n worker` 命令，而不是 `n8n start`。它们不提供面向公众的流量服务，而是轮询 Redis 并连接到 Cloud SQL。\n\n```bash\ngcloud run deploy n8n-worker \\\n    --image=docker.io\u002Fn8nio\u002Fn8n:latest \\\n    --command=\"\u002Fbin\u002Fsh\" \\\n    --args=\"-c,sleep 5; n8n worker\" \\\n    --platform=managed \\\n    --region=$REGION \\\n    --no-allow-unauthenticated \\\n    --ingress=internal \\\n    --port=5678 \\\n    --cpu=1 \\\n    --memory=2Gi \\\n    --min-instances=1 \\\n    --max-instances=3 \\\n    --no-cpu-throttling \\\n    --vpc-egress=private-ranges-only \\\n    --network=default \\\n    --subnet=default \\\n    --set-env-vars=\"EXECUTIONS_MODE=queue,DB_TYPE=postgresdb,DB_POSTGRESDB_DATABASE=n8n,DB_POSTGRESDB_USER=n8n-user,DB_POSTGRESDB_HOST=\u002Fcloudsql\u002F$SQL_CONNECTION,DB_POSTGRESDB_PORT=5432,DB_POSTGRESDB_SCHEMA=public,N8N_USER_FOLDER=\u002Fhome\u002Fnode\u002F.n8n,GENERIC_TIMEZONE=UTC,QUEUE_HEALTH_CHECK_ACTIVE=true,N8N_RUNNERS_ENABLED=true,QUEUE_BULL_REDIS_HOST=$REDIS_HOST,QUEUE_BULL_REDIS_PORT=$REDIS_PORT\" \\\n    --set-secrets=\"DB_POSTGRESDB_PASSWORD=n8n-db-password:latest,N8N_ENCRYPTION_KEY=n8n-encryption-key:latest,QUEUE_BULL_REDIS_PASSWORD=n8n-redis-auth:latest\" \\\n    --add-cloudsql-instances=$SQL_CONNECTION \\\n    --service-account=n8n-service-account@$PROJECT_ID.iam.gserviceaccount.com\n```\n\n**关键工作进程设置说明：**\n\n| 设置 | 值 | 作用 |\n|---|---|---|\n| `--ingress=internal` | internal | 工作进程不接收公共 HTTP 流量——仅限内部访问 |\n| `--no-allow-unauthenticated` | — | 工作进程不应被公开调用 |\n| `--min-instances=1` | 1 | 至少需要有一个工作进程始终运行，否则排队的任务永远不会开始 |\n| `--no-cpu-throttling` | — | 工作进程会持续轮询 
Redis，在“请求”之间也需要 CPU 资源 |\n| `EXECUTIONS_MODE=queue` | queue | 告诉工作进程从 Redis 中获取任务 |\n\n### 队列模式环境变量参考 ###\n\n| 变量 | 主服务 | 工作进程 | 描述 |\n|---|---|---|---|\n| `EXECUTIONS_MODE` | `queue` | `queue` | 激活队列模式。没有此设置，工作进程将无法运行。 |\n| `QUEUE_BULL_REDIS_HOST` | Redis 私有 IP | Redis 私有 IP | Memorystore 实例的主机地址 |\n| `QUEUE_BULL_REDIS_PORT` | `6379` | `6379` | Redis 端口（Memorystore 默认值） |\n| `QUEUE_BULL_REDIS_PASSWORD` | （来自 Secret） | （来自 Secret） | 存储在 Secret Manager 中的 AUTH 字符串 |\n| `QUEUE_HEALTH_CHECK_ACTIVE` | `true` | `true` | 暴露 `\u002Fhealthz` — Cloud Run 健康检查所需 |\n| `N8N_RUNNERS_ENABLED` | 未设置 | `true` | 在工作进程中启用任务运行子系统。**请勿在队列模式下的主服务上设置此选项**——主进程不会执行工作流，若在此处设置会导致 n8n 在启动时崩溃，HTTP 服务器无法启动。 |\n\n### 扩展工作进程 ###\n\n队列模式的主要优势之一是无需修改主服务即可增加执行容量。如果队列中的工作流开始积压：\n\n```bash\n# 提高工作进程的最大实例数\ngcloud run services update n8n-worker \\\n    --region=$REGION \\\n    --max-instances=10\n```\n\nCloud Run 会根据负载动态扩展工作进程数量，直至达到 `max-instances` 的上限。为降低工作进程层的冷启动延迟，建议将 `min-instances` 调整至 2。\n\n### 验证队列模式是否正常运行 ###\n\n部署完成后，打开 n8n 编辑器，导航至 **Settings → Workers**（n8n ≥ 1.x）。您应能看到工作进程实例列表。若列表为空或显示工作进程已断开连接，请参阅下方的 [队列模式故障排除](#queue-mode-issues) 部分。\n\n---\n\n## 保持 n8n 更新：别再使用一年前的老版本了 ##\n\n更新您的 n8n 部署其实非常简单，而且为了获取新功能和安全补丁，您应该定期进行更新。以下是新版本发布时的更新方法：\n\n### 对于方案 A（官方镜像）\n\n最简单的方法就是更新镜像标签：\n\n**升级到最新版本**：\n\n```bash\ngcloud run services update n8n \\\n    --image=docker.io\u002Fn8nio\u002Fn8n:latest \\\n    --region=$REGION\n```\n\n**或指定特定版本**：\n\n```bash\ngcloud run services update n8n \\\n    --image=docker.io\u002Fn8nio\u002Fn8n:1.115.2 \\\n    --region=$REGION\n```\n\nCloud Run 会自动拉取新镜像并完成部署，整个过程大约需要 1–2 分钟。\n\n**如果您使用的是队列模式**，也需要更新工作进程服务：\n\n```bash\ngcloud run services update n8n-worker \\\n    --image=docker.io\u002Fn8nio\u002Fn8n:latest \\\n    --region=$REGION\n```\n\n确保主服务和所有工作进程运行**相同版本的 n8n**非常重要。队列模式下版本不一致可能导致队列协议不匹配。\n\n### 对于方案 B（自定义镜像）\n\n### 方法 1：重新构建并重新部署（干净的方式） ###\n\n```bash\n# 拉取最新的 n8n 镜像\ndocker pull n8nio\u002Fn8n:latest\n\n# 重新构建您的自定义镜像\ndocker build --platform 
linux\u002Famd64 -t $REGION-docker.pkg.dev\u002F$PROJECT_ID\u002Fn8n-repo\u002Fn8n:latest .\n\n# 推送至您的制品仓库\ndocker push $REGION-docker.pkg.dev\u002F$PROJECT_ID\u002Fn8n-repo\u002Fn8n:latest\n\n# 重新部署您的 Cloud Run 服务\ngcloud run services update n8n \\\n    --image=$REGION-docker.pkg.dev\u002F$PROJECT_ID\u002Fn8n-repo\u002Fn8n:latest \\\n    --region=$REGION\n\n# 如果使用队列模式，也需更新工作进程\ngcloud run services update n8n-worker \\\n    --image=$REGION-docker.pkg.dev\u002F$PROJECT_ID\u002Fn8n-repo\u002Fn8n:latest \\\n    --region=$REGION\n```\n\n此过程通常需要 2–3 分钟，您的 n8n 实例会在 Cloud Run 切换到新容器时短暂中断。任何正在运行的工作流都会被中断，但一旦服务恢复，计划触发器将自动继续执行。\n\n### 方法 2：指定版本（可控的方式） ###\n\n如果您希望更谨慎地管理版本升级（推荐用于生产环境），可以指定确切的 n8n 版本：\n\n```bash\n# 拉取特定版本\ndocker pull n8nio\u002Fn8n:0.230.0  # 替换为您目标版本\n\n# 更新 Dockerfile 以使用该特定版本\n# FROM n8nio\u002Fn8n:0.230.0\n\n# 然后按照上述步骤重新构建并重新部署\n```\n\n这种方式允许您在正式升级之前先测试新版本。升级前请查看 n8n GitHub 发布页面，了解新功能和变更内容。\n\n### 数据库迁移 ###\n\n使用 PostgreSQL 与 n8n 结合的一大优势是自动数据库迁移。当您部署新版本时，n8n 将：\n\n1. 检测到数据库模式需要更新；\n2. 在启动时自动执行迁移；\n3. 
仅在数据库兼容时继续运行。\n\n因此，通常您无需担心数据库变更。不过，始终建议：\n\n* 在重大版本升级前备份数据库；\n* 查看发行说明，确认是否存在破坏性变更；\n* 如果有关键工作流，可在预发布环境中测试升级。\n\n我通常会在进行重大版本跳跃前，为我的 Cloud SQL 实例创建快照，以防万一。\n\n### 更新环境变量 ###\n\n有时您可能需要更新环境变量，而非直接更新容器：\n\n```bash\ngcloud run services update n8n \\\n    --region=$REGION \\\n    --update-env-vars=\"NEW_VARIABLE=value,UPDATED_VARIABLE=new_value\"\n```\n\n这在无需重建容器的情况下更改配置时非常有用。\n\n### 自动化提示\n\n您可以编写一个简单的 Shell 脚本，将这些命令整合在一起，只需一条命令即可更新您的 n8n 实例。我会将脚本保存在私有 GitHub 仓库中，与 Dockerfile 和启动脚本一起存放，方便随时随地维护和更新。\n\n定期更新可确保您的实例安全，并让您及时获得新节点和功能。n8n 团队通常每两周发布一次更新，因此对于大多数部署来说，每月检查一次是一个不错的频率。\n\n---\n\n## 成本估算：确实可以非常便宜 ##\n\n让我们谈谈成本。采用这种部署方式的主要原因之一就是其经济高效性。相比更为成熟的 Kubernetes 方案，这种自托管方式成本更低；即使与 n8n 最低付费层级相比，也最多只有一半的价格。以下是您每月可能需要支付的费用：\n\n### 普通模式（默认） ###\n\n**Google Cloud SQL (db-f1-micro)**：如果持续运行，每月约 £8.00。这是主要的成本驱动因素——一个基础的 PostgreSQL 实例，对于个人使用来说已经足够强大。\n\n**Google Cloud Run**：对于轻量级使用而言，几乎免费，因为其提供了慷慨的免费配额：\n\n* 每月 200 万次请求；\n* 36 万 GB-秒内存；\n* 18 万 vCPU-秒。\n\n通过将我们的配置设置为 `min-instances=0`，您的 n8n 容器在不使用时会完全关闭，在空闲期间几乎不产生任何费用。当它运行时，只会消耗您的免费配额。\n\n**Secret Manager、Artifact Registry 等**：这些附加服务也都提供免费配额，您很可能不会超出。\n\n**普通模式下预计每月总成本**：£2–£12\n\n### 队列模式的额外成本 ###\n\n队列模式会添加持久性基础设施，这些基础设施**不会**缩放到零：\n\n**Cloud Memorystore Redis（BASIC，1 GB）**：约£35–£45\u002F月。Redis 会持续运行，是队列模式中最大的开销。\n\n**n8n 工作器（Cloud Run 服务，最小 1 个实例）**：工作器通过 `min-instances=1` 和 `--no-cpu-throttling` 保持运行状态，因此会持续计费。在大多数地区，1 vCPU + 2 GiB 的配置下，费用约为£15–£25\u002F月。\n\n**队列模式的预计每月总成本**：约£55–£80\n\n当您大量使用 n8n，额外的容量和响应速度能够证明其成本合理性时，队列模式就值得采用。对于轻量级的个人使用，常规模式无疑是更好的选择。\n\n#### 如何降低成本 ####\n\n* **在非高峰时段安排维护**：如果您有批量处理数据的工作流，请将其安排在不活跃的时间段内执行。\n  \n* **高效使用 Webhook**：尽可能设计通过 Webhook 触发而非持续轮询的工作流。\n\n* **监控使用情况**：Google Cloud 提供了出色的使用情况仪表板——请在第一个月定期查看，以了解您的资源消耗模式。\n\n* **设置预算警报**：在 Google Cloud 中配置预算警报，以便在支出超过阈值时收到通知。\n\n* **队列模式的规模调整**：在队列模式下，先从 1 个工作器实例开始，只有在确实出现执行压力时才增加 `max-instances`。过度 provision 工作器是导致账单飙升的最快方式。\n\n哪些因素可能会推高成本？频繁运行 CPU 密集型工作流、在 PostgreSQL 
中存储大量数据，或为实例配置过多资源。\n\n但对于大多数个人自动化需求而言，这种配置以“咖啡钱”般的成本提供了企业级功能。我已使用这一配置运行数月，即使有数十个工作流在运行，我的账单也始终低于 £5。\n\n---\n\n## 故障排除 ##\n\n> **注意**：如果您使用 Terraform 进行部署，请参阅[Terraform 故障排除](#terraform-troubleshooting)部分，以获取与部署相关的具体问题。\n\n当不可避免地出现问题时，以下是常见问题及解决方法：\n\n1. 容器无法启动：\n\n    * 检查 Cloud Run 日志以获取具体的错误信息\n    * 确认 `DB_TYPE` 已设置为 “postgresdb”（而不是 “postgresql”）\n    * 确保 `QUEUE_HEALTH_CHECK_ACTIVE` 设置为 “true”\n    * 移除 `EXECUTIONS_PROCESS=main` 和 `EXECUTIONS_MODE=regular` 环境变量，因为这些变量在较新版本中已被弃用\n\n2. OAuth 重定向问题：\n\n    * 确保 `N8N_HOST`、`N8N_PORT` 和 `N8N_EDITOR_BASE_URL` 设置正确\n    * 验证 Google Cloud 控制台中的重定向 URI 是否与 n8n 生成的完全一致\n    * 确认 `N8N_PORT` 已设置为 443（而非 5678），以确保外部 URL 格式正确\n\n3. 数据库连接问题：\n\n    * 检查 Cloud SQL 连接的 `DB_POSTGRESDB_HOST` 格式\n    * 确保服务账户拥有 Cloud SQL Client 角色\n\n4. 节点触发问题：\n\n    * 对于较新版本的 n8n，请使用 `WEBHOOK_URL` 而不是 `N8N_WEBHOOK_URL`\n    * 添加代理跳转配置，通过设置 `N8N_PROXY_HOPS=1` 来实现，因为 Cloud Run 充当反向代理\n\n### 队列模式相关问题 ###\n\n5. 工作器未出现在 n8n 设置 → 工作器中：\n\n    * 确认主服务和工作器均将 `EXECUTIONS_MODE=queue` 设置正确\n    * 验证两个服务上的 `QUEUE_BULL_REDIS_HOST` 和 `QUEUE_BULL_REDIS_PORT` 是否正确设置——主机必须是 Memorystore 实例的私有 IP，而非主机名或公有地址\n    * 检查是否已从 Secret Manager 正确注入 `QUEUE_BULL_REDIS_PASSWORD`——缺失或错误的 AUTH 字符串会导致连接静默失败\n    * 确认两个 Cloud Run 服务均已启用 Direct VPC 出站流量（`--vpc-egress=private-ranges-only`），并且与 Memorystore 实例位于同一 VPC 网络中\n\n6. 执行任务一直停留在“等待”状态：\n\n    * 这意味着作业已入队，但没有工作器来处理它们\n    * 检查工作器 Cloud Run 服务的日志，查看是否有 Redis 连接错误\n    * 确保工作器服务设置了 `--min-instances=1`——如果工作器缩放到了零，作业将无限期等待\n    * 验证工作器正在运行 `n8n worker`（而非 `n8n start`）——检查命令覆盖设置\n\n7. 日志中出现“无法连接到 Redis”的错误：\n\n    * 确认 Memorystore 实例与 Cloud Run 服务位于同一 VPC 网络中\n    * 验证出错的 Cloud Run 服务是否已配置 Direct VPC 出站流量\n    * 检查 `redis.googleapis.com` 和 `compute.googleapis.com` API 是否已启用\n    * 尝试直接获取 Redis 主机 IP：`gcloud redis instances describe n8n-redis --region=$REGION --format=\"value(host)\"`，并确认其与环境变量中的值一致\n\n8. 
VPC 出站流量导致对外连接问题：\n\n    * `private-ranges-only` 出站设置仅允许 `10.x.x.x`、`172.16.x.x` 和 `192.168.x.x` 流量通过 VPC 路由——其他所有流量（包括来自工作流的外部 API 调用）仍会通过普通互联网路径传输，因此这通常不会影响大多数工作流。\n    * 如果确实遇到与外部服务的连接问题，请再次确认您使用的是 `private-ranges-only` 而不是 `all-traffic`。\n\n9. 队列模式下主 n8n 服务容器在启动时崩溃（`exit(1)`）：\n\n    * 当主服务（`n8n start`）在队列模式下设置了 `N8N_RUNNERS_ENABLED=true` 时，就会发生这种情况。主进程仅负责 UI、API 和 Webhook，而不执行工作流。设置该标志后，n8n 会在启动时急于启动一个任务运行程序，而该程序会在 HTTP 服务器准备就绪之前崩溃。Cloud Run 会报告健康检查失败，但真正的问题是应用程序日志中的 `exit(1)` 错误。\n    * 解决方法：不要在主服务上设置 `N8N_RUNNERS_ENABLED`。工作器需要它（它们会运行代码）。主服务则不需要。\n    * 要确认根本原因，请在失败的修订版上筛选 Cloud Logging 中的 `run.googleapis.com\u002Fstdout` 和 `run.googleapis.com\u002Fstderr`——n8n 的确切崩溃信息会显示在那里，比 Cloud Run 的系统事件更具有参考价值。\n\n---\n\n## Terraform 部署选项\n\n感谢社区的慷慨贡献，现在提供了一个 Terraform 配置，可自动完成本指南中描述的整个部署过程。此 Terraform 设置会预配所有必要的 Google Cloud 资源，包括 Cloud Run、Cloud SQL、Secret Manager、IAM 角色和 Artifact Registry。\n\n使用 Terraform 可简化并加速部署，尤其适合熟悉基础设施即代码的人士。Terraform 文件和部署脚本已包含在仓库中。\n\n### Terraform 快速部署\n\n克隆仓库并进入 terraform 目录：\n\n```bash\ngit clone \u003Cyour-repo-url>\ncd \u003Crepo-name>\u002Fterraform\n```\n\n初始化 Terraform：\n\n```tf\nterraform init\n```\n\n查看计划中的更改：\n\n```tf\nterraform plan\n```\n[如果已创建 `terraform.tfvars` 文件，可使用标志 `terraform plan -var-file=terraform.tfvars`]\n\n部署基础设施。\n\n**选项 A（推荐——官方镜像）：**\n\n```tf\nterraform apply\n```\n[如果已创建 `terraform.tfvars` 文件，可使用标志 `terraform apply -var-file=terraform.tfvars`]\n\n**选项 B（自定义镜像）：**\n\n```tf\nterraform apply -var=\"use_custom_image=true\"\n```\n\n或者在 `terraform.tfvars` 中：\n\n```hcl\nuse_custom_image = true  # 仅当您希望使用自定义镜像时\n```\n\n### Terraform 队列模式部署 ###\n\nTerraform 配置通过一个功能标志支持队列模式。当 `enable_queue_mode = true` 时，Terraform 还会额外预配以下资源：\n- 一个 **Cloud Memorystore Redis** 实例（私有、与 VPC 对等）\n- 一个带有内部入口且至少 1 个实例的 **Cloud Run 工作器服务** (`n8n-worker`)\n- Secret Manager 中的一个 **Redis AUTH 密钥**\n- 在两个 Cloud Run 服务上启用 **直接 VPC 出口**\n- 启用 `redis.googleapis.com` 和 `compute.googleapis.com` API\n\n#### 通过 CLI 标志启用队列模式：\n\n```bash\nterraform apply 
-var=\"enable_queue_mode=true\"\n```\n\n#### 或者在 `terraform.tfvars` 中：\n\n```hcl\ngcp_project_id    = \"your-project-id\"\nenable_queue_mode = true\n\n# 可选：调整 Redis 和工作器的规模\nredis_tier           = \"BASIC\"       # 或 \"STANDARD_HA\" 用于生产环境\nredis_memory_size_gb = 1\nworker_min_instances = 1\nworker_max_instances = 3\nworker_cpu           = \"1\"\nworker_memory        = \"2Gi\"\n\n# 可选：指定 VPC 网络\u002F子网，如果不使用默认 VPC\n# vpc_network    = \"default\"\n# vpc_subnetwork = \"\"   # 留空以自动选择\n```\n\n然后运行：\n\n```bash\nterraform plan -var-file=terraform.tfvars\nterraform apply -var-file=terraform.tfvars\n```\n\nTerraform 将输出：\n- `cloud_run_service_url` — 公开的 n8n 编辑器 URL\n- `cloud_run_worker_service_url` — 工作器服务的内部 URL（不可公开访问）\n- `redis_host` — Memorystore 实例的私有 IP 地址\n- `redis_port` — Redis 端口\n- `cloud_sql_connection_name` — Cloud SQL 连接名称\n\n> **关于预配时间的说明：** Cloud Memorystore 实例需要 5–10 分钟才能完成预配。Terraform 会等待该实例就绪后再创建工作器服务。总体而言，队列模式部署的 `terraform apply` 通常需要 15–20 分钟。\n\n#### 将现有部署升级到队列模式 ####\n\n如果您已经使用 Terraform 部署了标准（非队列）配置，并希望添加队列模式，只需在 `terraform.tfvars` 中添加 `enable_queue_mode = true`，然后再次运行 `terraform apply`。Terraform 会逐步添加新资源，而不会影响现有资源。\n\n```bash\n# 添加到现有的 terraform.tfvars\necho 'enable_queue_mode = true' >> terraform.tfvars\n\nterraform plan   # 检查将要添加的内容\nterraform apply  # 应用更改\n```\n\n### Terraform 故障排除 ###\n\n如果您在 Terraform 部署过程中遇到问题，尤其是在之前手动安装或 Terraform 运行失败之后，可能需要先清理现有资源。\n\n**常见的需要清理的情况：**\n- 您在发现 Terraform 选项之前已经按照手动步骤操作过\n- 之前的 Terraform 部署在构建过程中超时或失去连接\n- 您看到“资源已存在”的错误\n\n**清理步骤：**\n\n1. **移除 Terraform 状态文件**（如果状态文件已损坏）：\n\n```bash\ncd terraform\u002F\nrm -rf terraform.tfstate*\nrm -rf .terraform\u002F\n```\n\n2. 
**通过控制台或 CLI 删除现有的 Google Cloud 资源：**\n\n**Artifact Registry：**\n\n```bash\ngcloud artifacts repositories delete n8n-repo --location=$REGION\n```\n\n**Cloud SQL 实例：**\n\n```bash\ngcloud sql instances delete n8n-db\n```\n\n**密钥：**\n\n```bash\ngcloud secrets delete n8n-db-password\ngcloud secrets delete n8n-encryption-key\ngcloud secrets delete n8n-redis-auth  # 仅队列模式\n```\n\n**服务账户：**\n\n```bash\ngcloud iam service-accounts delete n8n-service-account@$PROJECT_ID.iam.gserviceaccount.com\n```\n\n**Cloud Run 服务：**\n\n```bash\ngcloud run services delete n8n --region=$REGION\ngcloud run services delete n8n-worker --region=$REGION  # 仅队列模式\n```\n\n**Cloud Memorystore Redis（仅队列模式）：**\n\n```bash\ngcloud redis instances delete n8n-redis --region=$REGION\n```\n\n3. **另一种方法：使用 Google Cloud 控制台**\n- 导航到每个服务（Cloud Run、Cloud SQL、Memorystore、Secret Manager、IAM、Artifact Registry）\n- 查找并删除与 Terraform 配置匹配的资源\n- 这种可视化方式更容易识别部分创建的资源。\n\n4. **重新运行 Terraform：**\n\n```tf\nterraform init\nterraform plan # 确认没有冲突\nterraform apply\n```\n\n**实用提示：** 如果您不确定哪些资源已被创建，可以查看 Terraform 配置文件，了解将要预配的资源的确切名称和类型。\n\n非常感谢 [@alliecatowo](https:\u002F\u002Fgithub.com\u002Falliecatowo) 的这一宝贵贡献！\n\n有关更多详细信息和使用说明，请参阅此仓库中的 `terraform\u002F` 目录。\n\n---\n\n就这样，您现在拥有一个完全可用的 n8n 实例，运行在 Google Cloud Run 上。您既享受到了自托管的所有优势，又无需为服务器管理操心。您的工作流稳定可靠，数据始终掌握在您手中，而且只需按实际用量付费。","# self-host-n8n-on-gcr 快速上手指南\n\n本指南帮助你在 Google Cloud Run 上以无服务器（Serverless）方式自托管 n8n 工作流自动化平台，并使用 PostgreSQL 持久化数据。该方案支持自动扩缩容，仅在运行时计费，适合个人及中小团队使用。\n\n## 环境准备\n\n在开始之前，请确保满足以下条件：\n\n*   **Google Cloud 账号**：拥有有效账号（新账号通常享有免费额度）。\n*   **gcloud CLI**：已安装并完成认证配置。\n    ```bash\n    gcloud auth login\n    gcloud config set project \u003C你的项目 ID>\n    ```\n*   **基础技能**：熟悉 Docker 基本概念及命令行操作。\n*   **可选依赖**：若选择自定义镜像方案（进阶），需本地安装 **Docker**。\n*   **域名（可选）**：生产环境建议绑定自定义域名。\n\n> **注意**：本指南主要面向国际版 Google Cloud 用户。国内开发者若访问 Google 服务受限，需自行配置网络代理或寻找替代云厂商方案。\n\n## 安装步骤\n\n### 第一步：初始化项目与启用 API\n\n设置环境变量并启用所需的 Google Cloud 服务：\n\n```bash\nexport 
PROJECT_ID=\"your-project-id\"\nexport REGION=\"europe-west2\"  # 根据需求选择区域\n\ngcloud services enable artifactregistry.googleapis.com\ngcloud services enable run.googleapis.com\ngcloud services enable sqladmin.googleapis.com\ngcloud services enable secretmanager.googleapis.com\n```\n\n### 第二步：准备 n8n 部署方案\n\nn8n 连接外部数据库时需要短暂的启动延迟以避免竞争条件。推荐以下两种方案：\n\n#### 方案 A：使用官方镜像（推荐）\n无需额外文件，通过在部署命令中覆盖启动参数来实现延迟。\n\n#### 方案 B：自定义 Docker 镜像（进阶）\n若需调试或定制启动逻辑，创建以下两个文件：\n\n**startup.sh**:\n```bash\n#!\u002Fbin\u002Fsh\n\nsleep 5\n\nif [ -n \"$PORT\" ]; then\n  export N8N_PORT=$PORT\nelif [ -z \"$N8N_PORT\" ]; then\n  export N8N_PORT=5678\nfi\n\necho \"Database settings:\"\necho \"DB_TYPE: $DB_TYPE\"\necho \"DB_POSTGRESDB_HOST: $DB_POSTGRESDB_HOST\"\necho \"DB_POSTGRESDB_PORT: $DB_POSTGRESDB_PORT\"\necho \"N8N_PORT: $N8N_PORT\"\n\nexec \u002Fdocker-entrypoint.sh\n```\n\n**Dockerfile**:\n```Dockerfile\nFROM docker.n8n.io\u002Fn8nio\u002Fn8n:latest\n\nCOPY startup.sh \u002F\nUSER root\nRUN chmod +x \u002Fstartup.sh\nUSER node\nEXPOSE 5678\n\nENTRYPOINT [\"\u002Fbin\u002Fsh\", \"\u002Fstartup.sh\"]\n```\n\n若选择方案 B，构建并推送镜像（务必指定 `linux\u002Famd64` 架构）：\n```bash\ngcloud artifacts repositories create n8n-repo \\\n    --repository-format=docker \\\n    --location=$REGION \\\n    --description=\"Repository for n8n workflow images\"\n\ngcloud auth configure-docker $REGION-docker.pkg.dev\n\ndocker build --platform linux\u002Famd64 -t $REGION-docker.pkg.dev\u002F$PROJECT_ID\u002Fn8n-repo\u002Fn8n:latest .\ndocker push $REGION-docker.pkg.dev\u002F$PROJECT_ID\u002Fn8n-repo\u002Fn8n:latest\n```\n\n### 第三步：创建 Cloud SQL PostgreSQL 数据库\n\n创建最低配置的数据库实例以节省成本：\n\n```bash\ngcloud sql instances create n8n-db \\\n    --database-version=POSTGRES_13 \\\n    --tier=db-f1-micro \\\n    --region=$REGION \\\n    --root-password=\"supersecure-rootpassword\" \\\n    --storage-size=10GB \\\n    --availability-type=ZONAL \\\n    --no-backup \\\n    --storage-type=HDD\n\ngcloud sql databases create n8n 
--instance=n8n-db\n\ngcloud sql users create n8n-user \\\n    --instance=n8n-db \\\n    --password=\"supersecure-userpassword\"\n```\n\n### 第四步：配置敏感信息密钥\n\n使用 Secret Manager 存储数据库密码和加密密钥，避免硬编码：\n\n```bash\necho -n \"supersecure-userpassword\" | \\\n    gcloud secrets create n8n-db-password \\\n    --data-file=- \\\n    --replication-policy=\"automatic\"\n\necho -n \"your-random-encryption-key\" | \\\n    gcloud secrets create n8n-encryption-key \\\n    --data-file=- \\\n    --replication-policy=\"automatic\"\n```\n> **重要**：请妥善保管 `n8n-encryption-key`，丢失后将无法解密已保存的凭证。\n\n### 第五步：创建服务账号并授权\n\n```bash\ngcloud iam service-accounts create n8n-service-account \\\n    --display-name=\"n8n Service Account\"\n\ngcloud secrets add-iam-policy-binding n8n-db-password \\\n    --member=\"serviceAccount:n8n-service-account@$PROJECT_ID.iam.gserviceaccount.com\" \\\n    --role=\"roles\u002Fsecretmanager.secretAccessor\"\n\ngcloud secrets add-iam-policy-binding n8n-encryption-key \\\n    --member=\"serviceAccount:n8n-service-account@$PROJECT_ID.iam.gserviceaccount.com\" \\\n    --role=\"roles\u002Fsecretmanager.secretAccessor\"\n\ngcloud projects add-iam-policy-binding $PROJECT_ID \\\n    --member=\"serviceAccount:n8n-service-account@$PROJECT_ID.iam.gserviceaccount.com\" \\\n    --role=\"roles\u002Fcloudsql.client\"\n```\n\n### 第六步：部署到 Cloud Run\n\n获取数据库连接名称：\n```bash\nexport SQL_CONNECTION=$(gcloud sql instances describe n8n-db --format=\"value(connectionName)\")\n```\n\n#### 执行部署（以方案 A 为例）\n\n```bash\ngcloud run deploy n8n \\\n    --image=docker.io\u002Fn8nio\u002Fn8n:latest \\\n    --command=\"\u002Fbin\u002Fsh\" \\\n    --args=\"-c,sleep 5; n8n start\" \\\n    --platform=managed \\\n    --region=$REGION \\\n    --allow-unauthenticated \\\n    --port=5678 \\\n    --cpu=1 \\\n    --memory=2Gi \\\n    --min-instances=0 \\\n    --max-instances=1 \\\n    --no-cpu-throttling \\\n    
--set-env-vars=\"N8N_PORT=5678,N8N_PROTOCOL=https,DB_TYPE=postgresdb,DB_POSTGRESDB_DATABASE=n8n,DB_POSTGRESDB_USER=n8n-user,DB_POSTGRESDB_HOST=\u002Fcloudsql\u002F$SQL_CONNECTION,DB_POSTGRESDB_PORT=5432\" \\\n    --add-cloudsql-instances=$SQL_CONNECTION \\\n    --service-account=n8n-service-account@$PROJECT_ID.iam.gserviceaccount.com \\\n    --set-secrets=\"N8N_ENCRYPTION_KEY=n8n-encryption-key:latest,DB_POSTGRESDB_PASSWORD=n8n-db-password:latest\"\n```\n\n*(若使用方案 B，请将 `--image` 替换为你的自定义镜像地址，并移除 `--command` 和 `--args` 参数)*\n\n## 基本使用\n\n1.  **获取访问地址**：\n    部署完成后，终端会输出 Service URL，例如：\n    `https:\u002F\u002Fn8n-xxxxxx-uc.a.run.app`\n\n2.  **初始化管理员账户**：\n    在浏览器打开上述链接，按照指引设置所有者邮箱和密码。\n\n3.  **配置 Google 服务 OAuth**：\n    *   进入 n8n 设置 -> \"Public API\" 或 \"OAuth2 Credentials\"。\n    *   由于运行在 Cloud Run，回调 URL 通常为 `https:\u002F\u002F\u003C你的域名>\u002Frest\u002Foauth2-credential\u002Fcallback`。\n    *   在 Google Cloud Console 中配置 OAuth 同意屏幕，并将上述回调地址填入授权重定向 URI。\n\n4.  **创建工作流**：\n    现在你可以像使用 SaaS 版一样创建自动化工作流，数据将持久化存储在 Cloud SQL 中，且无需支付月费。","某电商初创公司的运营团队需要每日自动同步 Google Sheets 中的订单数据至内部 CRM 系统，并触发邮件通知，但受限于预算和运维能力陷入两难。\n\n### 没有 self-host-n8n-on-gcr 时\n- **成本高昂**：使用 n8n 官方云服务需支付昂贵的月度订阅费，对于订单量波动大的初创公司，固定支出远超实际资源消耗。\n- **数据隐私担忧**：敏感的客户订单数据必须流经第三方服务器，无法满足公司对数据完全自主可控的合规要求。\n- **执行限制严格**：免费或低价套餐存在严格的任务执行次数限制，促销高峰期频繁触发限额导致自动化流程中断。\n- **运维负担重**：若尝试在传统虚拟机上自建，团队需耗费大量精力处理服务器维护、扩容及安全补丁更新。\n\n### 使用 self-host-n8n-on-gcr 后\n- **极致成本优化**：依托 Google Cloud Run 的按量付费模式，无请求时不产生费用，每月自动化成本低至几美元，真正实现了“比咖啡还便宜”。\n- **数据主权回归**：所有工作流数据持久化在自有的 Cloud SQL PostgreSQL 数据库中，确保核心业务数据不出私域环境。\n- **弹性无限扩展**：Serverless 架构自动应对流量洪峰，促销期间订单激增也能无缝处理，彻底告别执行次数焦虑。\n- **免运维部署**：通过 Terraform 脚本一键完成部署与更新，团队无需管理底层服务器，即可享受企业级的自动扩缩容能力。\n\nself-host-n8n-on-gcr 
让中小企业能以极低的边际成本，拥有数据完全可控且具备弹性伸缩能力的私有自动化工作流平台。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fdatawranglerai_self-host-n8n-on-gcr_0927f3c6.png","datawranglerai","datawrangler.AI","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002Fdatawranglerai_d812dafd.jpg",null,"UK","https:\u002F\u002Fgithub.com\u002Fdatawranglerai",[80,84,88],{"name":81,"color":82,"percentage":83},"HCL","#844FBA",83.2,{"name":85,"color":86,"percentage":87},"Shell","#89e051",16.1,{"name":89,"color":90,"percentage":91},"Dockerfile","#384d54",0.7,592,132,"2026-04-05T07:49:17","MIT",4,"Linux, macOS, Windows","不需要 GPU","最低 2GB (Cloud Run 配置)，推荐根据工作流复杂度增加",{"notes":101,"python":102,"dependencies":103},"该工具是基于 Google Cloud Run 的无服务器部署方案，而非本地运行。主要依赖 Docker 构建容器（若在 M1\u002FM2 Mac 上构建自定义镜像，需指定 --platform linux\u002Famd64），并使用 gcloud CLI 进行资源管理。数据库使用 Cloud SQL PostgreSQL，敏感信息通过 Secret Manager 管理。无需本地安装 Python 或特定深度学习库。","未说明 (基于 Docker 镜像，内部包含 Node.js 环境)",[104,105,106,107],"Docker","gcloud CLI","PostgreSQL (Cloud SQL)","n8n Docker Image",[14,13,15],[110,111,112,113,114,115,116,117,118,119,120,121,122,123],"google-cloud-platform","google-cloud-run","n8n","n8n-self-hosting","serverless","agentic-ai","ai","ai-agent-framework","ai-agents","managed-service","n8n-automation","terraform","terraform-cloud","terraformed","2026-03-27T02:49:30.150509","2026-04-09T21:41:49.475816",[127,132,137,142,147,151],{"id":128,"question_zh":129,"answer_zh":130,"source_url":131},26728,"部署后访问根路径出现 \"Cannot GET \u002F\" 错误怎么办？","这是常见配置问题，可尝试以下解决方案：\n1. 添加缺失的环境变量：\n   - EXECUTIONS_PROCESS=main\n   - EXECUTIONS_MODE=regular\n   - N8N_LOG_LEVEL=debug\n2. 增加启动探测延迟（initialDelaySeconds），建议设置为 120 秒，给容器更多启动时间。\n3. 检查是否错误设置了 N8N_PORT 环境变量。有用户反馈移除 N8N_PORT 变量或确保其值与容器端口一致（通常容器监听 5678，而 N8N_PORT 设为 443 可能导致冲突）可解决问题。\n4. 
确保 Docker 镜像中包含自定义启动脚本。","https:\u002F\u002Fgithub.com\u002Fdatawranglerai\u002Fself-host-n8n-on-gcr\u002Fissues\u002F1",{"id":133,"question_zh":134,"answer_zh":135,"source_url":136},26729,"容器为何会间歇性收到 SIGTERM 信号并重启？","这是 Cloud Run 在 min-instances=0 时的正常行为。机制如下：\n- Cloud Run 会将空闲实例保留最多 15 分钟以应对流量峰值。\n- 若 15 分钟内无请求，Cloud Run 会发送 SIGTERM 优雅关闭实例以节省成本。\n- 新请求到达时，会触发新的容器启动（即“冷启动”）。\n\n如果您需要运行定时触发器（Scheduled Triggers），容器不能缩容到 0，否则无法执行定时任务。Webhooks 通常不受影响，因为 HTTP 请求会唤醒容器。\n\n解决方案：将 min-instances 设置为 1，保持一个实例始终运行以避免冷启动和确保定时任务执行。或者使用外部调度器（如 Google Cloud Scheduler）定期调用 n8n 的 Webhook URL 来触发工作流。","https:\u002F\u002Fgithub.com\u002Fdatawranglerai\u002Fself-host-n8n-on-gcr\u002Fissues\u002F39",{"id":138,"question_zh":139,"answer_zh":140,"source_url":141},26730,"部署时报错 \"container failed to start and listen on the port\" 如何解决？","该错误通常表示容器未能成功启动或在指定端口监听，常见原因包括数据库配置错误或启动超时。解决步骤：\n1. 检查数据库连接配置（如 Postgres 的用户名、密码、主机地址等）是否正确。\n2. 添加环境变量 N8N_LOG_LEVEL=debug，将日志级别从 info 调整为 debug，以便在 Cloud Logging 中查看更详细的启动报错信息。\n3. 
如果使用的是 Terraform 部署失败，可以尝试手动使用 gcloud CLI 命令部署以排查差异，反之亦然。","https:\u002F\u002Fgithub.com\u002Fdatawranglerai\u002Fself-host-n8n-on-gcr\u002Fissues\u002F34",{"id":143,"question_zh":144,"answer_zh":145,"source_url":146},26731,"遇到 \"Cannot GET \u002F\" 错误时，N8N_PORT 环境变量应该如何设置？","在某些情况下，显式设置 N8N_PORT 环境变量会导致此错误。有用户通过直接从 YAML 配置或部署命令中移除 N8N_PORT 变量解决了问题。这是因为 n8n 默认会在容器内部监听 5678 端口，而 Cloud Run 会自动处理端口映射。如果强制设置 N8N_PORT=443 但容器实际未在该端口监听，就会引发路由错误。建议先尝试移除该变量，让 n8n 使用默认端口行为。","https:\u002F\u002Fgithub.com\u002Fdatawranglerai\u002Fself-host-n8n-on-gcr\u002Fissues\u002F11",{"id":148,"question_zh":149,"answer_zh":150,"source_url":141},26732,"如何在 Cloud Run 上正确配置 n8n 的数据库连接？","配置数据库连接时，需确保以下环境变量准确无误（以 PostgreSQL 为例）：\n- DB_TYPE=postgresdb\n- DB_POSTGRESDB_DATABASE=\u003C数据库名>\n- DB_POSTGRESDB_USER=\u003C用户名>\n- DB_POSTGRESDB_PASSWORD=\u003C密码>（注意不要硬编码在代码中，建议使用 Secret Manager）\n- DB_POSTGRESDB_HOST=\u002Fcloudsql\u002F\u003C项目 ID>:\u003C区域>:\u003C实例名>（使用 Cloud SQL Unix Socket）\n- DB_POSTGRESDB_PORT=5432\n- DB_POSTGRESDB_SCHEMA=public\n\n如果配置错误，容器将无法启动并报 \"failed to start\" 错误。建议开启 debug 日志（N8N_LOG_LEVEL=debug）来定位具体的连接失败原因。",{"id":152,"question_zh":153,"answer_zh":154,"source_url":136},26733,"为什么我的定时工作流（Scheduled Workflows）没有执行？","这通常是因为 Cloud Run 服务配置了 min-instances=0，导致在没有流量时容器被完全关闭。n8n 的定时触发器需要进程持续运行才能调度任务。\n\n解决方法：\n1. 将 Cloud Run 服务的 min-instances 设置为 1，确保至少有一个实例始终运行。\n2. 
如果为了节省成本不想常驻实例，可以使用外部调度服务（如 Google Cloud Scheduler）配置定时任务，定期向 n8n 的 Webhook URL 发送请求，从而间接触发工作流。",[156,161,166],{"id":157,"version":158,"summary_zh":159,"released_at":160},171951,"v3.1.0","## 简介\n\n本次发布新增了可选的队列模式支持，适用于在 Google Cloud Run 上使用 Redis、专用工作节点和 Direct VPC 出站流量配置的高负载 n8n 部署。\n\n## 亮点\n\n- 在 README 中添加了队列模式部署指南。\n- Terraform 支持 Redis、工作节点服务、VPC 出站流量以及队列专用的环境变量。\n- 新增 Redis 和工作节点的可见性输出。\n- 修复了 Cloud Run 推送\u002F来源问题、健康检查端点行为以及队列模式启动稳定性问题。\n\n## 重要提示\n\n- 队列模式默认关闭。\n- 现有的常规模式部署不受影响。\n- 启用队列模式会带来一定的基础成本，因为 Redis 和至少一个工作节点需要保持运行状态。\n- 主服务和工作节点服务必须运行相同版本的 n8n。\n\n## 升级建议\n\n- 现有用户无需更改，可继续使用常规模式。\n- 若要启用队列模式，请将 `enable_queue_mode` 设置为 `true`，并提供 VPC 网络\u002F子网值。\n- 请注意，Memorystore 的资源 provision 时间可能会比其他组件更长。","2026-03-25T22:51:43",{"id":162,"version":163,"summary_zh":164,"released_at":165},171952,"v3.0.0","# 🎉 重大更新 #\n\n## 简化部署（选项 A - 推荐） ##\n\n* 不再需要构建自定义 Docker 镜像\n\n* 使用官方 n8n 镜像并覆盖命令\n\n* 部署现在更加简单快捷\n\n* 基于 Google Cloud Run 团队的反馈与贡献\n\n## 可靠性提升 ##\n\n* 添加了 `--no-cpu-throttling` 标志以提升性能\n\n* 消除了间歇性的加载问题\n\n* 正确处理 n8n 的后台任务\n\n* 由于取消了按请求计费，实际成本更低\n\n## 🔧 新增内容 ##\n\n* **选项 A（官方镜像）**：使用 n8n 官方 Docker 镜像进行简化部署\n\n* **选项 B（自定义镜像）**：适用于自定义启动逻辑和调试的高级选项\n\n* 更新了 Terraform 配置以支持两种部署方式\n\n* 移除了已弃用的环境变量（`EXECUTIONS_PROCESS`、`EXECUTIONS_MODE`）\n\n* 添加了 `N8N_PROXY_HOPS=1` 以支持 Cloud Run 反向代理处理\n\n* 全面更新了文档\n\n## 🙏 致谢 ##\n\n特别感谢：\n\n* **@ryanpei 和 Google Cloud Run 团队** 提供的 `--no-cpu-throttling` 优化建议及简化部署方案\n\n* **[@terra-femme](https:\u002F\u002Fgithub.com\u002Fterra-femme)** 提供的 [视频教程](https:\u002F\u002Fyoutu.be\u002FbLDv07BR9Hw)\n\n* **[@alliecatowo](https:\u002F\u002Fgithub.com\u002Falliecatowo)** 对 Terraform 配置的贡献\n\n\n\n## ⚠️ 迁移说明 ##\n\n**现有用户：** 您当前的部署将继续正常运行。如需采用新方案：\n\n* 对于新部署：请使用选项 A（更简单）\n\n* 对于现有部署：您可以迁移到选项 A，或继续沿用当前配置\n\n* 详情请参阅更新后的 README 文件\n\n## 📦 包含内容 ##\n\n* 更新的 README，包含双选项部署指南\n* Terraform v2，支持切换官方镜像与自定义镜像\n","2025-10-07T22:04:10",{"id":167,"version":168,"summary_zh":169,"released_at":170},171953,"v2.0.0","## 新内容 🚀\n\n得益于社区的宝贵反馈，本次发布让部署体验更加顺畅、更友好。\n\n### 
新增 Terraform 自动化\n- **一键部署**：通过全新的 Terraform 配置，完全跳过手动配置步骤（特别感谢 [@alliecatowo](https:\u002F\u002Fgithub.com\u002Falliecatowo)！）\n- **全面清理指南**：再也不用为半完成的资源而烦恼\n- **清晰选择**：自动化部署还是逐步学习\n\n### 文档优化\n- **提前提示 Terraform**：用户现在可以在一开始就看到自动化选项，而不是在手动配置完成后才了解到它\n- **更完善的故障排除**：针对混合使用手动与 Terraform 的场景提供了具体指导\n- **更简洁的项目描述**：更新后与整篇文档的对话式风格保持一致\n\n### 为什么是 v2.0.0？\n这标志着用户部署方式的根本性转变——从仅支持手动操作，到提供完全自动化的选项。用户体验因此发生了显著变化，也更加出色。\n\n## 依然出色的地方 ☕\n- 保持原汁原味的“咖啡预算”定价模式\n- 完全掌控您的 n8n 实例\n- 为希望深入了解配置过程的用户提供详尽说明\n- 开箱即用的生产级配置\n\n## 欢迎反馈\n本次发布完全基于用户反馈驱动。请继续提出您的建议——它们能让这份指南对每个人都有所帮助！","2025-08-04T22:44:32"]