[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-HenriquesLab--ZeroCostDL4Mic":3,"tool-HenriquesLab--ZeroCostDL4Mic":64},[4,17,26,40,48,56],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":16},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,3,"2026-04-05T11:01:52",[13,14,15],"开发框架","图像","Agent","ready",{"id":18,"name":19,"github_repo":20,"description_zh":21,"stars":22,"difficulty_score":23,"last_commit_at":24,"category_tags":25,"status":16},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",107662,2,"2026-04-03T11:11:01",[13,14,15],{"id":27,"name":28,"github_repo":29,"description_zh":30,"stars":31,"difficulty_score":23,"last_commit_at":32,"category_tags":33,"status":16},2268,"ML-For-Beginners","microsoft\u002FML-For-Beginners","ML-For-Beginners 是由微软推出的一套系统化机器学习入门课程，旨在帮助零基础用户轻松掌握经典机器学习知识。这套课程将学习路径规划为 12 周，包含 26 节精炼课程和 52 
道配套测验，内容涵盖从基础概念到实际应用的完整流程，有效解决了初学者面对庞大知识体系时无从下手、缺乏结构化指导的痛点。\n\n无论是希望转型的开发者、需要补充算法背景的研究人员，还是对人工智能充满好奇的普通爱好者，都能从中受益。课程不仅提供了清晰的理论讲解，还强调动手实践，让用户在循序渐进中建立扎实的技能基础。其独特的亮点在于强大的多语言支持，通过自动化机制提供了包括简体中文在内的 50 多种语言版本，极大地降低了全球不同背景用户的学习门槛。此外，项目采用开源协作模式，社区活跃且内容持续更新，确保学习者能获取前沿且准确的技术资讯。如果你正寻找一条清晰、友好且专业的机器学习入门之路，ML-For-Beginners 将是理想的起点。",84991,"2026-04-05T10:45:23",[14,34,35,36,15,37,38,13,39],"数据工具","视频","插件","其他","语言模型","音频",{"id":41,"name":42,"github_repo":43,"description_zh":44,"stars":45,"difficulty_score":10,"last_commit_at":46,"category_tags":47,"status":16},3128,"ragflow","infiniflow\u002Fragflow","RAGFlow 是一款领先的开源检索增强生成（RAG）引擎，旨在为大语言模型构建更精准、可靠的上下文层。它巧妙地将前沿的 RAG 技术与智能体（Agent）能力相结合，不仅支持从各类文档中高效提取知识，还能让模型基于这些知识进行逻辑推理和任务执行。\n\n在大模型应用中，幻觉问题和知识滞后是常见痛点。RAGFlow 通过深度解析复杂文档结构（如表格、图表及混合排版），显著提升了信息检索的准确度，从而有效减少模型“胡编乱造”的现象，确保回答既有据可依又具备时效性。其内置的智能体机制更进一步，使系统不仅能回答问题，还能自主规划步骤解决复杂问题。\n\n这款工具特别适合开发者、企业技术团队以及 AI 研究人员使用。无论是希望快速搭建私有知识库问答系统，还是致力于探索大模型在垂直领域落地的创新者，都能从中受益。RAGFlow 提供了可视化的工作流编排界面和灵活的 API 接口，既降低了非算法背景用户的上手门槛，也满足了专业开发者对系统深度定制的需求。作为基于 Apache 2.0 协议开源的项目，它正成为连接通用大模型与行业专有知识之间的重要桥梁。",77062,"2026-04-04T04:44:48",[15,14,13,38,37],{"id":49,"name":50,"github_repo":51,"description_zh":52,"stars":53,"difficulty_score":10,"last_commit_at":54,"category_tags":55,"status":16},519,"PaddleOCR","PaddlePaddle\u002FPaddleOCR","PaddleOCR 是一款基于百度飞桨框架开发的高性能开源光学字符识别工具包。它的核心能力是将图片、PDF 等文档中的文字提取出来，转换成计算机可读取的结构化数据，让机器真正“看懂”图文内容。\n\n面对海量纸质或电子文档，PaddleOCR 解决了人工录入效率低、数字化成本高的问题。尤其在人工智能领域，它扮演着连接图像与大型语言模型（LLM）的桥梁角色，能将视觉信息直接转化为文本输入，助力智能问答、文档分析等应用场景落地。\n\nPaddleOCR 适合开发者、算法研究人员以及有文档自动化需求的普通用户。其技术优势十分明显：不仅支持全球 100 多种语言的识别，还能在 Windows、Linux、macOS 等多个系统上运行，并灵活适配 CPU、GPU、NPU 等各类硬件。作为一个轻量级且社区活跃的开源项目，PaddleOCR 既能满足快速集成的需求，也能支撑前沿的视觉语言研究，是处理文字识别任务的理想选择。",74913,"2026-04-05T10:44:17",[38,14,13,37],{"id":57,"name":58,"github_repo":59,"description_zh":60,"stars":61,"difficulty_score":23,"last_commit_at":62,"category_tags":63,"status":16},2471,"tesseract","tesseract-ocr\u002Ftesseract","Tesseract 
是一款历史悠久且备受推崇的开源光学字符识别（OCR）引擎，最初由惠普实验室开发，后由 Google 维护，目前由全球社区共同贡献。它的核心功能是将图片中的文字转化为可编辑、可搜索的文本数据，有效解决了从扫描件、照片或 PDF 文档中提取文字信息的难题，是数字化归档和信息自动化的重要基础工具。\n\n在技术层面，Tesseract 展现了强大的适应能力。从版本 4 开始，它引入了基于长短期记忆网络（LSTM）的神经网络 OCR 引擎，显著提升了行识别的准确率；同时，为了兼顾旧有需求，它依然支持传统的字符模式识别引擎。Tesseract 原生支持 UTF-8 编码，开箱即用即可识别超过 100 种语言，并兼容 PNG、JPEG、TIFF 等多种常见图像格式。输出方面，它灵活支持纯文本、hOCR、PDF、TSV 等多种格式，方便后续数据处理。\n\nTesseract 主要面向开发者、研究人员以及需要构建文档处理流程的企业用户。由于它本身是一个命令行工具和库（libtesseract），不包含图形用户界面（GUI），因此最适合具备一定编程能力的技术人员集成到自动化脚本或应用程序中",73286,"2026-04-03T01:56:45",[13,14],{"id":65,"github_repo":66,"name":67,"description_en":68,"description_zh":69,"ai_summary_zh":70,"readme_en":71,"readme_zh":72,"quickstart_zh":73,"use_case_zh":74,"hero_image_url":75,"owner_login":76,"owner_name":77,"owner_avatar_url":78,"owner_bio":79,"owner_company":80,"owner_location":80,"owner_email":80,"owner_twitter":76,"owner_website":81,"owner_url":82,"languages":83,"stars":111,"forks":112,"last_commit_at":113,"license":114,"difficulty_score":115,"env_os":116,"env_gpu":117,"env_ram":118,"env_deps":119,"category_tags":123,"github_topics":80,"view_count":10,"oss_zip_url":80,"oss_zip_packed_at":80,"status":16,"created_at":124,"updated_at":125,"faqs":126,"releases":155},472,"HenriquesLab\u002FZeroCostDL4Mic","ZeroCostDL4Mic","ZeroCostDL4Mic: A Google Colab based no-cost toolbox to explore Deep-Learning in Microscopy","ZeroCostDL4Mic 是一个基于 Google Colab 的开源工具箱，旨在帮助显微镜领域的研究人员快速入门并应用深度学习技术。它通过预配置的 Jupyter Notebook 提供图形化操作界面，用户无需编写代码即可完成数据预处理、模型训练和结果分析等全流程任务。所有计算资源由 Google Colab 免费提供，用户无需购买硬件或配置本地环境即可运行复杂模型。\n\n这一工具解决了传统深度学习应用中常见的两大难题：高昂的计算成本和复杂的编程门槛。对于显微图像处理需求（如细胞分割、图像增强等），研究人员可以基于已集成的主流网络模型（如 U-Net、StarDist 等）直接调用预训练参数，或通过简单参数调整完成自定义训练。其设计特别适合生物医学、材料科学等领域的科研人员，尤其是缺乏编程经验但需要处理显微图像数据的实验人员。\n\n工具的核心亮点包括：1）完全零成本运行，依托云端算力；2）可视化操作界面降低使用难度；3）支持多种经典深度学习架构；4）提供示例数据集和操作指南。项目由多个国际实验室联合开发，已通过《Nature Communications》论文验证其有效性，配套的 W","ZeroCostDL4Mic 是一个基于 Google Colab 的开源工具箱，旨在帮助显微镜领域的研究人员快速入门并应用深度学习技术。它通过预配置的 Jupyter Notebook 
提供图形化操作界面，用户无需编写代码即可完成数据预处理、模型训练和结果分析等全流程任务。所有计算资源由 Google Colab 免费提供，用户无需购买硬件或配置本地环境即可运行复杂模型。\n\n这一工具解决了传统深度学习应用中常见的两大难题：高昂的计算成本和复杂的编程门槛。对于显微图像处理需求（如细胞分割、图像增强等），研究人员可以基于已集成的主流网络模型（如 U-Net、StarDist 等）直接调用预训练参数，或通过简单参数调整完成自定义训练。其设计特别适合生物医学、材料科学等领域的科研人员，尤其是缺乏编程经验但需要处理显微图像数据的实验人员。\n\n工具的核心亮点包括：1）完全零成本运行，依托云端算力；2）可视化操作界面降低使用难度；3）支持多种经典深度学习架构；4）提供示例数据集和操作指南。项目由多个国际实验室联合开发，已通过《Nature Communications》论文验证其有效性，配套的 Wiki 页面包含完整教程和案例演示。无论是初学者还是希望快速验证模型效果的研究者，都能通过这一平台高效开展显微图像分析工作。","# ZeroCostDL4Mic: exploiting Google Colab to develop a free and open-source toolbox for Deep-Learning in microscopy\n\n_**Tl;dr**_: this [**wiki page**][wikiPage] has everything you need to get started.\n\n[![DOI](https:\u002F\u002Fzenodo.org\u002Fbadge\u002F239971181.svg)](https:\u002F\u002Fzenodo.org\u002Fbadge\u002Flatestdoi\u002F239971181)\n\n## What is this?\n\nZeroCostDL4Mic is a collection of self-explanatory Jupyter Notebooks for [**Google Colab**][1] that features an **easy-to-use graphical user interface**. They are meant to quickly get you started on learning to use deep-learning for microscopy. Google Colab itself **provides the computations resources needed at no-cost**. 
**ZeroCostDL4Mic** is designed for researchers that have little or no coding expertise to quickly test, train and use popular Deep-Learning networks.\n\n## Want to see a short video demonstration?\n\n| Running a ZeroCostDL4Mic notebook | Example data in ZeroCostDL4Mic | Romain's talk @ Aurox conference | Talk @ SPAOM |\n|:-:|:-:|:-:|:-:|\n| [![](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHenriquesLab_ZeroCostDL4Mic_readme_ff6137d571e7.jpg)](https:\u002F\u002Fyoutu.be\u002FTrDuidvO85s) | [![](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHenriquesLab_ZeroCostDL4Mic_readme_d61910eaafc3.jpg)](https:\u002F\u002Fyoutu.be\u002FKauKEr0Kkkc) | [![](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHenriquesLab_ZeroCostDL4Mic_readme_5937624864f1.jpg)](https:\u002F\u002Fyoutu.be\u002FrCEbYOnNJp0) | [![](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHenriquesLab_ZeroCostDL4Mic_readme_8b9528ecf189.jpg)](https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=ozffChqtgJw) |\n\n## Who is it for?\n\nAny researcher interested in microscopy, independent of their background training. **ZeroCostDL4Mic** is designed for anyone with little or no coding expertise to quickly test, train and use popular Deep-Learning networks used to process microscopy data.\n\n## Acknowledgements\n\nThis project initiated as a collaboration between the [**Jacquemet**][6] and [**Henriques**][5] laboratories, considerably expanding with the help of laboratories spread across the planet. There is a long list of contributors associated with the project acknowledged in our [**related paper**](https:\u002F\u002Fwww.nature.com\u002Farticles\u002Fs41467-021-22518-0) and the [**wiki page**][wikiPageContributors].\n\n## How to cite this work\n\n_Lucas von Chamier*,  Romain F. 
Laine*, Johanna Jukkala,  Christoph Spahn, Daniel Krentzel, Elias Nehme,  Martina Lerche, Sara Hernández-pérez,  Pieta Mattila,  Eleni Karinou,  Séamus Holden, Ahmet Can Solak,  Alexander Krull,  Tim-Oliver Buchholz,  Martin L Jones,  Loic Alain Royer,  Christophe Leterrier, Yoav Shechtman,  Florian Jug,  Mike Heilemann,  Guillaume Jacquemet,  Ricardo Henriques. \n**Democratising deep learning for microscopy with ZeroCostDL4Mic. Nature Communications, 2021.** \nDOI: [https:\u002F\u002Fdoi.org\u002F10.1038\u002Fs41467-021-22518-0](https:\u002F\u002Fwww.nature.com\u002Farticles\u002Fs41467-021-22518-0)_\n\n[![](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHenriquesLab_ZeroCostDL4Mic_readme_3fa75ea5826d.png)](https:\u002F\u002Fwww.nature.com\u002Farticles\u002Fs41467-021-22518-0)\n\n  [1]: https:\u002F\u002Fcolab.research.google.com\u002Fnotebooks\u002Fintro.ipynb\n  [2]: https:\u002F\u002Ftwitter.com\u002Fguijacquemet\n  [3]: https:\u002F\u002Ftwitter.com\u002FLaineBioImaging\n  [4]: https:\u002F\u002Ftwitter.com\u002FHenriquesLab\n  [5]: https:\u002F\u002Fhenriqueslab.github.io\u002F\n  [6]: https:\u002F\u002Fcellmig.org\u002F\n  [7]: https:\u002F\u002Fgithub.com\u002FHenriquesLab\u002FZeroCostDL4Mic\u002Fblob\u002Fmaster\u002FWiki_files\u002FColabPaperFigure1_v4.png\n  [8]: https:\u002F\u002Fgithub.com\u002FHenriquesLab\u002FZeroCostDL4Mic\u002Fblob\u002Fmaster\u002FWiki_files\u002FVideoDemoScreenshot1.png\n  [wikiPage]: https:\u002F\u002Fgithub.com\u002FHenriquesLab\u002FDeepLearning_Collab\u002Fwiki\n  [wikiPageContributors]: https:\u002F\u002Fgithub.com\u002FHenriquesLab\u002FZeroCostDL4Mic\u002Fwiki#contributors\n","# ZeroCostDL4Mic：利用 Google Colab 开发用于显微成像 (microscopy) Deep-Learning (深度学习) 的免费开源工具箱\n\n_**简而言之**_：这个 [**Wiki 页面**][wikiPage] 包含了你入门所需的一切。\n\n[![DOI](https:\u002F\u002Fzenodo.org\u002Fbadge\u002F239971181.svg)](https:\u002F\u002Fzenodo.org\u002Fbadge\u002Flatestdoi\u002F239971181)\n\n## 这是什么？\n\nZeroCostDL4Mic 是一组面向 [**Google Colab**][1] 
的自解释 **Jupyter Notebooks (Jupyter 笔记本)**，具有**易于使用的图形用户界面 (GUI)**。旨在帮助您快速开始学习使用深度学习进行显微成像分析。Google Colab 本身**免费提供所需的计算资源 (computations resources)**。**ZeroCostDL4Mic** 专为那些编程经验很少或没有编程经验的科研人员设计，以便他们能够快速测试、训练并使用流行的深度学习网络来处理显微数据。\n\n## 想看简短的视频演示吗？\n\n| 运行 ZeroCostDL4Mic 笔记本 | ZeroCostDL4Mic 中的示例数据 | Romain 在 Aurox 会议上的演讲 | SPAOM 会议演讲 |\n|:-:|:-:|:-:|:-:|\n| [![](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHenriquesLab_ZeroCostDL4Mic_readme_ff6137d571e7.jpg)](https:\u002F\u002Fyoutu.be\u002FTrDuidvO85s) | [![](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHenriquesLab_ZeroCostDL4Mic_readme_d61910eaafc3.jpg)](https:\u002F\u002Fyoutu.be\u002FKauKEr0Kkkc) | [![](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHenriquesLab_ZeroCostDL4Mic_readme_5937624864f1.jpg)](https:\u002F\u002Fyoutu.be\u002FrCEbYOnNJp0) | [![](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHenriquesLab_ZeroCostDL4Mic_readme_8b9528ecf189.jpg)](https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=ozffChqtgJw) |\n\n## 它适合谁？\n\n任何对显微成像感兴趣的研究人员，无论其背景培训如何。**ZeroCostDL4Mic** 专为那些编程经验很少或没有编程经验的人员设计，以便他们能够快速测试、训练并使用流行的深度学习网络来处理显微数据。\n\n## 致谢\n\n本项目最初由 [**Jacquemet**][6] 和 [**Henriques**][5] 实验室合作发起，并在全球各地实验室的帮助下不断扩大规模。与项目相关的贡献者名单可在我们的 [**相关论文**](https:\u002F\u002Fwww.nature.com\u002Farticles\u002Fs41467-021-22518-0) 和 [**Wiki 页面**][wikiPageContributors] 中找到。\n\n## 如何引用此工作\n\n_Lucas von Chamier*,  Romain F. 
Laine*, Johanna Jukkala,  Christoph Spahn, Daniel Krentzel, Elias Nehme,  Martina Lerche, Sara Hernández-pérez,  Pieta Mattila,  Eleni Karinou,  Séamus Holden, Ahmet Can Solak,  Alexander Krull,  Tim-Oliver Buchholz,  Martin L Jones,  Loic Alain Royer,  Christophe Leterrier, Yoav Shechtman,  Florian Jug,  Mike Heilemann,  Guillaume Jacquemet,  Ricardo Henriques._ \n**利用 ZeroCostDL4Mic 实现显微成像深度学习的民主化。Nature Communications, 2021.** \nDOI: [https:\u002F\u002Fdoi.org\u002F10.1038\u002Fs41467-021-22518-0](https:\u002F\u002Fwww.nature.com\u002Farticles\u002Fs41467-021-22518-0)\n\n[![](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHenriquesLab_ZeroCostDL4Mic_readme_3fa75ea5826d.png)](https:\u002F\u002Fwww.nature.com\u002Farticles\u002Fs41467-021-22518-0)\n\n  [1]: https:\u002F\u002Fcolab.research.google.com\u002Fnotebooks\u002Fintro.ipynb\n  [2]: https:\u002F\u002Ftwitter.com\u002Fguijacquemet\n  [3]: https:\u002F\u002Ftwitter.com\u002FLaineBioImaging\n  [4]: https:\u002F\u002Ftwitter.com\u002FHenriquesLab\n  [5]: https:\u002F\u002Fhenriqueslab.github.io\u002F\n  [6]: https:\u002F\u002Fcellmig.org\u002F\n  [7]: https:\u002F\u002Fgithub.com\u002FHenriquesLab\u002FZeroCostDL4Mic\u002Fblob\u002Fmaster\u002FWiki_files\u002FColabPaperFigure1_v4.png\n  [8]: https:\u002F\u002Fgithub.com\u002FHenriquesLab\u002FZeroCostDL4Mic\u002Fblob\u002Fmaster\u002FWiki_files\u002FVideoDemoScreenshot1.png\n  [wikiPage]: https:\u002F\u002Fgithub.com\u002FHenriquesLab\u002FDeepLearning_Collab\u002Fwiki\n  [wikiPageContributors]: https:\u002F\u002Fgithub.com\u002FHenriquesLab\u002FZeroCostDL4Mic\u002Fwiki#contributors","# ZeroCostDL4Mic 快速上手指南\n\n## 简介\nZeroCostDL4Mic 是一个基于 **Google Colab** 的免费开源工具箱，专为显微镜图像处理中的深度学习应用设计。它提供易用的图形界面，无需本地编程基础即可快速测试、训练和使用流行的深度学习网络。\n\n## 环境准备\n本工具完全托管于云端，**无需在本地安装 Python 或深度学习框架**。\n*   **硬件要求**: 无（计算资源由 Google Colab 免费提供）。\n*   **软件要求**: 支持 JavaScript 的现代网页浏览器（推荐 Chrome 或 Edge）。\n*   **账号要求**: 有效的 Google 账号（用于登录 Colab）。\n*   **网络环境**: 需能稳定访问 Google 
服务。**强烈建议使用网络加速工具**以确保访问 Google Colab 和 GitHub 的稳定性。\n\n## 安装步骤\n本工具无需传统命令行安装，主要通过以下方式获取：\n\n### 方式一：通过 Wiki 页面（推荐）\n直接访问官方 Wiki 页面获取预配置的 Colab 笔记本链接，这是最快捷的使用方式。\n```bash\n# 请在浏览器中打开以下地址\nhttps:\u002F\u002Fgithub.com\u002FHenriquesLab\u002FDeepLearning_Collab\u002Fwiki\n```\n\n### 方式二：克隆代码库（可选）\n如需查看或修改底层代码，可克隆仓库到本地（注意：实际运行仍需依赖 Colab 环境）。\n```bash\ngit clone https:\u002F\u002Fgithub.com\u002FHenriquesLab\u002FZeroCostDL4Mic.git\n```\n\n## 基本使用\n1.  **进入 Wiki 页面**: 访问上述 Wiki 链接。\n2.  **选择任务**: 根据需求（如分割、去噪、超分辨率等）在列表中选择合适的 Notebook。\n3.  **启动 Colab**: 点击 Notebook 旁的 **\"Open in Colab\"** 按钮，在新标签页中打开云端环境。\n4.  **上传数据**: 在左侧文件栏中上传您的显微镜图像数据。\n5.  **运行代码**: 按照 Notebook 内的说明，依次运行单元格（Cells）以加载模型、训练或推理。\n6.  **下载结果**: 处理完成后，从左侧文件栏下载生成的结果文件。\n\n---\n*引用*: Lucas von Chamier et al., *Nature Communications*, 2021. DOI: [https:\u002F\u002Fdoi.org\u002F10.1038\u002Fs41467-021-22518-0](https:\u002F\u002Fwww.nature.com\u002Farticles\u002Fs41467-021-22518-0)","生物实验室的研究生李明负责处理大量荧光显微镜图像，急需利用深度学习技术实现高精度的细胞核自动分割。\n\n### 没有 ZeroCostDL4Mic 时\n- 本地电脑显卡性能不足，无法运行复杂的卷积神经网络模型，计算效率极低。\n- 配置 Python 环境和 CUDA 驱动耗时漫长，且频繁出现依赖库版本冲突报错。\n- 租用云服务器算力成本高昂，GPU 租赁费用严重超出课题组有限的科研预算。\n- 缺乏编程基础，面对 GitHub 上的原始代码难以独立部署、修改和调试。\n\n### 使用 ZeroCostDL4Mic 后\n- ZeroCostDL4Mic 基于 Google Colab 提供云端免费 GPU 资源，彻底解决了本地硬件瓶颈问题。\n- 图形化界面引导操作，无需编写复杂代码即可快速训练和测试分割模型，上手极快。\n- 内置多种成熟网络架构，一键加载示例数据即可验证算法在显微图像上的实际效果。\n- 完全开源免费，零成本完成从数据预处理到模型推理的全流程工作流，极大提升实验进度。\n\nZeroCostDL4Mic 成功降低了深度学习门槛，让非计算机背景的研究者也能轻松驾驭前沿图像处理技术。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHenriquesLab_ZeroCostDL4Mic_7c57918d.png","HenriquesLab","AI-driven Optical Biology Laboratory","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002FHenriquesLab_b7d7ad51.png","Ricardo Henriques' research laboratory",null,"https:\u002F\u002Fhenriqueslab.org","https:\u002F\u002Fgithub.com\u002FHenriquesLab",[84,88,92,96,99,102,105,108],{"name":85,"color":86,"percentage":87},"Jupyter 
Notebook","#DA5B0B",99.7,{"name":89,"color":90,"percentage":91},"Python","#3572A5",0.2,{"name":93,"color":94,"percentage":95},"Dockerfile","#384d54",0,{"name":97,"color":98,"percentage":95},"Shell","#89e051",{"name":100,"color":101,"percentage":95},"R","#198CE7",{"name":103,"color":104,"percentage":95},"HTML","#e34c26",{"name":106,"color":107,"percentage":95},"ImageJ Macro","#99AAFF",{"name":109,"color":110,"percentage":95},"Makefile","#427819",634,142,"2026-03-29T06:40:12","MIT",1,"Google Colab (云端)","未说明 (由 Google Colab 免费提供计算资源)","未说明",{"notes":120,"python":118,"dependencies":121},"该工具专为 Google Colab 设计，无需本地安装环境。通过网页浏览器访问即可使用图形界面进行深度学习训练和测试。适合无编程经验的研究者。详细依赖和配置请参考项目 Wiki 页面。",[122],"未说明 (详见 Wiki 页面)",[14,37],"2026-03-27T02:49:30.150509","2026-04-06T05:17:54.117514",[127,132,136,141,145,150],{"id":128,"question_zh":129,"answer_zh":130,"source_url":131},1845,"是否可以在明场活细胞图像上进行细胞追踪？","可以。基于教程可以使用 YOLO 识别细胞，并进一步进行追踪。维护者已更新了 SplineDist Notebook 以支持此类任务，建议参考最新的 Notebook 版本进行操作。","https:\u002F\u002Fgithub.com\u002FHenriquesLab\u002FZeroCostDL4Mic\u002Fissues\u002F65",{"id":133,"question_zh":134,"answer_zh":135,"source_url":131},1846,"处理多帧堆栈图像时遇到内存溢出（OOM）如何解决？","避免使用 `X = list(map(imread,Y))` 一次性加载所有文件。建议改为循环逐个读取文件，例如 `X = imread(Y[0])`，并正确设置 `n_channel` 和 `axis_norm`。这样可显著降低内存占用，防止因同时加载多个堆栈导致的崩溃。",{"id":137,"question_zh":138,"answer_zh":139,"source_url":140},1847,"训练时报错 `patch_size` 负值或大于数据形状怎么办？","确保选择的 `patch_size` 能被 8 整除，且其数值不能大于数据形状（datas[0].shape）的对应维度。错误提示会指出具体哪个维度不匹配，需调整 patch_size 参数或检查输入数据的切片数量是否正确。","https:\u002F\u002Fgithub.com\u002FHenriquesLab\u002FZeroCostDL4Mic\u002Fissues\u002F29",{"id":142,"question_zh":143,"answer_zh":144,"source_url":140},1848,"预测大尺寸 3D 图像时程序崩溃或内存不足如何处理？","StarDist 在拼接瓦片（tiles）时需要大量 RAM。建议使用 StarDist 最新版本中的 `n_tiles` 参数进行分块预测，系统会自动拼接而无需额外内存。对于非极大值抑制步骤，如果候选对象过多也会消耗内存，可将图像分割为更小的立方体分别运行。",{"id":146,"question_zh":147,"answer_zh":148,"source_url":149},1849,"UNET3D 训练过程中断或报错的原因是什么？","常见原因是源文件夹（source）和目标文件夹（target）中的图像文件名不一致，或存在多余的非图像文件（如 
Mac 的 .DS_store）。请确保两个文件夹内的图像数量相同且名称完全匹配，否则会导致训练中断。","https:\u002F\u002Fgithub.com\u002FHenriquesLab\u002FZeroCostDL4Mic\u002Fissues\u002F238",{"id":151,"question_zh":152,"answer_zh":153,"source_url":154},1850,"U-Net 分割结果模糊或不准确该如何排查？","这通常与训练数据的组织方式有关。请将源图像放入 `source` 文件夹，掩膜放入 `mask` 文件夹，且这两个文件夹应位于同一个父目录下（例如 'My Drive'）。请参考 Wiki 页面确认数据下载和目录结构是否符合要求。","https:\u002F\u002Fgithub.com\u002FHenriquesLab\u002FZeroCostDL4Mic\u002Fissues\u002F5",[156,161,166,171,176,181,186,191,196,201,206,211,216,221,226,231,236,241,246,251],{"id":157,"version":158,"summary_zh":159,"released_at":160},101336,"v1.13","\u003Cimg width=\"932\" alt=\"Screenshot 2021-07-02 at 07 48 30\" src=\"https:\u002F\u002Fuser-images.githubusercontent.com\u002F21193399\u002F124232773-ffef6600-db09-11eb-8561-48756731949a.png\">\r\n","2021-07-02T06:50:08",{"id":162,"version":163,"summary_zh":164,"released_at":165},101337,"1.12.2","Zenodo Webhook.\r\n\u003Cimg width=\"1057\" alt=\"Screenshot 2021-02-16 at 17 25 40\" src=\"https:\u002F\u002Fuser-images.githubusercontent.com\u002F21193399\u002F108099066-0bbb1e00-707c-11eb-8c55-d479cc4cc4b6.png\">\r\n","2021-02-16T17:26:33",{"id":167,"version":168,"summary_zh":169,"released_at":170},101338,"v1.12","\u003Cimg width=\"1050\" alt=\"Screenshot 2021-01-20 at 11 47 11\" src=\"https:\u002F\u002Fuser-images.githubusercontent.com\u002F21193399\u002F105170866-79c00400-5b15-11eb-8258-1cb1dd47f2b8.png\">\r\n","2021-01-20T11:49:17",{"id":172,"version":173,"summary_zh":174,"released_at":175},101339,"v1.11","\u003Cimg width=\"1055\" alt=\"Screenshot 2020-11-13 at 12 25 11\" src=\"https:\u002F\u002Fuser-images.githubusercontent.com\u002F21193399\u002F99072152-62134f00-25ab-11eb-8a69-61ef3c112982.png\">\r\n","2020-11-13T12:26:21",{"id":177,"version":178,"summary_zh":179,"released_at":180},101340,"v1.10","\u003Cimg width=\"1239\" alt=\"Screenshot 2020-08-15 at 14 06 45\" 
src=\"https:\u002F\u002Fuser-images.githubusercontent.com\u002F21193399\u002F90312995-9cb01180-df00-11ea-83c3-58d8b3ccdde0.png\">\r\n","2020-08-15T13:07:40",{"id":182,"version":183,"summary_zh":184,"released_at":185},101341,"v1.9","![Screen Shot 2020-08-07 at 14 18 00](https:\u002F\u002Fuser-images.githubusercontent.com\u002F21193399\u002F89649551-e1142f80-d8b8-11ea-9f2f-156db25fb6a8.png)\r\n","2020-08-07T13:18:57",{"id":187,"version":188,"summary_zh":189,"released_at":190},101342,"v1.8","\u003Cimg width=\"1064\" alt=\"Screen Shot 2020-08-05 at 16 28 20\" src=\"https:\u002F\u002Fuser-images.githubusercontent.com\u002F21193399\u002F89432243-c6b34800-d738-11ea-8ed6-03822b7cefde.png\">\r\n","2020-08-05T15:31:28",{"id":192,"version":193,"summary_zh":194,"released_at":195},101343,"v1.7","![Screen Shot 2020-06-25 at 11 30 11](https:\u002F\u002Fuser-images.githubusercontent.com\u002F21193399\u002F85703561-62cc4700-b6d7-11ea-9fa4-0210ffaf027e.png)\r\n![Screen Shot 2020-06-25 at 11 29 58](https:\u002F\u002Fuser-images.githubusercontent.com\u002F21193399\u002F85703567-64960a80-b6d7-11ea-9c3a-cc84d312a186.png)\r\n","2020-06-25T10:32:08",{"id":197,"version":198,"summary_zh":199,"released_at":200},101344,"v1.6","![Screen Shot 2020-06-04 at 18 59 27](https:\u002F\u002Fuser-images.githubusercontent.com\u002F21193399\u002F83794224-977b4e80-a695-11ea-9bb5-ad112a05957f.png)\r\n","2020-06-04T18:00:22",{"id":202,"version":203,"summary_zh":204,"released_at":205},101345,"v1.5.1","v1.5.1: Release fixing upload issues of DeepSTORM notebook to GitHub\r\n\u003Cimg width=\"1002\" alt=\"Screen Shot 2020-05-12 at 16 18 05\" src=\"https:\u002F\u002Fuser-images.githubusercontent.com\u002F21193399\u002F81713303-97f54080-946d-11ea-99b1-be9b31de7c5e.png\">\r\n","2020-05-12T15:28:45",{"id":207,"version":208,"summary_zh":209,"released_at":210},101346,"v1.5","v1.5: Transfer learning and first release of DeepSTORM as beta\r\n\u003Cimg width=\"1002\" alt=\"Screen Shot 2020-05-12 at 16 18 05\" 
src=\"https:\u002F\u002Fuser-images.githubusercontent.com\u002F21193399\u002F81712208-426c6400-946c-11ea-80df-ff90f8f534ff.png\">\r\n","2020-05-12T15:19:04",{"id":212,"version":213,"summary_zh":214,"released_at":215},101347,"v1.4.1","![Screen Shot 2020-04-28 at 16 42 19](https:\u002F\u002Fuser-images.githubusercontent.com\u002F21193399\u002F80507942-50e85500-896f-11ea-908b-e9e0e0d0db2f.png)\r\n","2020-04-28T15:44:52",{"id":217,"version":218,"summary_zh":219,"released_at":220},101348,"v1.4","ZeroCostDL4Mic v1.4 release\r\n\u003Cimg width=\"1002\" alt=\"Screen Shot 2020-04-24 at 20 24 44\" src=\"https:\u002F\u002Fuser-images.githubusercontent.com\u002F21193399\u002F80249396-aa980900-8669-11ea-8656-084e98f93901.png\">\r\n\r\n\r\nMajor changes:\r\nHyperparameters fail-safes have been added to all notebooks. \r\nThe implementation of SSIM from SciKit is now used. PSNR is now also calculated during quality control.\r\nIn the 3D notebooks (including fnet), the quality assessment metrics are now calculated on a slice-by-slice basis and not from the maximum intensity projection anymore.\r\n\r\nIn the CARE notebooks:\r\n- The new default batch_size is 16 (instead of 64 previously).\r\n\r\nIn the Sartdist 2D notebook: \r\n- The new default patch_size is automatically defined as the size of the images to analyse. 
\r\n- Stardist grid_size is now modifiable as an advanced parameter.\r\n\r\nIn the CARE 3D and Noise2VOID3D notebooks, the users can now modify the numbers of tile to use when performing the prediction.\r\n\r\n+ minor modifications.","2020-04-24T19:25:18",{"id":222,"version":223,"summary_zh":224,"released_at":225},101349,"v1.3","This release now includes the LICENSE.txt and the ReadMe.txt files, as well as the User manual (as PDF).\r\n\r\n![Screen Shot 2020-04-17 at 11 05 18](https:\u002F\u002Fuser-images.githubusercontent.com\u002F21193399\u002F79557986-66ba6800-809b-11ea-8cfc-b35edad1b8c5.png)\r\n","2020-04-17T10:07:58",{"id":227,"version":228,"summary_zh":229,"released_at":230},101350,"v1.2","\u003Cimg width=\"995\" alt=\"Screen Shot 2020-04-13 at 16 29 05\" src=\"https:\u002F\u002Fuser-images.githubusercontent.com\u002F21193399\u002F79133465-fa421f00-7da3-11ea-9949-a05f5b88f325.png\">\r\n","2020-04-13T15:30:07",{"id":232,"version":233,"summary_zh":234,"released_at":235},101351,"v1.1","![Screen Shot 2020-03-27 at 19 48 39](https:\u002F\u002Fuser-images.githubusercontent.com\u002F21193399\u002F77794564-08820280-7064-11ea-8da2-b9b00bb3457b.png)\r\n","2020-03-27T19:49:37",{"id":237,"version":238,"summary_zh":239,"released_at":240},101352,"v1.0","First official release!\r\n\u003Cimg width=\"1002\" alt=\"Screen Shot 2020-03-20 at 10 47 18\" src=\"https:\u002F\u002Fuser-images.githubusercontent.com\u002F21193399\u002F77156926-4c8b6b00-6a98-11ea-9727-dd36beb85f58.png\">\r\n","2020-03-20T10:48:36",{"id":242,"version":243,"summary_zh":244,"released_at":245},101353,"v0.8","Improvement on fnet, better consistency across notebooks, including advanced settings, correct default settings for most apart fnet and Unet.\r\n\u003Cimg width=\"309\" alt=\"Screen Shot 2020-03-14 at 11 43 07\" 
src=\"https:\u002F\u002Fuser-images.githubusercontent.com\u002F21193399\u002F76681256-07bf8a00-65e9-11ea-960e-98c56e997f7b.png\">\r\n","2020-03-14T11:43:43",{"id":247,"version":248,"summary_zh":249,"released_at":250},101354,"v.07","First batch of feedback, functionality and readability improvements. Bug fixes in fnet.","2020-03-12T08:28:41",{"id":252,"version":253,"summary_zh":254,"released_at":255},101355,"v0.6","Now including fnet and U-net upgrade","2020-03-06T11:39:30"]