[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-DiffSharp--DiffSharp":3,"tool-DiffSharp--DiffSharp":64},[4,17,27,35,43,56],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":16},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,3,"2026-04-05T11:01:52",[13,14,15],"开发框架","图像","Agent","ready",{"id":18,"name":19,"github_repo":20,"description_zh":21,"stars":22,"difficulty_score":23,"last_commit_at":24,"category_tags":25,"status":16},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",138956,2,"2026-04-05T11:33:21",[13,15,26],"语言模型",{"id":28,"name":29,"github_repo":30,"description_zh":31,"stars":32,"difficulty_score":23,"last_commit_at":33,"category_tags":34,"status":16},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",107662,"2026-04-03T11:11:01",[13,14,15],{"id":36,"name":37,"github_repo":38,"description_zh":39,"stars":40,"difficulty_score":23,"last_commit_at":41,"category_tags":42,"status":16},3704,"NextChat","ChatGPTNextWeb\u002FNextChat","NextChat 是一款轻量且极速的 AI 助手，旨在为用户提供流畅、跨平台的大模型交互体验。它完美解决了用户在多设备间切换时难以保持对话连续性，以及面对众多 AI 模型不知如何统一管理的痛点。无论是日常办公、学习辅助还是创意激发，NextChat 都能让用户随时随地通过网页、iOS、Android、Windows、MacOS 或 Linux 端无缝接入智能服务。\n\n这款工具非常适合普通用户、学生、职场人士以及需要私有化部署的企业团队使用。对于开发者而言，它也提供了便捷的自托管方案，支持一键部署到 Vercel 或 Zeabur 等平台。\n\nNextChat 的核心亮点在于其广泛的模型兼容性，原生支持 Claude、DeepSeek、GPT-4 及 Gemini Pro 等主流大模型，让用户在一个界面即可自由切换不同 AI 能力。此外，它还率先支持 MCP（Model Context Protocol）协议，增强了上下文处理能力。针对企业用户，NextChat 提供专业版解决方案，具备品牌定制、细粒度权限控制、内部知识库整合及安全审计等功能，满足公司对数据隐私和个性化管理的高标准要求。",87618,"2026-04-05T07:20:52",[13,26],{"id":44,"name":45,"github_repo":46,"description_zh":47,"stars":48,"difficulty_score":23,"last_commit_at":49,"category_tags":50,"status":16},2268,"ML-For-Beginners","microsoft\u002FML-For-Beginners","ML-For-Beginners 是由微软推出的一套系统化机器学习入门课程，旨在帮助零基础用户轻松掌握经典机器学习知识。这套课程将学习路径规划为 12 周，包含 26 节精炼课程和 52 道配套测验，内容涵盖从基础概念到实际应用的完整流程，有效解决了初学者面对庞大知识体系时无从下手、缺乏结构化指导的痛点。\n\n无论是希望转型的开发者、需要补充算法背景的研究人员，还是对人工智能充满好奇的普通爱好者，都能从中受益。课程不仅提供了清晰的理论讲解，还强调动手实践，让用户在循序渐进中建立扎实的技能基础。其独特的亮点在于强大的多语言支持，通过自动化机制提供了包括简体中文在内的 50 
50 language versions, including Simplified Chinese, dramatically lowering the learning barrier for users from different backgrounds worldwide. The project also follows an open-source collaboration model, with an active community and continuously updated content, so learners get current and accurate material. If you are looking for a clear, friendly, and professional route into machine learning, ML-For-Beginners is an ideal starting point.",84991,"2026-04-05T10:45:23",[14,51,52,53,15,54,26,13,55],"Data Tool","Video","Plugin","Other","Audio",{"id":57,"name":58,"github_repo":59,"description_zh":60,"stars":61,"difficulty_score":10,"last_commit_at":62,"category_tags":63,"status":16},3128,"ragflow","infiniflow\u002Fragflow","RAGFlow is a leading open-source retrieval-augmented generation (RAG) engine that builds a more accurate, reliable context layer for large language models. It combines state-of-the-art RAG techniques with agent capabilities: it not only extracts knowledge efficiently from all kinds of documents, but also lets models reason and execute tasks on top of that knowledge.\n\nHallucination and stale knowledge are common pain points in LLM applications. By deeply parsing complex document structures (tables, charts, and mixed layouts), RAGFlow significantly improves retrieval accuracy, curbing fabricated answers and keeping responses both grounded and current. Its built-in agent mechanism goes further: the system can not only answer questions but also plan its own steps to solve complex problems.\n\nThe tool fits developers, enterprise engineering teams, and AI researchers. Whether you want to stand up a private knowledge-base Q&A system quickly or are working to bring LLMs into vertical domains, RAGFlow has something to offer. It provides a visual workflow-orchestration interface and flexible APIs, lowering the bar for users without an algorithms background while meeting professional developers' needs for deep customization. Released under the Apache 2.0 license, it is becoming an important bridge between general-purpose LLMs and proprietary domain knowledge.",77062,"2026-04-04T04:44:48",[15,14,13,26,54],{"id":65,"github_repo":66,"name":67,"description_en":68,"description_zh":69,"ai_summary_zh":69,"readme_en":70,"readme_zh":71,"quickstart_zh":72,"use_case_zh":73,"hero_image_url":74,"owner_login":67,"owner_name":67,"owner_avatar_url":75,"owner_bio":76,"owner_company":77,"owner_location":77,"owner_email":77,"owner_twitter":77,"owner_website":78,"owner_url":79,"languages":80,"stars":101,"forks":102,"last_commit_at":103,"license":104,"difficulty_score":105,"env_os":106,"env_gpu":107,"env_ram":108,"env_deps":109,"category_tags":115,"github_topics":116,"view_count":23,"oss_zip_url":77,"oss_zip_packed_at":77,"status":16,"created_at":124,"updated_at":125,"faqs":126,"releases":155},1386,"DiffSharp\u002FDiffSharp","DiffSharp","DiffSharp: Differentiable Functional Programming","DiffSharp is a tensor library designed for differentiable programming, built to streamline development in machine learning, probabilistic programming, and complex optimization. At its core it solves the problem of differentiating automatically, efficiently, and accurately when building complex models; in particular it supports higher-order nested differentiation and mixed-mode differentiation, so developers can handle deeply nested mathematical operations with ease.\n\nThe library suits researchers and developers who need a high degree of flexibility. If you favor the functional programming paradigm or are exploring cutting-edge differentiable algorithms, DiffSharp offers a robust environment based on F#; it also accommodates deep-learning habits by adopting PyTorch-like naming and idioms, integrating efficient LibTorch CUDA\u002FC++ tensors underneath with native GPU acceleration.\n\nDiffSharp's distinctive strength is nested differentiation: users can differentiate code at any level of nesting, something traditional frameworks often struggle to do. It runs on Linux, macOS, and Windows, and supports interactive notebooks in Jupyter and Visual Studio Code. Fully open source, DiffSharp is led by experts in automatic differentiation and bridges theoretical research and engineering practice.","\u003Cdiv align=\"left\">\n  \u003Ca href=\"https:\u002F\u002Fdiffsharp.github.io\"> \u003Cimg height=\"80px\" src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FDiffSharp_DiffSharp_readme_5f6558e5dd70.png\">\u003C\u002Fa>\n\u003C\u002Fdiv>\n\n-----------------------------------------\n\n[![Build Status](https:\u002F\u002Fgithub.com\u002FDiffSharp\u002FDiffSharp\u002Fworkflows\u002FBuild\u002Ftest\u002Fdocs\u002Fpublish\u002Fbadge.svg)](https:\u002F\u002Fgithub.com\u002FDiffSharp\u002FDiffSharp\u002Factions)\n[![Coverage Status](https:\u002F\u002Fcoveralls.io\u002Frepos\u002Fgithub\u002FDiffSharp\u002FDiffSharp\u002Fbadge.svg?branch=)](https:\u002F\u002Fcoveralls.io\u002Fgithub\u002FDiffSharp\u002FDiffSharp?branch=)\n\nThis is the development branch of DiffSharp 1.0.\n\n> **NOTE: This branch is undergoing development. It has incomplete code, functionality, and design that are likely to change without notice; when using TorchSharp backend, only x64 platform is currently supported out of the box, see [DEVGUIDE.md] for more details.**\n\nDiffSharp is a tensor library with support for [differentiable programming](https:\u002F\u002Fen.wikipedia.org\u002Fwiki\u002FDifferentiable_programming). 
It is designed for use in machine learning, probabilistic programming, optimization and other domains.\n\n**Key features**\n\n* Nested and mixed-mode differentiation\n* Common optimizers, model elements, differentiable probability distributions\n* F# for robust functional programming\n* PyTorch familiar naming and idioms, efficient LibTorch CUDA\u002FC++ tensors with GPU support\n* Linux, macOS, Windows supported\n* Use interactive notebooks in Jupyter and Visual Studio Code\n* 100% open source\n\n## Documentation\n\nYou can find the documentation [here](https:\u002F\u002Fdiffsharp.github.io\u002F), including information on installation and getting started.\n\nRelease notes can be found [here](https:\u002F\u002Fgithub.com\u002FDiffSharp\u002FDiffSharp\u002Fblob\u002Fdev\u002FRELEASE_NOTES.md).\n\n## Communication\n\nPlease use [GitHub issues](https:\u002F\u002Fgithub.com\u002FDiffSharp\u002FDiffSharp\u002Fissues) to share bug reports, feature requests, installation issues, suggestions etc.\n\n## Contributing\n\nWe welcome all contributions.\n\n* Bug fixes: if you encounter a bug, please open an [issue](https:\u002F\u002Fgithub.com\u002FDiffSharp\u002FDiffSharp\u002Fissues) describing the bug. If you are planning to contribute a bug fix, please feel free to do so in a pull request.\n* New features: if you plan to contribute new features, please first open an [issue](https:\u002F\u002Fgithub.com\u002FDiffSharp\u002FDiffSharp\u002Fissues) to discuss the feature before creating a pull request.\n\n## The Team\n\nDiffSharp is developed by [Atılım Güneş Baydin](http:\u002F\u002Fwww.robots.ox.ac.uk\u002F~gunes\u002F), [Don Syme](https:\u002F\u002Fwww.microsoft.com\u002Fen-us\u002Fresearch\u002Fpeople\u002Fdsyme\u002F) and other contributors, having started as a project supervised by the automatic differentiation wizards [Barak Pearlmutter](https:\u002F\u002Fscholar.google.com\u002Fcitations?user=AxFrw0sAAAAJ&hl=en) and [Jeffrey Siskind](https:\u002F\u002Fscholar.google.com\u002Fcitations?user=CgSBtPYAAAAJ&hl=en).\n\n## License\n\nDiffSharp is licensed under the BSD 2-Clause \"Simplified\" License, which you can find in the [LICENSE](https:\u002F\u002Fgithub.com\u002FDiffSharp\u002FDiffSharp\u002Fblob\u002Fdev\u002FLICENSE) file in this repository. 
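\n\n## Example\n\nAn editorial sketch of the nested differentiation listed under key features, not taken from the upstream README: it differentiates a function whose body itself calls `dsharp.diff` over a closure-captured variable, the classic nested-AD case. `dsharp.diff` and `dsharp.tensor` follow the API shown in the project documentation:\n\n```fsharp\nopen DiffSharp\n\n\u002F\u002F Nested derivative: d\u002Fdx [ x * (d\u002Fdy (x * y) at y = 2) ] = d\u002Fdx [x * x] = 2x\nlet d =\n    dsharp.diff\n        (fun x -> x * dsharp.diff (fun y -> x * y) (dsharp.tensor 2.0))\n        (dsharp.tensor 3.0)\n\n\u002F\u002F d = tensor(6.)\nprintfn \"%A\" d\n```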
\n","\u003Cdiv align=\"左\">\n  \u003Ca href=\"https:\u002F\u002Fdiffsharp.github.io\"> \u003Cimg height=\"80px\" src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FDiffSharp_DiffSharp_readme_5f6558e5dd70.png\">\u003C\u002Fa>\n\u003C\u002Fdiv>\n\n-----------------------------------------\n\n[![构建状态](https:\u002F\u002Fgithub.com\u002FDiffSharp\u002FDiffSharp\u002Fworkflows\u002FBuild\u002Ftest\u002Fdocs\u002Fpublish\u002Fbadge.svg)](https:\u002F\u002Fgithub.com\u002FDiffSharp\u002FDiffSharp\u002Factions)\n[![覆盖率状态](https:\u002F\u002Fcoveralls.io\u002Frepos\u002Fgithub\u002FDiffSharp\u002FDiffSharp\u002Fbadge.svg?branch=)](https:\u002F\u002Fcoveralls.io\u002Fgithub\u002FDiffSharp\u002FDiffSharp?branch=)\n\n这是 DiffSharp 1.0 的开发分支。\n\n> **注意：本分支仍在开发中。其代码、功能和设计尚不完善，可能会在未事先通知的情况下进行调整；在使用 TorchSharp 后端时，目前仅支持 x64 平台，详情请参阅 [DEVGUIDE.md]。**\n\nDiffSharp 是一个张量库，支持【可微编程】（https:\u002F\u002Fen.wikipedia.org\u002Fwiki\u002FDifferentiable_programming）。它专为机器学习、概率编程、优化及其他领域而设计。\n\n**关键特性**\n\n* 嵌套式与混合模式的求导\n* 常用的优化器、模型元素以及可微概率分布\n* 采用 F# 实现稳健的函数式编程\n* 采用 PyTorch 熟悉的命名方式与惯用法，同时提供高效的 LibTorch CUDA\u002FC++ 张量，并支持 GPU 加速\n* 支持 Linux、macOS 和 Windows\n* 可在 Jupyter 和 Visual Studio Code 中使用交互式笔记本\n* 100% 开源\n\n## 文档\n\n您可以在【这里】（https:\u002F\u002Fdiffsharp.github.io\u002F）找到文档，其中包含有关安装及快速入门的相关信息。\n\n版本发布说明可在【这里】（https:\u002F\u002Fgithub.com\u002FDiffSharp\u002FDiffSharp\u002Fblob\u002Fdev\u002FRELEASE_NOTES.md）查阅。\n\n## 沟通\n\n如需分享错误报告、功能请求、安装问题或建议等，请使用【GitHub 问题】（https:\u002F\u002Fgithub.com\u002FDiffSharp\u002FDiffSharp\u002Fissues）。\n\n## 贡献\n\n我们欢迎所有贡献者。\n\n* 错误修复：如果您发现任何问题，请打开一个【问题】（https:\u002F\u002Fgithub.com\u002FDiffSharp\u002FDiffSharp\u002Fissues），详细描述该问题。若您计划提交错误修复方案，请随时通过拉取请求的方式参与贡献。\n* 新功能开发：若您计划贡献新功能，请先在【问题】（https:\u002F\u002Fgithub.com\u002FDiffSharp\u002FDiffSharp\u002Fissues）上发起讨论，再创建拉取请求。\n\n## 团队\n\nDiffSharp 由【Atılım Güneş Baydin】（http:\u002F\u002Fwww.robots.ox.ac.uk\u002F~gunes\u002F）、【Don Syme】（https:\u002F\u002Fwww.microsoft.com\u002Fen-us\u002Fresearch\u002Fpeople\u002Fdsyme）以及其他贡献者共同开发，最初由自动微分领域的专家【Barak Pearlmutter】（https:\u002F\u002Fscholar.google.com\u002Fcitations?user=AxFrw0sAAAAJ&hl=en）和【Jeffrey Siskind】（https:\u002F\u002Fscholar.google.com\u002Fcitations?user=CgSBtPYAAAAJ&hl=en）负责监督的项目起步。\n\n## 许可证\n\nDiffSharp 采用 BSD 2-Clause “Simplified” 许可证，您可在本仓库的【LICENSE】（https:\u002F\u002Fgithub.com\u002FDiffSharp\u002FDiffSharp\u002Fblob\u002Fdev\u002FLICENSE）文件中找到相关条款。","# DiffSharp 快速上手指南\n\nDiffSharp 是一个支持**可微分编程**的张量库，专为机器学习、概率编程和优化领域设计。它基于 F# 语言，提供嵌套及混合模式求导功能，并兼容 PyTorch 的命名习惯与 LibTorch 后端（支持 GPU 加速）。\n\n## 环境准备\n\n在开始之前，请确保您的开发环境满足以下要求：\n\n*   **操作系统**：Linux, macOS 或 Windows。\n*   **运行时环境**：安装 [.NET SDK](https:\u002F\u002Fdotnet.microsoft.com\u002Fdownload) (建议最新版本)。\n*   **GPU 支持 (可选)**：\n    *   若需使用 CUDA 加速，请确保已安装适配的 NVIDIA 驱动和 CUDA Toolkit。\n    *   **注意**：当前开发分支在使用 TorchSharp 后端时，默认仅支持 **x64** 平台。\n*   **开发工具 (可选)**：推荐使用 Visual Studio Code (配合 Ionide-F# 插件) 或 Jupyter Notebook (.NET Interactive) 进行交互式开发。\n\n## 安装步骤\n\n### 1. 创建新项目\n打开终端，运行以下命令创建一个新的 F# 控制台应用：\n\n```bash\ndotnet new console -lang F# -n DiffSharpDemo\ncd DiffSharpDemo\n```\n\n### 2. 
\n\n## Installation\n\n### 1. Create a new project\nOpen a terminal and run the following commands to create a new F# console app:\n\n```bash\ndotnet new console -lang F# -n DiffSharpDemo\ncd DiffSharpDemo\n```\n\n### 2. Add the DiffSharp packages\nInstall the core library and backend support according to your needs.\n\n**Basic install (CPU):**\n```bash\ndotnet add package DiffSharp\n```\n\n**With GPU support (LibTorch\u002FCUDA):**\nIf you need GPU acceleration, also install the corresponding backend package (choose it to match your CUDA version):\n```bash\n# Example: install the LibTorch backend with CUDA support\ndotnet add package DiffSharp.Backends.LibTorch\n# Note: check NuGet for the latest package names and variants, e.g. DiffSharp.Backends.LibTorch.Cuda-11.x\n```\n\n> **Tip**: If NuGet downloads are slow, you can specify a source explicitly or configure a mirror (e.g. Tencent Cloud\u002FAliyun) in nuget.config:\n> ```bash\n> dotnet add package DiffSharp --source https:\u002F\u002Fapi.nuget.org\u002Fv3\u002Findex.json\n> ```\n\n## Basic usage\n\nThe minimal example below shows how to define a tensor, run a computation, and differentiate it automatically.\n\nCreate or edit `Program.fs` with the following code:\n\n```fsharp\nopen DiffSharp\n\n\u002F\u002F Configure the backend (Reference is the CPU default; use Backend.Torch if a LibTorch package is installed)\ndsharp.config(backend=Backend.Reference)\n\n\u002F\u002F 1. Create a tensor\nlet x = dsharp.tensor [ 1.0; 2.0; 3.0 ]\n\n\u002F\u002F 2. Define a differentiable scalar-valued function: f(x) = sum(x^2 + 2x)\nlet f (x: Tensor) = (x * x + 2.0 * x).sum()\n\n\u002F\u002F 3. Evaluate the function\nlet y = f x\nprintfn \"f(x): %A\" y\n\n\u002F\u002F 4. Automatic differentiation: the gradient df\u002Fdx = 2x + 2\nlet g = dsharp.grad f x\nprintfn \"df\u002Fdx: %A\" g\n\n\u002F\u002F Expected output:\n\u002F\u002F f(x): tensor(26.)\n\u002F\u002F df\u002Fdx: tensor([4., 6., 8.])\n```\n\nNote that `dsharp.grad` expects a scalar-valued function, which is why `f` reduces its result with `.sum()`.\n\n### Run the project\n\nFrom the terminal, run:\n\n```bash\ndotnet run\n```\n\nYou should see the function value and the gradient computed by automatic differentiation. You have now run your first DiffSharp program; a sketch of nested differentiation, DiffSharp's signature capability, follows below.
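\n\n### Going further: nested differentiation\n\nThe sketch below takes a second derivative by applying forward-mode differentiation twice. It assumes `dsharp.diff` (the derivative of a scalar-to-scalar function, as described in the project documentation); the function `f` is just an illustration:\n\n```fsharp\nopen DiffSharp\n\n\u002F\u002F A scalar-to-scalar function: f(x) = sin(x^2)\nlet f (x: Tensor) = sin (x * x)\n\nlet x0 = dsharp.tensor 1.5\n\n\u002F\u002F First derivative via forward-mode AD\nlet d1 = dsharp.diff f x0\n\n\u002F\u002F Second derivative by nesting diff inside itself\nlet d2 = dsharp.diff (dsharp.diff f) x0\n\nprintfn \"f'(x0): %A\" d1\nprintfn \"f''(x0): %A\" d2\n```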
","A quantitative finance team is building a set of high-frequency trading strategies. They need probabilistic models based on complex stochastic differential equations, with real-time gradients over high-dimensional parameters to optimize the risk-return profile.\n\n### Without DiffSharp\n- Developers had to switch between Python and C++, deriving complex formulas by hand and writing backpropagation code manually, which easily introduced calculation errors.\n- Faced with nested stochastic processes, traditional frameworks struggled to support higher-order or mixed-mode differentiation, so models were oversimplified and missed subtle nonlinear market features.\n- Debugging functional logic was painful without type-safety guarantees; dimension mismatches and numeric overflow often surfaced only at runtime.\n- Iteration cycles were long: every change to the model structure meant recompiling low-level code, dragging down strategy validation and rollout.\n\n### With DiffSharp\n- F#'s strongly typed functional style expresses the mathematics directly, and DiffSharp handles every differentiation detail, eliminating the risk of hand-derived gradient errors.\n- Its distinctive nested and mixed-mode differentiation made higher-order derivatives of complex probability distributions straightforward, noticeably improving how well models fit market volatility.\n- The team enjoys a PyTorch-like development experience with the GPU-accelerated LibTorch backend behind it: concise code and high-performance computation at once.\n- Interactive exploration in Jupyter notebooks shows results as soon as the model changes, shrinking the idea-to-validation cycle from days to hours.\n\nBy weaving automatic differentiation into the functional programming paradigm, DiffSharp lets researchers focus on the mathematical model rather than the low-level implementation, unlocking real productivity for complex probabilistic programming.","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FDiffSharp_DiffSharp_5f6558e5.png","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002FDiffSharp_8c3e8a3f.png","Differentiable Functional Programming",null,"http:\u002F\u002Fdiffsharp.github.io\u002FDiffSharp\u002F","https:\u002F\u002Fgithub.com\u002FDiffSharp",[81,85,89,93,97],{"name":82,"color":83,"percentage":84},"F#","#b845fc",89.9,{"name":86,"color":87,"percentage":88},"HTML","#e34c26",9.5,{"name":90,"color":91,"percentage":92},"R","#198CE7",0.6,{"name":94,"color":95,"percentage":96},"Python","#3572A5",0.1,{"name":98,"color":99,"percentage":100},"Dockerfile","#384d54",0,614,72,"2026-03-19T08:12:06","BSD-2-Clause",4,"Linux, macOS, Windows","Not required (CPU is supported); GPU use needs an NVIDIA card with the LibTorch CUDA backend; the development branch currently supports only x64 out of the box","Not specified",{"notes":110,"python":111,"dependencies":112},"The tool is primarily written in F# in a functional style, with PyTorch-inspired syntax. This is the 1.0 development branch: code and functionality are incomplete and may change at any time. With the TorchSharp backend, only x64 platforms are supported out of the box. See DEVGUIDE.md for the detailed development guide.","Not specified (primarily F#-based)",[113,114],"TorchSharp (LibTorch backend)",".NET SDK \u002F F#",[13],[117,118,119,120,121,122,123],"machine-learning","tensor","dotnet","autodiff","gpu","deep-learning","neural-network","2026-03-27T02:49:30.150509","2026-04-06T05:19:53.403235",[127,132,137,141,146,151],{"id":128,"question_zh":129,"answer_zh":130,"source_url":131},6363,"How do I configure DiffSharp on Windows to use CUDA\u002FGPU correctly?","Do not use the `DiffSharp-cuda-windows` bundle directly: it has a broken dependency (it mistakenly depends on the Linux build of libtorch). Instead, install the following NuGet packages manually:\n1. `DiffSharp.Backends.Torch` (e.g. version 1.0.0-preview-243323135)\n2. `DiffSharp.Core` (matching version)\n3. `libtorch-cuda-10.2-win-x64` (e.g. version 1.5.6)\n\nAdding these dependencies by hand enables CUDA\u002FGPU support on Windows.","https:\u002F\u002Fgithub.com\u002FDiffSharp\u002FDiffSharp\u002Fissues\u002F169",{"id":133,"question_zh":134,"answer_zh":135,"source_url":136},6364,"Why does hyperparameter optimization (e.g. a dynamic learning rate) sometimes hurt training or drive the learning rate toward zero?","If you apply the paper's hypergradient-based learning-rate adjustment directly while minibatch data varies sharply, the learning rate can be pushed far too low (e.g. down to 0.00003), which stalls learning. The method tends to be unstable when the data distribution differs substantially between minibatches.\n\nSuggested strategy:\n1. Do not expect an adaptive algorithm to beat the best fixed value found by manual grid search in every case.\n2. Test several initial learning rates (e.g. 0.1, 0.01, 0.0001) and check whether the adaptive algorithm converges the rate toward the optimum while reducing the loss.\n3. If the adaptive algorithm can steer a poor initial learning rate into the optimal range, it is doing its job; small performance gaps are acceptable in some scenarios.","https:\u002F\u002Fgithub.com\u002FDiffSharp\u002FDiffSharp\u002Fissues\u002F286",{"id":138,"question_zh":139,"answer_zh":140,"source_url":136},6365,"Is adjusting the learning rate immediately after every minibatch an effective hyperparameter-optimization approach?","Its effectiveness depends on the scenario. Although feasible in principle, when minibatch inputs vary sharply the computed hypergradient can be inaccurate and push the learning rate in the wrong direction.\n\nThe method in the original paper typically operates per epoch, or over steps that share the same input data. If you adjust after every minibatch, beware of noise caused by data fluctuation. If the dynamic learning rate degrades performance (e.g. the rate collapses), revisit the implementation details or switch to a more stable adjustment frequency (e.g. per epoch).",{"id":142,"question_zh":143,"answer_zh":144,"source_url":145},6366,"What should I do about random crashes (INTERNAL ASSERT FAILED) when using Torch\u002FGPU on Linux?","The symptom is occasional random crashes on Linux (x64 RHEL6) with a GTX 1060 and CUDA 11, with the error pointing at a PyTorch internal assertion failure (`s.isIntegral(false) INTERNAL ASSERT FAILED`).\n\nTroubleshooting suggestions:\n1. Confirm the code runs correctly in Windows GPU and Linux CPU modes, to rule out logic errors.\n2. Crashes like this are usually tied to a specific PyTorch\u002FLibTorch version or to driver compatibility, and are hard to reproduce and debug.\n3. If you cannot reproduce it with simplified code, it may be an intermittent environment-specific bug. Try upgrading or downgrading LibTorch, or file a bug report with the full stack trace against PyTorch upstream.","https:\u002F\u002Fgithub.com\u002FDiffSharp\u002FDiffSharp\u002Fissues\u002F183",{"id":147,"question_zh":148,"answer_zh":149,"source_url":150},6367,"What is the reasoning behind the `Tensor0` renaming and the change to slicing shape computation in DiffSharp?","On `Tensor0`: the maintainers felt the original name was unintuitive and planned to rename it (e.g. to `TensorC` or `Tensor_`) to reflect its nature as a scalar, zero-dimensional tensor.\n\nOn slicing shapes: the previous code set a flag incorrectly for slices like `t.[3..]`, so taking a single element produced the wrong shape. The fix corrects the logic in `boundsToShape`, ensuring the slice result shape matches the shapes of the adjoints propagated in reverse mode. A shape assertion check (`Shape.canExpand`) was also added to enforce shape consistency during backpropagation.","https:\u002F\u002Fgithub.com\u002FDiffSharp\u002FDiffSharp\u002Fissues\u002F258",{"id":152,"question_zh":153,"answer_zh":154,"source_url":150},6368,"How can I verify that the fixed slicing and derivative computations in DiffSharp are correct?","After merging the related fixes (e.g. #235 and #262), verify them as follows:\n1. Check that the existing derivative test cases pass.\n2. Add forward-mode derivative tests.\n3. In particular, add test cases with more than one dimension (>1 dimensional) to confirm that slicing and shape inference are correct for higher-dimensional tensors.\n4. Review whether the reverse-mode assert logic is sufficiently robust.",[156,161,166,171,176,181,186,191,196,201,206,211,216,221,226,231,236,241,246,251],{"id":157,"version":158,"summary_zh":159,"released_at":160},105930,"v1.0.7","## New\r\n* `flatten` and `unflatten` operations now also flatten\u002Funflatten the reverse mode adjoints of the tensors involved\r\n* `scatter` operation\r\n* `det` operation (determinant)\r\n* `dsharp.argmax` and `dsharp.argmin`\r\n\r\n## Changes\r\n* Update to dotnet 6.0 and [F# 6.0](https:\u002F\u002Fdevblogs.microsoft.com\u002Fdotnet\u002Fwhats-new-in-fsharp-6\u002F), support indexing with `expr[idx]` among other improvements\r\n* Tensor `.parents` is now called `.ancestors`\r\n* Improvements in `DiffSharp.Model` design and API, e.g., `.children`, `.descendants`\r\n* Improve `nllLoss` and `gather` performance\r\n* Improve `Model` to string representation\r\n\r\n## Bug fixes\r\n* Fixed bug in combining unweighted items in `Empirical` distributions","2022-03-27T12:06:29",{"id":162,"version":163,"summary_zh":164,"released_at":165},105931,"v1.0.1","## DiffSharp 1.0\r\n\r\nThis is a complete reimplementation of DiffSharp that supports tensors and modern machine learning workflows. 
It includes a libtorch raw tensor backend, CUDA support, APIs for backends and extensions, and extensive unit tests.","2022-03-27T11:43:15",{"id":167,"version":168,"summary_zh":169,"released_at":170},105932,"v0.8.4","* **Fix:** correction in array slicing when used in variable updates #59 ","2019-08-24T09:51:49",{"id":172,"version":173,"summary_zh":174,"released_at":175},105933,"v0.8.3","* **Improvement:** put the unmanaged\u002Fnative dependencies in \u002Fruntime directory in the NuGet for better .NET Core support","2019-07-04T16:14:47",{"id":177,"version":178,"summary_zh":179,"released_at":180},105934,"v0.8.2","* **Fix:** initialisation using a constant matrix on the left of multiplication #56 ","2019-06-25T13:15:45",{"id":182,"version":183,"summary_zh":184,"released_at":185},105935,"v0.8.1","* **Fix:** single-precision computeAdjoints with non-scalar values #55 ","2019-06-20T11:41:05",{"id":187,"version":188,"summary_zh":189,"released_at":190},105936,"v0.8.0","* **Improvement:** Moved to .NET Standard to allow targeting .NET Core as well as .NET Standard\r\n\r\n* **Note:** There are some breaking API changes in this pre-release. Conversion examples are welcome.","2019-06-11T14:12:02",{"id":192,"version":193,"summary_zh":194,"released_at":195},105937,"v0.7.7","- **Fixed**: Bug fix in forward AD implementation of `Sigmoid` and `ReLU` for `D`, `DV`, and `DM` (fixes #16, thank you @mrakgr )\r\n- **Improvement**: Performance improvement by removing several more `Parallel.For` and `Array.Parallel.map` operations, working better with OpenBLAS multithreading\r\n- **Added**: Operations involving incompatible dimensions of `DV` and `DM` will now throw exceptions for warning the user\r\n","2015-12-25T23:07:38",{"id":197,"version":198,"summary_zh":199,"released_at":200},105938,"v0.7.6","- **Fixed**: Bug fix in LAPACK wrappers `ssysv` and `dsysv` in the OpenBLAS backend that caused incorrect solution for linear systems described by a symmetric matrix (fixes #11, thank you @grek142)\r\n- **Added**: Added unit tests covering the whole backend interface\r\n","2015-12-15T14:56:09",{"id":202,"version":203,"summary_zh":204,"released_at":205},105939,"v0.7.5","- **Improved**: Performance improvement thanks to faster `Array2D.copy` operations (thank you Don Syme @dsyme)\r\n- **Improved**: Significantly faster matrix transposition using extended BLAS operations `cblas_?omatcopy` provided by OpenBLAS\r\n- **Improved**: Performance improvement by disabling parts of the OpenBLAS backend using `System.Threading.Tasks`, which was interfering with OpenBLAS multithreading. Pending further tests.\r\n- **Update**: Updated the Win64 binaries of OpenBLAS to version 0.2.15 (27-10-2015), which has bug fixes and optimizations. 
Change log [here](http:\u002F\u002Fwww.openblas.net\u002FChangelog.txt)\r\n- **Fixed**: Bug fixes in reverse AD operations `Sub_D_DV` and `Sub_D_DM` (fixes #8, thank you @mrakgr)\r\n- **Fixed**: Fixed bug in the benchmarking module causing incorrect reporting of the overhead factor of the AD `grad` operation\r\n- **Improved**: Documentation updates\r\n","2015-12-06T17:01:14",{"id":207,"version":208,"summary_zh":209,"released_at":210},105940,"v0.7.4","- **Improved**: Overall performance improvements with parallelization and memory reshaping in OpenBLAS backend\r\n- **Fixed**: Bug fixes in reverse AD `Make_DM_ofDV` and `DV.Append`\r\n- **Fixed**: Bug fixes in `DM` operations `map2Cols`, `map2Rows`, `mapi2Cols`, `mapi2Rows`\r\n- **Added**: New operation `primalDeep` for the deepest primal value in nested AD values\r\n","2015-10-13T01:34:44",{"id":212,"version":213,"summary_zh":214,"released_at":215},105941,"v0.7.3","- **Fixed**: Bug fix in `DM.Min`\r\n- **Added**: `Mean`, `Variance`, `StandardDev`, `Normalize`, and `Standardize` functions\r\n- **Added**: Support for visualizations with configurable Unicode\u002FASCII palette and contrast\r\n","2015-10-06T17:06:59",{"id":217,"version":218,"summary_zh":219,"released_at":220},105942,"v0.7.2","- **Added**: Fast reshape operations `ReshapeCopy_DV_DM` and `ReshapeCopy_DM_DV`\r\n","2015-10-04T17:24:00",{"id":222,"version":223,"summary_zh":224,"released_at":225},105943,"v0.7.1","- **Fixed**: Bug fixes for reverse AD `Abs`, `Sign`, `Floor`, `Ceil`, `Round`, `DV.AddSubVector`, `Make_DM_ofDs`, `Mul_Out_V_V`, `Mul_DVCons_D`\r\n- **Added**: New methods `DV.isEmpty` and `DM.isEmpty`\r\n","2015-10-04T03:46:41",{"id":227,"version":228,"summary_zh":229,"released_at":230},105944,"v0.7.0","Version 0.7.0 is a reimplementation of the library with support for **linear algebra primitives**, **BLAS\u002FLAPACK**, **32- and 64-bit precision** and different **CPU\u002FGPU backends**\r\n- **Changed**: Namespaces have been reorganized and simplified. This is a breaking change. There is now just one AD implementation, under `DiffSharp.AD` (with `DiffSharp.AD.Float32` and `DiffSharp.AD.Float64` variants, see below). This internally makes use of forward or reverse AD as needed.\r\n- **Added**: Support for 32 bit (single precision) and 64 bit (double precision) floating point operations. All modules have `Float32` and `Float64` versions providing the same functionality with the specified precision. 32 bit floating point operations are significantly faster (as much as twice as fast) on many current systems.\r\n- **Added**: DiffSharp now uses the OpenBLAS library by default for linear algebra operations. The AD operations with the types `D` for scalars, `DV` for vectors, and `DM` for matrices use the underlying linear algebra backend for highly optimized native BLAS and LAPACK operations. For non-BLAS operations (such as Hadamard products and matrix transpose), parallel implementations in managed code are used. All operations with the `D`, `DV`, and `DM` types support forward and reverse nested AD up to any level. This also paves the way for GPU backends (CUDA\u002FCuBLAS) which will be introduced in following releases. Please see the documentation and API reference for information about how to use the `D`, `DV`, and `DM` types. 
(**Deprecated**: The FsAlg generic linear algebra library and the `Vector\u003C'T>` and `Matrix\u003C'T>` types are no longer used.)\r\n- **Fixed**: Reverse mode AD has been reimplemented in a tail-recursive way for better performance and preventing StackOverflow exceptions encountered in previous versions.\r\n- **Changed**: The library now uses F# 4.0 (FSharp.Core 4.4.0.0).\r\n- **Changed**: The library is now 64 bit only, meaning that users should set \"x64\" as the platform target for all build configurations.\r\n- **Fixed**: Overall bug fixes.\r\n","2015-09-29T10:54:33",{"id":232,"version":233,"summary_zh":234,"released_at":235},105945,"v0.6.3","- **Fixed:** Bug fix in `DiffSharp.AD` subtraction operation between `D` and `DF`\r\n","2015-07-18T20:01:33",{"id":237,"version":238,"summary_zh":239,"released_at":240},105946,"v0.6.2","- **Changed**: Update FsAlg to 0.5.8\r\n","2015-06-06T18:31:23",{"id":242,"version":243,"summary_zh":244,"released_at":245},105947,"v0.6.1","- **Added**: Support for C#, through the new `DiffSharp.Interop` namespace\r\n- **Added**: Support for casting AD types to `int`\r\n- **Changed**: Update FsAlg to 0.5.6\r\n- **Improved**: Documentation updates\r\n","2015-06-02T23:18:14",{"id":247,"version":248,"summary_zh":249,"released_at":250},105948,"v0.6.0","- **Changed:** DiffSharp is now released under the LGPL license, allowing use (as a dynamically linked library) in closed-source projects and open-source projects under non-GPL licenses\r\n- **Added:** Nesting support. The modules `DiffSharp.AD`, `DiffSharp.AD.Forward` and `DiffSharp.AD.Reverse` are now the main components of the library, providing support for nested AD operations.\r\n- **Changed:** The library now uses the FsAlg linear algebra library for handling vector and matrix operations and interfaces\r\n- **Changed:** All AD-enabled numeric types in the library are now called `D`\r\n- **Changed:** The non-nested modules `DiffSharp.AD.Forward`, `DiffSharp.AD.Forward2`, `DiffSharp.AD.ForwardG`, `DiffSharp.AD.ForwardGH`, `DiffSharp.AD.ForwardN`, `DiffSharp.AD.Reverse` are now called `DiffSharp.AD.Specialized.Forward1`, `DiffSharp.AD.Specialized.Forward2`, `DiffSharp.AD.Specialized.ForwardG`, `DiffSharp.AD.Specialized.ForwardGH`, `DiffSharp.AD.Specialized.ForwardN`, `DiffSharp.AD.Specialized.Reverse1`\r\n- **Improved:** The non-nested `DiffSharp.AD.Specialized.Reverse1` module is reimplemented from scratch, not requiring a stack\r\n- **Removed:** The non-nested `DiffSharp.AD.ForwardReverse` module is removed. This functionality is now handled by the nested modules.\r\n- **Improved:** Major rewrite of documentation and examples, to reflect changed library structure\r\n- **Improved:** Updated benchmarks\r\n","2015-04-26T23:23:17",{"id":252,"version":253,"summary_zh":254,"released_at":255},105949,"v0.5.10","- **Improved:** Improvements in the `DiffSharp.Util.LinearAlgebra` module\r\n- **Changed:** Minor changes in the internal API, such as `dualSet` -> `dualPT`, `dualAct` -> `dualP1`\r\n","2015-03-27T22:45:50"]