[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-dotnet--TorchSharp":3,"tool-dotnet--TorchSharp":64},[4,17,27,35,43,56],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":16},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,3,"2026-04-05T11:01:52",[13,14,15],"开发框架","图像","Agent","ready",{"id":18,"name":19,"github_repo":20,"description_zh":21,"stars":22,"difficulty_score":23,"last_commit_at":24,"category_tags":25,"status":16},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",138956,2,"2026-04-05T11:33:21",[13,15,26],"语言模型",{"id":28,"name":29,"github_repo":30,"description_zh":31,"stars":32,"difficulty_score":23,"last_commit_at":33,"category_tags":34,"status":16},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 
绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",107662,"2026-04-03T11:11:01",[13,14,15],{"id":36,"name":37,"github_repo":38,"description_zh":39,"stars":40,"difficulty_score":23,"last_commit_at":41,"category_tags":42,"status":16},3704,"NextChat","ChatGPTNextWeb\u002FNextChat","NextChat 是一款轻量且极速的 AI 助手，旨在为用户提供流畅、跨平台的大模型交互体验。它完美解决了用户在多设备间切换时难以保持对话连续性，以及面对众多 AI 模型不知如何统一管理的痛点。无论是日常办公、学习辅助还是创意激发，NextChat 都能让用户随时随地通过网页、iOS、Android、Windows、MacOS 或 Linux 端无缝接入智能服务。\n\n这款工具非常适合普通用户、学生、职场人士以及需要私有化部署的企业团队使用。对于开发者而言，它也提供了便捷的自托管方案，支持一键部署到 Vercel 或 Zeabur 等平台。\n\nNextChat 的核心亮点在于其广泛的模型兼容性，原生支持 Claude、DeepSeek、GPT-4 及 Gemini Pro 等主流大模型，让用户在一个界面即可自由切换不同 AI 能力。此外，它还率先支持 MCP（Model Context Protocol）协议，增强了上下文处理能力。针对企业用户，NextChat 提供专业版解决方案，具备品牌定制、细粒度权限控制、内部知识库整合及安全审计等功能，满足公司对数据隐私和个性化管理的高标准要求。",87618,"2026-04-05T07:20:52",[13,26],{"id":44,"name":45,"github_repo":46,"description_zh":47,"stars":48,"difficulty_score":23,"last_commit_at":49,"category_tags":50,"status":16},2268,"ML-For-Beginners","microsoft\u002FML-For-Beginners","ML-For-Beginners 是由微软推出的一套系统化机器学习入门课程，旨在帮助零基础用户轻松掌握经典机器学习知识。这套课程将学习路径规划为 12 周，包含 26 节精炼课程和 52 道配套测验，内容涵盖从基础概念到实际应用的完整流程，有效解决了初学者面对庞大知识体系时无从下手、缺乏结构化指导的痛点。\n\n无论是希望转型的开发者、需要补充算法背景的研究人员，还是对人工智能充满好奇的普通爱好者，都能从中受益。课程不仅提供了清晰的理论讲解，还强调动手实践，让用户在循序渐进中建立扎实的技能基础。其独特的亮点在于强大的多语言支持，通过自动化机制提供了包括简体中文在内的 50 多种语言版本，极大地降低了全球不同背景用户的学习门槛。此外，项目采用开源协作模式，社区活跃且内容持续更新，确保学习者能获取前沿且准确的技术资讯。如果你正寻找一条清晰、友好且专业的机器学习入门之路，ML-For-Beginners 将是理想的起点。",84991,"2026-04-05T10:45:23",[14,51,52,53,15,54,26,13,55],"数据工具","视频","插件","其他","音频",{"id":57,"name":58,"github_repo":59,"description_zh":60,"stars":61,"difficulty_score":10,"last_commit_at":62,"category_tags":63,"status":16},3128,"ragflow","infiniflow\u002Fragflow","RAGFlow 
是一款领先的开源检索增强生成（RAG）引擎，旨在为大语言模型构建更精准、可靠的上下文层。它巧妙地将前沿的 RAG 技术与智能体（Agent）能力相结合，不仅支持从各类文档中高效提取知识，还能让模型基于这些知识进行逻辑推理和任务执行。\n\n在大模型应用中，幻觉问题和知识滞后是常见痛点。RAGFlow 通过深度解析复杂文档结构（如表格、图表及混合排版），显著提升了信息检索的准确度，从而有效减少模型“胡编乱造”的现象，确保回答既有据可依又具备时效性。其内置的智能体机制更进一步，使系统不仅能回答问题，还能自主规划步骤解决复杂问题。\n\n这款工具特别适合开发者、企业技术团队以及 AI 研究人员使用。无论是希望快速搭建私有知识库问答系统，还是致力于探索大模型在垂直领域落地的创新者，都能从中受益。RAGFlow 提供了可视化的工作流编排界面和灵活的 API 接口，既降低了非算法背景用户的上手门槛，也满足了专业开发者对系统深度定制的需求。作为基于 Apache 2.0 协议开源的项目，它正成为连接通用大模型与行业专有知识之间的重要桥梁。",77062,"2026-04-04T04:44:48",[15,14,13,26,54],{"id":65,"github_repo":66,"name":67,"description_en":68,"description_zh":69,"ai_summary_zh":69,"readme_en":70,"readme_zh":71,"quickstart_zh":72,"use_case_zh":73,"hero_image_url":74,"owner_login":75,"owner_name":76,"owner_avatar_url":77,"owner_bio":78,"owner_company":79,"owner_location":79,"owner_email":80,"owner_twitter":81,"owner_website":82,"owner_url":83,"languages":84,"stars":123,"forks":124,"last_commit_at":125,"license":126,"difficulty_score":10,"env_os":127,"env_gpu":128,"env_ram":129,"env_deps":130,"category_tags":136,"github_topics":137,"view_count":23,"oss_zip_url":79,"oss_zip_packed_at":79,"status":16,"created_at":142,"updated_at":143,"faqs":144,"releases":175},1980,"dotnet\u002FTorchSharp","TorchSharp","A .NET library that provides access to the library that powers PyTorch.","TorchSharp 是一个为 .NET 开发者打造的开源库，直接对接 PyTorch 的底层引擎 LibTorch，让你在 C# 或 F# 中像使用 Python 一样构建和训练神经网络。它让原本依赖 Python 生态的深度学习任务，可以在 .NET 项目中无缝实现，无需切换语言或环境。特别适合使用 .NET 构建 AI 应用的开发者，比如企业级系统工程师、AI 研究员或希望将模型集成到 Windows 桌面\u002F服务端应用的团队。TorchSharp 保留了 PyTorch 的张量操作和自动微分机制，API 设计贴近原生 PyTorch 风格，同时充分利用 .NET 的静态类型系统，提升开发安全性与效率。例如，你可以用 C# 直接定义神经网络层、计算损失、反向传播，代码结构清晰直观。目前支持 Windows 和 Apple Silicon Mac，提供 CPU 与 CUDA 版本，适配不同硬件需求。如果你已在 .NET 生态中工作，又想探索机器学习，TorchSharp 是一个自然且强大的选择。","[![Gitter](https:\u002F\u002Fbadges.gitter.im\u002Fdotnet\u002FTorchSharp.svg)](https:\u002F\u002Fgitter.im\u002Fdotnet\u002FTorchSharp?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)\n\u003Cbr\u002F>\n[![Build 
Status](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fdotnet_TorchSharp_readme_5881a66031b5.png)](https:\u002F\u002Fdotnet.visualstudio.com\u002FTorchSharp\u002F_build\u002Flatest?definitionId=174&branchName=main)\n\u003Cbr\u002F>\n[![TorchSharp](https:\u002F\u002Fimg.shields.io\u002Fnuget\u002Fvpre\u002FTorchSharp.svg?cacheSeconds=3600&label=TorchSharp%20nuget)](https:\u002F\u002Fwww.nuget.org\u002Fpackages\u002FTorchSharp)\u003Cbr\u002F>\n[![TorchAudio](https:\u002F\u002Fimg.shields.io\u002Fnuget\u002Fvpre\u002FTorchAudio.svg?cacheSeconds=3600&label=TorchAudio%20nuget)](https:\u002F\u002Fwww.nuget.org\u002Fpackages\u002FTorchAudio)\u003Cbr\u002F>\n[![TorchVision](https:\u002F\u002Fimg.shields.io\u002Fnuget\u002Fvpre\u002FTorchVision.svg?cacheSeconds=3600&label=TorchVision%20nuget)](https:\u002F\u002Fwww.nuget.org\u002Fpackages\u002FTorchVision)\u003Cbr\u002F>\n[![TorchSharp-cpu](https:\u002F\u002Fimg.shields.io\u002Fnuget\u002Fvpre\u002FTorchSharp-cpu.svg?cacheSeconds=3600&label=TorchSharp-cpu%20nuget)](https:\u002F\u002Fwww.nuget.org\u002Fpackages\u002FTorchSharp-cpu)\n[![TorchSharp-cuda-windows](https:\u002F\u002Fimg.shields.io\u002Fnuget\u002Fvpre\u002FTorchSharp-cuda-windows.svg?cacheSeconds=3600&label=TorchSharp-cuda-windows%20nuget)](https:\u002F\u002Fwww.nuget.org\u002Fpackages\u002FTorchSharp-cuda-windows)\n[![TorchSharp-cuda-linux](https:\u002F\u002Fimg.shields.io\u002Fnuget\u002Fvpre\u002FTorchSharp-cuda-linux.svg?cacheSeconds=3600&label=TorchSharp-cuda-linux%20nuget)](https:\u002F\u002Fwww.nuget.org\u002Fpackages\u002FTorchSharp-cuda-linux)\u003Cbr\u002F>\n\u003Cbr\u002F>\nPlease check the [Release Notes](RELEASENOTES.md) file for news on what's been updated in each new release.\n\n\n__TorchSharp no longer supports MacOS on Intel hardware.__\n\nWith libtorch release 2.4.0, Intel HW support was deprecated for libtorch. This means that the last version of TorchSharp to work on Intel Macintosh hardware is 0.102.8. 
Starting with 0.103.0, only Macs based on Apple Silicon are supported.\n\n__TorchSharp examples has their own home!__\n\nHead over to the [TorchSharp Examples Repo](https:\u002F\u002Fgithub.com\u002Fdotnet\u002FTorchSharpExamples) for convenient access to existing and upcoming examples.\n\n__IMPORTANT NOTES:__\n\nWhen targeting __.NET FX__ on Windows, the project configuration must be set to 'x64' rather than 'Any CPU' for anything that depends on TorchSharp.\n\nAs we build up to a v1.0 release, we will continue to make breaking changes, but only when we consider it necessary for usability. Similarity to the PyTorch experience is a primary design tenet, and we will continue on that path.\n\n# TorchSharp\n\nTorchSharp is a .NET library that provides access to the library that powers PyTorch. It is part of the .NET Foundation.\n\nThe focus is to bind the API surfaced by LibTorch with a particular focus on tensors. The design intent is to stay as close as possible to the Pytorch experience, while still taking advantage of the benefits of the .NET static type system where it makes sense. For example: method overloading is relied on when Pytorch defines multiple valid types for a particular parameter.\n\nThe technology is a \"wrapper library\": no more, no less. 
[DiffSharp](https:\u002F\u002Fgithub.com\u002FDiffSharp\u002FDiffSharp\u002F) uses this\nrepository extensively and has been a major factor in iterating support.\n\nThings that you can try:\n\n```csharp\nusing TorchSharp;\nusing static TorchSharp.torch.nn;\n\nvar lin1 = Linear(1000, 100);\nvar lin2 = Linear(100, 10);\nvar seq = Sequential((\"lin1\", lin1), (\"relu1\", ReLU()), (\"drop1\", Dropout(0.1)), (\"lin2\", lin2));\n\nusing var x = torch.randn(64, 1000);\nusing var y = torch.randn(64, 10);\n\nvar optimizer = torch.optim.Adam(seq.parameters());\n\nfor (int i = 0; i \u003C 10; i++) {\n    using var eval = seq.forward(x);\n    using var output = functional.mse_loss(eval, y, Reduction.Sum);\n\n    optimizer.zero_grad();\n\n    output.backward();\n\n    optimizer.step();\n}\n```\n\n## A Few Things to Know\n\nWhile the intent has been to stay close to the Pytorch experience, there are some peculiarities to take note of:\n\n1. We have disregarded .NET naming conventions in favor of Python where it impacts the experience. We know this will feel wrong to some, but after a lot of deliberation, we decided to follow the lead of the SciSharp community and embrace naming similarity with Python over .NET tradition. We believe this will make it easier to take Python-based examples and snippets and apply them in .NET.\n\n2. In order to make a constructor call look more the Pytorch code, each class has a factory method with the same name. Because we cannot have a method and a class with the same name in a scope, we moved the class declarations to a nested scope 'Modules.'\n\n    For example:\n\n    ```csharp\n\n    Module conv1 = Conv1d(...);\n\n    ```\n    creates an instance of `Modules.Conv1d`, which has 'torch.Module' as its base class.\n\n3. C# uses ':' when passing a named parameter, while F# and Python uses '=', and Pytorch functions have enough parameters to encourage passing them by name. This means that you cannot simply copy a lot of code into C#.\n\n4. 
There are a number of APIs where Pytorch encodes what are effectively enum types as strings. We have chosen to use proper .NET enumeration types in most cases.\n\n5. The type `torch.device` is `torch.Device` in TorchSharp. We felt that using all-lowercase for a class type was one step too far. The device object constructors, which is what you use most of the time, are still called `device()`\n\n\n# Memory management\n\nSee [docfx\u002Farticles\u002Fmemory.md](docfx\u002Farticles\u002Fmemory.md).\n\n# Download\n\nTorchSharp is distributed via the NuGet gallery: [https:\u002F\u002Fwww.nuget.org\u002Fpackages\u002FTorchSharp\u002F](https:\u002F\u002Fwww.nuget.org\u002Fpackages\u002FTorchSharp\u002F)\n\nWe recommend using one of the 'bundled' packages, which will pull in both TorchSharp and the right backends:\n\n- [TorchSharp-cpu](https:\u002F\u002Fwww.nuget.org\u002Fpackages\u002FTorchSharp-cpu) (CPU, Linux\u002FWindows\u002FOSX)\n- [TorchSharp-cuda-windows](https:\u002F\u002Fwww.nuget.org\u002Fpackages\u002FTorchSharp-cuda-windows) (CPU\u002FCUDA 12.1, Windows)\n- [TorchSharp-cuda-linux](https:\u002F\u002Fwww.nuget.org\u002Fpackages\u002FTorchSharp-cuda-linux) (CPU\u002FCUDA 12.1, Linux)\n\nOtherwise, you also need one of the LibTorch backend packages: https:\u002F\u002Fwww.nuget.org\u002Fpackages?q=libtorch, specifically one of\n\n* `libtorch-cpu-linux-x64` (CPU, Linux)\n\n* `libtorch-cpu-win-x64` (CPU, Windows)\n\n* `libtorch-cpu-osx-arm64` (CPU, OSX)\n\n* `libtorch-cpu` (CPU, references all three, larger download but simpler)\n\n* `libtorch-cuda-12.1-linux-x64` (CPU\u002FCUDA 12.1, Linux)\n\n  > NOTE: Due to the presence of very large native binaries, using the `libtorch-cuda-12.1-linux-x64` package requires\n  > .NET 6, e.g. 
.NET SDK version `6.0.100-preview.5.21302.13` or greater.\n\n* `libtorch-cuda-12.1-win-x64` (CPU\u002FCUDA 12.1, Windows)\n\nAlternatively you can access the LibTorch native binaries via direct reference to existing local native\nbinaries of LibTorch installed through other means (for example, by installing [PyTorch](https:\u002F\u002Fpytorch.org\u002F) using a Python package manager). You will have to add an explicit load of the relevant native library, for example:\n\n```csharp\n    using System.Runtime.InteropServices;\n    NativeLibrary.Load(\"\u002Fhome\u002Fgunes\u002Fanaconda3\u002Flib\u002Fpython3.8\u002Fsite-packages\u002Ftorch\u002Flib\u002Flibtorch.so\")\n```\n\n**NOTE:** Some have reported that in order to use TorchSharp on Windows, the C++ redistributable needs to be installed. This will be the case where VS is installed, but it maybe necessary to install this version of the C++ redist on machines where TorchSharp is deployed:\n\n```\nMicrosoft Visual C++ 2015-2022 ( 14.36.32532 )\n```\n\n# Code of Conduct\nThis project has adopted the code of conduct defined by the Contributor Covenant to clarify expected behavior in our community.\nFor more information see the [.NET Foundation Code of Conduct](https:\u002F\u002Fdotnetfoundation.org\u002Fcode-of-conduct).\n\n# Developing and Contributing\n\nSee [DEVGUIDE.md](DEVGUIDE.md) and [CONTRIBUTING.md](CONTRIBUTING.md).\n\n\u003Ca href=\"https:\u002F\u002Fgithub.com\u002Fdotnet\u002FTorchSharp\u002Fgraphs\u002Fcontributors\">\n  \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fdotnet_TorchSharp_readme_55ecf02b577f.png\" \u002F>\n\u003C\u002Fa>\n\n# Uses\n\n[DiffSharp](https:\u002F\u002Fgithub.com\u002FDiffSharp\u002FDiffSharp\u002F) also uses this\nrepository extensively and has been a major factor in iterating 
support.\n","[![Gitter](https:\u002F\u002Fbadges.gitter.im\u002Fdotnet\u002FTorchSharp.svg)](https:\u002F\u002Fgitter.im\u002Fdotnet\u002FTorchSharp?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge)\n\u003Cbr\u002F>\n[![Build Status](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fdotnet_TorchSharp_readme_5881a66031b5.png)](https:\u002F\u002Fdotnet.visualstudio.com\u002FTorchSharp\u002F_build\u002Flatest?definitionId=174&branchName=main)\n\u003Cbr\u002F>\n[![TorchSharp](https:\u002F\u002Fimg.shields.io\u002Fnuget\u002Fvpre\u002FTorchSharp.svg?cacheSeconds=3600&label=TorchSharp%20nuget)](https:\u002F\u002Fwww.nuget.org\u002Fpackages\u002FTorchSharp)\u003Cbr\u002F>\n[![TorchAudio](https:\u002F\u002Fimg.shields.io\u002Fnuget\u002Fvpre\u002FTorchAudio.svg?cacheSeconds=3600&label=TorchAudio%20nuget)](https:\u002F\u002Fwww.nuget.org\u002Fpackages\u002FTorchAudio)\u003Cbr\u002F>\n[![TorchVision](https:\u002F\u002Fimg.shields.io\u002Fnuget\u002Fvpre\u002FTorchVision.svg?cacheSeconds=3600&label=TorchVision%20nuget)](https:\u002F\u002Fwww.nuget.org\u002Fpackages\u002FTorchVision)\u003Cbr\u002F>\n[![TorchSharp-cpu](https:\u002F\u002Fimg.shields.io\u002Fnuget\u002Fvpre\u002FTorchSharp-cpu.svg?cacheSeconds=3600&label=TorchSharp-cpu%20nuget)](https:\u002F\u002Fwww.nuget.org\u002Fpackages\u002FTorchSharp-cpu)\n[![TorchSharp-cuda-windows](https:\u002F\u002Fimg.shields.io\u002Fnuget\u002Fvpre\u002FTorchSharp-cuda-windows.svg?cacheSeconds=3600&label=TorchSharp-cuda-windows%20nuget)](https:\u002F\u002Fwww.nuget.org\u002Fpackages\u002FTorchSharp-cuda-windows)\n[![TorchSharp-cuda-linux](https:\u002F\u002Fimg.shields.io\u002Fnuget\u002Fvpre\u002FTorchSharp-cuda-linux.svg?cacheSeconds=3600&label=TorchSharp-cuda-linux%20nuget)](https:\u002F\u002Fwww.nuget.org\u002Fpackages\u002FTorchSharp-cuda-linux)\u003Cbr\u002F>\n\u003Cbr\u002F>\n请查看[发行说明](RELEASENOTES.md)文件，了解每次新版本的更新内容。\n\n\n__TorchSharp不再支持基于Intel硬件的MacOS。__\n\n随着libtorch 
2.4.0版本的发布，libtorch对Intel硬件的支持已被弃用。这意味着，能够运行在Intel Macintosh硬件上的TorchSharp的最后一个版本是0.102.8。从0.103.0开始，仅支持基于Apple Silicon的Mac。\n\n__TorchSharp示例有了自己的主页！__\n\n前往[TorchSharp示例仓库](https:\u002F\u002Fgithub.com\u002Fdotnet\u002FTorchSharpExamples)，方便地获取现有和即将推出的示例。\n\n__重要提示：__\n\n当在Windows上以__.NET FX__为目标时，对于依赖TorchSharp的任何项目，其配置必须设置为‘x64’，而非‘Any CPU’。\n\n随着我们向v1.0版本迈进，我们将继续做出破坏性更改，但仅在我们认为对可用性有必要时才会这样做。与PyTorch体验的相似性是我们的主要设计原则，我们将继续沿着这条道路前行。\n\n# TorchSharp\n\nTorchSharp是一个.NET库，提供了访问驱动PyTorch的库的接口。它是.NET基金会的一部分。\n\n重点在于绑定LibTorch提供的API，并特别关注张量。设计意图是尽可能贴近PyTorch的使用体验，同时在有意义的地方充分利用.NET静态类型系统的优点。例如：当PyTorch为某个参数定义了多种有效类型时，我们会依赖方法重载。\n\n该技术是一个“包装库”：不多也不少。[DiffSharp](https:\u002F\u002Fgithub.com\u002FDiffSharp\u002FDiffSharp\u002F)广泛使用了这个仓库，也是迭代支持的重要因素。\n\n你可以尝试的内容：\n\n```csharp\nusing TorchSharp;\nusing static TorchSharp.torch.nn;\n\nvar lin1 = Linear(1000, 100);\nvar lin2 = Linear(100, 10);\nvar seq = Sequential((\"lin1\", lin1), (\"relu1\", ReLU()), (\"drop1\", Dropout(0.1)), (\"lin2\", lin2));\n\nusing var x = torch.randn(64, 1000);\nusing var y = torch.randn(64, 10);\n\nvar optimizer = torch.optim.Adam(seq.parameters());\n\nfor (int i = 0; i \u003C 10; i++) {\n    using var eval = seq.forward(x);\n    using var output = functional.mse_loss(eval, y, Reduction.Sum);\n\n    optimizer.zero_grad();\n\n    output.backward();\n\n    optimizer.step();\n}\n```\n\n## 几点须知\n\n尽管我们的目标是贴近PyTorch的使用体验，但仍有一些特殊之处需要注意：\n\n1. 我们为了提升体验，放弃了.NET命名规范而采用了Python风格。我们知道这可能会让一些人觉得不习惯，但在经过大量讨论后，我们决定跟随SciSharp社区的脚步，优先采用与Python相似的命名方式，而非传统的.NET风格。我们相信，这将使你更容易将基于Python的示例和代码片段应用到.NET中。\n\n2. 为了使构造函数调用更像PyTorch代码，每个类都提供了一个同名的工厂方法。由于在同一作用域内不能同时拥有方法和类同名，我们把类声明移到了嵌套的作用域‘Modules’中。\n\n    例如：\n\n    ```csharp\n\n    Module conv1 = Conv1d(...);\n\n    ```\n    创建的是`Modules.Conv1d`的实例，它的基类是`torch.Module`。\n\n3. C#在传递命名参数时使用‘:’，而F#和Python使用‘=’，且PyTorch函数的参数足够多，鼓励通过名称传递。这意味着你不能简单地把很多代码复制到C#中。\n\n4. 在许多API中，PyTorch将实际上的枚举类型编码为字符串。我们在大多数情况下选择了使用真正的.NET枚举类型。\n\n5. 
类型`torch.device`在TorchSharp中是`torch.Device`。我们觉得全部小写作为类名有点过头了。设备对象的构造函数——也就是你最常使用的——仍然叫作`device()`\n\n\n# 内存管理\n\n请参阅[docfx\u002Farticles\u002Fmemory.md](docfx\u002Farticles\u002Fmemory.md)。\n\n# 下载\n\nTorchSharp 通过 NuGet 库分发：[https:\u002F\u002Fwww.nuget.org\u002Fpackages\u002FTorchSharp\u002F](https:\u002F\u002Fwww.nuget.org\u002Fpackages\u002FTorchSharp\u002F)\n\n我们建议使用其中一个“捆绑”包，它会同时引入 TorchSharp 和合适的后端：\n\n- [TorchSharp-cpu](https:\u002F\u002Fwww.nuget.org\u002Fpackages\u002FTorchSharp-cpu)（CPU，Linux\u002FWindows\u002FOSX）\n- [TorchSharp-cuda-windows](https:\u002F\u002Fwww.nuget.org\u002Fpackages\u002FTorchSharp-cuda-windows)（CPU\u002FCUDA 12.1，Windows）\n- [TorchSharp-cuda-linux](https:\u002F\u002Fwww.nuget.org\u002Fpackages\u002FTorchSharp-cuda-linux)（CPU\u002FCUDA 12.1，Linux）\n\n否则，您还需要使用其中一个 LibTorch 后端包：https:\u002F\u002Fwww.nuget.org\u002Fpackages?q=libtorch，具体包括以下几种：\n\n* `libtorch-cpu-linux-x64`（CPU，Linux）\n\n* `libtorch-cpu-win-x64`（CPU，Windows）\n\n* `libtorch-cpu-osx-arm64`（CPU，OSX）\n\n* `libtorch-cpu`（CPU，引用全部三种，下载文件较大但更简单）\n\n* `libtorch-cuda-12.1-linux-x64`（CPU\u002FCUDA 12.1，Linux）\n\n> 注意：由于包含体积非常大的原生二进制文件，使用 `libtorch-cuda-12.1-linux-x64` 包需要 .NET 6，例如 .NET SDK 版本 `6.0.100-preview.5.21302.13` 或更高版本。\n\n* `libtorch-cuda-12.1-win-x64`（CPU\u002FCUDA 12.1，Windows）\n\n或者，您也可以通过直接引用其他方式安装的 LibTorch 原生二进制文件来访问 LibTorch 原生二进制文件（例如，通过 Python 包管理器安装 [PyTorch](https:\u002F\u002Fpytorch.org\u002F)）。您需要显式加载相关的原生库，例如：\n\n```csharp\n    using System.Runtime.InteropServices;\n    NativeLibrary.Load(\"\u002Fhome\u002Fgunes\u002Fanaconda3\u002Flib\u002Fpython3.8\u002Fsite-packages\u002Ftorch\u002Flib\u002Flibtorch.so\")\n```\n\n**注意：** 有人报告称，在 Windows 上使用 TorchSharp 时，需要安装 C++ 可再发行组件。如果已安装 VS，则此情况通常已经满足，但在部署 TorchSharp 的机器上可能仍需安装此版本的 C++ 可再发行组件：\n\n```\nMicrosoft Visual C++ 2015-2022 ( 14.36.32532 )\n```\n\n# 行为准则\n本项目采用了《贡献者契约》定义的行为准则，以明确社区内的期望行为。\n欲了解更多信息，请参阅 [.NET 基金会行为准则](https:\u002F\u002Fdotnetfoundation.org\u002Fcode-of-conduct)。\n\n# 开发与贡献\n\n请参阅 
[DEVGUIDE.md](DEVGUIDE.md) 和 [CONTRIBUTING.md](CONTRIBUTING.md)。\n\n\u003Ca href=\"https:\u002F\u002Fgithub.com\u002Fdotnet\u002FTorchSharp\u002Fgraphs\u002Fcontributors\">\n  \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fdotnet_TorchSharp_readme_55ecf02b577f.png\" \u002F>\n\u003C\u002Fa>\n\n# 使用场景\n\n[DiffSharp](https:\u002F\u002Fgithub.com\u002FDiffSharp\u002FDiffSharp\u002F) 也广泛使用了本仓库，并且在迭代支持方面发挥了重要作用。","# TorchSharp 快速上手指南\n\n## 环境准备\n\n- **操作系统**：Windows 10\u002F11、Linux（Ubuntu 20.04+）、macOS Apple Silicon（M1\u002FM2）  \n- **.NET SDK**：.NET 6 或更高版本  \n- **CUDA 支持**（可选）：NVIDIA 显卡 + CUDA 12.1，仅限 Windows\u002FLinux  \n- **注意**：Intel 架构的 macOS 已不再支持，需使用 Apple Silicon 设备  \n- **Windows 用户**：请安装 [Microsoft Visual C++ 2015-2022 (14.36.32532)](https:\u002F\u002Faka.ms\u002Fvs\u002F17\u002Frelease\u002Fvc_redist.x64.exe)\n\n## 安装步骤\n\n推荐使用官方预打包的 NuGet 包，自动包含 LibTorch 后端：\n\n### CPU 版本（推荐初学者）\n```bash\ndotnet add package TorchSharp-cpu\n```\n\n### GPU 版本（Windows）\n```bash\ndotnet add package TorchSharp-cuda-windows\n```\n\n### GPU 版本（Linux）\n```bash\ndotnet add package TorchSharp-cuda-linux\n```\n\n> **国内加速建议**：使用阿里云或清华大学 NuGet 镜像源  \n> 在 `nuget.config` 中添加：\n> ```xml\n> \u003Cadd key=\"aliyun\" value=\"https:\u002F\u002Fmirrors.aliyun.com\u002Fnuget\u002Fv3\u002Findex.json\" \u002F>\n> ```\n\n## 基本使用\n\n以下是一个线性网络训练的最小示例：\n\n```csharp\nusing TorchSharp;\nusing static TorchSharp.torch.nn;\n\nvar lin1 = Linear(1000, 100);\nvar lin2 = Linear(100, 10);\nvar seq = Sequential((\"lin1\", lin1), (\"relu1\", ReLU()), (\"drop1\", Dropout(0.1)), (\"lin2\", lin2));\n\nusing var x = torch.randn(64, 1000);\nusing var y = torch.randn(64, 10);\n\nvar optimizer = torch.optim.Adam(seq.parameters());\n\nfor (int i = 0; i \u003C 10; i++) {\n    using var eval = seq.forward(x);\n    using var output = functional.mse_loss(eval, y, Reduction.Sum);\n\n    optimizer.zero_grad();\n    output.backward();\n    optimizer.step();\n}\n```\n\n> 注意：TorchSharp 保持与 PyTorch 一致的 API 
命名（如 `Linear`, `ReLU`, `adam`），无需翻译为 .NET 风格。","一家专注于工业质检的 .NET 开发团队，正在为某汽车零部件厂商构建基于深度学习的表面缺陷检测系统，团队主力使用 C# 和 ASP.NET Core 构建后端服务，但缺乏 Python 环境的部署能力。\n\n### 没有 TorchSharp 时\n- 团队必须将模型训练完全交给 Python 团队，每次模型更新都需要导出 ONNX 文件并手动集成，流程繁琐且易出错。\n- 在 C# 服务中调用 Python 模型需依赖外部进程（如 Python.NET 或 REST API），启动延迟高，内存占用大，无法满足产线实时检测需求。\n- 模型推理时需在 C# 与 Python 之间频繁序列化\u002F反序列化张量数据，导致吞吐量下降 40% 以上。\n- 难以直接调试模型内部梯度或张量形状，排查训练偏差时需来回切换环境，开发效率极低。\n- 部署时需在生产服务器上安装 Python 环境和依赖库，增加了运维复杂度和安全风险。\n\n### 使用 TorchSharp 后\n- 团队直接在 C# 项目中加载 PyTorch 预训练模型，无需导出或跨语言调用，模型加载时间从 3.2 秒降至 0.4 秒。\n- 所有张量运算、前向传播和损失计算均在 .NET 进程内完成，推理延迟降低 65%，满足每秒 30 帧的产线检测要求。\n- 可直接使用 `torch.randn`、`Linear`、`Sequential` 等原生 API 构建和微调模型，与 Python PyTorch 代码高度一致，开发人员可快速上手。\n- 调试时可直接在 Visual Studio 中查看张量值、梯度变化，结合 .NET 调试器快速定位模型异常。\n- 仅需部署单一 .NET 6+ 应用程序，无需安装 Python，容器镜像体积减少 80%，部署更安全、更稳定。\n\nTorchSharp 让 .NET 开发者能无缝使用 PyTorch 的强大能力，彻底打通了深度学习模型在工业级 C# 系统中的落地最后一公里。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fdotnet_TorchSharp_32ec135e.png","dotnet",".NET Platform","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002Fdotnet_0412dd17.png","Home of the open source .NET platform",null,"contact@dotnetfoundation.org","dotnetfdn","https:\u002F\u002Fdot.net","https:\u002F\u002Fgithub.com\u002Fdotnet",[85,89,93,97,101,105,109,113,116,119],{"name":86,"color":87,"percentage":88},"C#","#178600",91.7,{"name":90,"color":91,"percentage":92},"C++","#f34b7d",4.4,{"name":94,"color":95,"percentage":96},"C","#555555",2.6,{"name":98,"color":99,"percentage":100},"F#","#b845fc",0.4,{"name":102,"color":103,"percentage":104},"Python","#3572A5",0.3,{"name":106,"color":107,"percentage":108},"Jupyter Notebook","#DA5B0B",0.2,{"name":110,"color":111,"percentage":112},"CMake","#DA3434",0.1,{"name":114,"color":115,"percentage":112},"Batchfile","#C1F12E",{"name":117,"color":118,"percentage":112},"Shell","#89e051",{"name":120,"color":121,"percentage":122},"PowerShell","#012456",0,1805,220,"2026-04-01T19:10:11","MIT","Linux, Windows, macOS","需要 NVIDIA 
GPU，显存未明确说明，CUDA 12.1","未说明",{"notes":131,"python":129,"dependencies":132},"在 Windows 上部署时可能需要安装 Microsoft Visual C++ 2015-2022 (14.36.32532) 可再发行组件；macOS 上仅支持 Apple Silicon（ARM64）架构，Intel 架构已弃用；.NET 项目必须配置为 x64 平台；可直接引用本地 PyTorch 安装的 libtorch 库，但需手动加载；.NET 6 或更高版本用于 CUDA Linux 包；命名风格遵循 PyTorch 而非 .NET 传统，如 torch.device 在 C# 中为 Device 类。",[133,134,135],"libtorch-cpu","libtorch-cuda-12.1-linux-x64","libtorch-cuda-12.1-win-x64",[13],[138,139,140,141],"libtorch","mlnet","torchsharp","torchscript","2026-03-27T02:49:30.150509","2026-04-06T06:55:38.370711",[145,150,155,160,165,170],{"id":146,"question_zh":147,"answer_zh":148,"source_url":149},8930,"在 TorchSharp 0.100.0 版本中加载模型时报错，如何解决？","该问题已在 0.100.2 版本中修复。解决方案是升级到 TorchSharp 0.100.2 或更高版本，该版本预加载了 'nvfuser_codegen'，解决了原生库路径加载问题。可通过 NuGet 更新包：Install-Package TorchSharp -Version 0.100.2。","https:\u002F\u002Fgithub.com\u002Fdotnet\u002FTorchSharp\u002Fissues\u002F999",{"id":151,"question_zh":152,"answer_zh":153,"source_url":154},8931,"保存模型后重新加载，训练时损失值重置为初始值，如何正确保存和加载预训练模型？","TorchSharp 的 .save() 和 .load() 方法仅保存参数，但未保留优化器状态或训练上下文。确保在加载后重新初始化优化器，并检查是否在加载后调用了 model.train()。若仍无效，建议使用 state_dict() 手动保存\u002F加载参数：model.state_dict().Save(\"path.pt\")，加载时使用 model.load_state_dict(torch.load(\"path.pt\"))。","https:\u002F\u002Fgithub.com\u002Fdotnet\u002FTorchSharp\u002Fissues\u002F760",{"id":156,"question_zh":157,"answer_zh":158,"source_url":159},8932,"在 Linux 上使用 libtorch-cuda 包时 TorchSharp 无声崩溃，如何启用 CUDA 支持？","确保安装了与 libtorch-cuda 匹配的 NVIDIA 驱动和 CUDA Toolkit（如 10.2）。设置环境变量 LD_LIBRARY_PATH 指向 libtorch-cuda 包中 .so 文件所在目录。例如：export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:\u002Fpath\u002Fto\u002Flibtorch-cuda\u002Flib。同时确认 .NET 运行时为 6.0+，并使用 torch.IsCudaAvailable() 验证是否检测到 GPU。","https:\u002F\u002Fgithub.com\u002Fdotnet\u002FTorchSharp\u002Fissues\u002F143",{"id":161,"question_zh":162,"answer_zh":163,"source_url":164},8933,"升级到 TorchSharp 0.101.3 后，LayerNorm 层未迁移到 GPU，导致设备不一致错误，如何修复？","这是 0.101.3 版本中的已知 Bug。解决方案是降级到 
0.101.2，或手动将所有子模块显式移动到目标设备：model.to(device) 后，遍历 model.modules() 并调用 module.to(device) 确保每个组件迁移。官方已在后续版本修复此问题，建议升级至 0.101.4+。","https:\u002F\u002Fgithub.com\u002Fdotnet\u002FTorchSharp\u002Fissues\u002F1185",{"id":166,"question_zh":167,"answer_zh":168,"source_url":169},8934,"在 .NET 4.8 项目中使用 TorchSharp 时报错提示缺少 libtorch-cpu-11.3-win-x64，如何解决？","确保在项目中直接引用了 TorchSharp-cpu NuGet 包（而非仅 TorchSharp），并将所有 libtorch-cpu-win-x64 的 DLL 文件（如 torchsharp.dll、libtorch.dll）复制到输出目录根路径（而非子文件夹）。同时检查项目是否为 x64 架构，非 x64 架构会导致加载失败。","https:\u002F\u002Fgithub.com\u002Fdotnet\u002FTorchSharp\u002Fissues\u002F907",{"id":171,"question_zh":172,"answer_zh":173,"source_url":174},8935,"使用 nn.Sequential 定义模型时，state_dict 无法获取最后层的参数，如何解决？","此问题通常因 Sequential 层未正确注册到父模块导致。确保在模型构造函数中使用 RegisterComponents() 方法显式注册所有子模块，特别是最后一个 Sequential 层。例如：RegisterComponents(\"cv4\", cv4);。若仍缺失，尝试将 Sequential 替换为直接定义的 Layer，并手动注册每个子层。","https:\u002F\u002Fgithub.com\u002Fdotnet\u002FTorchSharp\u002Fissues\u002F1272",[176,179,182,185,188,191,194,197,200,203,206,209,212,215,218,221,224,227,230,233],{"id":177,"version":178,"summary_zh":79,"released_at":79},106359,"v0.106.0",{"id":180,"version":181,"summary_zh":79,"released_at":79},106360,"v0.105.2",{"id":183,"version":184,"summary_zh":79,"released_at":79},106361,"v0.105.1",{"id":186,"version":187,"summary_zh":79,"released_at":79},106362,"v0.105.0",{"id":189,"version":190,"summary_zh":79,"released_at":79},106363,"v0.104.0",{"id":192,"version":193,"summary_zh":79,"released_at":79},106364,"v0.103.1",{"id":195,"version":196,"summary_zh":79,"released_at":79},106365,"v0.103.0",{"id":198,"version":199,"summary_zh":79,"released_at":79},106366,"v0.102.8",{"id":201,"version":202,"summary_zh":79,"released_at":79},106367,"v0.102.6",{"id":204,"version":205,"summary_zh":79,"released_at":79},106368,"v0.102.5",{"id":207,"version":208,"summary_zh":79,"released_at":79},106369,"v0.102.4",{"id":210,"version":211,"summary_zh":79,"released_at":79},106370,"v0.102.3",{"id":213,"version":214,"s
ummary_zh":79,"released_at":79},106371,"v0.102.2",{"id":216,"version":217,"summary_zh":79,"released_at":79},106372,"v0.102.1",{"id":219,"version":220,"summary_zh":79,"released_at":79},106373,"v0.102.0",{"id":222,"version":223,"summary_zh":79,"released_at":79},106374,"v0.101.6",{"id":225,"version":226,"summary_zh":79,"released_at":79},106375,"v0.101.5",{"id":228,"version":229,"summary_zh":79,"released_at":79},106376,"v0.101.4",{"id":231,"version":232,"summary_zh":79,"released_at":79},106377,"v0.101.3",{"id":234,"version":235,"summary_zh":79,"released_at":79},106378,"v0.101.2"]