[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-xavier-zy--Awesome-pytorch-list-CNVersion":3,"tool-xavier-zy--Awesome-pytorch-list-CNVersion":61},[4,18,26,36,44,53],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":17},4358,"openclaw","openclaw\u002Fopenclaw","OpenClaw 是一款专为个人打造的本地化 AI 助手，旨在让你在自己的设备上拥有完全可控的智能伙伴。它打破了传统 AI 助手局限于特定网页或应用的束缚，能够直接接入你日常使用的各类通讯渠道，包括微信、WhatsApp、Telegram、Discord、iMessage 等数十种平台。无论你在哪个聊天软件中发送消息，OpenClaw 都能即时响应，甚至支持在 macOS、iOS 和 Android 设备上进行语音交互，并提供实时的画布渲染功能供你操控。\n\n这款工具主要解决了用户对数据隐私、响应速度以及“始终在线”体验的需求。通过将 AI 部署在本地，用户无需依赖云端服务即可享受快速、私密的智能辅助，真正实现了“你的数据，你做主”。其独特的技术亮点在于强大的网关架构，将控制平面与核心助手分离，确保跨平台通信的流畅性与扩展性。\n\nOpenClaw 非常适合希望构建个性化工作流的技术爱好者、开发者，以及注重隐私保护且不愿被单一生态绑定的普通用户。只要具备基础的终端操作能力（支持 macOS、Linux 及 Windows WSL2），即可通过简单的命令行引导完成部署。如果你渴望拥有一个懂你",349277,3,"2026-04-06T06:32:30",[13,14,15,16],"Agent","开发框架","图像","数据工具","ready",{"id":19,"name":20,"github_repo":21,"description_zh":22,"stars":23,"difficulty_score":10,"last_commit_at":24,"category_tags":25,"status":17},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,"2026-04-05T11:01:52",[14,15,13],{"id":27,"name":28,"github_repo":29,"description_zh":30,"stars":31,"difficulty_score":32,"last_commit_at":33,"category_tags":34,"status":17},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 
代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",142651,2,"2026-04-06T23:34:12",[14,13,35],"语言模型",{"id":37,"name":38,"github_repo":39,"description_zh":40,"stars":41,"difficulty_score":32,"last_commit_at":42,"category_tags":43,"status":17},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",107888,"2026-04-06T11:32:50",[14,15,13],{"id":45,"name":46,"github_repo":47,"description_zh":48,"stars":49,"difficulty_score":32,"last_commit_at":50,"category_tags":51,"status":17},4721,"markitdown","microsoft\u002Fmarkitdown","MarkItDown 是一款由微软 AutoGen 团队打造的轻量级 Python 工具，专为将各类文件高效转换为 Markdown 格式而设计。它支持 PDF、Word、Excel、PPT、图片（含 OCR）、音频（含语音转录）、HTML 乃至 YouTube 链接等多种格式的解析，能够精准提取文档中的标题、列表、表格和链接等关键结构信息。\n\n在人工智能应用日益普及的今天，大语言模型（LLM）虽擅长处理文本，却难以直接读取复杂的二进制办公文档。MarkItDown 恰好解决了这一痛点，它将非结构化或半结构化的文件转化为模型“原生理解”且 Token 效率极高的 Markdown 格式，成为连接本地文件与 AI 分析 pipeline 的理想桥梁。此外，它还提供了 MCP（模型上下文协议）服务器，可无缝集成到 Claude Desktop 等 LLM 应用中。\n\n这款工具特别适合开发者、数据科学家及 AI 研究人员使用，尤其是那些需要构建文档检索增强生成（RAG）系统、进行批量文本分析或希望让 AI 
助手直接“阅读”本地文件的用户。虽然生成的内容也具备一定可读性，但其核心优势在于为机器",93400,"2026-04-06T19:52:38",[52,14],"插件",{"id":54,"name":55,"github_repo":56,"description_zh":57,"stars":58,"difficulty_score":10,"last_commit_at":59,"category_tags":60,"status":17},4487,"LLMs-from-scratch","rasbt\u002FLLMs-from-scratch","LLMs-from-scratch 是一个基于 PyTorch 的开源教育项目，旨在引导用户从零开始一步步构建一个类似 ChatGPT 的大型语言模型（LLM）。它不仅是同名技术著作的官方代码库，更提供了一套完整的实践方案，涵盖模型开发、预训练及微调的全过程。\n\n该项目主要解决了大模型领域“黑盒化”的学习痛点。许多开发者虽能调用现成模型，却难以深入理解其内部架构与训练机制。通过亲手编写每一行核心代码，用户能够透彻掌握 Transformer 架构、注意力机制等关键原理，从而真正理解大模型是如何“思考”的。此外，项目还包含了加载大型预训练权重进行微调的代码，帮助用户将理论知识延伸至实际应用。\n\nLLMs-from-scratch 特别适合希望深入底层原理的 AI 开发者、研究人员以及计算机专业的学生。对于不满足于仅使用 API，而是渴望探究模型构建细节的技术人员而言，这是极佳的学习资源。其独特的技术亮点在于“循序渐进”的教学设计：将复杂的系统工程拆解为清晰的步骤，配合详细的图表与示例，让构建一个虽小但功能完备的大模型变得触手可及。无论你是想夯实理论基础，还是为未来研发更大规模的模型做准备",90106,"2026-04-06T11:19:32",[35,15,13,14],{"id":62,"github_repo":63,"name":64,"description_en":65,"description_zh":66,"ai_summary_zh":66,"readme_en":67,"readme_zh":68,"quickstart_zh":69,"use_case_zh":70,"hero_image_url":71,"owner_login":72,"owner_name":73,"owner_avatar_url":74,"owner_bio":75,"owner_company":76,"owner_location":76,"owner_email":77,"owner_twitter":76,"owner_website":76,"owner_url":78,"languages":79,"stars":84,"forks":85,"last_commit_at":86,"license":76,"difficulty_score":87,"env_os":88,"env_gpu":89,"env_ram":90,"env_deps":91,"category_tags":97,"github_topics":98,"view_count":32,"oss_zip_url":76,"oss_zip_packed_at":76,"status":17,"created_at":118,"updated_at":119,"faqs":120,"releases":121},4865,"xavier-zy\u002FAwesome-pytorch-list-CNVersion","Awesome-pytorch-list-CNVersion","Awesome-pytorch-list 翻译工作进行中......","Awesome-pytorch-list-CNVersion 是一个专为中文用户打造的 PyTorch 开源项目精选合集，旨在打破语言壁垒，让国内开发者能更便捷地获取全球优质的深度学习资源。面对 PyTorch 生态中成千上万的库、教程和论文实现，初学者和研究者往往难以快速筛选出高质量内容，而这份清单通过系统化的分类整理，涵盖了自然语言处理、计算机视觉、语音处理、概率生成模型等核心领域，并收录了如 AllenNLP、OpenNMT-py、ESPnet 
等明星项目及其简介。\n\n它不仅列出了官方库和相关工具，还精心整理了教程、书籍、会议报告及前沿论文的代码实现，帮助用户一站式掌握从基础入门到科研创新的全链路资源。无论是刚接触深度学习的学生、需要快速复现算法的研究人员，还是希望寻找成熟组件的工程师，都能从中高效定位所需工具。其独特价值在于将原本分散且多为英文的资源进行了本地化梳理，显著降低了学习门槛和技术调研成本，是中文社区探索 PyTorch 生态不可或缺的导航地图。","Awesome-Pytorch-list｜厉害的Pytorch项目\n========================\n\n![pytorch-logo-dark](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fxavier-zy_Awesome-pytorch-list-CNVersion_readme_c47227853472.png)\n\n## [English Version](https:\u002F\u002Fgithub.com\u002Fbharathgs\u002FAwesome-pytorch-list)\n\n## Contents｜内容\n- [Awesome-Pytorch-list｜厉害的Pytorch项目](#awesome-pytorch-list%E5%8E%89%E5%AE%B3%E7%9A%84pytorch%E9%A1%B9%E7%9B%AE)\n  - [English Version](#english-version)\n  - [Contents｜内容](#contents%E5%86%85%E5%AE%B9)\n  - [Pytorch & related libraries｜Pytorch & 相关库](#pytorch--related-librariespytorch--%E7%9B%B8%E5%85%B3%E5%BA%93)\n    - [NLP & Speech Processing｜自然语言处理 & 语音处理](#nlp--speech-processing%E8%87%AA%E7%84%B6%E8%AF%AD%E8%A8%80%E5%A4%84%E7%90%86--%E8%AF%AD%E9%9F%B3%E5%A4%84%E7%90%86)\n    - [CV｜计算机视觉](#cv%E8%AE%A1%E7%AE%97%E6%9C%BA%E8%A7%86%E8%A7%89)\n    - [Probabilistic\u002FGenerative Libraries｜概率库和生成库](#probabilisticgenerative-libraries%E6%A6%82%E7%8E%87%E5%BA%93%E5%92%8C%E7%94%9F%E6%88%90%E5%BA%93)\n    - [Other libraries｜其他库](#other-libraries%E5%85%B6%E4%BB%96%E5%BA%93)\n  - [Tutorials & books & examples｜教程 & 书籍 & 示例](#tutorials--books--examples%E6%95%99%E7%A8%8B--%E4%B9%A6%E7%B1%8D--%E7%A4%BA%E4%BE%8B)\n  - [Paper implementations｜论文实现](#paper-implementations%E8%AE%BA%E6%96%87%E5%AE%9E%E7%8E%B0)\n  - [Talks & conferences｜报告 & 会议](#talks--conferences%E6%8A%A5%E5%91%8A--%E4%BC%9A%E8%AE%AE)\n  - [Pytorch elsewhere ｜ Pytorch相关](#pytorch-elsewhere--pytorch%E7%9B%B8%E5%85%B3)\n        \n## Pytorch & related libraries｜Pytorch & 相关库\n\n1. [pytorch](http:\u002F\u002Fpytorch.org): Tensors and Dynamic neural networks in Python with strong GPU acceleration | 使用强GPU加速的Python张量计算和动态神经网络.\n\n### NLP & Speech Processing｜自然语言处理 & 语音处理:\n\n1. 
\u003Ckbd>2800+\u003C\u002Fkbd> [text](https:\u002F\u002Fgithub.com\u002Fpytorch\u002Ftext): 针对文本数据和NLP数据集的数据加载和抽象。\n2. \u003Ckbd>1300+\u003C\u002Fkbd> [pytorch-seq2seq](https:\u002F\u002Fgithub.com\u002FIBM\u002Fpytorch-seq2seq): Pytorch中处理seq2seq的开源框架。\n3. \u003Ckbd>1000-\u003C\u002Fkbd> [anuvada](https:\u002F\u002Fgithub.com\u002FSandeep42\u002Fanuvada): NLP可解释模型。\n4. \u003Ckbd>1300+\u003C\u002Fkbd> [audio](https:\u002F\u002Fgithub.com\u002Fpytorch\u002Faudio): 简单的音频I\u002FO。\n5. \u003Ckbd>1000-\u003C\u002Fkbd> [loop](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Floop):  一种跨多说话者的语音生成方法。\n6. \u003Ckbd>null\u003C\u002Fkbd> [fairseq](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Ffairseq-py): Facebook开发的Sequence-to-Sequence python工具包。\n7. \u003Ckbd>1000-\u003C\u002Fkbd> [speech](https:\u002F\u002Fgithub.com\u002Fawni\u002Fspeech): 语音转文字的端到端模型实现。\n8. \u003Ckbd>5100+\u003C\u002Fkbd> [OpenNMT-py](https:\u002F\u002Fgithub.com\u002FOpenNMT\u002FOpenNMT-py): 开源神经机器翻译 http:\u002F\u002Fopennmt.net.\n9. \u003Ckbd>2300+\u003C\u002Fkbd> [neuralcoref](https:\u002F\u002Fgithub.com\u002Fhuggingface\u002Fneuralcoref): 在spaCy中使用神经网络实现快速共指消解。\n10. \u003Ckbd>1000+\u003C\u002Fkbd> [sentiment-discovery](https:\u002F\u002Fgithub.com\u002FNVIDIA\u002Fsentiment-discovery): 基于规模的无监督语言模型在稳健情绪分类中的应用。\n11. \u003Ckbd>2800+\u003C\u002Fkbd> [MUSE](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FMUSE): 一个多语言无监督或有监督词语嵌入库。\n12. \u003Ckbd>1000-\u003C\u002Fkbd> [nmtpytorch](https:\u002F\u002Fgithub.com\u002Flium-lst\u002Fnmtpytorch): PyTorch中的Sequence-to-Sequence框架。\n13. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-wavenet](https:\u002F\u002Fgithub.com\u002Fvincentherrmann\u002Fpytorch-wavenet): 快速生成WaveNet的实现。\n14. \u003Ckbd>1000-\u003C\u002Fkbd> [Tacotron-pytorch](https:\u002F\u002Fgithub.com\u002Fsoobinseo\u002FTacotron-pytorch): Tacotron: 端到端语音合成。\n15. 
\u003Ckbd>10300+\u003C\u002Fkbd> [AllenNLP](https:\u002F\u002Fgithub.com\u002Fallenai\u002Fallennlp): 开源NLP研究库，基于PyTorch。[http:\u002F\u002Fwww.allennlp.org\u002F](https:\u002F\u002Fallennlp.org)\n16. \u003Ckbd>1900+\u003C\u002Fkbd> [PyTorch-NLP](https:\u002F\u002Fgithub.com\u002FPetrochukM\u002FPyTorch-NLP): 为加速NLP研究设立的一个库，包含神经网络层、文本处理模块和众多数据集。 pytorchnlp.readthedocs.io\n17. \u003Ckbd>1000-\u003C\u002Fkbd> [quick-nlp](https:\u002F\u002Fgithub.com\u002Foutcastofmusic\u002Fquick-nlp): 基于FastAI的Pytorch NLP库。\n18. \u003Ckbd>4900+\u003C\u002Fkbd> [TTS](https:\u002F\u002Fgithub.com\u002Fmozilla\u002FTTS): 文本转语音的深度学习框架。\n19. \u003Ckbd>2800+\u003C\u002Fkbd> [LASER](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FLASER): LASER是一个用来计算和使用多语言语句嵌入的库。\n20. \u003Ckbd>1100+\u003C\u002Fkbd> [pyannote-audio](https:\u002F\u002Fgithub.com\u002Fpyannote\u002Fpyannote-audio): 用于说话人分类的神经构建块：语音活动检测, 说话人变化检测, 说话人嵌入。\n21. \u003Ckbd>1000-\u003C\u002Fkbd> [gensen](https:\u002F\u002Fgithub.com\u002FMaluuba\u002Fgensen): 基于大规模多任务学习的通用句子表示。\n22. \u003Ckbd>1000-\u003C\u002Fkbd> [translate](https:\u002F\u002Fgithub.com\u002Fpytorch\u002Ftranslate): 翻译——一个PyTorch语言库。\n23. \u003Ckbd>3900+\u003C\u002Fkbd> [espnet](https:\u002F\u002Fgithub.com\u002Fespnet\u002Fespnet): 端到端语音处理工具集。 espnet.github.io\u002Fespnet\n24. \u003Ckbd>4500+\u003C\u002Fkbd> [pythia](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fpythia): 源于FAIR(Facebook AI Research)的视觉与语言多模态研究的模块化框架。\n25. \u003Ckbd>1400+\u003C\u002Fkbd> [UnsupervisedMT](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FUnsupervisedMT): 基于短语的神经无监督机器翻译。\n26. \u003Ckbd>1300+\u003C\u002Fkbd> [jiant](https:\u002F\u002Fgithub.com\u002Fjsalt18-sentence-repl\u002Fjiant): 通用文本理解模型的jiant工具包。https:\u002F\u002Fjiant.info\n27. \u003Ckbd>4300+\u003C\u002Fkbd> [BERT-PyTorch](https:\u002F\u002Fgithub.com\u002Fcodertimo\u002FBERT-pytorch): Google AI 2018 BERT 的 Pytorch 实现，伴有简单注释。\n28. 
\u003Ckbd>2100+\u003C\u002Fkbd> [InferSent](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FInferSent): NLI的句子嵌入(InferSent)和训练代码。\n29. \u003Ckbd>1300+\u003C\u002Fkbd> [uis-rnn](https:\u002F\u002Fgithub.com\u002Fgoogle\u002Fuis-rnn):无限交错状态递归神经网络(UIS-RNN)算法，能够从嘈杂的环境中分辨声音，对应论文 Fully Supervised Speaker Diarization. arxiv.org\u002Fabs\u002F1810.04719\n30. \u003Ckbd>10600+\u003C\u002Fkbd> [flair](https:\u002F\u002Fgithub.com\u002Fzalandoresearch\u002Fflair): 一个针对最先进的NLP的简单框架。\n31. \u003Ckbd>6200+\u003C\u002Fkbd> [pytext](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fpytext): 基于PyTorch的自然语言建模框架。 fb.me\u002Fpytextdocs\n32. \u003Ckbd>1000-\u003C\u002Fkbd> [voicefilter](https:\u002F\u002Fgithub.com\u002Fmindslab-ai\u002Fvoicefilter): 谷歌AI的VoiceFilter的非官方实现。 http:\u002F\u002Fswpark.me\u002Fvoicefilter\n33. \u003Ckbd>1000-\u003C\u002Fkbd> [BERT-NER](https:\u002F\u002Fgithub.com\u002Fkamalkraj\u002FBERT-NER): 基于BERT的命名体识别(Named-Entity-Recognition)。\n34. \u003Ckbd>1000-\u003C\u002Fkbd> [transfer-nlp](https:\u002F\u002Fgithub.com\u002Ffeedly\u002Ftransfer-nlp): 为可复制实验管理而设计的NLP库。\n35. \u003Ckbd>1000-\u003C\u002Fkbd> [texar-pytorch](https:\u002F\u002Fgithub.com\u002Fasyml\u002Ftexar-pytorch): 机器学习和文本生成工具包。 texar.io\n36. \u003Ckbd>2000+\u003C\u002Fkbd> [pytorch-kaldi](https:\u002F\u002Fgithub.com\u002Fmravanelli\u002Fpytorch-kaldi): pytorch-kaldi 是一个开发中的最先进的dnn\u002Frnn混合语音识别系统。其DNN部分由PyTorch实现，而特征提取、标签计算和解码由kaldi工具包完成。\n37. \u003Ckbd>2900+\u003C\u002Fkbd> [NeMo](https:\u002F\u002Fgithub.com\u002FNVIDIA\u002FNeMo): 神经模块：对话式AI（conversational AI）工具集 nvidia.github.io\u002FNeMo\n38. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-struct](https:\u002F\u002Fgithub.com\u002Fharvardnlp\u002Fpytorch-struct): 经过测试的GPU实现库，实现了深度学习中的一些核心的结构化算法，如HMM, Dep Trees, CKY, ...\n39. \u003Ckbd>1000-\u003C\u002Fkbd> [espresso](https:\u002F\u002Fgithub.com\u002Ffreewym\u002Fespresso): Espresso: 快速的端到端神经语音识别工具集。\n40. 
\u003Ckbd>48900+\u003C\u002Fkbd> [transformers](https:\u002F\u002Fgithub.com\u002Fhuggingface\u002Ftransformers): huggingface Transformers: TensorFlow 2.0  和 PyTorch 上最先进的NLP工具。huggingface.co\u002Ftransformers\n41. \u003Ckbd>1500+\u003C\u002Fkbd> [reformer-pytorch](https:\u002F\u002Fgithub.com\u002Flucidrains\u002Freformer-pytorch): [Reformer](https:\u002F\u002Fopenreview.net\u002Fpdf?id=rkgNKkHtvB) 的 PyTorch 版。\n42. \u003Ckbd>1000-\u003C\u002Fkbd> [torch-metrics](https:\u002F\u002Fgithub.com\u002Fenochkan\u002Ftorch-metrics): PyTorch 中的模型评估指标。\n43. \u003Ckbd>2600+\u003C\u002Fkbd> [speechbrain](https:\u002F\u002Fgithub.com\u002Fspeechbrain\u002Fspeechbrain): SpeechBrain is an open-source and all-in-one speech toolkit based on PyTorch.\n44. \u003Ckbd>1000-\u003C\u002Fkbd> [Backprop](https:\u002F\u002Fgithub.com\u002Fbackprop-ai\u002Fbackprop): Backprop makes it simple to use, finetune, and deploy state-of-the-art ML models.\n\n### CV｜计算机视觉:\n\n1. \u003Ckbd>9400+\u003C\u002Fkbd> [pytorch vision](https:\u002F\u002Fgithub.com\u002Fpytorch\u002Fvision): TorchVision包含流行的数据集、模型架构、计算机视觉中常用的图像变换。\n2. \u003Ckbd>1000-\u003C\u002Fkbd> [pt-styletransfer](https:\u002F\u002Fgithub.com\u002Ftymokvo\u002Fpt-styletransfer): 作为PyTorch中一个类的神经风格转移。\n3. \u003Ckbd>1000-\u003C\u002Fkbd> [OpenFacePytorch](https:\u002F\u002Fgithub.com\u002Fthnkim\u002FOpenFacePytorch): 使用OpenFace的nn4.small2.v1.t7模型的PyTorch模块。\n4. \u003Ckbd>1000-\u003C\u002Fkbd> [img_classification_pk_pytorch](https:\u002F\u002Fgithub.com\u002Ffelixgwu\u002Fimg_classification_pk_pytorch): 将你的图像分类模型和最先进的模型进行快速比较 (比如DenseNet, ResNet, ...)\n5. \u003Ckbd>1400+\u003C\u002Fkbd> [SparseConvNet](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FSparseConvNet): 子流形稀疏卷积神经网络。\n6. \u003Ckbd>1000-\u003C\u002Fkbd> [Convolution_LSTM_pytorch](https:\u002F\u002Fgithub.com\u002Fautoman000\u002FConvolution_LSTM_pytorch): 多层卷积LSTM(长短期记忆网络)模块。\n7. 
\u003Ckbd>5000+\u003C\u002Fkbd> [face-alignment](https:\u002F\u002Fgithub.com\u002F1adrianb\u002Fface-alignment): :fire: 基于 PyTorch 的 2D 和 3D 面部对齐库。 adrianbulat.com\n8. \u003Ckbd>1500+\u003C\u002Fkbd> [pytorch-semantic-segmentation](https:\u002F\u002Fgithub.com\u002FZijunDeng\u002Fpytorch-semantic-segmentation): 语义分割。\n9. \u003Ckbd>1000-\u003C\u002Fkbd> [RoIAlign.pytorch](https:\u002F\u002Fgithub.com\u002Flongcw\u002FRoIAlign.pytorch): PyTorch版本的RoIAlign。其实现基于crop_and_resize，支持CPU和GPU上的前向和后向。\n10. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-cnn-finetune](https:\u002F\u002Fgithub.com\u002Fcreafz\u002Fpytorch-cnn-finetune): 用PyTorch微调预训练卷积神经网络。\n11. \u003Ckbd>1000-\u003C\u002Fkbd> [detectorch](https:\u002F\u002Fgithub.com\u002Fignacio-rocco\u002Fdetectorch): Detectorch - PyTorch版detectron框架，目前仅有detectron的推断(inference)和评估(evalutaion)功能，无训练(training)功能。\n12. \u003Ckbd>4400+\u003C\u002Fkbd> [Augmentor](https:\u002F\u002Fgithub.com\u002Fmdbloice\u002FAugmentor): 用于机器学习的图像增强库。 http:\u002F\u002Faugmentor.readthedocs.io\n13. \u003Ckbd>1000-\u003C\u002Fkbd> [s2cnn](https:\u002F\u002Fgithub.com\u002Fjonas-koehler\u002Fs2cnn): Spherical CNNs：球面卷积网络的PyTorch实现。 (e.g. 全方位图像、全球信号)\n14. \u003Ckbd>2100+\u003C\u002Fkbd> [TorchCV](https:\u002F\u002Fgithub.com\u002Fdonnyyou\u002Ftorchcv): 基于PyTorch的计算机视觉深度学习框架。\n15. \u003Ckbd>8400+\u003C\u002Fkbd> [maskrcnn-benchmark](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fmaskrcnn-benchmark): 实例分割与对象检测的快速模块化参考实现。\n16. \u003Ckbd>2200+\u003C\u002Fkbd> [image-classification-mobile](https:\u002F\u002Fgithub.com\u002Fosmr\u002Fimgclsmob): 计算机视觉卷积网络训练沙盒，包含ImageNet-1K上的与训练分类模型集合。\n17. \u003Ckbd>1000-\u003C\u002Fkbd> [medicaltorch](https:\u002F\u002Fgithub.com\u002Fperone\u002Fmedicaltorch): 一个医学成像框架。http:\u002F\u002Fmedicaltorch.readthedocs.io\n18. \u003Ckbd>8400+\u003C\u002Fkbd> [albumentations](https:\u002F\u002Fgithub.com\u002Falbu\u002Falbumentations): 快速图像增强库和其他库的易用包装器。\n19. 
\u003Ckbd>4200+\u003C\u002Fkbd> [kornia](https:\u002F\u002Fgithub.com\u002Farraiyopensource\u002Fkornia): 开源可微计算机视觉库。https:\u002F\u002Fkornia.org\n20. \u003Ckbd>1000-\u003C\u002Fkbd> [text-detector](https:\u002F\u002Fgithub.com\u002Fs3nh\u002Ftext-detector): 检测和翻译文本。\n21. \u003Ckbd>2200+\u003C\u002Fkbd> [facenet-pytorch](https:\u002F\u002Fgithub.com\u002Ftimesler\u002Ffacenet-pytorch): 预训练Pytorch人脸检测与识别模型，从 [davidsandberg\u002Ffacenet](https:\u002F\u002Fgithub.com\u002Fdavidsandberg\u002Ffacenet) 移植而来。\n22. \u003Ckbd>17300+\u003C\u002Fkbd> [detectron2](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fdetectron2): Detectron2是FAIR的下一代目标检测和分割研究平台。\n23. \u003Ckbd>1000-\u003C\u002Fkbd> [vedaseg](https:\u002F\u002Fgithub.com\u002FMedia-Smart\u002Fvedaseg): 基于PyTorch的语义分割工具箱。\n24. \u003Ckbd>1300+\u003C\u002Fkbd> [ClassyVision](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FClassyVision): A用于图像和视频分类的端到端PyTorch框架。https:\u002F\u002Fclassyvision.ai\n25. \u003Ckbd>1000-\u003C\u002Fkbd> [detecto](https:\u002F\u002Fgithub.com\u002Falankbi\u002Fdetecto): 用 5 行代码构建功能完备的计算机视觉模型。https:\u002F\u002Fdetecto.readthedocs.io\u002F\n26. \u003Ckbd>5000+\u003C\u002Fkbd> [pytorch3d](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fpytorch3d): PyTorch3d 是一个面向深度学习的高效、可复用的 3D 计算机视觉库。 https:\u002F\u002Fpytorch3d.org\u002F\n27. \u003Ckbd>15700+\u003C\u002Fkbd> [MMDetection](https:\u002F\u002Fgithub.com\u002Fopen-mmlab\u002Fmmdetection): MMDetection 是一个开源的目标检测工具箱，属于 [OpenMMLab 项目](https:\u002F\u002Fopen-mmlab.github.io\u002F)。\n28. \u003Ckbd>1000-\u003C\u002Fkbd> [neural-dream](https:\u002F\u002Fgithub.com\u002FProGamerGov\u002Fneural-dream): DeepDream 算法的 PyTorch 实现，可以创造梦一样的幻觉视觉效果。\n29. \u003Ckbd>1000-\u003C\u002Fkbd> [FlashTorch](https:\u002F\u002Fgithub.com\u002FMisaOgura\u002Fflashtorch): Visualization toolkit for neural networks in PyTorch!\n30. 
\u003Ckbd>1000-\u003C\u002Fkbd> [Lucent](https:\u002F\u002Fgithub.com\u002Fgreentfrapp\u002Flucent): Tensorflow and OpenAI Clarity's Lucid adapted for PyTorch.\n31. \u003Ckbd>1300+\u003C\u002Fkbd> [MMDetection3D](https:\u002F\u002Fgithub.com\u002Fopen-mmlab\u002Fmmdetection3d): MMDetection3D is OpenMMLab's next-generation platform for general 3D object detection, a part of the [OpenMMLab project](https:\u002F\u002Fopen-mmlab.github.io\u002F).\n32. \u003Ckbd>2100+\u003C\u002Fkbd> [MMSegmentation](https:\u002F\u002Fgithub.com\u002Fopen-mmlab\u002Fmmsegmentation): MMSegmentation is a semantic segmentation toolbox and benchmark, a part of the [OpenMMLab project](https:\u002F\u002Fopen-mmlab.github.io\u002F).\n33. \u003Ckbd>2200+\u003C\u002Fkbd> [MMEditing](https:\u002F\u002Fgithub.com\u002Fopen-mmlab\u002Fmmediting): MMEditing is a image and video editing toolbox, a part of the [OpenMMLab project](https:\u002F\u002Fopen-mmlab.github.io\u002F).\n34. \u003Ckbd>1000+\u003C\u002Fkbd> [MMAction2](https:\u002F\u002Fgithub.com\u002Fopen-mmlab\u002Fmmaction2): MMAction2 is OpenMMLab's next generation action understanding toolbox and benchmark, a part of the [OpenMMLab project](https:\u002F\u002Fopen-mmlab.github.io\u002F).\n35. \u003Ckbd>1000+\u003C\u002Fkbd> [MMPose](https:\u002F\u002Fgithub.com\u002Fopen-mmlab\u002Fmmpose): MMPose is a pose estimation toolbox and benchmark, a part of the [OpenMMLab project](https:\u002F\u002Fopen-mmlab.github.io\u002F).\n36. \u003Ckbd>1000+\u003C\u002Fkbd> [lightly](https:\u002F\u002Fgithub.com\u002Flightly-ai\u002Flightly) - Lightly is a computer vision framework for self-supervised learning.\n\n### Probabilistic\u002FGenerative Libraries｜概率库和生成库:\n\n1. \u003Ckbd>1000-\u003C\u002Fkbd> [ptstat](https:\u002F\u002Fgithub.com\u002Fstepelu\u002Fptstat): 概率编程和统计推断。\n2. \u003Ckbd>7000+\u003C\u002Fkbd> [pyro](https:\u002F\u002Fgithub.com\u002Fuber\u002Fpyro): 基于 Python 和 PyTorch 的深度通用概率编程库。 http:\u002F\u002Fpyro.ai\n3. 
\u003Ckbd>1000-\u003C\u002Fkbd> [probtorch](https:\u002F\u002Fgithub.com\u002Fprobtorch\u002Fprobtorch): Probabilistic Torch是一个扩展了PyTorch的深度生成模型的库。\n4. \u003Ckbd>1000-\u003C\u002Fkbd> [paysage](https:\u002F\u002Fgithub.com\u002Fdrckf\u002Fpaysage): 基于Python\u002FPyTorch的非监督学习和生成模型库。\n5. \u003Ckbd>1000-\u003C\u002Fkbd> [pyvarinf](https:\u002F\u002Fgithub.com\u002Fctallec\u002Fpyvarinf): Python包，促进了带有变分推断的贝叶斯深度学习方法在pytorch中的应用。\n6. \u003Ckbd>1000-\u003C\u002Fkbd> [pyprob](https:\u002F\u002Fgithub.com\u002Fprobprog\u002Fpyprob): 一个基于PyTorch的概率编程与推断编译的库。\n7. \u003Ckbd>1000-\u003C\u002Fkbd> [mia](https:\u002F\u002Fgithub.com\u002Fspring-epfl\u002Fmia): 一个运行针对机器学习模型的成员推理攻击的库。\n8. \u003Ckbd>1000-\u003C\u002Fkbd> [pro_gan_pytorch](https:\u002F\u002Fgithub.com\u002Fakanimax\u002Fpro_gan_pytorch): 作为PyTorch nn.Module的扩展的ProGAN包。\n9. \u003Ckbd>2000+\u003C\u002Fkbd> [botorch](https:\u002F\u002Fgithub.com\u002Fpytorch\u002Fbotorch): PyTorch中的贝叶斯优化。\n\n\n### Other libraries｜其他库:\n\n1. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch extras](https:\u002F\u002Fgithub.com\u002Fmrdrozdov\u002Fpytorch-extras): PyTorch的额外特性。\n2. \u003Ckbd>1000-\u003C\u002Fkbd> [functional zoo](https:\u002F\u002Fgithub.com\u002Fszagoruyko\u002Ffunctional-zoo): PyTorch和Tensorflow的模型定义和预训练权重。\n3. \u003Ckbd>1600+\u003C\u002Fkbd> [torch-sampling](https:\u002F\u002Fgithub.com\u002Fncullen93\u002Ftorchsample): Pytorch的采样、高级训练、数据增强和实用程序。\n4. \u003Ckbd>1000-\u003C\u002Fkbd> [torchcraft-py](https:\u002F\u002Fgithub.com\u002Fdeepcraft\u002Ftorchcraft-py): TorchCraft的Python包装器，TorchCraft是连接Torch和StarCraft的桥梁。\n5. \u003Ckbd>1000-\u003C\u002Fkbd> [aorun](https:\u002F\u002Fgithub.com\u002Framon-oliveira\u002Faorun): Aorun试图以PyTorch为后端实现类似于Keras的API。\n6. \u003Ckbd>1000-\u003C\u002Fkbd> [logger](https:\u002F\u002Fgithub.com\u002Foval-group\u002Flogger): 机器学习记录器（logger）。\n7. 
\u003Ckbd>1000-\u003C\u002Fkbd> [PyTorch-docset](https:\u002F\u002Fgithub.com\u002Fiamaziz\u002FPyTorch-docset): PyTorch离线文档，结合Dash，Zeal，Velocity或者LovelyDocs使用。\n8. \u003Ckbd>1000-\u003C\u002Fkbd> [convert_torch_to_pytorch](https:\u002F\u002Fgithub.com\u002Fclcarwin\u002Fconvert_torch_to_pytorch): 将Torch t7模型转换为PyTorch模型。\n9. \u003Ckbd>8000+\u003C\u002Fkbd> [pretrained-models.pytorch](https:\u002F\u002Fgithub.com\u002FCadene\u002Fpretrained-models.pytorch): PyTorch 预训练卷积神经网络：NASNet, ResNeXt, ResNet, InceptionV4, InceptionResnetV2, Xception, DPN 等等。该项目的目标是帮助复制研究论文结果。\n10. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch_fft](https:\u002F\u002Fgithub.com\u002Flocuslab\u002Fpytorch_fft): CUDA FFTs的PyTorch包装器。\n11. \u003Ckbd>1000-\u003C\u002Fkbd> [caffe_to_torch_to_pytorch](https:\u002F\u002Fgithub.com\u002Ffanq15\u002Fcaffe_to_torch_to_pytorch): Caffe模型转PyTorch\u002FTorch模型，Torch模型转PyTorch模型。\n12. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-extension](https:\u002F\u002Fgithub.com\u002Fsniklaus\u002Fpytorch-extension): PyTorch的CUDA扩展示例，计算了两个张量的[哈达玛积(Hadamard product)](https:\u002F\u002Fbaike.baidu.com\u002Fitem\u002F哈达玛积\u002F18894493?fr=aladdin)。\n13. \u003Ckbd>7000+\u003C\u002Fkbd> [tensorboard-pytorch](https:\u002F\u002Fgithub.com\u002Flanpa\u002Ftensorboard-pytorch): 该模块以tensorboard格式保存PyTorch张量以供检查。目前支持tensorboard中的标量、图像、音频、直方图等特性。\n14. \u003Ckbd>2400+\u003C\u002Fkbd> [gpytorch](https:\u002F\u002Fgithub.com\u002Fjrg365\u002Fgpytorch): GPyTorch是一个用PyTorch实现的高斯过程库。它可以轻松地创建可伸缩、灵活和模块化的高斯过程模型。\n15. \u003Ckbd>2500+\u003C\u002Fkbd> [spotlight](https:\u002F\u002Fgithub.com\u002Fmaciejkula\u002Fspotlight): 深度推荐模型。\n16. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-cns](https:\u002F\u002Fgithub.com\u002Fawentzonline\u002Fpytorch-cns): 基于PyTorch的广义压缩网络搜索（Generalized [Compressed Network Search](http:\u002F\u002Fpeople.idsia.ch\u002F~juergen\u002Fcompressednetworksearch.html)）。\n17. 
\u003Ckbd>1000-\u003C\u002Fkbd> [pyinn](https:\u002F\u002Fgithub.com\u002Fszagoruyko\u002Fpyinn): CuPy实现融合PyTorch操作。\n18. \u003Ckbd>1000-\u003C\u002Fkbd> [inferno](https:\u002F\u002Fgithub.com\u002Fnasimrahaman\u002Finferno): 关于PyTorch的实用程序库。\n19. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-fitmodule](https:\u002F\u002Fgithub.com\u002Fhenryre\u002Fpytorch-fitmodule): 一种用于PyTorch模块的超简单拟合方法。\n20. \u003Ckbd>4000+\u003C\u002Fkbd> [inferno-sklearn](https:\u002F\u002Fgithub.com\u002Fdnouri\u002Finferno): 一个基于PyTorch封装且兼容scikit-learn的神经网络库。\n21. \u003Ckbd>null\u003C\u002Fkbd> [pytorch-caffe-darknet-convert](https:\u002F\u002Fgithub.com\u002Fmarvis\u002Fpytorch-caffe-darknet-convert): 在 pytorch, caffe prototxt\u002Fweights 和 darknet cfg\u002Fweights 之间转换。\n22. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch2caffe](https:\u002F\u002Fgithub.com\u002Flongcw\u002Fpytorch2caffe): 将PyTorch模型转换成Caffe模型。\n23. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-tools](https:\u002F\u002Fgithub.com\u002Fnearai\u002Fpytorch-tools): PyTorch工具。\n24. \u003Ckbd>1900+\u003C\u002Fkbd> [sru](https:\u002F\u002Fgithub.com\u002Ftaolei87\u002Fsru): 训练RNNs和训练CNNs一样快。 (arxiv.org\u002Fabs\u002F1709.02755)\n25. \u003Ckbd>1000-\u003C\u002Fkbd> [torch2coreml](https:\u002F\u002Fgithub.com\u002Fprisma-ai\u002Ftorch2coreml): Torch7 -> CoreML，该工具可将Torch7模型转换为[Apple CoreML](https:\u002F\u002Fdeveloper.apple.com\u002Fdocumentation\u002Fcoreml)格式以便在Apple设备上运行。\n26. \u003Ckbd>1700+\u003C\u002Fkbd> [PyTorch-Encoding](https:\u002F\u002Fgithub.com\u002Fzhanghang1989\u002FPyTorch-Encoding): PyTorch 深度纹理编码网络 (Deep Texture Encoding Network) http:\u002F\u002Fhangzh.com\u002FPyTorch-Encoding\n27. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-ctc](https:\u002F\u002Fgithub.com\u002Fryanleary\u002Fpytorch-ctc): PyTorch-CTC 实现了CTC(联结主义时间分类，Connectionist Temporal Classification)集束搜索（Beam Search）解码。C++代码借鉴了TensorFlow，并通过一些改进增加了灵活性。\n28. 
\u003Ckbd>1000-\u003C\u002Fkbd> [candlegp](https:\u002F\u002Fgithub.com\u002Ft-vi\u002Fcandlegp): Pytorch中的高斯过程。\n29. \u003Ckbd>1000-\u003C\u002Fkbd> [dpwa](https:\u002F\u002Fgithub.com\u002Floudinthecloud\u002Fdpwa): 基于成对平均（Pair-Wise Averaging）的分布式学习。\n30. \u003Ckbd>1000-\u003C\u002Fkbd> [dni-pytorch](https:\u002F\u002Fgithub.com\u002Fkoz4k\u002Fdni-pytorch): 基于合成梯度的PyTorch解耦神经接口。\n31. \u003Ckbd>4000+\u003C\u002Fkbd> [skorch](https:\u002F\u002Fgithub.com\u002Fdnouri\u002Fskorch): 一个基于PyTorch封装且兼容scikit-learn的神经网络库。\n32. \u003Ckbd>3600+\u003C\u002Fkbd> [ignite](https:\u002F\u002Fgithub.com\u002Fpytorch\u002Fignite): Ignite是一个高级库，帮助你在PyTorch中训练神经网络。\n33. \u003Ckbd>1000-\u003C\u002Fkbd> [Arnold](https:\u002F\u002Fgithub.com\u002Fglample\u002FArnold): Arnold - DOOM 游戏代理。\n34. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-mcn](https:\u002F\u002Fgithub.com\u002Falbanie\u002Fpytorch-mcn): 将MatConvNet模型转换为PyTorch模型。\n35. \u003Ckbd>3200+\u003C\u002Fkbd> [simple-faster-rcnn-pytorch](https:\u002F\u002Fgithub.com\u002Fchenyuntc\u002Fsimple-faster-rcnn-pytorch): Faster R-CNN 的简化实现，性能与原始论文相当。\n36. \u003Ckbd>1000-\u003C\u002Fkbd> [generative_zoo](https:\u002F\u002Fgithub.com\u002FDL-IT\u002Fgenerative_zoo): generative_zoo提供了PyTorch中一些生成模型的工作实现。\n37. \u003Ckbd>1800+\u003C\u002Fkbd> [pytorchviz](https:\u002F\u002Fgithub.com\u002Fszagoruyko\u002Fpytorchviz): 可视化PyTorch的运行图。\n38. \u003Ckbd>1000-\u003C\u002Fkbd> [cogitare](https:\u002F\u002Fgithub.com\u002Fcogitare-ai\u002Fcogitare): Cogitare - 一个现代、快速、模块化的深度学习和机器学习框架。\n39. \u003Ckbd>1000-\u003C\u002Fkbd> [pydlt](https:\u002F\u002Fgithub.com\u002Fdmarnerides\u002Fpydlt): 基于PyTorch的深度学习工具箱。\n40. \u003Ckbd>1000-\u003C\u002Fkbd> [semi-supervised-pytorch](https:\u002F\u002Fgithub.com\u002Fwohlert\u002Fsemi-supervised-pytorch): 各种基于VAE的半监督模型和生成模型的实现。\n41. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch_cluster](https:\u002F\u002Fgithub.com\u002Frusty1s\u002Fpytorch_cluster): 优化图簇算法的PyTorch扩展库。\n42. 
\u003Ckbd>1000-\u003C\u002Fkbd> [neural-assembly-compiler](https:\u002F\u002Fgithub.com\u002Faditya-khant\u002Fneural-assembly-compiler): 基于自适应神经编译的PyTorch神经汇编编译器。\n43. \u003Ckbd>1000-\u003C\u002Fkbd> [caffemodel2pytorch](https:\u002F\u002Fgithub.com\u002Fvadimkantorov\u002Fcaffemodel2pytorch): 将Caffe模型转换为PyTorch模型。\n44. \u003Ckbd>1000-\u003C\u002Fkbd> [extension-cpp](https:\u002F\u002Fgithub.com\u002Fpytorch\u002Fextension-cpp): PyTorch中的C++扩展。\n45. \u003Ckbd>1000-\u003C\u002Fkbd> [pytoune](https:\u002F\u002Fgithub.com\u002FGRAAL-Research\u002Fpytoune): 类Keras框架和实用程序。\n46. \u003Ckbd>1000-\u003C\u002Fkbd> [jetson-reinforcement](https:\u002F\u002Fgithub.com\u002Fdusty-nv\u002Fjetson-reinforcement): 使用PyTorch，OpenAI Gym和Gazebo机器人模拟的NVIDIA Jetson深度强化学习GPU库。\n47. \u003Ckbd>1000-\u003C\u002Fkbd> [matchbox](https:\u002F\u002Fgithub.com\u002Fsalesforce\u002Fmatchbox): 编写单个示例的PyTorch代码，然后小批量地高效运行。\n48. \u003Ckbd>1000-\u003C\u002Fkbd> [torch-two-sample](https:\u002F\u002Fgithub.com\u002Fjosipd\u002Ftorch-two-sample): PyTorch双样本测试库。\n49. \u003Ckbd>3100+\u003C\u002Fkbd> [pytorch-summary](https:\u002F\u002Fgithub.com\u002Fsksq96\u002Fpytorch-summary): PyTorch模型总结，类似于Keras中的`model.summary()`。\n50. \u003Ckbd>1000-\u003C\u002Fkbd> [mpl.pytorch](https:\u002F\u002Fgithub.com\u002FBelBES\u002Fmpl.pytorch): MaxPoolingLoss的PyTorch实现。\n51. \u003Ckbd>null\u003C\u002Fkbd> [scVI-dev](https:\u002F\u002Fgithub.com\u002FYosefLab\u002FscVI-dev): 链接失效。\n52. \u003Ckbd>5500+\u003C\u002Fkbd> [apex](https:\u002F\u002Fgithub.com\u002FNVIDIA\u002Fapex): 一个PyTorch扩展：面向精简混合精度和分布式训练。\n53. \u003Ckbd>3100+\u003C\u002Fkbd> [ELF](https:\u002F\u002Fgithub.com\u002Fpytorch\u002FELF): ELF: 游戏研究平台，复现了AlphaGoZero\u002FAlphaZero。\n54. \u003Ckbd>1000-\u003C\u002Fkbd> [Torchlite](https:\u002F\u002Fgithub.com\u002FEKami\u002FTorchlite): Pytorch建立在sklearn、Pytorch和Tensorflow等流行机器学习框架上的高水平库。\n55. 
\u003Ckbd>1000-\u003C\u002Fkbd> [joint-vae](https:\u002F\u002Fgithub.com\u002FSchlumberger\u002Fjoint-vae): JointVAE的PyTorch实现，一个面向分离连续和离散变异因素的框架 :star2:。\n56. \u003Ckbd>1000-\u003C\u002Fkbd> [SLM-Lab](https:\u002F\u002Fgithub.com\u002Fkengz\u002FSLM-Lab): PyTorch模块化深度强化学习框架。\n57. \u003Ckbd>1000-\u003C\u002Fkbd> [bindsnet](https:\u002F\u002Fgithub.com\u002FHananel-Hazan\u002Fbindsnet): 一个Python包，可借助PyTorch `Tensor` 功能在CPUs或GPUs上模拟脉冲神经网络(SNNs, Spiking Neural Networks)。\n58. \u003Ckbd>1000-\u003C\u002Fkbd> [pro_gan_pytorch](https:\u002F\u002Fgithub.com\u002Fakanimax\u002Fpro_gan_pytorch): 作为 PyTorch nn.Module 扩展的 ProGAN 包。\n59. \u003Ckbd>11500+\u003C\u002Fkbd> [pytorch_geometric](https:\u002F\u002Fgithub.com\u002Frusty1s\u002Fpytorch_geometric): PyTorch几何深度学习扩展库。\n60. \u003Ckbd>1000-\u003C\u002Fkbd> [torchplus](https:\u002F\u002Fgithub.com\u002Fknighton\u002Ftorchplus): 在 PyTorch modules 上实现 + 运算符，返回序列。\n61. \u003Ckbd>1000-\u003C\u002Fkbd> [lagom](https:\u002F\u002Fgithub.com\u002Fzuoxingdong\u002Flagom): lagom: 用于强化学习算法快速原型构建的轻量级PyTorch架构。\n62. \u003Ckbd>1000-\u003C\u002Fkbd> [torchbearer](https:\u002F\u002Fgithub.com\u002Fecs-vlc\u002Ftorchbearer): torchbearer: PyTorch模型拟合库。\n63. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-maml-rl](https:\u002F\u002Fgithub.com\u002Ftristandeleu\u002Fpytorch-maml-rl): 强化学习中的模型不可知元学习(MAML, Model-Agnostic Meta-Learning)。\n64. \u003Ckbd>1000-\u003C\u002Fkbd> [NALU](https:\u002F\u002Fgithub.com\u002Fbharathgs\u002FNALU): 神经算术逻辑单元(Neural Arithmetic Logic Units)的PyTorch基本实现，论文：arxiv.org\u002Fpdf\u002F1808.00508.pdf 。\n65. \u003Ckbd>1000-\u003C\u002Fkbd> [QuCumber](https:\u002F\u002Fgithub.com\u002FPIQuIL\u002FQuCumber): 神经网络多体波函数重构。\n66. \u003Ckbd>1000-\u003C\u002Fkbd> [magnet](https:\u002F\u002Fgithub.com\u002FMagNet-DL\u002Fmagnet): 自我建立的深度学习项目。http:\u002F\u002Fmagnet-dl.readthedocs.io\u002F\n67. 
\u003Ckbd>1000-\u003C\u002Fkbd> [opencv_transforms](https:\u002F\u002Fgithub.com\u002Fjbohnslav\u002Fopencv_transforms): OpenCV实现Torchvision的图像分割。\n68. \u003Ckbd>21100+\u003C\u002Fkbd> [fastai](https:\u002F\u002Fgithub.com\u002Ffastai\u002Ffastai): fast.ai 深度学习库、课程和教程。\n69. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-dense-correspondence](https:\u002F\u002Fgithub.com\u002FRobotLocomotion\u002Fpytorch-dense-correspondence): [《Dense Object Nets: Learning Dense Visual Object Descriptors By and For Robotic Manipulation》](arxiv.org\u002Fpdf\u002F1806.08756.pdf) 一文的代码。\n70. \u003Ckbd>1000-\u003C\u002Fkbd> [colorization-pytorch](https:\u002F\u002Fgithub.com\u002Frichzhang\u002Fcolorization-pytorch): PyTorch实现交互式深度着色(Interactive Deep Colorization)。 richzhang.github.io\u002Fideepcolor\n71. \u003Ckbd>1000-\u003C\u002Fkbd> [beauty-net](https:\u002F\u002Fgithub.com\u002Fcms-flash\u002Fbeauty-net): PyTorch一个简单、灵活、可扩展的PyTorch模板。\n72. \u003Ckbd>1000-\u003C\u002Fkbd> [OpenChem](https:\u002F\u002Fgithub.com\u002FMariewelt\u002FOpenChem): OpenChem: 面向计算化学和药物设计研究的深度学习工具包 mariewelt.github.io\u002FOpenChem 。\n73. \u003Ckbd>1000-\u003C\u002Fkbd> [torchani](https:\u002F\u002Fgithub.com\u002Faiqm\u002Ftorchani): PyTorch精确神经网络电位。 aiqm.github.io\u002Ftorchani\n74. \u003Ckbd>1000-\u003C\u002Fkbd> [PyTorch-LBFGS](https:\u002F\u002Fgithub.com\u002Fhjmshi\u002FPyTorch-LBFGS): PyTorch实现L-BFGS。\n75. \u003Ckbd>2400+\u003C\u002Fkbd> [gpytorch](https:\u002F\u002Fgithub.com\u002Fcornellius-gp\u002Fgpytorch): PyTorch中对高斯过程的高效且模块化的实现。\n76. \u003Ckbd>1000-\u003C\u002Fkbd> [hessian](https:\u002F\u002Fgithub.com\u002Fmariogeiger\u002Fhessian): PyTorch版hessian。\n77. \u003Ckbd>1000-\u003C\u002Fkbd> [vel](https:\u002F\u002Fgithub.com\u002FMillionIntegrals\u002Fvel): 深度学习研究中的速度。\n78. \u003Ckbd>1000-\u003C\u002Fkbd> [nonechucks](https:\u002F\u002Fgithub.com\u002Fmsamogh\u002Fnonechucks): 动态地处理数据集中的坏样本，使用转换作为过滤器。\n79. 
\u003Ckbd>1000+\u003C\u002Fkbd> [torchstat](https:\u002F\u002Fgithub.com\u002FSwall0w\u002Ftorchstat): PyTorch中的模型分析器。\n80. \u003Ckbd>1400+\u003C\u002Fkbd> [QNNPACK](https:\u002F\u002Fgithub.com\u002Fpytorch\u002FQNNPACK): 量化神经网络包—量化神经网络算子的移动优化实现。\n81. \u003Ckbd>3600+\u003C\u002Fkbd> [torchdiffeq](https:\u002F\u002Fgithub.com\u002Frtqichen\u002Ftorchdiffeq): PyTorch解常微分方程（ODE），使用的是全GPU支持、O(1)内存复杂度的反向传播算法。\n82. \u003Ckbd>1000+\u003C\u002Fkbd> [redner](https:\u002F\u002Fgithub.com\u002FBachiLi\u002Fredner): 可微的 Monte Carlo 路径跟踪器。\n83. \u003Ckbd>1000-\u003C\u002Fkbd> [pixyz](https:\u002F\u002Fgithub.com\u002Fmasa-su\u002Fpixyz): 一个库，用来以更简洁、直观和可扩展的方式开发深层生成模型。\n84. \u003Ckbd>1000-\u003C\u002Fkbd> [euclidesdb](https:\u002F\u002Fgithub.com\u002Fperone\u002Feuclidesdb): 一种多模型机器学习特征嵌入数据库。 http:\u002F\u002Feuclidesdb.readthedocs.io\n85. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch2keras](https:\u002F\u002Fgithub.com\u002Fnerox8664\u002Fpytorch2keras): 将PyTorch模型转换为Keras模型。\n86. \u003Ckbd>1000-\u003C\u002Fkbd> [salad](https:\u002F\u002Fgithub.com\u002Fdomainadaptation\u002Fsalad): 域适应和半监督学习工具箱。\n87. \u003Ckbd>1000-\u003C\u002Fkbd> [netharn](https:\u002F\u002Fgithub.com\u002FErotemic\u002Fnetharn): PyTorch的参数化拟合和预测线束（Prediction Harnesses）。\n88. \u003Ckbd>7700+\u003C\u002Fkbd> [dgl](https:\u002F\u002Fgithub.com\u002Fdmlc\u002Fdgl): Python包，基于现有的DL框架，用于简化对图形的深度学习。http:\u002F\u002Fdgl.ai.\n89. \u003Ckbd>1600+\u003C\u002Fkbd> [gandissect](https:\u002F\u002Fgithub.com\u002FCSAILVision\u002Fgandissect): 基于PyTorch的工具，用于可视化和理解GAN的神经元。gandissect.csail.mit.edu\n90. \u003Ckbd>1000-\u003C\u002Fkbd> [delira](https:\u002F\u002Fgithub.com\u002Fjustusschock\u002Fdelira): 基于PyTorch和Tensorlow的快速原型和训练深层神经网络的轻量级框架，用于医疗成像。 delira.rtfd.io\n91. \u003Ckbd>1000-\u003C\u002Fkbd> [mushroom](https:\u002F\u002Fgithub.com\u002FAIRLab-POLIMI\u002Fmushroom): 强化学习实验的Python库。\n92. \u003Ckbd>1000-\u003C\u002Fkbd> [Xlearn](https:\u002F\u002Fgithub.com\u002Fthuml\u002FXlearn): 迁移学习库。\n93. 
\u003Ckbd>1000-\u003C\u002Fkbd> [geoopt](https:\u002F\u002Fgithub.com\u002Fferrine\u002Fgeoopt): 基于PyTorch优化的黎曼自适应优化方法。\n94. \u003Ckbd>1000-\u003C\u002Fkbd> [vegans](https:\u002F\u002Fgithub.com\u002Funit8co\u002Fvegans): 包含多种现有的GANs。\n95. \u003Ckbd>4200+\u003C\u002Fkbd> [kornia](https:\u002F\u002Fgithub.com\u002Farraiyopensource\u002Fkornia): PyTorch开源可微计算机视觉库。 https:\u002F\u002Fkornia.org\n96. \u003Ckbd>1000-\u003C\u002Fkbd> [AdverTorch](https:\u002F\u002Fgithub.com\u002FBorealisAI\u002Fadvertorch): 研究对抗鲁棒性的工具箱。\n97. \u003Ckbd>2800+\u003C\u002Fkbd> [AdaBound](https:\u002F\u002Fgithub.com\u002FLuolc\u002FAdaBound): 一个优化器，训练速度和Adam一样快，和SGD一样好。\n98. \u003Ckbd>1000-\u003C\u002Fkbd> [fenchel-young-losses](https:\u002F\u002Fgithub.com\u002Fmblondel\u002Ffenchel-young-losses): 在PyTorch\u002FTensorFlow\u002Fscikit-learn中使用Fenchel-Young损失作为概率分类的损失函数。\n99. \u003Ckbd>2700+\u003C\u002Fkbd> [pytorch-OpCounter](https:\u002F\u002Fgithub.com\u002FLyken17\u002Fpytorch-OpCounter): 统计PyTorch模型的MACs\u002FFLOPs。\n100. \u003Ckbd>1000-\u003C\u002Fkbd> [Tor10](https:\u002F\u002Fgithub.com\u002Fkaihsin\u002FTor10): 基于PyTorch，为量子模拟设计的通用张量网络库。\n101. \u003Ckbd>2600+\u003C\u002Fkbd> [Catalyst](https:\u002F\u002Fgithub.com\u002Fcatalyst-team\u002Fcatalyst): PyTorch DL&RL 研究的高级实用程序。它的开发重点是可重复性、快速实验和代码\u002F思想重用。能够研究\u002F开发新的东西，而不是编写另一个常规的训练循环。\n102. \u003Ckbd>1500+\u003C\u002Fkbd> [Ax](https:\u002F\u002Fgithub.com\u002Ffacebook\u002FAx): 自适应实验平台。\n103. \u003Ckbd>1000-\u003C\u002Fkbd> [pywick](https:\u002F\u002Fgithub.com\u002Fachaiah\u002Fpywick): 高水平的PyTorch神经网络训练库。\n104. \u003Ckbd>1000-\u003C\u002Fkbd> [torchgpipe](https:\u002F\u002Fgithub.com\u002Fkakaobrain\u002Ftorchgpipe): PyTorch实现GPipe。 torchgpipe.readthedocs.io\n105. \u003Ckbd>1000+\u003C\u002Fkbd> [hub](https:\u002F\u002Fgithub.com\u002Fpytorch\u002Fhub): Pytorch Hub 是一个预训练模型库，用来提升研究的可重复性。\n106. 
\u003Ckbd>14600+\u003C\u002Fkbd> [pytorch-lightning](https:\u002F\u002Fgithub.com\u002FwilliamFalcon\u002Fpytorch-lightning): 面向ML研究人员的轻量级PyTorch包装器。缩放模型，少写样板。\n107. \u003Ckbd>1000-\u003C\u002Fkbd> [Tor10](https:\u002F\u002Fgithub.com\u002Fkaihsin\u002FTor10): 基于pytorch为量子模拟设计的通用张量网络库。\n108. \u003Ckbd>3100+\u003C\u002Fkbd> [tensorwatch](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002Ftensorwatch): 针对Python机器学习与数据科学的调试、监控与可视化。\n109. \u003Ckbd>1000-\u003C\u002Fkbd> [wavetorch](https:\u002F\u002Fgithub.com\u002Ffancompute\u002Fwavetorch): 波动方程的数值求解与反传播。 arxiv.org\u002Fabs\u002F1904.12831\n110. \u003Ckbd>1000-\u003C\u002Fkbd> [diffdist](https:\u002F\u002Fgithub.com\u002Fag14774\u002Fdiffdist): diffdist是一个面向PyTorch的Python库。它扩展了`torch.autograd`的默认功能，并增加了对进程间可微通信的支持。\n111. \u003Ckbd>1000-\u003C\u002Fkbd> [torchprof](https:\u002F\u002Fgithub.com\u002Fawwong1\u002Ftorchprof): 用于Pytorch模型逐层分析的最小依赖库。\n112. \u003Ckbd>1000-\u003C\u002Fkbd> [osqpth](https:\u002F\u002Fgithub.com\u002Foxfordcontrol\u002Fosqpth): PyTorch可微OSQP求解器。\n113. \u003Ckbd>1000-\u003C\u002Fkbd> [mctorch](https:\u002F\u002Fgithub.com\u002Fmctorch\u002Fmctorch): 面向深度学习的流形优化库。\n114. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-hessian-eigenthings](https:\u002F\u002Fgithub.com\u002Fnoahgolmant\u002Fpytorch-hessian-eigenthings): 使用Hessian向量积和随机幂迭代的高效PyTorch Hessian特征分解。\n115. \u003Ckbd>1200+\u003C\u002Fkbd> [MinkowskiEngine](https:\u002F\u002Fgithub.com\u002FStanfordVL\u002FMinkowskiEngine): 闵可夫斯基引擎是一个用于广义稀疏卷积和高维稀疏张量的自动微分方法库。\n116. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-cpp-rl](https:\u002F\u002Fgithub.com\u002FOmegastick\u002Fpytorch-cpp-rl): CppRl是一个强化学习框架，用 PyTorch C++ 前端编写。\n117. \u003Ckbd>1000+\u003C\u002Fkbd> [pytorch-toolbelt](https:\u002F\u002Fgithub.com\u002FBloodAxe\u002Fpytorch-toolbelt): PyTorch扩展，用来进行快速R&D原型开发和Kaggle代码收集。\n118. \u003Ckbd>1000-\u003C\u002Fkbd> [argus-tensor-stream](https:\u002F\u002Fgithub.com\u002FFonbet\u002Fargus-tensor-stream): 一个库，用来将实时视频流解码至CUDA内存。tensorstream.argus-ai.com\n119. 
\u003Ckbd>1000-\u003C\u002Fkbd> [macarico](https:\u002F\u002Fgithub.com\u002Fhal3\u002Fmacarico): 在 PyTorch 中学习搜索。\n120. \u003Ckbd>1900+\u003C\u002Fkbd> [rlpyt](https:\u002F\u002Fgithub.com\u002Fastooke\u002Frlpyt): PyTorch 中的强化学习。\n121. \u003Ckbd>1000-\u003C\u002Fkbd> [pywarm](https:\u002F\u002Fgithub.com\u002Fblue-season\u002Fpywarm): 为 PyTorch 建立神经网络的一种更清洁的方法。https:\u002F\u002Fblue-season.github.io\u002Fpywarm\u002F\n122. \u003Ckbd>1300+\u003C\u002Fkbd> [learn2learn](https:\u002F\u002Fgithub.com\u002Flearnables\u002Flearn2learn): PyTorch元学习框架。http:\u002F\u002Flearn2learn.net\n123. \u003Ckbd>1000-\u003C\u002Fkbd> [torchbeast](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Ftorchbeast): 分布式强化学习的PyTorch平台。\n124. \u003Ckbd>1100+\u003C\u002Fkbd> [higher](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fhigher): higher 是一个PyTorch库，允许用户获得跨越训练循环而不是单个训练步骤的损失的高阶梯度。\n125. \u003Ckbd>null\u003C\u002Fkbd> [Torchelie](https:\u002F\u002Fgithub.com\u002FVermeille\u002FTorchelie\u002F): Torchélie 是面向PyTorch的一系列工具函数、层、损失、模型、训练器等的合集。 https:\u002F\u002Ftorchelie.readthedocs.org\u002F\n126. \u003Ckbd>1000-\u003C\u002Fkbd> [CrypTen](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FCrypTen): CrypTen 是一个隐私保护机器学习框架，它使用PyTorch编写，允许研究人员和开发人员使用加密数据训练模型。CrypTen目前支持将安全的多方计算（[Secure Multiparty Computation](https:\u002F\u002Fen.wikipedia.org\u002Fwiki\u002FSecure_multi-party_computation)）作为其加密机制。\n127. \u003Ckbd>1000+\u003C\u002Fkbd> [cvxpylayers](https:\u002F\u002Fgithub.com\u002Fcvxgrp\u002Fcvxpylayers): cvxpylayers 是一个 Python 库，用于在PyTorch中构造可微凸优化层。\n128. \u003Ckbd>1000+\u003C\u002Fkbd> [RepDistiller](https:\u002F\u002Fgithub.com\u002FHobbitLong\u002FRepDistiller): 对比表示蒸馏（CRD）和最新知识蒸馏方法的基准。\n129. \u003Ckbd>2500+\u003C\u002Fkbd> [kaolin](https:\u002F\u002Fgithub.com\u002FNVIDIAGameWorks\u002Fkaolin): 一个旨在加速3D深度学习研究的PyTorch库。\n130. \u003Ckbd>1000-\u003C\u002Fkbd> [PySNN](https:\u002F\u002Fgithub.com\u002FBasBuller\u002FPySNN): 高效的尖峰神经网络框架，建立在PyTorch之上，用于GPU加速。\n131. 
\u003Ckbd>1000-\u003C\u002Fkbd> [sparktorch](https:\u002F\u002Fgithub.com\u002Fdmmiller612\u002Fsparktorch): 在 Apache Spark 上训练和运行 PyTorch 模型。\n132. \u003Ckbd>3400+\u003C\u002Fkbd> [pytorch-metric-learning](https:\u002F\u002Fgithub.com\u002FKevinMusgrave\u002Fpytorch-metric-learning): 在应用程序中使用度量学习的最简单方法。模块化，灵活，可扩展。用 PyTorch 构建。\n133. \u003Ckbd>1000-\u003C\u002Fkbd> [autonomous-learning-library](https:\u002F\u002Fgithub.com\u002Fcpnota\u002Fautonomous-learning-library): 用于建立深度强化学习代理的 PyTorch 库。\n134. \u003Ckbd>1000-\u003C\u002Fkbd> [flambe](https:\u002F\u002Fgithub.com\u002Fasappresearch\u002Fflambe): 一个用于加速研究及其生产路径的ML框架。https:\u002F\u002Fflambe.ai\n135. \u003Ckbd>1900+\u003C\u002Fkbd> [pytorch-optimizer](https:\u002F\u002Fgithub.com\u002Fjettify\u002Fpytorch-optimizer): Collections of modern optimization algorithms for PyTorch, includes: AccSGD, AdaBound, AdaMod, DiffGrad, Lamb, RAdam, RAdam, Yogi.\n136. \u003Ckbd>2200+\u003C\u002Fkbd> [PyTorch-VAE](https:\u002F\u002Fgithub.com\u002FAntixK\u002FPyTorch-VAE): A Collection of Variational Autoencoders (VAE) in PyTorch.\n137. \u003Ckbd>16700+\u003C\u002Fkbd> [ray](https:\u002F\u002Fgithub.com\u002Fray-project\u002Fray): A fast and simple framework for building and running distributed applications. Ray is packaged with RLlib, a scalable reinforcement learning library, and Tune, a scalable hyperparameter tuning library. ray.io\n138. \u003Ckbd>1000-\u003C\u002Fkbd> [Pytorch Geometric Temporal](https:\u002F\u002Fgithub.com\u002Fbenedekrozemberczki\u002Fpytorch_geometric_temporal): A temporal extension library for PyTorch Geometric.\n139. \u003Ckbd>1000-\u003C\u002Fkbd> [Poutyne](https:\u002F\u002Fgithub.com\u002FGRAAL-Research\u002Fpoutyne): A Keras-like framework for PyTorch that handles much of the boilerplating code needed to train neural networks.\n140. \u003Ckbd>1000-\u003C\u002Fkbd> [Pytorch-Toolbox](https:\u002F\u002Fgithub.com\u002FPistonY\u002Ftorch-toolbox): This is toolbox project for Pytorch. 
Aiming to make you write Pytorch code more easier, readable and concise.\n141. \u003Ckbd>1000-\u003C\u002Fkbd> [Pytorch-contrib](https:\u002F\u002Fgithub.com\u002Fpytorch\u002Fcontrib): It contains reviewed implementations of ideas from recent machine learning papers.\n142. \u003Ckbd>6200+\u003C\u002Fkbd> [EfficientNet PyTorch](https:\u002F\u002Fgithub.com\u002Flukemelas\u002FEfficientNet-PyTorch): It contains an op-for-op PyTorch reimplementation of EfficientNet, along with pre-trained models and examples.\n143. \u003Ckbd>1300+\u003C\u002Fkbd> [PyTorch\u002FXLA](https:\u002F\u002Fgithub.com\u002Fpytorch\u002Fxla): PyTorch\u002FXLA is a Python package that uses the XLA deep learning compiler to connect the PyTorch deep learning framework and Cloud TPUs.\n144. \u003Ckbd>1000-\u003C\u002Fkbd> [webdataset](https:\u002F\u002Fgithub.com\u002Ftmbdev\u002Fwebdataset): WebDataset is a PyTorch Dataset (IterableDataset) implementation providing efficient access to datasets stored in POSIX tar archives.\n145. \u003Ckbd>1000-\u003C\u002Fkbd> [volksdep](https:\u002F\u002Fgithub.com\u002FMedia-Smart\u002Fvolksdep): volksdep is an open-source toolbox for deploying and accelerating PyTorch, Onnx and Tensorflow models with TensorRT.\n146. \u003Ckbd>1700+\u003C\u002Fkbd> [PyTorch-StudioGAN](https:\u002F\u002Fgithub.com\u002FPOSTECH-CVLab\u002FPyTorch-StudioGAN): StudioGAN is a Pytorch library providing implementations of representative Generative Adversarial Networks (GANs) for conditional\u002Funconditional image generation. StudioGAN aims to offer an identical playground for modern GANs so that machine learning researchers can readily compare and analyze a new idea.\n147. \u003Ckbd>null\u003C\u002Fkbd> [torchdrift](https:\u002F\u002Fgithub.com\u002Ftorchdrift\u002Ftorchdrift\u002F): drift detection library\n148. 
\u003Ckbd>1600+\u003C\u002Fkbd> [accelerate](https:\u002F\u002Fgithub.com\u002Fhuggingface\u002Faccelerate) : A simple way to train and use PyTorch models with multi-GPU, TPU, mixed-precision.\n149. \u003Ckbd>1000-\u003C\u002Fkbd> [lightning-transformers](https:\u002F\u002Fgithub.com\u002FPyTorchLightning\u002Flightning-transformers): Flexible interface for high-performance research using SOTA Transformers leveraging Pytorch Lightning, Transformers, and Hydra.\n\n## Tutorials & books & examples｜教程 & 书籍 & 示例\n\n1. \u003Ckbd>4200+\u003C\u002Fkbd> [Practical Pytorch](https:\u002F\u002Fgithub.com\u002Fspro\u002Fpractical-pytorch)**: 该教程对不同的RNN模型进行了解释。\n2. [DeepLearningForNLPInPytorch](https:\u002F\u002Fpytorch.org\u002Ftutorials\u002Fbeginner\u002Fdeep_learning_nlp_tutorial.html): IPython Notebook 深度学习教程，包含对自然语言处理的强调。\n3. \u003Ckbd>21300+\u003C\u002Fkbd> [pytorch-tutorial](https:\u002F\u002Fgithub.com\u002Fyunjey\u002Fpytorch-tutorial): 面向研究人员的深度学习教程，其中大部分模型的实现代码都少于30行。\n4. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-exercises](https:\u002F\u002Fgithub.com\u002Fkeon\u002Fpytorch-exercises): PyTorch练习集合。\n5. \u003Ckbd>5200+\u003C\u002Fkbd> [pytorch tutorials](https:\u002F\u002Fgithub.com\u002Fpytorch\u002Ftutorials): 各种PyTorch教程。\n6. \u003Ckbd>16500+\u003C\u002Fkbd> [pytorch examples](https:\u002F\u002Fgithub.com\u002Fpytorch\u002Fexamples):  PyTorch使用示例，应用场景包括视觉、文本、强化学习等。\n7. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch practice](https:\u002F\u002Fgithub.com\u002Fnapsternxg\u002Fpytorch-practice): PyTorch示例。  \n8. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch mini tutorials](https:\u002F\u002Fgithub.com\u002Fvinhkhuc\u002FPyTorch-Mini-Tutorials): PyTorch极简教程，改编自Alec Radford的[Theano教程](https:\u002F\u002Fgithub.com\u002FNewmu\u002FTheano-Tutorials)。\n9. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch text classification](https:\u002F\u002Fgithub.com\u002Fxiayandi\u002FPytorch_text_classification): PyTorch实现基于CNN的文本分类。\n10. 
\u003Ckbd>1000-\u003C\u002Fkbd> [cats vs dogs](https:\u002F\u002Fgithub.com\u002Fdesimone\u002Fpytorch-cat-vs-dogs): Kaggle 竞赛 Dogs vs. Cats Redux: Kernels Edition 的网络微调示例。\n11. \u003Ckbd>1000-\u003C\u002Fkbd> [convnet](https:\u002F\u002Fgithub.com\u002Feladhoffer\u002FconvNet.pytorch): 深度卷积网络在不同数据集(ImageNet, Cifar10, Cifar100, MNIST)上的完整训练示例。\n12. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-generative-adversarial-networks](https:\u002F\u002Fgithub.com\u002Fmailmahee\u002Fpytorch-generative-adversarial-networks): 一个简单的对抗生成网络(GAN) 。\n13. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch containers](https:\u002F\u002Fgithub.com\u002Famdegroot\u002Fpytorch-containers): PyTorch中简化的Torch容器。\n14. \u003Ckbd>1000-\u003C\u002Fkbd> [T-SNE in pytorch](https:\u002F\u002Fgithub.com\u002Fcemoody\u002Ftopicsne): t-SNE实验。\n15. \u003Ckbd>1000-\u003C\u002Fkbd> [AAE_pytorch](https:\u002F\u002Fgithub.com\u002Ffducau\u002FAAE_pytorch): PyTorch版对抗自编码器。\n16. \u003Ckbd>1000-\u003C\u002Fkbd> [Kind_PyTorch_Tutorial](https:\u002F\u002Fgithub.com\u002FGunhoChoi\u002FKind_PyTorch_Tutorial): PyTorch新手教程。  \n17. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-poetry-gen](https:\u002F\u002Fgithub.com\u002Fjustdark\u002Fpytorch-poetry-gen): 基于PyTorch的char-RNN（字符级循环神经网络）。  \n18. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-REINFORCE](https:\u002F\u002Fgithub.com\u002FJamesChuanggg\u002Fpytorch-REINFORCE): PyTorch 实现了 OpenAI gym 下离散和连续控制的 REINFORCE。\n19. \u003Ckbd>6100+\u003C\u002Fkbd> [PyTorch-Tutorial](https:\u002F\u002Fgithub.com\u002FMorvanZhou\u002FPyTorch-Tutorial)**: 简单而快速地搭建你自己的神经网络。 https:\u002F\u002Fmorvanzhou.github.io\u002Ftutorials\u002F\n20. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-intro](https:\u002F\u002Fgithub.com\u002Fjoansj\u002Fpytorch-intro): 演示如何在PyTorch中实现CNNs和RNNs。\n21. \u003Ckbd>1300+\u003C\u002Fkbd> [pytorch-classification](https:\u002F\u002Fgithub.com\u002Fbearpaw\u002Fpytorch-classification): 一个CIFAR-10\u002F100和ImageNet数据集上的分类框架。\n22. 
\u003Ckbd>1000-\u003C\u002Fkbd> [pytorch_notebooks - hardmaru](https:\u002F\u002Fgithub.com\u002Fhardmaru\u002Fpytorch_notebooks): 用NumPy和PyTorch编写的随机教程。\n23. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch_tutoria-quick](https:\u002F\u002Fgithub.com\u002Fsoravux\u002Fpytorch_tutorial): PyTorch介绍和教程。面向计算机视觉、图形和机器学习领域的研究人员，要求对神经网络理论知识和常用神经网络框架由基本的了解。\n24. \u003Ckbd>1000-\u003C\u002Fkbd> [Pytorch_fine_tuning_Tutorial](https:\u002F\u002Fgithub.com\u002FSpandan-Madan\u002FPytorch_fine_tuning_Tutorial): 在PyTorch中进行微调或转移学习的简短教程。\n25. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch_exercises](https:\u002F\u002Fgithub.com\u002FKyubyong\u002Fpytorch_exercises): PyTorch练习。\n26. \u003Ckbd>1000-\u003C\u002Fkbd> [traffic-sign-detection](https:\u002F\u002Fgithub.com\u002Fsoumith\u002Ftraffic-sign-detection-homework): 纽约大学2018年计算机视觉秋季课程示例。\n27. \u003Ckbd>1000-\u003C\u002Fkbd> [mss_pytorch](https:\u002F\u002Fgithub.com\u002FJs-Mim\u002Fmss_pytorch): 无需进行滤波后处理，利用循环推断算法实现歌唱语音分离 - PyTorch 实现。 演示: js-mim.github.io\u002Fmss_pytorch\n28. \u003Ckbd>2600+\u003C\u002Fkbd> [DeepNLP-models-Pytorch](https:\u002F\u002Fgithub.com\u002FDSKSD\u002FDeepNLP-models-Pytorch) cs-224n课程中的各种深度NLP模型的PyTorch实现。(Stanford Univ: NLP with Deep Learning)\n29. \u003Ckbd>1000-\u003C\u002Fkbd> [Mila introductory tutorials](https:\u002F\u002Fgithub.com\u002Fmila-udem\u002Fwelcome_tutorials): 面向MILA新生的各种教程。（[MILA：加拿大蒙特利尔人工智能研究中心](https:\u002F\u002Fmila.quebec\u002Fen\u002Fmila\u002F)）\n30. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch.rl.learning](https:\u002F\u002Fgithub.com\u002Fmoskomule\u002Fpytorch.rl.learning): 使用PyTorch学习强化学习。\n31. \u003Ckbd>1000-\u003C\u002Fkbd> [minimal-seq2seq](https:\u002F\u002Fgithub.com\u002Fkeon\u002Fseq2seq): 关注神经机器翻译的最小Seq2Seq模型。\n32. \u003Ckbd>1000-\u003C\u002Fkbd> [tensorly-notebooks](https:\u002F\u002Fgithub.com\u002FJeanKossaifi\u002Ftensorly-notebooks): 利用Python和TensorLy实现张量方法。 tensorly.github.io\u002Fdev\n33. 
\u003Ckbd>1000-\u003C\u002Fkbd> [pytorch_bits](https:\u002F\u002Fgithub.com\u002Fjpeg729\u002Fpytorch_bits): 时序预测的相关示例。\n34. \u003Ckbd>1000-\u003C\u002Fkbd> [skip-thoughts](https:\u002F\u002Fgithub.com\u002Fsanyam5\u002Fskip-thoughts): PyTorch实现Skip-Thought词向量模型。\n35. \u003Ckbd>1000-\u003C\u002Fkbd> [video-caption-pytorch](https:\u002F\u002Fgithub.com\u002FxiadingZ\u002Fvideo-caption-pytorch): 利用PyTorch为视频添加字幕。\n36. \u003Ckbd>1000-\u003C\u002Fkbd> [Capsule-Network-Tutorial](https:\u002F\u002Fgithub.com\u002Fhiggsfield\u002FCapsule-Network-Tutorial): 简单易学的胶囊网络（Capsule Network）教程。\n37. \u003Ckbd>2100+\u003C\u002Fkbd> [code-of-learn-deep-learning-with-pytorch](https:\u002F\u002Fgithub.com\u002FSherlockLiao\u002Fcode-of-learn-deep-learning-with-pytorch): 《深度学习入门之PyTorch》书中代码。 item.jd.com\u002F17915495606.html\n38. \u003Ckbd>2300+\u003C\u002Fkbd> [RL-Adventure](https:\u002F\u002Fgithub.com\u002Fhiggsfield\u002FRL-Adventure): Pytorch 版 Deep Q Learning 教程，简单、易学、代码可读性强，包含 DQN \u002F DDQN \u002F Prioritized replay\u002F noisy networks\u002F distributional values\u002F Rainbow\u002F hierarchical RL 的 PyTorch 实现。\n39. \u003Ckbd>1000-\u003C\u002Fkbd> [accelerated_dl_pytorch](https:\u002F\u002Fgithub.com\u002Fhpcgarage\u002Faccelerated_dl_pytorch): Jupyter Day Atlanta II 会议上的加速深度学习算法，包含 PyTorch 教程和会议演讲文稿。\n40. \u003Ckbd>2500+\u003C\u002Fkbd> [RL-Adventure-2](https:\u002F\u002Fgithub.com\u002Fhiggsfield\u002FRL-Adventure-2): 以下内容的 PyTorch0.4 版本教程: actor critic \u002F proximal policy optimization \u002F acer \u002F ddpg \u002F twin dueling ddpg \u002F soft actor critic \u002F generative adversarial imitation learning \u002F hindsight experience replay。\n41. [Generative Adversarial Networks (GANs) in 50 lines of code (PyTorch)](https:\u002F\u002Fmedium.com\u002F@devnag\u002Fgenerative-adversarial-networks-gans-in-50-lines-of-code-pytorch-e81b79659e3f): 50行生成对抗网络。\n42. 
[adversarial-autoencoders-with-pytorch](https:\u002F\u002Fblog.paperspace.com\u002Fadversarial-autoencoders-with-pytorch\u002F): PyTorch对抗自编码器。\n43. [transfer learning using pytorch](https:\u002F\u002Fmedium.com\u002F@vishnuvig\u002Ftransfer-learning-using-pytorch-4c3475f4495): PyTorch迁移学习。\n44. [how-to-implement-a-yolo-object-detector-in-pytorch](https:\u002F\u002Fblog.paperspace.com\u002Fhow-to-implement-a-yolo-object-detector-in-pytorch\u002F): 如何使用PyTorch实现一个YOLO (v3)物体检测器。\n45. [pytorch-for-recommenders-101](http:\u002F\u002Fblog.fastforwardlabs.com\u002F2018\u002F04\u002F10\u002Fpytorch-for-recommenders-101.html): 使用PyTorch构建推荐系统。\n46. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-for-numpy-users](https:\u002F\u002Fgithub.com\u002Fwkentaro\u002Fpytorch-for-numpy-users): 面向Numpy用户的PyTorch。\n47. [PyTorch Tutorial](http:\u002F\u002Fwww.pytorchtutorial.com\u002F): PyTorch中文教程（PyTorch中文网）。\n48. \u003Ckbd>1000-\u003C\u002Fkbd> [grokking-pytorch](https:\u002F\u002Fgithub.com\u002FKaixhin\u002Fgrokking-pytorch): 手把手教你学会PyTorch。\n49. \u003Ckbd>5200+\u003C\u002Fkbd> [PyTorch-Deep-Learning-Minicourse](https:\u002F\u002Fgithub.com\u002FAtcold\u002FPyTorch-Deep-Learning-Minicourse): PyTorch深度学习微型课程。\n50. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-custom-dataset-examples](https:\u002F\u002Fgithub.com\u002Futkuozbulak\u002Fpytorch-custom-dataset-examples): PyTorch的一些自定义数据集示例。\n51. [Multiplicative LSTM for sequence-based Recommenders](https:\u002F\u002Fflorianwilhelm.info\u002F2018\u002F08\u002Fmultiplicative_LSTM_for_sequence_based_recos\u002F): 面向基于序列的推荐器的乘法LSTM。\u002F基于LSTM的序列推荐实现。\n52. \u003Ckbd>1000-\u003C\u002Fkbd> [deeplearning.ai-pytorch](https:\u002F\u002Fgithub.com\u002Ffurkanu\u002Fdeeplearning.ai-pytorch): Coursera深度学习课程(deeplearning.ai)任务的PyTorch实现。\n53. \u003Ckbd>1000-\u003C\u002Fkbd> [MNIST_Pytorch_python_and_capi](https:\u002F\u002Fgithub.com\u002Ftobiascz\u002FMNIST_Pytorch_python_and_capi): 示例：如何在Python中训练一个MNIST网络并在C++中用PyTorch1.0运行。\n54. 
\u003Ckbd>1000-\u003C\u002Fkbd> [torch_light](https:\u002F\u002Fgithub.com\u002Fne7ermore\u002Ftorch_light): 教程和示例，包括强化学习、NLP、CV。Logistic、CNN、RNN、LSTM等神经网络模型由数行代码实现，一些高级示例由复杂模型实现。\n55. \u003Ckbd>1000-\u003C\u002Fkbd> [portrain-gan](https:\u002F\u002Fgithub.com\u002Fdribnet\u002Fportrain-gan): 编码（解码尚未实现）art-DCGAN 生成的肖像油画。\n56. \u003Ckbd>1000-\u003C\u002Fkbd> [mri-analysis-pytorch](https:\u002F\u002Fgithub.com\u002Fomarsar\u002Fmri-analysis-pytorch): 使用PyTorch和MedicalTorch进行核磁共振（MRI）分析。\n57. \u003Ckbd>1000-\u003C\u002Fkbd> [cifar10-fast](https:\u002F\u002Fgithub.com\u002Fdavidcpage\u002Fcifar10-fast): 在79秒内完成CIFAR10数据集上的ResNet模型的训练并达到94%的测试准确率，相关内容参见 [blog series](https:\u002F\u002Fwww.myrtle.ai\u002F2018\u002F09\u002F24\u002Fhow_to_train_your_resnet\u002F)。\n58. [Intro to Deep Learning with PyTorch](https:\u002F\u002Fin.udacity.com\u002Fcourse\u002Fdeep-learning-pytorch--ud188): Udacity和Facebook联合推出的免费课程，包括对PyTorch的介绍和对PyTorch作者之一的Soumith Chintala的采访。\n59. \u003Ckbd>2900+\u003C\u002Fkbd> [pytorch-sentiment-analysis](https:\u002F\u002Fgithub.com\u002Fbentrevett\u002Fpytorch-sentiment-analysis): PyTorch和TorchText语义分析教程。\n60. \u003Ckbd>11800+\u003C\u002Fkbd> [pytorch-image-models](https:\u002F\u002Fgithub.com\u002Frwightman\u002Fpytorch-image-models): PyTorch图像模型、脚本、与训练权重—— (SE)ResNet\u002FResNeXT, DPN, EfficientNet, MobileNet-V3\u002FV2\u002FV1, MNASNet, Single-Path NAS, FBNet等等。\n61. \u003Ckbd>1000-\u003C\u002Fkbd> [CIFAR-ZOO](https:\u002F\u002Fgithub.com\u002FBIGBALLON\u002FCIFAR-ZOO): 以CIFAR为基准的多种CNN架构的PyTorch实现。\n62. \u003Ckbd>3700+\u003C\u002Fkbd> [d2l-pytorch](https:\u002F\u002Fgithub.com\u002Fdsgiitr\u002Fd2l-pytorch): 本项目尝试复制《动手深度学习（Dive into Deep Learning）》(www.d2l.ai) 一书，将MXnet代码改编为PyTorch版。\n63. \u003Ckbd>1000-\u003C\u002Fkbd> [thinking-in-tensors-writing-in-pytorch](https:\u002F\u002Fgithub.com\u002Fstared\u002Fthinking-in-tensors-writing-in-pytorch):  张量思维，PyTorch实践 (深度学习入门)。\n64. 
\u003Ckbd>1000-\u003C\u002Fkbd> [NER-BERT-pytorch](https:\u002F\u002Fgithub.com\u002Flemonhu\u002FNER-BERT-pytorch): 命名试题识别的PyTorch解决方案，使用了Google AI的预训练BERT模型。\n65. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-sync-batchnorm-example](https:\u002F\u002Fgithub.com\u002Fdougsouza\u002Fpytorch-sync-batchnorm-example): 如何在 PyTorch 中使用交叉复制（Cross Replica）\u002F同步批标准化（Synchronized Batchnorm）。\n66. \u003Ckbd>1000-\u003C\u002Fkbd> [SentimentAnalysis](https:\u002F\u002Fgithub.com\u002Fbarissayil\u002FSentimentAnalysis): 情绪分析神经网络，在斯坦福情绪树库上用微调BERT训练得到。\n67. \u003Ckbd>1100+\u003C\u002Fkbd> [pytorch-cpp](https:\u002F\u002Fgithub.com\u002Fprabhuomkar\u002Fpytorch-cpp): 为深度学习研究者打造，用 C++ 实现 PyTorch 教程内容（基于 [pytorch-tutorial](https:\u002F\u002Fgithub.com\u002Fyunjey\u002Fpytorch-tutorial) 的Python教程）。\n68. [Deep Learning with PyTorch: Zero to GANs](https:\u002F\u002Fjovian.ml\u002Faakashns\u002Fcollections\u002Fdeep-learning-with-pytorch): Deep Learning with PyTorch ([video](https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=GIsg-ZUy0MY))系列在线课程的相关代码。\n69. [Deep Learning with PyTorch](https:\u002F\u002Fwww.manning.com\u002Fbooks\u002Fdeep-learning-with-pytorch): Deep Learning with PyTorch teaches you how to implement deep learning algorithms with Python and PyTorch, the book includes a case study: building an algorithm capable of detecting malignant lung tumors using CT scans.\n70. [Serverless Machine Learning in Action with PyTorch and AWS](https:\u002F\u002Fwww.manning.com\u002Fbooks\u002Fserverless-machine-learning-in-action): Serverless Machine Learning in Action is a guide to bringing your experimental PyTorch machine learning code to production using serverless capabilities from major cloud providers like AWS, Azure, or GCP.\n71. \u003Ckbd>3200+\u003C\u002Fkbd> [LabML NN](https:\u002F\u002Fgithub.com\u002Flab-ml\u002Fnn): A collection of PyTorch implementations of neural networks architectures and algorithms with side-by-side notes.\n\n## Paper implementations｜论文实现\n\n1. 
\u003Ckbd>1000-\u003C\u002Fkbd> [google_evolution](https:\u002F\u002Fgithub.com\u002Fneuralix\u002Fgoogle_evolution): 实现了 [Large-scale evolution of image classifiers](https:\u002F\u002Farxiv.org\u002Fabs\u002F1703.01041) 一文的结果网络之一。\n2. \u003Ckbd>1000-\u003C\u002Fkbd> [pyscatwave](https:\u002F\u002Fgithub.com\u002Fedouardoyallon\u002Fpyscatwave): 基于CuPy\u002FPyTorch的快速散射变换，[Scaling the Scattering Transform: Deep Hybrid Networks](https:\u002F\u002Farxiv.org\u002Fabs\u002F1703.08961)\n3. \u003Ckbd>1000-\u003C\u002Fkbd> [scalingscattering](https:\u002F\u002Fgithub.com\u002Fedouardoyallon\u002Fscalingscattering): 该仓库包含 [Scaling The Scattering Transform : Deep Hybrid Networks](https:\u002F\u002Farxiv.org\u002Fabs\u002F1703.08961) 一文中的实验。  \n4. \u003Ckbd>1000-\u003C\u002Fkbd> [deep-auto-punctuation](https:\u002F\u002Fgithub.com\u002Fepisodeyang\u002Fdeep-auto-punctuation): 通过逐字符学习实现自动添加标点。\n5. \u003Ckbd>1100+\u003C\u002Fkbd> [Realtime_Multi-Person_Pose_Estimation](https:\u002F\u002Fgithub.com\u002Ftensorboy\u002Fpytorch_Realtime_Multi-Person_Pose_Estimation): 基于PyTorch的多人人体姿态估计，[原始代码](https:\u002F\u002Fgithub.com\u002FZheC\u002FRealtime_Multi-Person_Pose_Estimation)。\n6. \u003Ckbd>1000-\u003C\u002Fkbd> [PyTorch-value-iteration-networks](https:\u002F\u002Fgithub.com\u002Fonlytailei\u002FPyTorch-value-iteration-networks): PyTorch实现价值迭代网络（[Value Iteration Networks](https:\u002F\u002Farxiv.org\u002Fabs\u002F1602.02867)）（NIPS2016最佳论文奖）。\n7. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch_Highway](https:\u002F\u002Fgithub.com\u002Fanalvikingur\u002Fpytorch_Highway): PyTorch实现高速公路网络（[Highway Networks](https:\u002F\u002Farxiv.org\u002Fabs\u002F1505.00387)）。\n8. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch_NEG_loss](https:\u002F\u002Fgithub.com\u002Fanalvikingur\u002Fpytorch_NEG_loss): PyTorch实现负采样损失（[Negative Sampling Loss](https:\u002F\u002Farxiv.org\u002Fabs\u002F1310.4546)）。  \n9. 
\u003Ckbd>1000-\u003C\u002Fkbd> [pytorch_RVAE](https:\u002F\u002Fgithub.com\u002Fanalvikingur\u002Fpytorch_RVAE): 用PyTorch实现的产生序列数据的递归变分自动编码器，相关论文：[Generating Sentences from a Continuous Space](https:\u002F\u002Farxiv.org\u002Fabs\u002F1511.06349#)，[Character-Aware Neural Language Models](https:\u002F\u002Farxiv.org\u002Fabs\u002F1508.06615)。\n10. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch_TDNN](https:\u002F\u002Fgithub.com\u002Fanalvikingur\u002Fpytorch_TDNN): 用PyTorch实现时间延迟神经网络（Time Delayed NN）。\n11. \u003Ckbd>1000-\u003C\u002Fkbd> [eve.pytorch](https:\u002F\u002Fgithub.com\u002Fmoskomule\u002Feve.pytorch): 一个Eve优化器的实现，相关论文：[Imploving Stochastic Gradient Descent with Feedback](https:\u002F\u002Farxiv.org\u002Fabs\u002F1611.01505)。  \n12. \u003Ckbd>1000-\u003C\u002Fkbd> [e2e-model-learning](https:\u002F\u002Fgithub.com\u002Flocuslab\u002Fe2e-model-learning): 随机优化中的基于任务的端到端模型，https:\u002F\u002Farxiv.org\u002Fabs\u002F1703.04529 。\n13. \u003Ckbd>1000-\u003C\u002Fkbd> [pix2pix-pytorch](https:\u002F\u002Fgithub.com\u002Fmrzhu-cool\u002Fpix2pix-pytorch): PyTorch实现“基于条件对抗网络的图像到图像翻译”。 论文：[Image-to-Image Translation Using Conditional Adversarial Networks](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1611.07004v1.pdf)。\n14. \u003Ckbd>4300+\u003C\u002Fkbd> [Single Shot MultiBox Detector](https:\u002F\u002Fgithub.com\u002Famdegroot\u002Fssd.pytorch): 单发多盒探测器，论文：[Single Shot MultiBox Detector](http:\u002F\u002Farxiv.org\u002Fabs\u002F1512.02325)。\n15. \u003Ckbd>1000-\u003C\u002Fkbd> [DiscoGAN](https:\u002F\u002Fgithub.com\u002Fcarpedm20\u002FDiscoGAN-pytorch): 学习利用生成性对抗网络发现跨域关系。论文：[Learning to Discover Cross-Domain Relations with Generative Adversarial Networks](https:\u002F\u002Farxiv.org\u002Fabs\u002F1703.05192)。  \n16. 
\u003Ckbd>1000-\u003C\u002Fkbd> [official DiscoGAN implementation](https:\u002F\u002Fgithub.com\u002FSKTBrain\u002FDiscoGAN): 官方实现“学习利用生成性对抗网络发现跨域关系”。 论文：[Learning to Discover Cross-Domain Relations with Generative Adversarial Networks](https:\u002F\u002Farxiv.org\u002Fabs\u002F1703.05192)。  \n17. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-es](https:\u002F\u002Fgithub.com\u002Fatgambardella\u002Fpytorch-es): 进化策略。论文：[Evolution Strategies as a Scalable Alternative to Reinforcement Learning](https:\u002F\u002Farxiv.org\u002Fabs\u002F1703.03864) .  \n18. \u003Ckbd>1000-\u003C\u002Fkbd> [piwise](https:\u002F\u002Fgithub.com\u002Fbodokaiser\u002Fpiwise): 使用PyTorch对VOC2012数据集进行像素切割。\n19. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-dqn](https:\u002F\u002Fgithub.com\u002Ftransedward\u002Fpytorch-dqn): 深度Q学习网络。  \n20. \u003Ckbd>1000+\u003C\u002Fkbd> [neuraltalk2-pytorch](https:\u002F\u002Fgithub.com\u002Fruotianluo\u002Fneuraltalk2.pytorch): PyTorch图像字幕代码库(在分支“with_finetune”中有可微调CNN)。\n21. \u003Ckbd>1000-\u003C\u002Fkbd> [vnet.pytorch](https:\u002F\u002Fgithub.com\u002Fmattmacy\u002Fvnet.pytorch): PyTorch实现V-Net：全卷积神经网络在体医学图像分割中的应用。 http:\u002F\u002Fmattmacy.io\u002Fvnet.pytorch\u002F\n22. \u003Ckbd>1400+\u003C\u002Fkbd> [pytorch-fcn](https:\u002F\u002Fgithub.com\u002Fwkentaro\u002Fpytorch-fcn): PyTorch 实现完全卷积网络。 \n23. \u003Ckbd>1000-\u003C\u002Fkbd> [WideResNets](https:\u002F\u002Fgithub.com\u002Fxternalz\u002FWideResNet-pytorch): PyTorch实现WideResNets。该实现比官方Torch实现花费更少的GPU内存。实现: https:\u002F\u002Fgithub.com\u002Fszagoruyko\u002Fwide-residual-networks .\n24. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch_highway_networks](https:\u002F\u002Fgithub.com\u002Fc0nn3r\u002Fpytorch_highway_networks): PyTorch实现高速公路网络。  \n25. 
\u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-NeuCom](https:\u002F\u002Fgithub.com\u002Fypxie\u002Fpytorch-NeuCom): Pytorch实现DeepMind的可微神经计算机[论文](http:\u002F\u002Fwww.nature.com\u002Farticles\u002Fnature20101.epdf?author_access_token=ImTXBI8aWbYxYQ51Plys8NRgN0jAjWel9jnR3ZoTv0MggmpDmwljGswxVdeocYSurJ3hxupzWuRNeGvvXnoO8o4jTJcnAyhGuZzXJ1GEaD-Z7E6X_a9R-xqJ9TfJWBqz)。\n26. \u003Ckbd>1000-\u003C\u002Fkbd> [captionGen](https:\u002F\u002Fgithub.com\u002Feladhoffer\u002FcaptionGen): 使用PyTorch为图像生成标注。\n27. \u003Ckbd>1100+\u003C\u002Fkbd> [AnimeGAN](https:\u002F\u002Fgithub.com\u002Fjayleicn\u002FanimeGAN): 生成对抗网络的PyTorch简单实现，关注于动漫脸谱绘画。\n28. \u003Ckbd>1000-\u003C\u002Fkbd> [Cnn-text classification](https:\u002F\u002Fgithub.com\u002FShawn1993\u002Fcnn-text-classification-pytorch): PyTorch 实现 [Kim的基于卷积神经网络的句子分类](https:\u002F\u002Farxiv.org\u002Fabs\u002F1408.5882) 论文。\n29. \u003Ckbd>1700+\u003C\u002Fkbd> [deepspeech2](https:\u002F\u002Fgithub.com\u002FSeanNaren\u002Fdeepspeech.pytorch): 使用 Baidu Warp-CTC 实现DeepSpeech2。创造一个基于 DeepSpeech2 架构的网络，用 CTC 激活函数训练。\n30. \u003Ckbd>1000-\u003C\u002Fkbd> [seq2seq](https:\u002F\u002Fgithub.com\u002FMaximumEntropy\u002FSeq2Seq-PyTorch): 包含PyTorch中的Seq2Seq模型。  \n31. \u003Ckbd>1000-\u003C\u002Fkbd> [Asynchronous Advantage Actor-Critic in PyTorch](https:\u002F\u002Fgithub.com\u002Frarilurelo\u002Fpytorch_a3c): PyTorch实现A3C(Asynchronous Advantage Actor-Critic)，论文：[Asynchronous Methods for Deep Reinforcement Learning](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1602.01783v1.pdf)。由于 PyTorch 可以轻松地在多进程内控制共享内存，我们可以轻易实现A3C这样的异步算法。  \n32. \u003Ckbd>1000-\u003C\u002Fkbd> [densenet](https:\u002F\u002Fgithub.com\u002Fbamos\u002Fdensenet.pytorch): This is a PyTorch 实现 DenseNet-BC 架构，相关论文 [Densely Connected Convolutional Networks](https:\u002F\u002Farxiv.org\u002Fabs\u002F1608.06993)。该实现的 CIFAR-10+ 100层错误率为 4.77 增长率为 12。官方实现和许多第三方库的链接参见 [liuzhuang13\u002FDenseNet](https:\u002F\u002Fgithub.com\u002Fliuzhuang13\u002FDenseNet)。\n33. 
\u003Ckbd>1000-\u003C\u002Fkbd> [nninit](https:\u002F\u002Fgithub.com\u002Falykhantejani\u002Fnninit): PyTorch神经网络模块的权值初始化方案，这是 [nninit](https:\u002F\u002Fgithub.com\u002FKaixhin\u002Fnninit) 的流行端口。\n34. \u003Ckbd>1500+\u003C\u002Fkbd> [faster rcnn](https:\u002F\u002Fgithub.com\u002Flongcw\u002Ffaster_rcnn_pytorch): PyTorch 实现 Faster RCNN。该项目主要基于 py-faster-rcnn 和 TFFRCNN。更多关于 R-CNN 的细节请参考论文 Faster R-CNN：[Towards Real-Time Object Detection with Region Proposal Network](https:\u002F\u002Farxiv.org\u002Fabs\u002F1506.01497)。\n35. \u003Ckbd>1000-\u003C\u002Fkbd> [doomnet](https:\u002F\u002Fgithub.com\u002Fakolishchak\u002Fdoom-net-pytorch): PyTorch版Doom-net，实现了ViZDoom环境下的RL模型。  \n36. \u003Ckbd>1000-\u003C\u002Fkbd> [flownet](https:\u002F\u002Fgithub.com\u002FClementPinard\u002FFlowNetPytorch): 通过Dosovitskiy等完成FlowNet的Pytorch实现。\n37. \u003Ckbd>1000-\u003C\u002Fkbd> [sqeezenet](https:\u002F\u002Fgithub.com\u002Fgsp-27\u002Fpytorch_Squeezenet): 在CIFAR10数据集上用PyTorch实现Squeezenet模型，[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1602.07360)。\n38. \u003Ckbd>2800+\u003C\u002Fkbd> [WassersteinGAN](https:\u002F\u002Fgithub.com\u002Fmartinarjovsky\u002FWassersteinGAN): PyTorch实现[WassersteinGAN](https:\u002F\u002Farxiv.org\u002Fabs\u002F1701.07875)。\n39. \u003Ckbd>1000-\u003C\u002Fkbd> [optnet](https:\u002F\u002Fgithub.com\u002Flocuslab\u002Foptnet): 该仓库包含PyTorch源码，重现了论文[OptNet: Differentiable Optimization as a Layer in Neural Networks](https:\u002F\u002Farxiv.org\u002Fabs\u002F1703.00443)中的实验。  \n40. \u003Ckbd>1000-\u003C\u002Fkbd> [qp solver](https:\u002F\u002Fgithub.com\u002Flocuslab\u002Fqpth): PyTorch的一个快速和可微分的QP求解器。https:\u002F\u002Flocuslab.github.io\u002Fqpth\u002F\n41. \u003Ckbd>1000-\u003C\u002Fkbd> [Continuous Deep Q-Learning with Model-based Acceleration ](https:\u002F\u002Fgithub.com\u002Fikostrikov\u002Fpytorch-naf): [基于模型加速的连续深度Q学习](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1603.00748v1.pdf)的再实现。\n42. 
\u003Ckbd>1000-\u003C\u002Fkbd> [Learning to learn by gradient descent by gradient descent](https:\u002F\u002Fgithub.com\u002Fikostrikov\u002Fpytorch-meta-optimizer): PyTorch实现[Learning to learn by gradient descent by gradient descent](https:\u002F\u002Farxiv.org\u002Fabs\u002F1606.04474)。\n43. \u003Ckbd>1000-\u003C\u002Fkbd> [fast-neural-style](https:\u002F\u002Fgithub.com\u002Fdarkstar112358\u002Ffast-neural-style): PyTorch实现fast-neural-style，论文：[Perceptual Losses for Real-Time Style Transfer and Super-Resolution](https:\u002F\u002Farxiv.org\u002Fabs\u002F1603.08155)。\n44. \u003Ckbd>1000-\u003C\u002Fkbd> [PytorchNeuralStyleTransfer](https:\u002F\u002Fgithub.com\u002Fleongatys\u002FPytorchNeuralStyleTransfer): Pytorch中的神经风格转换。\n45. \u003Ckbd>1000-\u003C\u002Fkbd> [Fast Neural Style for Image Style Transform by Pytorch](https:\u002F\u002Fgithub.com\u002Fbengxy\u002FFastNeuralStyle): 使用快速神经风格进行图像风格转换。\n46. \u003Ckbd>1000-\u003C\u002Fkbd> [neural style transfer](https:\u002F\u002Fgithub.com\u002Falexis-jacq\u002FPytorch-Tutorials): 通过神经风格算法介绍PyTorch，[Neural-Style algorithm](https:\u002F\u002Farxiv.org\u002Fabs\u002F1508.06576)。\n47. \u003Ckbd>1000-\u003C\u002Fkbd> [VIN_PyTorch_Visdom](https:\u002F\u002Fgithub.com\u002Fzuoxingdong\u002FVIN_PyTorch_Visdom): PyTorch实现价值迭代网络(VIN):干净、简单、模块化。利用Visdom进行可视化。\n48. \u003Ckbd>1400+\u003C\u002Fkbd> [YOLO2](https:\u002F\u002Fgithub.com\u002Flongcw\u002Fyolo2-pytorch): PyTorch中的YOLOv2。\n49. \u003Ckbd>1200+\u003C\u002Fkbd> [attention-transfer](https:\u002F\u002Fgithub.com\u002Fszagoruyko\u002Fattention-transfer): 通过注意转移改善卷积网络，[ICLR2017会议论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1612.03928)。\n50. \u003Ckbd>1000-\u003C\u002Fkbd> [SVHNClassifier](https:\u002F\u002Fgithub.com\u002Fpotterhsu\u002FSVHNClassifier-PyTorch): PyTorch实现[基于深度卷积神经网络的街景图像多位数识别](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1312.6082.pdf)。\n51. 
\u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-deform-conv](https:\u002F\u002Fgithub.com\u002Foeway\u002Fpytorch-deform-conv): PyTorch实现可变形卷积(Deformable Convolution)。  \n52. \u003Ckbd>1000-\u003C\u002Fkbd> [BEGAN-pytorch](https:\u002F\u002Fgithub.com\u002Fcarpedm20\u002FBEGAN-pytorch): PyTorch实现[边界均衡生成对抗网络（BEGAN）](https:\u002F\u002Farxiv.org\u002Fabs\u002F1703.10717): Boundary Equilibrium Generative Adversarial Networks.  \n53. \u003Ckbd>1000-\u003C\u002Fkbd> [treelstm.pytorch](https:\u002F\u002Fgithub.com\u002Fdasguptar\u002Ftreelstm.pytorch): PyTorch实现树形结构LSTM。\n54. \u003Ckbd>1000-\u003C\u002Fkbd> [AGE](https:\u002F\u002Fgithub.com\u002FDmitryUlyanov\u002FAGE): 论文代码，原文：对抗生成编码器网络（[Adversarial Generator-Encoder Networks](http:\u002F\u002Fsites.skoltech.ru\u002Fapp\u002Fdata\u002Fuploads\u002Fsites\u002F25\u002F2017\u002F04\u002FAGE.pdf)）。\n55. \u003Ckbd>1000-\u003C\u002Fkbd> [ResNeXt.pytorch](https:\u002F\u002Fgithub.com\u002Fprlz77\u002FResNeXt.pytorch): 再现 ResNet-V3 (深度神经网络的聚集残差变换)。\n56. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-rl](https:\u002F\u002Fgithub.com\u002Fjingweiz\u002Fpytorch-rl): 基于PyTorch和Visdom的深度强化学习。\n57. \u003Ckbd>1000-\u003C\u002Fkbd> [Deep-Leafsnap](https:\u002F\u002Fgithub.com\u002Fsujithv28\u002FDeep-Leafsnap): 对比传统的计算机视觉方法，使用深度神经网络的[LeafSnap](https:\u002F\u002Fneerajkumar.org\u002Fbase\u002Fpapers\u002Fnk_eccv2012_leafsnap.pdf)能有效提高测试准确率。\n58. \u003Ckbd>15400+\u003C\u002Fkbd> [pytorch-CycleGAN-and-pix2pix](https:\u002F\u002Fgithub.com\u002Fjunyanz\u002Fpytorch-CycleGAN-and-pix2pix): PyTorch 实现图像风格迁移。\n59. \u003Ckbd>1000-\u003C\u002Fkbd> [A3C-PyTorch](https:\u002F\u002Fgithub.com\u002Fonlytailei\u002FA3C-PyTorch):PyTorch 实现 A3C(Advantage async actor-critic)算法。\n60. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-value-iteration-networks](https:\u002F\u002Fgithub.com\u002Fkentsommer\u002Fpytorch-value-iteration-networks): PyTorch实现价值迭代网络Value Iteration Networks (NIPS 2016 最佳论文)。  \n61. 
\u003Ckbd>1000-\u003C\u002Fkbd> [PyTorch-Style-Transfer](https:\u002F\u002Fgithub.com\u002Fzhanghang1989\u002FPyTorch-Style-Transfer): PyTorch实现实时转换多风格生成网络。\n62. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-deeplab-resnet](https:\u002F\u002Fgithub.com\u002Fisht7\u002Fpytorch-deeplab-resnet): PyTorch实现 [DeepLab resnet v2](https:\u002F\u002Farxiv.org\u002Fabs\u002F1606.00915)。\n63. \u003Ckbd>1100+\u003C\u002Fkbd> [pointnet.pytorch](https:\u002F\u002Fgithub.com\u002Ffxia22\u002Fpointnet.pytorch): PyTorch实现 \"PointNet: 基于深度学习的3D点分类和分割模型\" https:\u002F\u002Farxiv.org\u002Fabs\u002F1612.00593  \n64. \u003Ckbd>2100+\u003C\u002Fkbd> [pytorch-playground](https:\u002F\u002Fgithub.com\u002Faaron-xichen\u002Fpytorch-playground): 包含常见的预训练模型和数据集(MNIST, SVHN, CIFAR10, CIFAR100, STL10, AlexNet, VGG16, VGG19, ResNet, Inception, SqueezeNet)**.\n65. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-dnc](https:\u002F\u002Fgithub.com\u002Fjingweiz\u002Fpytorch-dnc): PyTorch\u002FVisdom实现的神经图灵机(NTM)&可微神经计算机(DNC)。\n66. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch_image_classifier](https:\u002F\u002Fgithub.com\u002Fjinfagang\u002Fpytorch_image_classifier): 使用PyTorch的最小但实用的图像分类器管道，在ResNet18上进行细化，在自己的小型数据集上获得99%的准确率。\n67. \u003Ckbd>1000-\u003C\u002Fkbd> [mnist-svhn-transfer](https:\u002F\u002Fgithub.com\u002Fyunjey\u002Fmnist-svhn-transfer): PyTorch实现CycleGAN和SGAN。\n68. \u003Ckbd>null\u003C\u002Fkbd> [pytorch-yolo2](https:\u002F\u002Fgithub.com\u002Fmarvis\u002Fpytorch-yolo2): pytorch-yolo2\n69. \u003Ckbd>1000-\u003C\u002Fkbd> [dni](https:\u002F\u002Fgithub.com\u002Fandrewliao11\u002Fdni.pytorch): PyTorch实现使用合成梯度的解耦神经接口，论文：[Decoupled Neural Interfaces using Synthetic Gradients](https:\u002F\u002Farxiv.org\u002Fabs\u002F1608.05343)。\n70. \u003Ckbd>1200+\u003C\u002Fkbd> [wgan-gp](https:\u002F\u002Fgithub.com\u002Fcaogang\u002Fwgan-gp): PyTorch实现论文\"[Improved Training of Wasserstein GANs](https:\u002F\u002Farxiv.org\u002Fabs\u002F1704.00028v3)\".\n71. 
\u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-seq2seq-intent-parsing](https:\u002F\u002Fgithub.com\u002Fspro\u002Fpytorch-seq2seq-intent-parsing):  PyTorch使用seq2seq和注意力模型进行意图分析和空位填充。\n72. \u003Ckbd>1000-\u003C\u002Fkbd> [pyTorch_NCE](https:\u002F\u002Fgithub.com\u002Fdemelin\u002FpyTorch_NCE): 复现噪音对比估计算法，论文：[Noise-contrastive estimation: A new estimation principle for unnormalized statistical models](http:\u002F\u002Fproceedings.mlr.press\u002Fv9\u002Fgutmann10a\u002Fgutmann10a.pdf)。\n73. \u003Ckbd>1000-\u003C\u002Fkbd> [molencoder](https:\u002F\u002Fgithub.com\u002Fcxhernandez\u002Fmolencoder): 分子自动编码器。\n74. \u003Ckbd>1000-\u003C\u002Fkbd> [GAN-weight-norm](https:\u002F\u002Fgithub.com\u002Fstormraiser\u002FGAN-weight-norm): 论文代码，\"[生成对抗网络中批量和权重归一化的影响](https:\u002F\u002Farxiv.org\u002Fabs\u002F1704.03971)\"\n75. \u003Ckbd>1000-\u003C\u002Fkbd> [lgamma](https:\u002F\u002Fgithub.com\u002Frachtsingh\u002Flgamma): 实现polygamma、lgamma和beta函数。\n76. \u003Ckbd>1000-\u003C\u002Fkbd> [bigBatch](https:\u002F\u002Fgithub.com\u002Feladhoffer\u002FbigBatch): 论文代码，论文：“[训练越久，泛化越好：关闭神经网络大批量训练的泛化间隙](https:\u002F\u002Farxiv.org\u002Fabs\u002F1705.08741)”。\n77. \u003Ckbd>1000-\u003C\u002Fkbd> [rl_a3c_pytorch](https:\u002F\u002Fgithub.com\u002Fdgriff777\u002Frl_a3c_pytorch): 针对 Atari 2600 的强化学习，实现了 A3C LSTM 。\n78. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-retraining](https:\u002F\u002Fgithub.com\u002Fahirner\u002Fpytorch-retraining): PyTorch动物园模型转移学习(torchvision)。\n79. \u003Ckbd>1000-\u003C\u002Fkbd> [nmp_qc](https:\u002F\u002Fgithub.com\u002Fpriba\u002Fnmp_qc): 用于计算机视觉的神经消息传递。\n80. \u003Ckbd>2900+\u003C\u002Fkbd> [grad-cam](https:\u002F\u002Fgithub.com\u002Fjacobgil\u002Fpytorch-grad-cam): PyTorch 实现[Grad-CAM](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1610.02391v1.pdf)。\n81. 
\u003Ckbd>null\u003C\u002Fkbd> [pytorch-trpo](https:\u002F\u002Fgithub.com\u002Fmjacar\u002Fpytorch-trpo): PyTorch实现置信域策略优化（[Trust Region Policy Optimization (TRPO)](https:\u002F\u002Farxiv.org\u002Fabs\u002F1502.05477)）。\n82. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-explain-black-box](https:\u002F\u002Fgithub.com\u002Fjacobgil\u002Fpytorch-explain-black-box): PyTorch通过有意义扰动实现黑箱的可解释性解释，[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1704.03296)。\n83. \u003Ckbd>1000-\u003C\u002Fkbd> [vae_vpflows](https:\u002F\u002Fgithub.com\u002Fjmtomczak\u002Fvae_vpflows): 凸组合线性IAF与Householder流 https:\u002F\u002Fjmtomczak.github.io\u002Fdeebmed.html 。\n84. \u003Ckbd>1000-\u003C\u002Fkbd> [relational-networks](https:\u002F\u002Fgithub.com\u002Fkimhc6028\u002Frelational-networks): Pytorch实现\"[用一个简单的神经网络模块来做关系推理](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1706.01427.pdf)\"(关系网络)。\n85. \u003Ckbd>1000-\u003C\u002Fkbd> [vqa.pytorch](https:\u002F\u002Fgithub.com\u002FCadene\u002Fvqa.pytorch): 视觉问答。\n86. \u003Ckbd>1300+\u003C\u002Fkbd> [end-to-end-negotiator](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fend-to-end-negotiator): 成交还是不成交？谈判对话的端到端学习。\n87. \u003Ckbd>1000-\u003C\u002Fkbd> [odin-pytorch](https:\u002F\u002Fgithub.com\u002FShiyuLiang\u002Fodin-pytorch): 神经网络失配实例的原则性检测。\n88. \u003Ckbd>1000-\u003C\u002Fkbd> [FreezeOut](https:\u002F\u002Fgithub.com\u002Fajbrock\u002FFreezeOut): 一种通过逐步冻结层加速神经网络训练的简单技术。\n89. \u003Ckbd>1000-\u003C\u002Fkbd> [ARAE](https:\u002F\u002Fgithub.com\u002Fjakezhaojb\u002FARAE): 论文代码，\"[对抗性正则化的自动编码器, ARAE](https:\u002F\u002Farxiv.org\u002Fabs\u002F1706.04223)\"。\n90. \u003Ckbd>1000-\u003C\u002Fkbd> [forward-thinking-pytorch](https:\u002F\u002Fgithub.com\u002Fkimhc6028\u002Fforward-thinking-pytorch): PyTorch实现\"[前向思考：一次一层地建立和训练神经网络](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1706.02480.pdf)\"。  \n91. 
\u003Ckbd>1000-\u003C\u002Fkbd> [context_encoder_pytorch](https:\u002F\u002Fgithub.com\u002FBoyuanJiang\u002Fcontext_encoder_pytorch): PyTorch实现上下文编码器(Context Encoders)，可用于图像修复。\n92. \u003Ckbd>5500+\u003C\u002Fkbd> [attention-is-all-you-need-pytorch](https:\u002F\u002Fgithub.com\u002Fjadore801120\u002Fattention-is-all-you-need-pytorch): PyTorch实现\"Attention is All You Need\"中的Transformer模型。\n93. \u003Ckbd>1000-\u003C\u002Fkbd> [OpenFacePytorch](https:\u002F\u002Fgithub.com\u002Fthnkim\u002FOpenFacePytorch): 使用 OpenFace's nn4.small2.v1.t7 模型的PyTorch模块。\n94. \u003Ckbd>1000-\u003C\u002Fkbd> [neural-combinatorial-rl-pytorch](https:\u002F\u002Fgithub.com\u002Fpemami4911\u002Fneural-combinatorial-rl-pytorch):  PyTorch 实现\"[通过强化学习实现神经组合优化](https:\u002F\u002Farxiv.org\u002Fabs\u002F1611.09940)\"。\n95. \u003Ckbd>null\u003C\u002Fkbd> [pytorch-nec](https:\u002F\u002Fgithub.com\u002Fmjacar\u002Fpytorch-nec): PyTorch实现神经情景控制([NEC，Neural Episodic Control](https:\u002F\u002Farxiv.org\u002Fabs\u002F1703.01988))。\n96. \u003Ckbd>1000-\u003C\u002Fkbd> [seq2seq.pytorch](https:\u002F\u002Fgithub.com\u002Feladhoffer\u002Fseq2seq.pytorch): 使用PyTorch进行Sequence-to-Sequence学习。\n97. \u003Ckbd>1000-\u003C\u002Fkbd> [Pytorch-Sketch-RNN](https:\u002F\u002Fgithub.com\u002Falexis-jacq\u002FPytorch-Sketch-RNN): PyTorch实现 “[A Neural Representation of Sketch Drawings](arxiv.org\u002Fabs\u002F1704.03477)”。\n98. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-pruning](https:\u002F\u002Fgithub.com\u002Fjacobgil\u002Fpytorch-pruning): PyTorch实现 [1611.06440] [用于资源有效推理的剪枝卷积神经网络](https:\u002F\u002Farxiv.org\u002Fabs\u002F1611.06440)\n99. \u003Ckbd>1000-\u003C\u002Fkbd> [DrQA](https:\u002F\u002Fgithub.com\u002Fhitvoice\u002FDrQA): PyTorch实现自动阅读维基百科并回答开放领域问题。\n100. \u003Ckbd>1000-\u003C\u002Fkbd> [YellowFin_Pytorch](https:\u002F\u002Fgithub.com\u002FJianGoForIt\u002FYellowFin_Pytorch): 基于动量梯度下降（momentum SGD）的自动调优优化器，无需手动指定学习速率和动量。\n101. 
\u003Ckbd>1000-\u003C\u002Fkbd> [samplernn-pytorch](https:\u002F\u002Fgithub.com\u002Fdeepsound-project\u002Fsamplernn-pytorch): PyTorch实现SampleRNN: 一种无条件端到端神经音频生成模型。\n102. \u003Ckbd>1000-\u003C\u002Fkbd> [AEGeAN](https:\u002F\u002Fgithub.com\u002Ftymokvo\u002FAEGeAN): 基于AE稳定的更深的深度卷积生成对抗网络(DCGAN, Deep Convolution Generative Adversarial Networks)。\n103. \u003Ckbd>1000-\u003C\u002Fkbd> [\u002Fpytorch-SRResNet](https:\u002F\u002Fgithub.com\u002Ftwtygqyy\u002Fpytorch-SRResNet): PyTorch实现“[基于生成对抗网络的实感单幅图像超分辨率](https:\u002F\u002Farxiv.org\u002Fabs\u002F1609.04802)”。\n104. \u003Ckbd>1000-\u003C\u002Fkbd> [vsepp](https:\u002F\u002Fgithub.com\u002Ffartashf\u002Fvsepp): 论文代码，\"[VSE++:使用难分样本(Hard Negative)改善视觉语义联合嵌入](https:\u002F\u002Farxiv.org\u002Fabs\u002F1707.05612)\"。\n105. \u003Ckbd>1000-\u003C\u002Fkbd> [Pytorch-DPPO](https:\u002F\u002Fgithub.com\u002Falexis-jacq\u002FPytorch-DPPO): Pytorch实现分布式近端策略优化([Distributed Proximal Policy Optimization](https:\u002F\u002Farxiv.org\u002Fabs\u002F1707.02286))。\n106. \u003Ckbd>1700+\u003C\u002Fkbd> [UNIT](https:\u002F\u002Fgithub.com\u002Fmingyuliutw\u002FUNIT): 无监督的图像到图像转换网络，[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1703.00848)。\n107. \u003Ckbd>1300+\u003C\u002Fkbd> [efficient_densenet_pytorch](https:\u002F\u002Fgithub.com\u002Fgpleiss\u002Fefficient_densenet_pytorch): DenseNets的内存高效实现。\n108. \u003Ckbd>1000-\u003C\u002Fkbd> [tsn-pytorch](https:\u002F\u002Fgithub.com\u002Fyjxiong\u002Ftsn-pytorch): PyTorch实现时间分割网络(TSN, Temporal Segment Networks)。\n109. \u003Ckbd>1000-\u003C\u002Fkbd> [SMASH](https:\u002F\u002Fgithub.com\u002Fajbrock\u002FSMASH): [SMASH](https:\u002F\u002Farxiv.org\u002Fabs\u002F1708.05344)，一种高效地探索神经体系结构的实验技术。\n110. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-retinanet](https:\u002F\u002Fgithub.com\u002Fkuangliu\u002Fpytorch-retinanet): RetinaNet。\n111. 
\u003Ckbd>1000-\u003C\u002Fkbd> [biogans](https:\u002F\u002Fgithub.com\u002Faosokin\u002Fbiogans): 实现 ICCV 2017 论文 \"[利用GANs进行生物图像合成](https:\u002F\u002Farxiv.org\u002Fabs\u002F1708.04692)\"。\n112. \u003Ckbd>null\u003C\u002Fkbd> [Semantic Image Synthesis via Adversarial Learning]( https:\u002F\u002Fgithub.com\u002Fwoozzu\u002Fdong_iccv_2017): PyTorch 实现 ICCV 2017 论文 \"[基于对抗学习的语义图像合成](https:\u002F\u002Farxiv.org\u002Fabs\u002F1707.06873)\"。\n113. \u003Ckbd>1000-\u003C\u002Fkbd> [fmpytorch](https:\u002F\u002Fgithub.com\u002Fjmhessel\u002Ffmpytorch): PyTorch在Cython中实现因子分解机（Factorization Machine）模块。\n114. \u003Ckbd>1000-\u003C\u002Fkbd> [ORN](https:\u002F\u002Fgithub.com\u002FZhouYanzhao\u002FORN): PyTorch 实现 CVPR 2017 论文 \"[Oriented Response Networks](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1701.01833.pdf)\"。\n115. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-maml](https:\u002F\u002Fgithub.com\u002Fkaterakelly\u002Fpytorch-maml): PyTorch实现 [MAML](https:\u002F\u002Farxiv.org\u002Fabs\u002F1703.03400)（Model-Agnostic Meta-Learning，与模型无关的元学习）。\n116. \u003Ckbd>2200+\u003C\u002Fkbd> [pytorch-generative-model-collections](https:\u002F\u002Fgithub.com\u002Fznxlwm\u002Fpytorch-generative-model-collections): PyTorch中的各种生成模型集合。\n117. \u003Ckbd>1000-\u003C\u002Fkbd> [vqa-winner-cvprw-2017](https:\u002F\u002Fgithub.com\u002Fmarkdtw\u002Fvqa-winner-cvprw-2017): Pytorch 实现 CVPR'17 VQA( Visual Question Answer，视觉问答) 挑战冠军。\n118. \u003Ckbd>1000-\u003C\u002Fkbd> [tacotron_pytorch](https:\u002F\u002Fgithub.com\u002Fr9y9\u002Ftacotron_pytorch):  PyTorch 实现 Tacotron 语音合成模型。\n119. \u003Ckbd>1000-\u003C\u002Fkbd> [pspnet-pytorch](https:\u002F\u002Fgithub.com\u002FLextal\u002Fpspnet-pytorch): PyTorch 实现 PSPNet 语义分割网络。\n120. \u003Ckbd>1000-\u003C\u002Fkbd> [LM-LSTM-CRF](https:\u002F\u002Fgithub.com\u002FLiyuanLucasLiu\u002FLM-LSTM-CRF): 《Empower Sequence Labeling with Task-Aware Language Model》 http:\u002F\u002Farxiv.org\u002Fabs\u002F1709.04109\n121. 
\u003Ckbd>5000+\u003C\u002Fkbd> [face-alignment](https:\u002F\u002Fgithub.com\u002F1adrianb\u002Fface-alignment): 使用PyTorch构建2D和3D人脸对齐库。\n122. \u003Ckbd>1000-\u003C\u002Fkbd> [DepthNet](https:\u002F\u002Fgithub.com\u002FClementPinard\u002FDepthNet): PyTorch 在Still Box数据集上训练DepthNet。\n123. \u003Ckbd>1600+\u003C\u002Fkbd> [EDSR-PyTorch](https:\u002F\u002Fgithub.com\u002Fthstkdgus35\u002FEDSR-PyTorch): 论文《Enhanced Deep Residual Networks for Single Image Super-Resolution》的PyTorch实现版本。 (CVPRW 2017)\n124. \u003Ckbd>1000-\u003C\u002Fkbd> [e2c-pytorch](https:\u002F\u002Fgithub.com\u002Fethanluoyc\u002Fe2c-pytorch): E2C，Embed to Control 实现。\n125. \u003Ckbd>2900+\u003C\u002Fkbd> [3D-ResNets-PyTorch](https:\u002F\u002Fgithub.com\u002Fkenshohara\u002F3D-ResNets-PyTorch): 基于3D残差网络的动作识别。\n126. \u003Ckbd>1000-\u003C\u002Fkbd> [bandit-nmt](https:\u002F\u002Fgithub.com\u002Fkhanhptnk\u002Fbandit-nmt): EMNLP 2017 论文《Reinforcement Learning for Bandit Neural Machine Translation with Simulated Human Feedback》的代码，该论文在神经编解码模型的基础上实现了A2C算法，并在模拟噪声激励下对组合进行了基准测试。\n127. \u003Ckbd>2400+\u003C\u002Fkbd> [pytorch-a2c-ppo-acktr](https:\u002F\u002Fgithub.com\u002Fikostrikov\u002Fpytorch-a2c-ppo-acktr): PyTorch 实现 Advantage Actor Critic (A2C), Proximal Policy Optimization (PPO，近端策略优化) 和可扩展信赖域（Trust Region）方法，这些算法使用 Kronecker因子近似（ACKTR）和生成对抗模仿学习（GAIL）实现，可用于深度强化学习。\n128. \u003Ckbd>1000-\u003C\u002Fkbd> [zalando-pytorch](https:\u002F\u002Fgithub.com\u002FbaldassarreFe\u002Fzalando-pytorch): [Fashion-MNIST](https:\u002F\u002Fgithub.com\u002Fzalandoresearch\u002Ffashion-mnist)数据集上的各种实验。\n129. \u003Ckbd>1000-\u003C\u002Fkbd> [sphereface_pytorch](https:\u002F\u002Fgithub.com\u002Fclcarwin\u002Fsphereface_pytorch): PyTorch实现SphereFace，人脸识别相关，https:\u002F\u002Farxiv.org\u002Fabs\u002F1704.08063 。\n130. 
\u003Ckbd>1000-\u003C\u002Fkbd> [Categorical DQN](https:\u002F\u002Fgithub.com\u002Ffloringogianu\u002Fcategorical-dqn): PyTorch 版 Categorical DQN，该模型来自论文《[A Distributional Perspective on Reinforcement Learning](https:\u002F\u002Farxiv.org\u002Fabs\u002F1707.06887)》。\n131. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-ntm](https:\u002F\u002Fgithub.com\u002Floudinthecloud\u002Fpytorch-ntm): 神经网络图灵机。\n132. \u003Ckbd>null\u003C\u002Fkbd> [mask_rcnn_pytorch](https:\u002F\u002Fgithub.com\u002Ffelixgwu\u002Fmask_rcnn_pytorch): Mask RCNN in PyTorch.\n133. \u003Ckbd>1000-\u003C\u002Fkbd> [graph_convnets_pytorch](https:\u002F\u002Fgithub.com\u002Fxbresson\u002Fgraph_convnets_pytorch): PyTorch 实现图卷积神经网络，NIPS’16。\n134. \u003Ckbd>1700+\u003C\u002Fkbd> [pytorch-faster-rcnn](https:\u002F\u002Fgithub.com\u002Fruotianluo\u002Fpytorch-faster-rcnn): PyTorch实现 faster RCNN 检测框架，基于 Xinlei Chen 的[tf-faster-rcnn](https:\u002F\u002Fgithub.com\u002Fendernewton\u002Ftf-faster-rcnn)，已不再维护。\n135. \u003Ckbd>1000-\u003C\u002Fkbd> [torchMoji](https:\u002F\u002Fgithub.com\u002Fhuggingface\u002FtorchMoji): A pyTorch implementation of the DeepMoji model: state-of-the-art deep learning model for analyzing sentiment, emotion, sarcasm etc.\n136. \u003Ckbd>3900+\u003C\u002Fkbd> [semantic-segmentation-pytorch](https:\u002F\u002Fgithub.com\u002Fhangzhaomit\u002Fsemantic-segmentation-pytorch): 在[MIT ADE20K dataset](http:\u002F\u002Fsceneparsing.csail.mit.edu)数据集上实现语义分割\u002F场景解析。\n137. \u003Ckbd>1200+\u003C\u002Fkbd> [pytorch-qrnn](https:\u002F\u002Fgithub.com\u002Fsalesforce\u002Fpytorch-qrnn): PyTorch implementation of the Quasi-Recurrent Neural Network - up to 16 times faster than NVIDIA's cuDNN LSTM\n138. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-sgns](https:\u002F\u002Fgithub.com\u002Ftheeluwin\u002Fpytorch-sgns): Skipgram Negative Sampling in PyTorch.\n139. 
\u003Ckbd>1000-\u003C\u002Fkbd> [SfmLearner-Pytorch ](https:\u002F\u002Fgithub.com\u002FClementPinard\u002FSfmLearner-Pytorch): Pytorch version of SfmLearner from Tinghui Zhou et al.\n140. \u003Ckbd>1000-\u003C\u002Fkbd> [deformable-convolution-pytorch](https:\u002F\u002Fgithub.com\u002F1zb\u002Fdeformable-convolution-pytorch): PyTorch实现可变形卷积。\n141. \u003Ckbd>1000-\u003C\u002Fkbd> [skip-gram-pytorch](https:\u002F\u002Fgithub.com\u002Ffanglanting\u002Fskip-gram-pytorch): A complete pytorch implementation of skipgram model (with subsampling and negative sampling). The embedding result is tested with Spearman's rank correlation.\n142. \u003Ckbd>1000-\u003C\u002Fkbd> [stackGAN-v2](https:\u002F\u002Fgithub.com\u002Fhanzhanggit\u002FStackGAN-v2): Pytorch implementation for reproducing StackGAN_v2 results in the paper StackGAN++: Realistic Image Synthesis with Stacked Generative Adversarial Networks by Han Zhang*, Tao Xu*, Hongsheng Li, Shaoting Zhang, Xiaogang Wang, Xiaolei Huang, Dimitris Metaxas.\n143. \u003Ckbd>1000-\u003C\u002Fkbd> [self-critical.pytorch](https:\u002F\u002Fgithub.com\u002Fruotianluo\u002Fself-critical.pytorch): 非官方，PyTorch实现基于 self-critical 序列训练的图像标注。\n144. \u003Ckbd>3600+\u003C\u002Fkbd> [pygcn](https:\u002F\u002Fgithub.com\u002Ftkipf\u002Fpygcn): 图卷积网络。\n145. \u003Ckbd>1000-\u003C\u002Fkbd> [dnc](https:\u002F\u002Fgithub.com\u002Fixaxaar\u002Fpytorch-dnc): 可微神经计算机、稀疏存取存储器与稀疏可微神经计算机。\n146. \u003Ckbd>1000-\u003C\u002Fkbd> [prog_gans_pytorch_inference](https:\u002F\u002Fgithub.com\u002Fptrblck\u002Fprog_gans_pytorch_inference): PyTorch inference for \"Progressive Growing of GANs\" with CelebA snapshot.\n147. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-capsule](https:\u002F\u002Fgithub.com\u002Ftimomernick\u002Fpytorch-capsule): Pytorch implementation of Hinton's Dynamic Routing Between Capsules.\n148. 
\u003Ckbd>1000-\u003C\u002Fkbd> [PyramidNet-PyTorch](https:\u002F\u002Fgithub.com\u002Fdyhan0920\u002FPyramidNet-PyTorch): A PyTorch implementation for PyramidNets (Deep Pyramidal Residual Networks, arxiv.org\u002Fabs\u002F1610.02915)\n149. \u003Ckbd>1000-\u003C\u002Fkbd> [radio-transformer-networks](https:\u002F\u002Fgithub.com\u002Fgram-ai\u002Fradio-transformer-networks): A PyTorch implementation of Radio Transformer Networks from the paper \"An Introduction to Deep Learning for the Physical Layer\". arxiv.org\u002Fabs\u002F1702.00832\n150. \u003Ckbd>1000-\u003C\u002Fkbd> [honk](https:\u002F\u002Fgithub.com\u002Fcastorini\u002Fhonk): PyTorch reimplementation of Google's TensorFlow CNNs for keyword spotting.\n151. \u003Ckbd>1000-\u003C\u002Fkbd> [DeepCORAL](https:\u002F\u002Fgithub.com\u002FSSARCandy\u002FDeepCORAL): A PyTorch implementation of 'Deep CORAL: Correlation Alignment for Deep Domain Adaptation.', ECCV 2016\n152. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-pose](https:\u002F\u002Fgithub.com\u002Fbearpaw\u002Fpytorch-pose): PyTorch工具包，用于2D人体姿态估计。\n153. \u003Ckbd>1000-\u003C\u002Fkbd> [lang-emerge-parlai](https:\u002F\u002Fgithub.com\u002Fkarandesai-96\u002Flang-emerge-parlai): Implementation of EMNLP 2017 Paper \"Natural Language Does Not Emerge 'Naturally' in Multi-Agent Dialog\" using PyTorch and ParlAI\n154. \u003Ckbd>1200+\u003C\u002Fkbd> [Rainbow](https:\u002F\u002Fgithub.com\u002FKaixhin\u002FRainbow): Rainbow: Combining Improvements in Deep Reinforcement Learning \n155. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch_compact_bilinear_pooling v1](https:\u002F\u002Fgithub.com\u002Fgdlg\u002Fpytorch_compact_bilinear_pooling): This repository has a pure Python implementation of Compact Bilinear Pooling and Count Sketch for PyTorch.\n156. \u003Ckbd>1000-\u003C\u002Fkbd> [CompactBilinearPooling-Pytorch v2](https:\u002F\u002Fgithub.com\u002FDeepInsight-PCALab\u002FCompactBilinearPooling-Pytorch): (Yang Gao, et al.) 
A Pytorch Implementation for Compact Bilinear Pooling.\n157. \u003Ckbd>1000-\u003C\u002Fkbd> [FewShotLearning](https:\u002F\u002Fgithub.com\u002Fgitabcworld\u002FFewShotLearning): Pytorch implementation of the paper \"Optimization as a Model for Few-Shot Learning\"\n158. \u003Ckbd>1000-\u003C\u002Fkbd> [meProp](https:\u002F\u002Fgithub.com\u002Fjklj077\u002FmeProp): Codes for \"meProp: Sparsified Back Propagation for Accelerated Deep Learning with Reduced Overfitting\".\n159. \u003Ckbd>1000-\u003C\u002Fkbd> [SFD_pytorch](https:\u002F\u002Fgithub.com\u002Fclcarwin\u002FSFD_pytorch): 单镜头尺度不变人脸检测器。\n160. \u003Ckbd>1000-\u003C\u002Fkbd> [GradientEpisodicMemory](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FGradientEpisodicMemory): Continuum Learning with GEM: Gradient Episodic Memory. https:\u002F\u002Farxiv.org\u002Fabs\u002F1706.08840\n161. \u003Ckbd>1900+\u003C\u002Fkbd> [DeblurGAN](https:\u002F\u002Fgithub.com\u002FKupynOrest\u002FDeblurGAN): Pytorch implementation of the paper DeblurGAN: Blind Motion Deblurring Using Conditional Adversarial Networks.\n162. \u003Ckbd>4800+\u003C\u002Fkbd> [StarGAN](https:\u002F\u002Fgithub.com\u002Fyunjey\u002FStarGAN): StarGAN: 多领域图像转换 GAN 网络，https:\u002F\u002Farxiv.org\u002Fabs\u002F1711.09020 。\n163. \u003Ckbd>1000-\u003C\u002Fkbd> [CapsNet-pytorch](https:\u002F\u002Fgithub.com\u002Fadambielski\u002FCapsNet-pytorch): PyTorch 实现 NIPS 2017 论文 “[胶囊间的动态路由](https:\u002F\u002Farxiv.org\u002Fabs\u002F1710.09829)”。\n164. \u003Ckbd>1000-\u003C\u002Fkbd> [CondenseNet](https:\u002F\u002Fgithub.com\u002FShichenLiu\u002FCondenseNet): CondenseNet: 面向移动设备的轻量级 CNN。\n165. \u003Ckbd>6700+\u003C\u002Fkbd> [deep-image-prior](https:\u002F\u002Fgithub.com\u002FDmitryUlyanov\u002Fdeep-image-prior): 基于神经网络的图像修复，无学习过程。\n166. \u003Ckbd>1100+\u003C\u002Fkbd> [deep-head-pose](https:\u002F\u002Fgithub.com\u002Fnatanielruiz\u002Fdeep-head-pose): 使用PyTorch进行深度学习头部姿势估计。\n167. 
\u003Ckbd>1000-\u003C\u002Fkbd> [Random-Erasing](https:\u002F\u002Fgithub.com\u002Fzhunzhong07\u002FRandom-Erasing): 论文代码，论文：\"[随机擦除数据增强](https:\u002F\u002Farxiv.org\u002Fabs\u002F1708.04896)\"。\n168. \u003Ckbd>1000-\u003C\u002Fkbd> [FaderNetworks](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FFaderNetworks): Fader Networks: 通过滑动属性重构图像 - NIPS 2017，https:\u002F\u002Farxiv.org\u002Fpdf\u002F1706.00409.pdf 。\n169. \u003Ckbd>2300+\u003C\u002Fkbd> [FlowNet 2.0](https:\u002F\u002Fgithub.com\u002FNVIDIA\u002Fflownet2-pytorch): FlowNet 2.0: 深度网络中光流估计的演化。\n170. \u003Ckbd>5300+\u003C\u002Fkbd> [pix2pixHD](https:\u002F\u002Fgithub.com\u002FNVIDIA\u002Fpix2pixHD): 利用条件 GANs 合成和处理 HD 高清图像的 PyTorch 实现，https:\u002F\u002Farxiv.org\u002Fpdf\u002F1711.11585.pdf。\n171. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-smoothgrad](https:\u002F\u002Fgithub.com\u002Fpkdn\u002Fpytorch-smoothgrad): SmoothGrad通过增加噪声来去除噪声。\n172. \u003Ckbd>1000-\u003C\u002Fkbd> [RetinaNet](https:\u002F\u002Fgithub.com\u002Fc0nn3r\u002FRetinaNet): RetinaNet实现。\n173. \u003Ckbd>6300+\u003C\u002Fkbd> [faster-rcnn.pytorch](https:\u002F\u002Fgithub.com\u002Fjwyang\u002Ffaster-rcnn.pytorch): This project is a faster faster R-CNN implementation, aimed to accelerating the training of faster R-CNN object detection models. \n174. \u003Ckbd>1000-\u003C\u002Fkbd> [mixup_pytorch](https:\u002F\u002Fgithub.com\u002Fleehomyc\u002Fmixup_pytorch): A PyTorch implementation of the paper Mixup: Beyond Empirical Risk Minimization in PyTorch.\n175. \u003Ckbd>1100+\u003C\u002Fkbd> [inplace_abn](https:\u002F\u002Fgithub.com\u002Fmapillary\u002Finplace_abn): In-Place Activated BatchNorm for Memory-Optimized Training of DNNs\n176. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-pose-hg-3d](https:\u002F\u002Fgithub.com\u002Fxingyizhou\u002Fpytorch-pose-hg-3d): PyTorch implementation for 3D human pose estimation\n177. 
\u003Ckbd>1000-\u003C\u002Fkbd> [nmn-pytorch](https:\u002F\u002Fgithub.com\u002FHarshTrivedi\u002Fnmn-pytorch): Neural Module Network for VQA in Pytorch.\n178. \u003Ckbd>1000-\u003C\u002Fkbd> [bytenet](https:\u002F\u002Fgithub.com\u002Fkefirski\u002Fbytenet): Pytorch implementation of bytenet from \"Neural Machine Translation in Linear Time\" paper\n179. \u003Ckbd>1000-\u003C\u002Fkbd> [bottom-up-attention-vqa](https:\u002F\u002Fgithub.com\u002Fhengyuan-hu\u002Fbottom-up-attention-vqa): vqa, bottom-up-attention, pytorch\n180. \u003Ckbd>1000-\u003C\u002Fkbd> [yolo2-pytorch](https:\u002F\u002Fgithub.com\u002Fruiminshen\u002Fyolo2-pytorch): The YOLOv2 is one of the most popular one-stage object detector. This project adopts PyTorch as the developing framework to increase productivity, and utilize ONNX to convert models into Caffe 2 to benifit engineering deployment.\n181. \u003Ckbd>1000-\u003C\u002Fkbd> [reseg-pytorch](https:\u002F\u002Fgithub.com\u002FWizaron\u002Freseg-pytorch): PyTorch 实现ReSeg。 (https:\u002F\u002Farxiv.org\u002Fpdf\u002F1511.07053.pdf)\n182. \u003Ckbd>1000-\u003C\u002Fkbd> [binary-stochastic-neurons](https:\u002F\u002Fgithub.com\u002FWizaron\u002Fbinary-stochastic-neurons): Binary Stochastic Neurons in PyTorch.\n183. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-pose-estimation](https:\u002F\u002Fgithub.com\u002FDavexPro\u002Fpytorch-pose-estimation): PyTorch Implementation of Realtime Multi-Person Pose Estimation project.\n184. \u003Ckbd>1000-\u003C\u002Fkbd> [interaction_network_pytorch](https:\u002F\u002Fgithub.com\u002Fhiggsfield\u002Finteraction_network_pytorch): Pytorch Implementation of Interaction Networks for Learning about Objects, Relations and Physics.\n185. \u003Ckbd>1000-\u003C\u002Fkbd> [NoisyNaturalGradient](https:\u002F\u002Fgithub.com\u002Fwlwkgus\u002FNoisyNaturalGradient): Pytorch Implementation of paper \"Noisy Natural Gradient as Variational Inference\". \n186. 
\u003Ckbd>1000-\u003C\u002Fkbd> [ewc.pytorch](https:\u002F\u002Fgithub.com\u002Fmoskomule\u002Fewc.pytorch): An implementation of Elastic Weight Consolidation (EWC), proposed in James Kirkpatrick et al. Overcoming catastrophic forgetting in neural networks 2016(10.1073\u002Fpnas.1611835114).\n187. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-zssr](https:\u002F\u002Fgithub.com\u002Fjacobgil\u002Fpytorch-zssr): PyTorch implementation of 1712.06087 \"Zero-Shot\" Super-Resolution using Deep Internal Learning\n188. \u003Ckbd>1000-\u003C\u002Fkbd> [deep_image_prior](https:\u002F\u002Fgithub.com\u002Fatiyo\u002Fdeep_image_prior): 基于未训练神经网络的图像重建算法实现。算法：[Deep Image Prior](https:\u002F\u002Farxiv.org\u002Fabs\u002F1711.10925)。\n189. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-transformer](https:\u002F\u002Fgithub.com\u002Fleviswind\u002Fpytorch-transformer): PyTorch实现论文[Attention Is All You Need](https:\u002F\u002Farxiv.org\u002Fabs\u002F1706.03762)。\n190. \u003Ckbd>1000-\u003C\u002Fkbd> [DeepRL-Grounding](https:\u002F\u002Fgithub.com\u002Fdevendrachaplot\u002FDeepRL-Grounding): PyTorch实现AAAI-18论文[Gated-Attention Architectures for Task-Oriented Language Grounding](https:\u002F\u002Farxiv.org\u002Fabs\u002F1706.07230)。\n191. \u003Ckbd>1000-\u003C\u002Fkbd> [deep-forecast-pytorch](https:\u002F\u002Fgithub.com\u002FWizaron\u002Fdeep-forecast-pytorch): 使用LSTMs进行风速预测，论文：[Deep Forecast: Deep Learning-based Spatio-Temporal Forecasting](arxiv.org\u002Fpdf\u002F1707.08110.pdf)。\n192. \u003Ckbd>1000-\u003C\u002Fkbd> [cat-net](https:\u002F\u002Fgithub.com\u002FutiasSTARS\u002Fcat-net):  正则外观变换（[Canonical Appearance Transformations](https:\u002F\u002Farxiv.org\u002Fabs\u002F1709.03009)）\n193. \u003Ckbd>1000-\u003C\u002Fkbd> [minimal_glo](https:\u002F\u002Fgithub.com\u002Ftneumann\u002Fminimal_glo): Minimal PyTorch implementation of Generative Latent Optimization from the paper \"Optimizing the Latent Space of Generative Networks\"\n194. 
\u003Ckbd>1000-\u003C\u002Fkbd> [LearningToCompare-Pytorch](https:\u002F\u002Fgithub.com\u002Fdragen1860\u002FLearningToCompare-Pytorch): Pytorch Implementation for Paper: Learning to Compare: Relation Network for Few-Shot Learning. \n195. \u003Ckbd>1400+\u003C\u002Fkbd> [poincare-embeddings](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fpoincare-embeddings): PyTorch implementation of the NIPS-17 paper \"Poincaré Embeddings for Learning Hierarchical Representations\". \n196. \u003Ckbd>null\u003C\u002Fkbd> [pytorch-trpo(Hessian-vector product version)](https:\u002F\u002Fgithub.com\u002Fikostrikov\u002Fpytorch-trpo): This is a PyTorch implementation of \"Trust Region Policy Optimization (TRPO)\" with exact Hessian-vector product instead of finite differences approximation.\n197. \u003Ckbd>1000-\u003C\u002Fkbd> [ggnn.pytorch](https:\u002F\u002Fgithub.com\u002FJamesChuanggg\u002Fggnn.pytorch): A PyTorch Implementation of Gated Graph Sequence Neural Networks (GGNN). \n198. \u003Ckbd>1000-\u003C\u002Fkbd> [visual-interaction-networks-pytorch](https:\u002F\u002Fgithub.com\u002FMrgemy95\u002Fvisual-interaction-networks-pytorch): This's an implementation of deepmind Visual Interaction Networks paper using pytorch\n199. \u003Ckbd>1000-\u003C\u002Fkbd> [adversarial-patch](https:\u002F\u002Fgithub.com\u002Fjhayes14\u002Fadversarial-patch): PyTorch实现对抗补丁。\n200. \u003Ckbd>1000-\u003C\u002Fkbd> [Prototypical-Networks-for-Few-shot-Learning-PyTorch](https:\u002F\u002Fgithub.com\u002Forobix\u002FPrototypical-Networks-for-Few-shot-Learning-PyTorch): Implementation of Prototypical Networks for Few Shot Learning (arxiv.org\u002Fabs\u002F1703.05175) in Pytorch\n201. 
\u003Ckbd>1000-\u003C\u002Fkbd> [Visual-Feature-Attribution-Using-Wasserstein-GANs-Pytorch](https:\u002F\u002Fgithub.com\u002Forobix\u002FVisual-Feature-Attribution-Using-Wasserstein-GANs-Pytorch): Implementation of Visual Feature Attribution using Wasserstein GANs (arxiv.org\u002Fabs\u002F1711.08998) in PyTorch.\n202. \u003Ckbd>1000-\u003C\u002Fkbd> [PhotographicImageSynthesiswithCascadedRefinementNetworks-Pytorch](https:\u002F\u002Fgithub.com\u002FBlade6570\u002FPhotographicImageSynthesiswithCascadedRefinementNetworks-Pytorch): 用级联优化网络生成照片级图像，https:\u002F\u002Farxiv.org\u002Fabs\u002F1707.09405 。\n203. \u003Ckbd>2400+\u003C\u002Fkbd> [ENAS-pytorch](https:\u002F\u002Fgithub.com\u002Fcarpedm20\u002FENAS-pytorch): PyTorch实现\"[基于参数共享的高效神经网络结构搜索](https:\u002F\u002Farxiv.org\u002Fabs\u002F1802.03268)\"。\n204. \u003Ckbd>1000-\u003C\u002Fkbd> [Neural-IMage-Assessment](https:\u002F\u002Fgithub.com\u002Fkentsyx\u002FNeural-IMage-Assessment): 神经图片评估，https:\u002F\u002Farxiv.org\u002Fabs\u002F1709.05424 。\n205. \u003Ckbd>1000-\u003C\u002Fkbd> [proxprop](https:\u002F\u002Fgithub.com\u002Ftfrerix\u002Fproxprop): 近端回传(Proximal Backpropagation) - 隐式梯度代替显式梯度的神经网络训练算法。\n206. \u003Ckbd>10500+\u003C\u002Fkbd> [FastPhotoStyle](https:\u002F\u002Fgithub.com\u002FNVIDIA\u002FFastPhotoStyle): 照片级逼真的图像风格化的一个封闭解。\n207. \u003Ckbd>1000-\u003C\u002Fkbd> [Deep-Image-Analogy-PyTorch](https:\u002F\u002Fgithub.com\u002FBen-Louis\u002FDeep-Image-Analogy-PyTorch): 基于PyTorch的深度图像模拟的Python实现。\n208. \u003Ckbd>2700+\u003C\u002Fkbd> [Person-reID_pytorch](https:\u002F\u002Fgithub.com\u002Flayumi\u002FPerson_reID_baseline_pytorch): 行人再识别Person-reID的PyTorch实现。\n209. \u003Ckbd>1000-\u003C\u002Fkbd> [pt-dilate-rnn](https:\u002F\u002Fgithub.com\u002Fzalandoresearch\u002Fpt-dilate-rnn): 空洞递归神经网络（Dilated RNNs）。\n210. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-i-revnet](https:\u002F\u002Fgithub.com\u002Fjhjacobsen\u002Fpytorch-i-revnet): Pytorch实现i-RevNets。\n211. 
\u003Ckbd>1000-\u003C\u002Fkbd> [OrthNet](https:\u002F\u002Fgithub.com\u002FOrcuslc\u002FOrthNet): TensorFlow、PyTorch和Numpy层生成正交多项式。\n212. \u003Ckbd>1000-\u003C\u002Fkbd> [DRRN-pytorch](https:\u002F\u002Fgithub.com\u002Fjt827859032\u002FDRRN-pytorch): \"[超分辨率的深递归残差网络(DRRN)](http:\u002F\u002Fcvlab.cse.msu.edu\u002Fpdfs\u002FTai_Yang_Liu_CVPR2017.pdf)\", CVPR 2017\n213. \u003Ckbd>1000-\u003C\u002Fkbd> [shampoo.pytorch](https:\u002F\u002Fgithub.com\u002Fmoskomule\u002Fshampoo.pytorch): Shampoo算法实现。\n214. \u003Ckbd>1000-\u003C\u002Fkbd> [Neural-IMage-Assessment 2](https:\u002F\u002Fgithub.com\u002Ftruskovskiyk\u002Fnima.pytorch): 神经图片评估，https:\u002F\u002Farxiv.org\u002Fabs\u002F1709.05424 。\n215. \u003Ckbd>2900+\u003C\u002Fkbd> [TCN](https:\u002F\u002Fgithub.com\u002Flocuslab\u002FTCN): Sequence modeling benchmarks and temporal convolutional networks locuslab\u002FTCN\n216. \u003Ckbd>1000-\u003C\u002Fkbd> [DCC](https:\u002F\u002Fgithub.com\u002Fshahsohil\u002FDCC): This repository contains the source code and data for reproducing results of Deep Continuous Clustering paper.\n217. \u003Ckbd>1000-\u003C\u002Fkbd> [packnet](https:\u002F\u002Fgithub.com\u002Farunmallya\u002Fpacknet): Code for PackNet: Adding Multiple Tasks to a Single Network by Iterative Pruning arxiv.org\u002Fabs\u002F1711.05769\n218. \u003Ckbd>1000-\u003C\u002Fkbd> [PyTorch-progressive_growing_of_gans](https:\u002F\u002Fgithub.com\u002Fgithub-pengge\u002FPyTorch-progressive_growing_of_gans): PyTorch implementation of Progressive Growing of GANs for Improved Quality, Stability, and Variation.\n219. \u003Ckbd>1000-\u003C\u002Fkbd> [nonauto-nmt](https:\u002F\u002Fgithub.com\u002Fsalesforce\u002Fnonauto-nmt): PyTorch Implementation of \"Non-Autoregressive Neural Machine Translation\"\n220. \u003Ckbd>9800+\u003C\u002Fkbd> [PyTorch-GAN](https:\u002F\u002Fgithub.com\u002Feriklindernoren\u002FPyTorch-GAN): PyTorch implementations of Generative Adversarial Networks.\n221. 
\u003Ckbd>1000-\u003C\u002Fkbd> [PyTorchWavelets](https:\u002F\u002Fgithub.com\u002Ftomrunia\u002FPyTorchWavelets): PyTorch implementation of the wavelet analysis found in Torrence and Compo (1998)\n222. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-made](https:\u002F\u002Fgithub.com\u002Fkarpathy\u002Fpytorch-made): MADE (Masked Autoencoder Density Estimation) implementation in PyTorch\n223. \u003Ckbd>1000-\u003C\u002Fkbd> [VRNN](https:\u002F\u002Fgithub.com\u002Femited\u002FVariationalRecurrentNeuralNetwork): Pytorch implementation of the Variational RNN (VRNN), from A Recurrent Latent Variable Model for Sequential Data.\n224. \u003Ckbd>1000-\u003C\u002Fkbd> [flow](https:\u002F\u002Fgithub.com\u002Femited\u002Fflow): Pytorch implementation of ICLR 2018 paper Deep Learning for Physical Processes: Integrating Prior Scientific Knowledge.\n225. \u003Ckbd>1600+\u003C\u002Fkbd> [deepvoice3_pytorch](https:\u002F\u002Fgithub.com\u002Fr9y9\u002Fdeepvoice3_pytorch): PyTorch实现基于卷积神经网络的语音合成模型。\n226. \u003Ckbd>1000-\u003C\u002Fkbd> [psmm](https:\u002F\u002Fgithub.com\u002Felanmart\u002Fpsmm): imlementation of the the Pointer Sentinel Mixture Model, as described in the paper by Stephen Merity et al.\n227. \u003Ckbd>3000+\u003C\u002Fkbd> [tacotron2](https:\u002F\u002Fgithub.com\u002FNVIDIA\u002Ftacotron2): Tacotron 2 - PyTorch implementation with faster-than-realtime inference.\n228. \u003Ckbd>1000-\u003C\u002Fkbd> [AccSGD](https:\u002F\u002Fgithub.com\u002Frahulkidambi\u002FAccSGD): Implements pytorch code for the Accelerated SGD algorithm.\n229. \u003Ckbd>1000-\u003C\u002Fkbd> [QANet-pytorch](https:\u002F\u002Fgithub.com\u002Fhengruo\u002FQANet-pytorch): an implementation of QANet with PyTorch (EM\u002FF1 = 70.5\u002F77.2 after 20 epoches for about 20 hours on one 1080Ti card.)\n230. \u003Ckbd>1000-\u003C\u002Fkbd> [ConvE](https:\u002F\u002Fgithub.com\u002FTimDettmers\u002FConvE): Convolutional 2D Knowledge Graph Embeddings\n231. 
\u003Ckbd>1000-\u003C\u002Fkbd> [Structured-Self-Attention](https:\u002F\u002Fgithub.com\u002Fkaushalshetty\u002FStructured-Self-Attention): Implementation for the paper A Structured Self-Attentive Sentence Embedding, which is published in ICLR 2017: arxiv.org\u002Fabs\u002F1703.03130 .\n232. \u003Ckbd>1000-\u003C\u002Fkbd> [graphsage-simple](https:\u002F\u002Fgithub.com\u002Fwilliamleif\u002Fgraphsage-simple): Simple reference implementation of GraphSAGE.\n233. \u003Ckbd>2800+\u003C\u002Fkbd> [Detectron.pytorch](https:\u002F\u002Fgithub.com\u002Froytseng-tw\u002FDetectron.pytorch): A pytorch implementation of Detectron. Both training from scratch and inferring directly from pretrained Detectron weights are available.\n234. \u003Ckbd>1000-\u003C\u002Fkbd> [R2Plus1D-PyTorch](https:\u002F\u002Fgithub.com\u002Firhumshafkat\u002FR2Plus1D-PyTorch): PyTorch implementation of the R2Plus1D convolution based ResNet architecture described in the paper \"A Closer Look at Spatiotemporal Convolutions for Action Recognition\"\n235. \u003Ckbd>1000-\u003C\u002Fkbd> [StackNN](https:\u002F\u002Fgithub.com\u002Fviking-sudo-rm\u002FStackNN): A PyTorch implementation of differentiable stacks for use in neural networks.\n236. \u003Ckbd>1000-\u003C\u002Fkbd> [translagent](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Ftranslagent): Code for Emergent Translation in Multi-Agent Communication.\n237. \u003Ckbd>1000-\u003C\u002Fkbd> [ban-vqa](https:\u002F\u002Fgithub.com\u002Fjnhwkim\u002Fban-vqa): Bilinear attention networks for visual question answering. \n238. \u003Ckbd>1200+\u003C\u002Fkbd> [pytorch-openai-transformer-lm](https:\u002F\u002Fgithub.com\u002Fhuggingface\u002Fpytorch-openai-transformer-lm): This is a PyTorch implementation of the TensorFlow code provided with OpenAI's paper \"Improving Language Understanding by Generative Pre-Training\" by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever.\n239. 
\u003Ckbd>1000-\u003C\u002Fkbd> [T2F](https:\u002F\u002Fgithub.com\u002Fakanimax\u002FT2F): 使用深度学习进行Text-to-Face生成。该项目结合了[StackGAN](https:\u002F\u002Farxiv.org\u002Fabs\u002F1710.10916)和[ProGAN](https:\u002F\u002Farxiv.org\u002Fabs\u002F1710.10196)，这两个模型可以基于文字描述合成人脸。\n240. \u003Ckbd>1300+\u003C\u002Fkbd> [pytorch - fid](https:\u002F\u002Fgithub.com\u002Fmseitzer\u002Fpytorch-fid): A Port of Fréchet Inception Distance (FID score) to PyTorch\n241. \u003Ckbd>1000-\u003C\u002Fkbd> [vae_vpflows](https:\u002F\u002Fgithub.com\u002Fjmtomczak\u002Fvae_vpflows):Code in PyTorch for the convex combination linear IAF and the Householder Flow, J.M. Tomczak & M. Welling jmtomczak.github.io\u002Fdeebmed.html\n242. \u003Ckbd>1000-\u003C\u002Fkbd> [CoordConv-pytorch](https:\u002F\u002Fgithub.com\u002Fmkocabas\u002FCoordConv-pytorch): Pytorch implementation of CoordConv introduced in 'An intriguing failing of convolutional neural networks and the CoordConv solution' paper. (arxiv.org\u002Fpdf\u002F1807.03247.pdf)\n243. \u003Ckbd>1000-\u003C\u002Fkbd> [SDPoint](https:\u002F\u002Fgithub.com\u002Fxternalz\u002FSDPoint): Implementation of \"Stochastic Downsampling for Cost-Adjustable Inference and Improved Regularization in Convolutional Networks\", published in CVPR 2018. \n244. \u003Ckbd>1000-\u003C\u002Fkbd> [SRDenseNet-pytorch](https:\u002F\u002Fgithub.com\u002Fwxywhu\u002FSRDenseNet-pytorch): 极深网络，SRDenseNet-pytorch，论文：[基于密集跳跃连接的图像超分辨率（ICCV_2017）](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fpapers\u002FTong_Image_Super-Resolution_Using_ICCV_2017_paper.pdf)。\n245. \u003Ckbd>1000-\u003C\u002Fkbd> [GAN_stability](https:\u002F\u002Fgithub.com\u002FLMescheder\u002FGAN_stability): Code for paper \"Which Training Methods for GANs do actually Converge? (ICML 2018)\"\n246. 
\u003Ckbd>1000-\u003C\u002Fkbd> [Mask-RCNN](https:\u002F\u002Fgithub.com\u002FwannabeOG\u002FMask-RCNN): A PyTorch implementation of the architecture of Mask RCNN, serves as an introduction to working with PyTorch\n247. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-coviar](https:\u002F\u002Fgithub.com\u002Fchaoyuaw\u002Fpytorch-coviar): Compressed Video Action Recognition\n248. \u003Ckbd>1000-\u003C\u002Fkbd> [PNASNet.pytorch](https:\u002F\u002Fgithub.com\u002Fchenxi116\u002FPNASNet.pytorch): PyTorch implementation of PNASNet-5 on ImageNet. \n249. \u003Ckbd>1000-\u003C\u002Fkbd> [NALU-pytorch](https:\u002F\u002Fgithub.com\u002Fkevinzakka\u002FNALU-pytorch): Basic pytorch implementation of NAC\u002FNALU from Neural Arithmetic Logic Units arxiv.org\u002Fpdf\u002F1808.00508.pdf\n250. \u003Ckbd>1000-\u003C\u002Fkbd> [LOLA_DiCE](https:\u002F\u002Fgithub.com\u002Falexis-jacq\u002FLOLA_DiCE): Pytorch 使用[DiCE](arxiv.org\u002Fabs\u002F1802.05098)实现[LOLA](arxiv.org\u002Fabs\u002F1709.04326)。\n251. \u003Ckbd>1000-\u003C\u002Fkbd> [generative-query-network-pytorch](https:\u002F\u002Fgithub.com\u002Fwohlert\u002Fgenerative-query-network-pytorch): Generative Query Network (GQN) in PyTorch as described in \"Neural Scene Representation and Rendering\"\n252. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch_hmax](https:\u002F\u002Fgithub.com\u002Fwmvanvliet\u002Fpytorch_hmax): 在PyTorch中实现[HMAX(Hierarchical Model and X)](https:\u002F\u002Fmaxlab.neuro.georgetown.edu\u002Fhmax.html#inside)视觉模型。\n253. \u003Ckbd>1000-\u003C\u002Fkbd> [FCN-pytorch-easiest](https:\u002F\u002Fgithub.com\u002Fyunlongdong\u002FFCN-pytorch-easiest): trying to be the most easiest and just get-to-use pytorch implementation of FCN (Fully Convolotional Networks)\n254. \u003Ckbd>1000-\u003C\u002Fkbd> [transducer](https:\u002F\u002Fgithub.com\u002Fawni\u002Ftransducer): A Fast Sequence Transducer Implementation with PyTorch Bindings.\n255. 
\u003Ckbd>1000-\u003C\u002Fkbd> [AVO-pytorch](https:\u002F\u002Fgithub.com\u002Fartix41\u002FAVO-pytorch): Implementation of Adversarial Variational Optimization in PyTorch.\n256. \u003Ckbd>1000-\u003C\u002Fkbd> [HCN-pytorch](https:\u002F\u002Fgithub.com\u002Fhuguyuehuhu\u002FHCN-pytorch): A pytorch reimplementation of { Co-occurrence Feature Learning from Skeleton Data for Action Recognition and Detection with Hierarchical Aggregation }.\n257. \u003Ckbd>1000-\u003C\u002Fkbd> [binary-wide-resnet](https:\u002F\u002Fgithub.com\u002Fszagoruyko\u002Fbinary-wide-resnet): PyTorch implementation of Wide Residual Networks with 1-bit weights by McDonnel (ICLR 2018)\n258. \u003Ckbd>1000-\u003C\u002Fkbd> [piggyback](https:\u002F\u002Fgithub.com\u002Farunmallya\u002Fpiggyback): Code for Piggyback: Adapting a Single Network to Multiple Tasks by Learning to Mask Weights arxiv.org\u002Fabs\u002F1801.06519\n259. \u003Ckbd>7700+\u003C\u002Fkbd> [vid2vid](https:\u002F\u002Fgithub.com\u002FNVIDIA\u002Fvid2vid): Pytorch implementation of our method for high-resolution (e.g. 2048x1024) photorealistic video-to-video translation.\n260. \u003Ckbd>1000-\u003C\u002Fkbd> [poisson-convolution-sum](https:\u002F\u002Fgithub.com\u002Fcranmer\u002Fpoisson-convolution-sum): Implements an infinite sum of poisson-weighted convolutions\n261. \u003Ckbd>1000-\u003C\u002Fkbd> [tbd-nets](https:\u002F\u002Fgithub.com\u002Fdavidmascharka\u002Ftbd-nets): PyTorch implementation of \"Transparency by Design: Closing the Gap Between Performance and Interpretability in Visual Reasoning\" arxiv.org\u002Fabs\u002F1803.05268 \n262. \u003Ckbd>1000-\u003C\u002Fkbd> [attn2d](https:\u002F\u002Fgithub.com\u002Felbayadm\u002Fattn2d): Pervasive Attention: 2D Convolutional Networks for Sequence-to-Sequence Prediction\n263. \u003Ckbd>7500+\u003C\u002Fkbd> [yolov3](https:\u002F\u002Fgithub.com\u002Fultralytics\u002Fyolov3): YOLOv3: 训练和推断，https:\u002F\u002Fwww.ultralytics.com 。\n264. 
\u003Ckbd>1000-\u003C\u002Fkbd> [deep-dream-in-pytorch](https:\u002F\u002Fgithub.com\u002Fduc0\u002Fdeep-dream-in-pytorch): Pytorch implementation of the DeepDream computer vision algorithm. \n265. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-flows](https:\u002F\u002Fgithub.com\u002Fikostrikov\u002Fpytorch-flows): PyTorch implementations of algorithms for density estimation\n266. \u003Ckbd>1000-\u003C\u002Fkbd> [quantile-regression-dqn-pytorch](https:\u002F\u002Fgithub.com\u002Fars-ashuha\u002Fquantile-regression-dqn-pytorch): Quantile Regression DQN a Minimal Working Example\n267. \u003Ckbd>1000-\u003C\u002Fkbd> [relational-rnn-pytorch](https:\u002F\u002Fgithub.com\u002FL0SG\u002Frelational-rnn-pytorch): An implementation of DeepMind's Relational Recurrent Neural Networks in PyTorch.\n268. \u003Ckbd>1000-\u003C\u002Fkbd> [DEXTR-PyTorch](https:\u002F\u002Fgithub.com\u002Fscaelles\u002FDEXTR-PyTorch): 深度极端切割，http:\u002F\u002Fwww.vision.ee.ethz.ch\u002F~cvlsegmentation\u002Fdextr 。\n269. \u003Ckbd>1000-\u003C\u002Fkbd> [PyTorch_GBW_LM](https:\u002F\u002Fgithub.com\u002Frdspring1\u002FPyTorch_GBW_LM): PyTorch Language Model for Google Billion Word Dataset.\n270. \u003Ckbd>1000-\u003C\u002Fkbd> [Pytorch-NCE](https:\u002F\u002Fgithub.com\u002FStonesjtu\u002FPytorch-NCE): The Noise Contrastive Estimation for softmax output written in Pytorch\n271. \u003Ckbd>1000-\u003C\u002Fkbd> [generative-models](https:\u002F\u002Fgithub.com\u002Fshayneobrien\u002Fgenerative-models): Annotated, understandable, and visually interpretable PyTorch implementations of: VAE, BIRVAE, NSGAN, MMGAN, WGAN, WGANGP, LSGAN, DRAGAN, BEGAN, RaGAN, InfoGAN, fGAN, FisherGAN. \n272. \u003Ckbd>1000-\u003C\u002Fkbd> [convnet-aig](https:\u002F\u002Fgithub.com\u002Fandreasveit\u002Fconvnet-aig): PyTorch implementation for Convolutional Networks with Adaptive Inference Graphs.\n273. 
\u003Ckbd>1000-\u003C\u002Fkbd> [integrated-gradient-pytorch](https:\u002F\u002Fgithub.com\u002FTianhongDai\u002Fintegrated-gradient-pytorch): This is the pytorch implementation of the paper - Axiomatic Attribution for Deep Networks.\n274. \u003Ckbd>1000-\u003C\u002Fkbd> [MalConv-Pytorch](https:\u002F\u002Fgithub.com\u002FAlexander-H-Liu\u002FMalConv-Pytorch): Pytorch implementation of MalConv. \n275. \u003Ckbd>1000-\u003C\u002Fkbd> [trellisnet](https:\u002F\u002Fgithub.com\u002Flocuslab\u002Ftrellisnet): Trellis Networks for Sequence Modeling\n276. \u003Ckbd>1000-\u003C\u002Fkbd> [Learning to Communicate with Deep Multi-Agent Reinforcement Learning](https:\u002F\u002Fgithub.com\u002Fminqi\u002Flearning-to-communicate-pytorch): pytorch implementation of  Learning to Communicate with Deep Multi-Agent Reinforcement Learning paper.\n277. \u003Ckbd>1000-\u003C\u002Fkbd> [pnn.pytorch](https:\u002F\u002Fgithub.com\u002Fmichaelklachko\u002Fpnn.pytorch): PyTorch implementation of CVPR'18 - Perturbative Neural Networks http:\u002F\u002Fxujuefei.com\u002Fpnn.html.\n278. \u003Ckbd>1000-\u003C\u002Fkbd> [Face_Attention_Network](https:\u002F\u002Fgithub.com\u002Frainofmine\u002FFace_Attention_Network): Pytorch implementation of face attention network as described in Face Attention Network: An Effective Face Detector for the Occluded Faces.\n279. \u003Ckbd>1800+\u003C\u002Fkbd> [waveglow](https:\u002F\u002Fgithub.com\u002FNVIDIA\u002Fwaveglow): 基于流的语音合成生成网络。\n280. \u003Ckbd>1000-\u003C\u002Fkbd> [deepfloat](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fdeepfloat): This repository contains the SystemVerilog RTL, C++, HLS (Intel FPGA OpenCL to wrap RTL code) and Python needed to reproduce the numerical results in \"Rethinking floating point for deep learning\" \n281. 
\u003Ckbd>1000-\u003C\u002Fkbd> [EPSR](https:\u002F\u002Fgithub.com\u002Fsubeeshvasu\u002F2018_subeesh_epsr_eccvw): Pytorch implementation of [Analyzing Perception-Distortion Tradeoff using Enhanced Perceptual Super-resolution Network](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1811.00344.pdf). This work has won the first place in PIRM2018-SR competition (region 1) held as part of the ECCV 2018.\n282. \u003Ckbd>1000-\u003C\u002Fkbd> [ClariNet](https:\u002F\u002Fgithub.com\u002Fksw0306\u002FClariNet): Pytorch实现[ClariNet](https:\u002F\u002Farxiv.org\u002Fabs\u002F1807.07281)。\n283. \u003Ckbd>48900+\u003C\u002Fkbd> [pytorch-pretrained-BERT](https:\u002F\u002Fgithub.com\u002Fhuggingface\u002Fpytorch-pretrained-BERT): PyTorch version of Google AI's BERT model with script to load Google's pre-trained models\n284. \u003Ckbd>1000-\u003C\u002Fkbd> [torch_waveglow](https:\u002F\u002Fgithub.com\u002Fnpuichigo\u002Fwaveglow): PyTorch实现WaveGlow: 基于流的语音合成生成网络。\n285. \u003Ckbd>3000+\u003C\u002Fkbd> [3DDFA](https:\u002F\u002Fgithub.com\u002Fcleardusk\u002F3DDFA): The pytorch improved re-implementation of TPAMI 2017 paper: Face Alignment in Full Pose Range: A 3D Total Solution.\n286. \u003Ckbd>1600+\u003C\u002Fkbd> [loss-landscape](https:\u002F\u002Fgithub.com\u002Ftomgoldstein\u002Floss-landscape): loss-landscape Code for visualizing the loss landscape of neural nets.\n287. \u003Ckbd>1000-\u003C\u002Fkbd> [famos](https:\u002F\u002Fgithub.com\u002Fzalandoresearch\u002Ffamos):（非）参数图像风格化马赛克的对抗性框架。论文：http:\u002F\u002Farxiv.org\u002Fabs\u002F1811.09236 。\n288. \u003Ckbd>1000-\u003C\u002Fkbd> [back2future.pytorch](https:\u002F\u002Fgithub.com\u002Fanuragranj\u002Fback2future.pytorch): This is a Pytorch implementation of\nJanai, J., Güney, F., Ranjan, A., Black, M. and Geiger, A., Unsupervised Learning of Multi-Frame Optical Flow with Occlusions. ECCV 2018.\n289. 
\u003Ckbd>1000-\u003C\u002Fkbd> [FFTNet](https:\u002F\u002Fgithub.com\u002Fmozilla\u002FFFTNet): Unofficial Implementation of FFTNet vocode paper.\n290. \u003Ckbd>1000-\u003C\u002Fkbd> [FaceBoxes.PyTorch](https:\u002F\u002Fgithub.com\u002Fzisianw\u002FFaceBoxes.PyTorch): PyTorch实现[FaceBoxes](https:\u002F\u002Farxiv.org\u002Fabs\u002F1708.05234)。\n291. \u003Ckbd>2900+\u003C\u002Fkbd> [Transformer-XL](https:\u002F\u002Fgithub.com\u002Fkimiyoung\u002Ftransformer-xl): Transformer-XL: Attentive Language Models Beyond a Fixed-Length Contexthttps:\u002F\u002Fgithub.com\u002Fkimiyoung\u002Ftransformer-xl\n292. \u003Ckbd>1000-\u003C\u002Fkbd> [associative_compression_networks](https:\u002F\u002Fgithub.com\u002Fjalexvig\u002Fassociative_compression_networks): Associative Compression Networks for Representation Learning. \n293. \u003Ckbd>1000-\u003C\u002Fkbd> [fluidnet_cxx](https:\u002F\u002Fgithub.com\u002Fjolibrain\u002Ffluidnet_cxx): FluidNet re-written with ATen tensor lib. \n294. \u003Ckbd>3700+\u003C\u002Fkbd> [Deep-Reinforcement-Learning-Algorithms-with-PyTorch](https:\u002F\u002Fgithub.com\u002Fp-christ\u002FDeep-Reinforcement-Learning-Algorithms-with-PyTorch): This repository contains PyTorch implementations of deep reinforcement learning algorithms.\n295. \u003Ckbd>1000-\u003C\u002Fkbd> [Shufflenet-v2-Pytorch](https:\u002F\u002Fgithub.com\u002Fericsun99\u002FShufflenet-v2-Pytorch): This is a Pytorch implementation of faceplusplus's ShuffleNet-v2. \n296. \u003Ckbd>1000-\u003C\u002Fkbd> [GraphWaveletNeuralNetwork](https:\u002F\u002Fgithub.com\u002Fbenedekrozemberczki\u002FGraphWaveletNeuralNetwork): This is a Pytorch implementation of Graph Wavelet Neural Network. ICLR 2019. \n297. \u003Ckbd>1000-\u003C\u002Fkbd> [AttentionWalk](https:\u002F\u002Fgithub.com\u002Fbenedekrozemberczki\u002FAttentionWalk): This is a Pytorch implementation of Watch Your Step: Learning Node Embeddings via Graph Attention. NIPS 2018.\n298. 
\u003Ckbd>1000-\u003C\u002Fkbd> [SGCN](https:\u002F\u002Fgithub.com\u002Fbenedekrozemberczki\u002FSGCN): This is a Pytorch implementation of Signed Graph Convolutional Network. ICDM 2018.\n299. \u003Ckbd>1000-\u003C\u002Fkbd> [SINE](https:\u002F\u002Fgithub.com\u002Fbenedekrozemberczki\u002FSINE): This is a Pytorch implementation of SINE: Scalable Incomplete Network Embedding. ICDM 2018.\n300. \u003Ckbd>1000-\u003C\u002Fkbd> [GAM](https:\u002F\u002Fgithub.com\u002Fbenedekrozemberczki\u002FGAM): This is a Pytorch implementation of Graph Classification using Structural Attention. KDD 2018.\n301. \u003Ckbd>1000-\u003C\u002Fkbd> [neural-style-pt](https:\u002F\u002Fgithub.com\u002FProGamerGov\u002Fneural-style-pt): PyTorch 实现 Justin Johnson 的神经风格算法。论文：[A Neural Algorithm of Artistic Style](https:\u002F\u002Farxiv.org\u002Fabs\u002F1508.06576)。\n302. \u003Ckbd>1000-\u003C\u002Fkbd> [TuckER](https:\u002F\u002Fgithub.com\u002Fibalazevic\u002FTuckER): TuckER: Tensor Factorization for Knowledge Graph Completion.\n303. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-prunes](https:\u002F\u002Fgithub.com\u002FBayesWatch\u002Fpytorch-prunes): Pruning neural networks: is it time to nip it in the bud?\n304. \u003Ckbd>1000-\u003C\u002Fkbd> [SimGNN](https:\u002F\u002Fgithub.com\u002Fbenedekrozemberczki\u002FSimGNN): SimGNN: 一个快速图形相似度计算的神经网络方法。论文：A Neural Network Approach to Fast Graph Similarity Computation.\n305. \u003Ckbd>1000-\u003C\u002Fkbd> [Character CNN](https:\u002F\u002Fgithub.com\u002Fahmedbesbes\u002Fcharacter-based-cnn): PyTorch implementation of the Character-level Convolutional Networks for Text Classification paper. \n306. \u003Ckbd>2400+\u003C\u002Fkbd> [XLM](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FXLM): PyTorch original implementation of Cross-lingual Language Model Pretraining.\n307. 
\u003Ckbd>1000-\u003C\u002Fkbd> [DiffAI](https:\u002F\u002Fgithub.com\u002Feth-sri\u002Fdiffai): A provable defense against adversarial examples and library for building compatible PyTorch models.\n308. \u003Ckbd>1000-\u003C\u002Fkbd> [APPNP](https:\u002F\u002Fgithub.com\u002Fbenedekrozemberczki\u002FAPPNP): Combining Neural Networks with Personalized PageRank for Classification on Graphs. ICLR 2019.\n309. \u003Ckbd>1000-\u003C\u002Fkbd> [NGCN](https:\u002F\u002Fgithub.com\u002Fbenedekrozemberczki\u002FMixHop-and-N-GCN): A Higher-Order Graph Convolutional Layer. NeurIPS 2018.\n310. \u003Ckbd>1000-\u003C\u002Fkbd> [gpt-2-Pytorch](https:\u002F\u002Fgithub.com\u002Fgraykode\u002Fgpt-2-Pytorch): Simple Text-Generator with OpenAI gpt-2 Pytorch Implementation\n311. \u003Ckbd>1000-\u003C\u002Fkbd> [Splitter](https:\u002F\u002Fgithub.com\u002Fbenedekrozemberczki\u002FSplitter): Splitter: Learning Node Representations that Capture Multiple Social Contexts. (WWW 2019).\n312. \u003Ckbd>1000+\u003C\u002Fkbd> [CapsGNN](https:\u002F\u002Fgithub.com\u002Fbenedekrozemberczki\u002FCapsGNN): 胶囊图神经网络，[Capsule Graph Neural Network](https:\u002F\u002Fopenreview.net\u002Fforum?id=Byl8BnRcYm)。\n313. \u003Ckbd>2300+\u003C\u002Fkbd> [BigGAN-PyTorch](https:\u002F\u002Fgithub.com\u002Fajbrock\u002FBigGAN-PyTorch): PyTorch实现BigGAN（非官方）。\n314. \u003Ckbd>1000-\u003C\u002Fkbd> [ppo_pytorch_cpp](https:\u002F\u002Fgithub.com\u002Fmhubii\u002Fppo_pytorch_cpp): 近端策略优化算法的C++ API。\n315. \u003Ckbd>1000-\u003C\u002Fkbd> [RandWireNN](https:\u002F\u002Fgithub.com\u002Fseungwonpark\u002FRandWireNN): 基于随机连接神经网络性能的图像识别。\n316. \u003Ckbd>1000-\u003C\u002Fkbd> [Zero-shot Intent CapsNet](https:\u002F\u002Fgithub.com\u002Fjoel-huang\u002Fzeroshot-capsnet-pytorch): GPU-accelerated PyTorch implementation of \"Zero-shot User Intent Detection via Capsule Neural Networks\".\n317. 
\u003Ckbd>1000-\u003C\u002Fkbd> [SEAL-CI](https:\u002F\u002Fgithub.com\u002Fbenedekrozemberczki\u002FSEAL-CI) 半监督图分类：层次图视角，Semi-Supervised Graph Classification: A Hierarchical Graph Perspective. (WWW 2019)。\n318. \u003Ckbd>1000-\u003C\u002Fkbd> [MixHop](https:\u002F\u002Fgithub.com\u002Fbenedekrozemberczki\u002FMixHop-and-N-GCN): MixHop: Higher-Order Graph Convolutional Architectures via Sparsified Neighborhood Mixing. ICML 2019.\n319. \u003Ckbd>1000-\u003C\u002Fkbd> [densebody_pytorch](https:\u002F\u002Fgithub.com\u002FLotayou\u002Fdensebody_pytorch): PyTorch implementation of CloudWalk's recent paper DenseBody.\n320. \u003Ckbd>1000-\u003C\u002Fkbd> [voicefilter](https:\u002F\u002Fgithub.com\u002Fmindslab-ai\u002Fvoicefilter): Unofficial PyTorch implementation of Google AI's VoiceFilter system http:\u002F\u002Fswpark.me\u002Fvoicefilter. \n321. \u003Ckbd>1300+\u003C\u002Fkbd> [NVIDIA\u002Fsemantic-segmentation](https:\u002F\u002Fgithub.com\u002FNVIDIA\u002Fsemantic-segmentation): PyTorch实现“利用视频传播和标签松弛改进语义分割”。论文：[Improving Semantic Segmentation via Video Propagation and Label Relaxation](https:\u002F\u002Farxiv.org\u002Fabs\u002F1812.01593), In CVPR2019.\n322. \u003Ckbd>1000-\u003C\u002Fkbd> [ClusterGCN](https:\u002F\u002Fgithub.com\u002Fbenedekrozemberczki\u002FClusterGCN): A PyTorch implementation of \"Cluster-GCN: An Efficient Algorithm for Training Deep and Large Graph Convolutional Networks\" (KDD 2019).\n323. \u003Ckbd>1000+\u003C\u002Fkbd> [NVlabs\u002FDG-Net](https:\u002F\u002Fgithub.com\u002FNVlabs\u002FDG-Net): A PyTorch implementation of \"Joint Discriminative and Generative Learning for Person Re-identification\" (CVPR19 Oral). \n324. \u003Ckbd>1000-\u003C\u002Fkbd> [NCRF](https:\u002F\u002Fgithub.com\u002Fbaidu-research\u002FNCRF): 基于神经网络条件随机场(NCRF)的肿瘤转移检测，相关论文：https:\u002F\u002Fopenreview.net\u002Fforum?id=S1aY66iiM。\n325. 
\u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-sift](https:\u002F\u002Fgithub.com\u002Fducha-aiki\u002Fpytorch-sift): PyTorch实现SIFT（尺度不变特征变换匹配算法，Scale Invariant Feature Transform）描述子。\n326. \u003Ckbd>1000-\u003C\u002Fkbd> [brain-segmentation-pytorch](https:\u002F\u002Fgithub.com\u002Fmateuszbuda\u002Fbrain-segmentation-pytorch): 深度学习分割网络U-Net的PyTorch模型实现，用于脑核磁共振中FLAIR异常的分割。\n327. \u003Ckbd>1000-\u003C\u002Fkbd> [glow-pytorch](https:\u002F\u002Fgithub.com\u002Frosinality\u002Fglow-pytorch): PyTorch 实现 \"[Glow, Generative Flow with Invertible 1x1 Convolutions](https:\u002F\u002Farxiv.org\u002Fabs\u002F1807.03039)\"。\n328. \u003Ckbd>1000-\u003C\u002Fkbd> [EfficientNets-PyTorch](https:\u002F\u002Fgithub.com\u002Fzsef123\u002FEfficientNets-PyTorch): PyTorch实现EfficientNet: 卷积神经网络模型尺度的再思考。\n329. \u003Ckbd>1000-\u003C\u002Fkbd> [STEAL](https:\u002F\u002Fgithub.com\u002Fnv-tlabs\u002FSTEAL): STEAL - 从噪声标注中学习语义边界，https:\u002F\u002Fnv-tlabs.github.io\u002FSTEAL\u002F 。\n330. \u003Ckbd>1000-\u003C\u002Fkbd> [EigenDamage-Pytorch](https:\u002F\u002Fgithub.com\u002Falecwangcq\u002FEigenDamage-Pytorch): 官方实现 ICML'19 论文 \"[特征损伤：克罗内克分解特征基中的结构剪枝](https:\u002F\u002Farxiv.org\u002Fabs\u002F1905.05934)\"。\n331. \u003Ckbd>1000-\u003C\u002Fkbd> [Aspect-level-sentiment](https:\u002F\u002Fgithub.com\u002Fruidan\u002FAspect-level-sentiment): 论文代码和数据集，ACL2018论文：\"[利用文档知识进行体层情感分类](https:\u002F\u002Farxiv.org\u002Fabs\u002F1806.04346)\"。\n332. \u003Ckbd>1000-\u003C\u002Fkbd> [breast_cancer_classifier](https:\u002F\u002Fgithub.com\u002Fnyukat\u002Fbreast_cancer_classifier): 深层神经网络提高放射科医生乳腺癌筛查的效果，https:\u002F\u002Farxiv.org\u002Fabs\u002F1903.08297 。\n333. \u003Ckbd>1000-\u003C\u002Fkbd> [DGC-Net](https:\u002F\u002Fgithub.com\u002FAaltoVision\u002FDGC-Net): PyTorch实现\"[DGC-Net: 密集几何对应网络](https:\u002F\u002Farxiv.org\u002Fabs\u002F1810.08393)\".\n334. 
\u003Ckbd>1000-\u003C\u002Fkbd> [universal-triggers](https:\u002F\u002Fgithub.com\u002FEric-Wallace\u002Funiversal-triggers): Universal Adversarial Triggers for Attacking and Analyzing NLP (EMNLP 2019)\n335. \u003Ckbd>3700+\u003C\u002Fkbd> [Deep-Reinforcement-Learning-Algorithms-with-PyTorch](https:\u002F\u002Fgithub.com\u002Fp-christ\u002FDeep-Reinforcement-Learning-Algorithms-with-PyTorch): PyTorch implementations of deep reinforcement learning algorithms and environments.\n336. \u003Ckbd>1000-\u003C\u002Fkbd> [simple-effective-text-matching-pytorch](https:\u002F\u002Fgithub.com\u002Falibaba-edu\u002Fsimple-effective-text-matching-pytorch): A pytorch implementation of the ACL2019 paper \"Simple and Effective Text Matching with Richer Alignment Features\".\n337. \u003Ckbd>null\u003C\u002Fkbd> [Adaptive-segmentation-mask-attack (ASMA)](https:\u002F\u002Fgithub.com\u002Futkuozbulak\u002Fadaptive-segmentation-mask-attack): A pytorch implementation of the MICCAI2019 paper \"Impact of Adversarial Examples on Deep Learning Models for Biomedical Image Segmentation\".\n338. \u003Ckbd>1000-\u003C\u002Fkbd> [NVIDIA\u002Funsupervised-video-interpolation](https:\u002F\u002Fgithub.com\u002FNVIDIA\u002Funsupervised-video-interpolation): A PyTorch Implementation of [Unsupervised Video Interpolation Using Cycle Consistency](https:\u002F\u002Farxiv.org\u002Fabs\u002F1906.05928), In ICCV 2019.\n339. \u003Ckbd>1000-\u003C\u002Fkbd> [Seg-Uncertainty](https:\u002F\u002Fgithub.com\u002Flayumi\u002FSeg-Uncertainty): Unsupervised Scene Adaptation with Memory Regularization in vivo, In IJCAI 2020.\n340. \u003Ckbd>5700+\u003C\u002Fkbd> [pulse](https:\u002F\u002Fgithub.com\u002Fadamian98\u002Fpulse): Self-Supervised Photo Upsampling via Latent Space Exploration of Generative Models\n341. 
\u003Ckbd>1000-\u003C\u002Fkbd> [distance-encoding](https:\u002F\u002Fgithub.com\u002Fsnap-stanford\u002Fdistance-encoding): Distance-Encoding - Design Provably More PowerfulGNNs for Structural Representation Learning.\n342. \u003Ckbd>1000-\u003C\u002Fkbd> [Pathfinder Discovery Networks](https:\u002F\u002Fgithub.com\u002Fbenedekrozemberczki\u002FPDN): Pathfinder Discovery Networks for Neural Message Passing.\n343. \u003Ckbd>1000-\u003C\u002Fkbd> [PyKEEN](https:\u002F\u002Fgithub.com\u002Fpykeen\u002Fpykeen): A Python library for learning and evaluating knowledge graph embeddings\n\n## Talks & conferences｜报告 & 会议\n\n1. [PyTorch Conference 2018](https:\u002F\u002Fdevelopers.facebook.com\u002Fvideos\u002F2018\u002Fpytorch-developer-conference\u002F): 2018年首届PyTorch开发者大会。\n\n## Pytorch elsewhere ｜ Pytorch相关\n\n1. \u003Ckbd>8300+\u003C\u002Fkbd> [the-incredible-pytorch](https:\u002F\u002Fgithub.com\u002Fritchieng\u002Fthe-incredible-pytorch)**: 不可思议的Pythorch：一份PyTorch相关的教程、论文、项目、社区等的清单。\n2. \u003Ckbd>6500+\u003C\u002Fkbd> [generative models](https:\u002F\u002Fgithub.com\u002Fwiseodd\u002Fgenerative-models): 各种生成模型，例如基于Pytorch和Tensorflow的GAN、VAE。 http:\u002F\u002Fwiseodd.github.io  \n3. [pytorch vs tensorflow](https:\u002F\u002Fwww.reddit.com\u002Fr\u002FMachineLearning\u002Fcomments\u002F5w3q74\u002Fd_so_pytorch_vs_tensorflow_whats_the_verdict_on\u002F): Reddit上的PyTorch和TensorFlow的比较文章。\n4. [Pytorch discussion forum](https:\u002F\u002Fdiscuss.pytorch.org\u002F): PyTorch论坛。\n5. \u003Ckbd>null\u003C\u002Fkbd> [pytorch notebook: docker-stack](https:\u002F\u002Fhub.docker.com\u002Fr\u002Fescong\u002Fpytorch-notebook\u002F): 类似于 [Jupyter Notebook Scientific Python Stack](https:\u002F\u002Fgithub.com\u002Fjupyter\u002Fdocker-stacks\u002Ftree\u002Fmaster\u002Fscipy-notebook)\n6. \u003Ckbd>1000-\u003C\u002Fkbd> [drawlikebobross](https:\u002F\u002Fgithub.com\u002Fkendricktan\u002Fdrawlikebobross): 使用神经网络作画！\n7. 
\u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-tvmisc](https:\u002F\u002Fgithub.com\u002Ft-vi\u002Fpytorch-tvmisc): 该仓库收集了作者用PyTorch实现的各种玩意儿。\n8. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-a3c-mujoco](https:\u002F\u002Fgithub.com\u002Fandrewliao11\u002Fpytorch-a3c-mujoco): 该项目旨在解决Mujoco中的控制问题，高度基于pytorch-a3c。\n9. [PyTorch in 5 Minutes](https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=nbJ-2G2GXL0&list=WL&index=9).\n10. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch_chatbot](https:\u002F\u002Fgithub.com\u002Fjinfagang\u002Fpytorch_chatbot): 用PyTorch实现的聊天机器人。\n11. \u003Ckbd>1000-\u003C\u002Fkbd> [malmo-challenge](https:\u002F\u002Fgithub.com\u002FKaixhin\u002Fmalmo-challenge): Malmo协作人工智能挑战-Pig Catcher团队。\n12. \u003Ckbd>1000-\u003C\u002Fkbd> [sketchnet](https:\u002F\u002Fgithub.com\u002Fjtoy\u002Fsketchnet): 指导计算机作画。http:\u002F\u002Fwww.jtoy.net\u002Fprojects\u002Fsketchnet\u002F\n13. \u003Ckbd>1200+\u003C\u002Fkbd> [Deep-Learning-Boot-Camp](https:\u002F\u002Fgithub.com\u002FQuantScientist\u002FDeep-Learning-Boot-Camp): 非盈利社区运营的5天深度学习训练营。 http:\u002F\u002Fdeep-ml.com.\n14. \u003Ckbd>1000-\u003C\u002Fkbd> [Amazon_Forest_Computer_Vision](https:\u002F\u002Fgithub.com\u002Fmratsim\u002FAmazon_Forest_Computer_Vision): 亚马逊森林计算机视觉：使用PyTorch标记卫星图像标记\u002FKeras中的PyTorch技巧。\n15. \u003Ckbd>2400+\u003C\u002Fkbd> [AlphaZero_Gomoku](https:\u002F\u002Fgithub.com\u002Fjunxiaosong\u002FAlphaZero_Gomoku): 用AlphaZero算法玩五子棋。\n16. \u003Ckbd>null\u003C\u002Fkbd> [pytorch-cv](https:\u002F\u002Fgithub.com\u002Fyouansheng\u002Fpytorch-cv): null.\n17. \u003Ckbd>2800+\u003C\u002Fkbd> [deep-person-reid](https:\u002F\u002Fgithub.com\u002FKaiyangZhou\u002Fdeep-person-reid): Pytorch实现深度学习行人重新识别方法。\n18. \u003Ckbd>2700+\u003C\u002Fkbd> [pytorch-template](https:\u002F\u002Fgithub.com\u002Fvictoresque\u002Fpytorch-template): PyTorch深度学习模版。\n19. 
\u003Ckbd>1000-\u003C\u002Fkbd> [Deep Learning With Pytorch](https:\u002F\u002Fgithub.com\u002Fsvishnu88\u002FDLwithPyTorch): 随书代码《[Deep Learning With Pytorch TextBook](https:\u002F\u002Fwww.packtpub.com\u002Fbig-data-and-business-intelligence\u002Fdeep-learning-pytorch)》 PyTorch实用指南：使用PyTorch建立文本和视觉神经网络模型。[亚马逊中国电子版](https:\u002F\u002Fwww.amazon.cn\u002Fdp\u002FB078THDX3J\u002Fref=sr_1_1?__mk_zh_CN=亚马逊网站&keywords=Deep+Learning+with+PyTorch&qid=1568007543&s=gateway&sr=8-1)\n20. \u003Ckbd>1000-\u003C\u002Fkbd> [compare-tensorflow-pytorch](https:\u002F\u002Fgithub.com\u002Fjalola\u002Fcompare-tensorflow-pytorch): 比较用Tensorflow编写的层和用Pytorch编写的层之间的输出。\n21. \u003Ckbd>1000-\u003C\u002Fkbd> [hasktorch](https:\u002F\u002Fgithub.com\u002Fhasktorch\u002Fhasktorch): Haskell中的张量与神经网络。\n22. [Deep Learning With Pytorch](https:\u002F\u002Fwww.manning.com\u002Fbooks\u002Fdeep-learning-with-pytorch) Deep Learning with PyTorch 教你如何用Python和PyTorch实现深度学习算法。\n23. \u003Ckbd>1000-\u003C\u002Fkbd> [nimtorch](https:\u002F\u002Fgithub.com\u002Ffragcolor-xyz\u002Fnimtorch): PyTorch - Python + Nim，PyTorch的Nim前端。\n24. \u003Ckbd>1000-\u003C\u002Fkbd> [derplearning](https:\u002F\u002Fgithub.com\u002FJohn-Ellis\u002Fderplearning): 自动驾驶遥控车代码。\n25. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-saltnet](https:\u002F\u002Fgithub.com\u002Ftugstugi\u002Fpytorch-saltnet): Kaggle | TGS Salt Identification Challenge 第9名解决方案。\n26. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-scripts](https:\u002F\u002Fgithub.com\u002Fpeterjc123\u002Fpytorch-scripts): 一些脚本，使在Windows上使用PyTorch更加容易。\n27. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch_misc](https:\u002F\u002Fgithub.com\u002Fptrblck\u002Fpytorch_misc): 为PyTorch讨论板创建的代码片段。\n28. \u003Ckbd>1000-\u003C\u002Fkbd> [awesome-pytorch-scholarship](https:\u002F\u002Fgithub.com\u002Farnas\u002Fawesome-pytorch-scholarship): 收集了一系列优秀的PyTorch学术文章、指南、博客、课程和其他资源。\n29. 
\u003Ckbd>1000-\u003C\u002Fkbd> [MentisOculi](https:\u002F\u002Fgithub.com\u002Fmmirman\u002FMentisOculi): PyTorch版raytracer。(raynet?)\n30. \u003Ckbd>2400+\u003C\u002Fkbd> [DoodleMaster](https:\u002F\u002Fgithub.com\u002Fkaranchahal\u002FDoodleMaster): “画出UI！”(\"Don't code your UI, Draw it !\")\n31. \u003Ckbd>1000-\u003C\u002Fkbd> [ocaml-torch](https:\u002F\u002Fgithub.com\u002FLaurentMazare\u002Focaml-torch): ocaml-torch为PyTorch张量库提供一些ocaml绑定。\n32. \u003Ckbd>1000-\u003C\u002Fkbd> [extension-script](https:\u002F\u002Fgithub.com\u002Fpytorch\u002Fextension-script): TorchScript自定义C++\u002FCUDA运算符的示例。\n33. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-inference](https:\u002F\u002Fgithub.com\u002Fzccyman\u002Fpytorch-inference):  Windows10 平台上 Pytorch 1.0在 C++ 中的推断。\n34. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-cpp-inference](https:\u002F\u002Fgithub.com\u002FWizaron\u002Fpytorch-cpp-inference): 包含使用PyTorch C++ API执行推断的各种示例。\n35. \u003Ckbd>1100+\u003C\u002Fkbd> [tch-rs](https:\u002F\u002Fgithub.com\u002FLaurentMazare\u002Ftch-rs): PyTorch的Rust绑定。\n36. \u003Ckbd>1000-\u003C\u002Fkbd> [TorchSharp](https:\u002F\u002Fgithub.com\u002Finteresaaat\u002FTorchSharp): Pytorch引擎的.NET绑定。\n37. \u003Ckbd>2000+\u003C\u002Fkbd> [ML Workspace](https:\u002F\u002Fgithub.com\u002Fml-tooling\u002Fml-workspace): 面向机器学习和数据科学的一体化Web IDE。包含Jupyter, VS Code, PyTorch 和许多其他工具或库，这些都集合在一个Docker映像中。\n38. \u003Ckbd>1100+\u003C\u002Fkbd> [PyTorch Style Guide](https:\u002F\u002Fgithub.com\u002FIgorSusmelj\u002Fpytorch-styleguide) Style guide for PyTorch code. 
Consistent and good code style helps collaboration and prevents errors!\n\n**Feedback: If you have any ideas or you want any other content to be added to this list, feel free to contribute.**\n","厉害-Pytorch-list｜厉害的Pytorch项目\n========================\n\n![pytorch-logo-dark](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fxavier-zy_Awesome-pytorch-list-CNVersion_readme_c47227853472.png)\n\n## [英文版](https:\u002F\u002Fgithub.com\u002Fbharathgs\u002FAwesome-pytorch-list)\n\n## 目录｜内容\n- [厉害-Pytorch-list｜厉害的Pytorch项目](#厉害-pytorch-list%E5%8E%89%E5%AE%B3%E7%9A%84pytorch%E9%A1%B9%E7%9B%AE)\n  - [英文版](#english-version)\n  - [目录｜内容](#contents%E5%86%85%E5%AE%B9)\n  - [Pytorch & 相关库｜Pytorch & 相关库](#pytorch--related-librariespytorch--%E7%9B%B8%E5%85%B3%E5%BA%93)\n    - [NLP & 语音处理｜自然语言处理 & 语音处理](#nlp--speech-processing%E8%87%AA%E7%84%B6%E8%AF%AD%E8%A8%80%E5%A4%84%E7%90%86--%E8%AF%AD%E9%9F%B3%E5%A4%84%E7%90%86)\n    - [CV｜计算机视觉](#cv%E8%AE%A1%E7%AE%97%E6%9C%BA%E8%A7%86%E8%A7%89)\n    - [概率库和生成库｜概率库和生成库](#probabilisticgenerative-libraries%E6%A6%82%E7%8E%87%E5%BA%93%E5%92%8C%E7%94%9F%E6%88%90%E5%BA%93)\n    - [其他库｜其他库](#other-libraries%E5%85%B6%E4%BB%96%E5%BA%93)\n  - [教程 & 书籍 & 示例｜教程 & 书籍 & 示例](#tutorials--books--examples%E6%95%99%E7%A8%8B--%E4%B9%A6%E7%B1%8D--%E7%A4%BA%E4%BE%8B)\n  - [论文实现｜论文实现](#paper-implementations%E8%AE%BF%E6%96%87%E5%AE%9E%E7%8E%B0)\n  - [报告 & 会议｜报告 & 会议](#talks--conferences%E6%8A%A5%E5%91%8A--%E4%BC%9A%E8%AE%AE)\n  - [Pytorch相关｜Pytorch相关](#pytorch-elsewhere--pytorch%E7%9B%B8%E5%85%B3)\n        \n## Pytorch & 相关库｜Pytorch & 相关库\n\n1. [pytorch](http:\u002F\u002Fpytorch.org): 使用强GPU加速的Python张量计算和动态神经网络.\n\n### NLP & 语音处理｜自然语言处理 & 语音处理:\n\n1. \u003Ckbd>2800+\u003C\u002Fkbd> [text](https:\u002F\u002Fgithub.com\u002Fpytorch\u002Ftext): 针对文本数据和NLP数据集的数据加载和抽象。\n2. \u003Ckbd>1300+\u003C\u002Fkbd> [pytorch-seq2seq](https:\u002F\u002Fgithub.com\u002FIBM\u002Fpytorch-seq2seq): Pytorch中处理seq2seq的开源框架。\n3. 
\u003Ckbd>1000-\u003C\u002Fkbd> [anuvada](https:\u002F\u002Fgithub.com\u002FSandeep42\u002Fanuvada): NLP可解释模型。\n4. \u003Ckbd>1300+\u003C\u002Fkbd> [audio](https:\u002F\u002Fgithub.com\u002Fpytorch\u002Faudio): 简单的音频I\u002FO。\n5. \u003Ckbd>1000-\u003C\u002Fkbd> [loop](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Floop):  一种跨多说话者的语音生成方法。\n6. \u003Ckbd>null\u003C\u002Fkbd> [fairseq](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Ffairseq-py): Facebook开发的Sequence-to-Sequence python工具包。\n7. \u003Ckbd>1000-\u003C\u002Fkbd> [speech](https:\u002F\u002Fgithub.com\u002Fawni\u002Fspeech): 语音转文字的端到端模型实现。\n8. \u003Ckbd>5100+\u003C\u002Fkbd> [OpenNMT-py](https:\u002F\u002Fgithub.com\u002FOpenNMT\u002FOpenNMT-py): 开源神经机器翻译 http:\u002F\u002Fopennmt.net.\n9. \u003Ckbd>2300+\u003C\u002Fkbd> [neuralcoref](https:\u002F\u002Fgithub.com\u002Fhuggingface\u002Fneuralcoref): 在spaCy中使用神经网络实现快速共指消解。\n10. \u003Ckbd>1000+\u003C\u002Fkbd> [sentiment-discovery](https:\u002F\u002Fgithub.com\u002FNVIDIA\u002Fsentiment-discovery): 基于规模的无监督语言模型在稳健情绪分类中的应用。\n11. \u003Ckbd>2800+\u003C\u002Fkbd> [MUSE](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FMUSE): 一个多语言无监督或有监督词语嵌入库。\n12. \u003Ckbd>1000-\u003C\u002Fkbd> [nmtpytorch](https:\u002F\u002Fgithub.com\u002Flium-lst\u002Fnmtpytorch): PyTorch中的Sequence-to-Sequence框架。\n13. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-wavenet](https:\u002F\u002Fgithub.com\u002Fvincentherrmann\u002Fpytorch-wavenet): 快速生成WaveNet的实现。\n14. \u003Ckbd>1000-\u003C\u002Fkbd> [Tacotron-pytorch](https:\u002F\u002Fgithub.com\u002Fsoobinseo\u002FTacotron-pytorch): Tacotron: 端到端语音合成。\n15. \u003Ckbd>10300+\u003C\u002Fkbd> [AllenNLP](https:\u002F\u002Fgithub.com\u002Fallenai\u002Fallennlp): 开源NLP研究库，基于PyTorch。[http:\u002F\u002Fwww.allennlp.org\u002F](https:\u002F\u002Fallennlp.org)\n16. \u003Ckbd>1900+\u003C\u002Fkbd> [PyTorch-NLP](https:\u002F\u002Fgithub.com\u002FPetrochukM\u002FPyTorch-NLP): 为加速NLP研究设立的一个库，包含神经网络层、文本处理模块和众多数据集。 pytorchnlp.readthedocs.io\n17. 
\u003Ckbd>1000-\u003C\u002Fkbd> [quick-nlp](https:\u002F\u002Fgithub.com\u002Foutcastofmusic\u002Fquick-nlp): 基于FastAI的Pytorch NLP库。\n18. \u003Ckbd>4900+\u003C\u002Fkbd> [TTS](https:\u002F\u002Fgithub.com\u002Fmozilla\u002FTTS): 文本转语音的深度学习框架。\n19. \u003Ckbd>2800+\u003C\u002Fkbd> [LASER](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FLASER): LASER是一个用来计算和使用多语言语句嵌入的库。\n20. \u003Ckbd>1100+\u003C\u002Fkbd> [pyannote-audio](https:\u002F\u002Fgithub.com\u002Fpyannote\u002Fpyannote-audio): 用于说话人分类的神经构建块：语音活动检测, 说话人变化检测, 说话人嵌入。\n21. \u003Ckbd>1000-\u003C\u002Fkbd> [gensen](https:\u002F\u002Fgithub.com\u002FMaluuba\u002Fgensen): 基于大规模多任务学习的通用句子表示。\n22. \u003Ckbd>1000-\u003C\u002Fkbd> [translate](https:\u002F\u002Fgithub.com\u002Fpytorch\u002Ftranslate): 翻译——一个PyTorch语言库。\n23. \u003Ckbd>3900+\u003C\u002Fkbd> [espnet](https:\u002F\u002Fgithub.com\u002Fespnet\u002Fespnet): 端到端语音处理工具集。 espnet.github.io\u002Fespnet\n24. \u003Ckbd>4500+\u003C\u002Fkbd> [pythia](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fpythia): 源于FAIR(Facebook AI Research)的视觉与语言多模态研究的模块化框架。\n25. \u003Ckbd>1400+\u003C\u002Fkbd> [UnsupervisedMT](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FUnsupervisedMT): 基于短语的神经无监督机器翻译。\n26. \u003Ckbd>1300+\u003C\u002Fkbd> [jiant](https:\u002F\u002Fgithub.com\u002Fjsalt18-sentence-repl\u002Fjiant): 通用文本理解模型的jiant工具包。https:\u002F\u002Fjiant.info\n27. \u003Ckbd>4300+\u003C\u002Fkbd> [BERT-PyTorch](https:\u002F\u002Fgithub.com\u002Fcodertimo\u002FBERT-pytorch): Google AI 2018 BERT 的 Pytorch 实现，伴有简单注释。\n28. \u003Ckbd>2100+\u003C\u002Fkbd> [InferSent](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FInferSent): NLI的句子嵌入(InferSent)和训练代码。\n29. \u003Ckbd>1300+\u003C\u002Fkbd> [uis-rnn](https:\u002F\u002Fgithub.com\u002Fgoogle\u002Fuis-rnn):无限交错状态递归神经网络(UIS-RNN)算法，能够从嘈杂的环境中分辨声音，对应论文 Fully Supervised Speaker Diarization. arxiv.org\u002Fabs\u002F1810.04719\n30. 
\u003Ckbd>10600+\u003C\u002Fkbd> [flair](https:\u002F\u002Fgithub.com\u002Fzalandoresearch\u002Fflair): 一个针对最先进的NLP的简单框架。\n31. \u003Ckbd>6200+\u003C\u002Fkbd> [pytext](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fpytext): 基于PyTorch的自然语言建模框架。 fb.me\u002Fpytextdocs\n32. \u003Ckbd>1000-\u003C\u002Fkbd> [voicefilter](https:\u002F\u002Fgithub.com\u002Fmindslab-ai\u002Fvoicefilter): 谷歌AI的VoiceFilter的非官方实现。 http:\u002F\u002Fswpark.me\u002Fvoicefilter\n33. \u003Ckbd>1000-\u003C\u002Fkbd> [BERT-NER](https:\u002F\u002Fgithub.com\u002Fkamalkraj\u002FBERT-NER): 基于BERT的命名体识别(Named-Entity-Recognition)。\n34. \u003Ckbd>1000-\u003C\u002Fkbd> [transfer-nlp](https:\u002F\u002Fgithub.com\u002Ffeedly\u002Ftransfer-nlp): 为可复制实验管理而设计的NLP库。\n35. \u003Ckbd>1000-\u003C\u002Fkbd> [texar-pytorch](https:\u002F\u002Fgithub.com\u002Fasyml\u002Ftexar-pytorch): 机器学习和文本生成工具包。 texar.io\n36. \u003Ckbd>2000+\u003C\u002Fkbd> [pytorch-kaldi](https:\u002F\u002Fgithub.com\u002Fmravanelli\u002Fpytorch-kaldi): pytorch-kaldi 是一个开发中的最先进的dnn\u002Frnn混合语音识别系统。其DNN部分由PyTorch实现，而特征提取、标签计算和解码由kaldi工具包完成。\n37. \u003Ckbd>2900+\u003C\u002Fkbd> [NeMo](https:\u002F\u002Fgithub.com\u002FNVIDIA\u002FNeMo): 神经模块：对话式AI（conversational AI）工具集 nvidia.github.io\u002FNeMo\n38. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-struct](https:\u002F\u002Fgithub.com\u002Fharvardnlp\u002Fpytorch-struct): 经过测试的GPU实现库，实现了深度学习中的一些核心的结构化算法，如HMM, Dep Trees, CKY, ...\n39. \u003Ckbd>1000-\u003C\u002Fkbd> [espresso](https:\u002F\u002Fgithub.com\u002Ffreewym\u002Fespresso): Espresso: 快速的端到端神经语音识别工具集。\n40. \u003Ckbd>48900+\u003C\u002Fkbd> [transformers](https:\u002F\u002Fgithub.com\u002Fhuggingface\u002Ftransformers): huggingface Transformers: TensorFlow 2.0  和 PyTorch 上最先进的NLP工具。huggingface.co\u002Ftransformers\n41. \u003Ckbd>1500+\u003C\u002Fkbd> [reformer-pytorch](https:\u002F\u002Fgithub.com\u002Flucidrains\u002Freformer-pytorch): [Reformer](https:\u002F\u002Fopenreview.net\u002Fpdf?id=rkgNKkHtvB) 的 PyTorch 版。\n42. 
\u003Ckbd>1000-\u003C\u002Fkbd> [torch-metrics](https:\u002F\u002Fgithub.com\u002Fenochkan\u002Ftorch-metrics): PyTorch 中的模型评估指标。\n43. \u003Ckbd>2600+\u003C\u002Fkbd> [speechbrain](https:\u002F\u002Fgithub.com\u002Fspeechbrain\u002Fspeechbrain): SpeechBrain is an open-source and all-in-one speech toolkit based on PyTorch.\n44. \u003Ckbd>1000-\u003C\u002Fkbd> [Backprop](https:\u002F\u002Fgithub.com\u002Fbackprop-ai\u002Fbackprop): Backprop makes it simple to use, finetune, and deploy state-of-the-art ML models.\n\n### CV｜计算机视觉:\n\n1. \u003Ckbd>9400+\u003C\u002Fkbd> [pytorch vision](https:\u002F\u002Fgithub.com\u002Fpytorch\u002Fvision): TorchVision包含流行的数据集、模型架构、计算机视觉中常用的图像变换。\n2. \u003Ckbd>1000-\u003C\u002Fkbd> [pt-styletransfer](https:\u002F\u002Fgithub.com\u002Ftymokvo\u002Fpt-styletransfer): 作为PyTorch中一个类的神经风格转移。\n3. \u003Ckbd>1000-\u003C\u002Fkbd> [OpenFacePytorch](https:\u002F\u002Fgithub.com\u002Fthnkim\u002FOpenFacePytorch): 使用OpenFace的nn4.small2.v1.t7模型的PyTorch模块。\n4. \u003Ckbd>1000-\u003C\u002Fkbd> [img_classification_pk_pytorch](https:\u002F\u002Fgithub.com\u002Ffelixgwu\u002Fimg_classification_pk_pytorch): 将你的图像分类模型和最先进的模型进行快速比较 (比如DenseNet, ResNet, ...)\n5. \u003Ckbd>1400+\u003C\u002Fkbd> [SparseConvNet](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FSparseConvNet): 子流形稀疏卷积神经网络。\n6. \u003Ckbd>1000-\u003C\u002Fkbd> [Convolution_LSTM_pytorch](https:\u002F\u002Fgithub.com\u002Fautoman000\u002FConvolution_LSTM_pytorch): 多层卷积LSTM(长短期记忆网络)模块。\n7. \u003Ckbd>5000+\u003C\u002Fkbd> [face-alignment](https:\u002F\u002Fgithub.com\u002F1adrianb\u002Fface-alignment): :fire: 基于 PyTorch 的 2D 和 3D 面部对齐库。 adrianbulat.com\n8. \u003Ckbd>1500+\u003C\u002Fkbd> [pytorch-semantic-segmentation](https:\u002F\u002Fgithub.com\u002FZijunDeng\u002Fpytorch-semantic-segmentation): 语义分割。\n9. \u003Ckbd>1000-\u003C\u002Fkbd> [RoIAlign.pytorch](https:\u002F\u002Fgithub.com\u002Flongcw\u002FRoIAlign.pytorch): PyTorch版本的RoIAlign。其实现基于crop_and_resize，支持CPU和GPU上的前向和后向。\n10. 
\u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-cnn-finetune](https:\u002F\u002Fgithub.com\u002Fcreafz\u002Fpytorch-cnn-finetune): 用PyTorch微调预训练卷积神经网络。\n11. \u003Ckbd>1000-\u003C\u002Fkbd> [detectorch](https:\u002F\u002Fgithub.com\u002Fignacio-rocco\u002Fdetectorch): Detectorch - PyTorch版detectron框架，目前仅有detectron的推断(inference)和评估(evalutaion)功能，无训练(training)功能。\n12. \u003Ckbd>4400+\u003C\u002Fkbd> [Augmentor](https:\u002F\u002Fgithub.com\u002Fmdbloice\u002FAugmentor): 用于机器学习的图像增强库。 http:\u002F\u002Faugmentor.readthedocs.io\n13. \u003Ckbd>1000-\u003C\u002Fkbd> [s2cnn](https:\u002F\u002Fgithub.com\u002Fjonas-koehler\u002Fs2cnn): Spherical CNNs：球面卷积网络的PyTorch实现。 (e.g. 全方位图像、全球信号)\n14. \u003Ckbd>2100+\u003C\u002Fkbd> [TorchCV](https:\u002F\u002Fgithub.com\u002Fdonnyyou\u002Ftorchcv): 基于PyTorch的计算机视觉深度学习框架。\n15. \u003Ckbd>8400+\u003C\u002Fkbd> [maskrcnn-benchmark](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fmaskrcnn-benchmark): 实例分割与对象检测的快速模块化参考实现。\n16. \u003Ckbd>2200+\u003C\u002Fkbd> [image-classification-mobile](https:\u002F\u002Fgithub.com\u002Fosmr\u002Fimgclsmob): 计算机视觉卷积网络训练沙盒，包含ImageNet-1K上的与训练分类模型集合。\n17. \u003Ckbd>1000-\u003C\u002Fkbd> [medicaltorch](https:\u002F\u002Fgithub.com\u002Fperone\u002Fmedicaltorch): 一个医学成像框架。http:\u002F\u002Fmedicaltorch.readthedocs.io\n18. \u003Ckbd>8400+\u003C\u002Fkbd> [albumentations](https:\u002F\u002Fgithub.com\u002Falbu\u002Falbumentations): 快速图像增强库和其他库的易用包装器。\n19. \u003Ckbd>4200+\u003C\u002Fkbd> [kornia](https:\u002F\u002Fgithub.com\u002Farraiyopensource\u002Fkornia): 开源可微计算机视觉库。https:\u002F\u002Fkornia.org\n20. \u003Ckbd>1000-\u003C\u002Fkbd> [text-detector](https:\u002F\u002Fgithub.com\u002Fs3nh\u002Ftext-detector): 检测和翻译文本。\n21. \u003Ckbd>2200+\u003C\u002Fkbd> [facenet-pytorch](https:\u002F\u002Fgithub.com\u002Ftimesler\u002Ffacenet-pytorch): 预训练Pytorch人脸检测与识别模型，从 [davidsandberg\u002Ffacenet](https:\u002F\u002Fgithub.com\u002Fdavidsandberg\u002Ffacenet) 移植而来。\n22. 
\u003Ckbd>17300+\u003C\u002Fkbd> [detectron2](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fdetectron2): Detectron2是FAIR的下一代目标检测和分割研究平台。\n23. \u003Ckbd>1000-\u003C\u002Fkbd> [vedaseg](https:\u002F\u002Fgithub.com\u002FMedia-Smart\u002Fvedaseg): 基于PyTorch的语义分割工具箱。\n24. \u003Ckbd>1300+\u003C\u002Fkbd> [ClassyVision](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FClassyVision): A用于图像和视频分类的端到端PyTorch框架。https:\u002F\u002Fclassyvision.ai\n25. \u003Ckbd>1000-\u003C\u002Fkbd> [detecto](https:\u002F\u002Fgithub.com\u002Falankbi\u002Fdetecto): 用 5 行代码构建功能完备的计算机视觉模型。https:\u002F\u002Fdetecto.readthedocs.io\u002F\n26. \u003Ckbd>5000+\u003C\u002Fkbd> [pytorch3d](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fpytorch3d): PyTorch3d 是一个面向深度学习的高效、可复用的 3D 计算机视觉库。 https:\u002F\u002Fpytorch3d.org\u002F\n27. \u003Ckbd>15700+\u003C\u002Fkbd> [MMDetection](https:\u002F\u002Fgithub.com\u002Fopen-mmlab\u002Fmmdetection): MMDetection 是一个开源的目标检测工具箱，属于 [OpenMMLab 项目](https:\u002F\u002Fopen-mmlab.github.io\u002F)。\n28. \u003Ckbd>1000-\u003C\u002Fkbd> [neural-dream](https:\u002F\u002Fgithub.com\u002FProGamerGov\u002Fneural-dream): DeepDream 算法的 PyTorch 实现，可以创造梦一样的幻觉视觉效果。\n29. \u003Ckbd>1000-\u003C\u002Fkbd> [FlashTorch](https:\u002F\u002Fgithub.com\u002FMisaOgura\u002Fflashtorch): Visualization toolkit for neural networks in PyTorch!\n30. \u003Ckbd>1000-\u003C\u002Fkbd> [Lucent](https:\u002F\u002Fgithub.com\u002Fgreentfrapp\u002Flucent): Tensorflow and OpenAI Clarity's Lucid adapted for PyTorch。\n31. \u003Ckbd>1300+\u003C\u002Fkbd> [MMDetection3D](https:\u002F\u002Fgithub.com\u002Fopen-mmlab\u002Fmmdetection3d): MMDetection3D is OpenMMLab's next-generation platform for general 3D object detection，a part of the [OpenMMLab project](https:\u002F\u002Fopen-mmlab.github.io\u002F)。\n32. 
\u003Ckbd>2100+\u003C\u002Fkbd> [MMSegmentation](https:\u002F\u002Fgithub.com\u002Fopen-mmlab\u002Fmmsegmentation): MMSegmentation is a semantic segmentation toolbox and benchmark，a part of the [OpenMMLab project](https:\u002F\u002Fopen-mmlab.github.io\u002F)。\n33. \u003Ckbd>2200+\u003C\u002Fkbd> [MMEditing](https:\u002F\u002Fgithub.com\u002Fopen-mmlab\u002Fmmediting): MMEditing is a image and video editing toolbox，a part of the [OpenMMLab project](https:\u002F\u002Fopen-mmlab.github.io\u002F)。\n34. \u003Ckbd>1000+\u003C\u002Fkbd> [MMAction2](https:\u002F\u002Fgithub.com\u002Fopen-mmlab\u002Fmmaction2): MMAction2 is OpenMMLab's next generation action understanding toolbox and benchmark，a part of the [OpenMMLab project](https:\u002F\u002Fopen-mmlab.github.io\u002F)。\n35. \u003Ckbd>1000+\u003C\u002Fkbd> [MMPose](https:\u002F\u002Fgithub.com\u002Fopen-mmlab\u002Fmmpose): MMPose is a pose estimation toolbox and benchmark，a part of the [OpenMMLab project](https:\u002F\u002Fopen-mmlab.github.io\u002F)。\n36. \u003Ckbd>1000+\u003C\u002Fkbd> [lightly](https:\u002F\u002Fgithub.com\u002Flightly-ai\u002Flightly) - Lightly is a computer vision framework for self-supervised learning。\n\n### Probabilistic\u002FGenerative Libraries｜概率库和生成库:\n\n1. \u003Ckbd>1000-\u003C\u002Fkbd> [ptstat](https:\u002F\u002Fgithub.com\u002Fstepelu\u002Fptstat): 概率编程和统计推断。\n2. \u003Ckbd>7000+\u003C\u002Fkbd> [pyro](https:\u002F\u002Fgithub.com\u002Fuber\u002Fpyro): 基于 Python 和 PyTorch 的深度通用概率编程库。 http:\u002F\u002Fpyro.ai\n3. \u003Ckbd>1000-\u003C\u002Fkbd> [probtorch](https:\u002F\u002Fgithub.com\u002Fprobtorch\u002Fprobtorch): Probabilistic Torch是一个扩展了PyTorch的深度生成模型的库。\n4. \u003Ckbd>1000-\u003C\u002Fkbd> [paysage](https:\u002F\u002Fgithub.com\u002Fdrckf\u002Fpaysage): 基于Python\u002FPyTorch的非监督学习和生成模型库。\n5. \u003Ckbd>1000-\u003C\u002Fkbd> [pyvarinf](https:\u002F\u002Fgithub.com\u002Fctallec\u002Fpyvarinf): Python包，促进了带有变分推断的贝叶斯深度学习方法在pytorch中的应用。\n6. 
\u003Ckbd>1000-\u003C\u002Fkbd> [pyprob](https:\u002F\u002Fgithub.com\u002Fprobprog\u002Fpyprob): 一个基于PyTorch的概率编程与推断编译的库。\n7. \u003Ckbd>1000-\u003C\u002Fkbd> [mia](https:\u002F\u002Fgithub.com\u002Fspring-epfl\u002Fmia): 一个运行针对机器学习模型的成员推理攻击的库。\n8. \u003Ckbd>1000-\u003C\u002Fkbd> [pro_gan_pytorch](https:\u002F\u002Fgithub.com\u002Fakanimax\u002Fpro_gan_pytorch): 作为PyTorch nn.Module的扩展的ProGAN包。\n9. \u003Ckbd>2000+\u003C\u002Fkbd> [botorch](https:\u002F\u002Fgithub.com\u002Fpytorch\u002Fbotorch): PyTorch中的贝叶斯优化。\n\n\n### Other libraries｜其他库:\n\n1. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch extras](https:\u002F\u002Fgithub.com\u002Fmrdrozdov\u002Fpytorch-extras): PyTorch的额外特性。\n2. \u003Ckbd>1000-\u003C\u002Fkbd> [functional zoo](https:\u002F\u002Fgithub.com\u002Fszagoruyko\u002Ffunctional-zoo): PyTorch和Tensorflow的模型定义和预训练权重。\n3. \u003Ckbd>1600+\u003C\u002Fkbd> [torch-sampling](https:\u002F\u002Fgithub.com\u002Fncullen93\u002Ftorchsample): Pytorch的采样、高级训练、数据增强和实用程序。\n4. \u003Ckbd>1000-\u003C\u002Fkbd> [torchcraft-py](https:\u002F\u002Fgithub.com\u002Fdeepcraft\u002Ftorchcraft-py): TorchCraft的Python包装器，TorchCraft是连接Torch和StarCraft的桥梁。\n5. \u003Ckbd>1000-\u003C\u002Fkbd> [aorun](https:\u002F\u002Fgithub.com\u002Framon-oliveira\u002Faorun): Aorun试图以PyTorch为后端实现类似于Keras的API。\n6. \u003Ckbd>1000-\u003C\u002Fkbd> [logger](https:\u002F\u002Fgithub.com\u002Foval-group\u002Flogger): 机器学习记录器（logger）。\n7. \u003Ckbd>1000-\u003C\u002Fkbd> [PyTorch-docset](https:\u002F\u002Fgithub.com\u002Fiamaziz\u002FPyTorch-docset): PyTorch离线文档，结合Dash，Zeal，Velocity或者LovelyDocs使用。\n8. \u003Ckbd>1000-\u003C\u002Fkbd> [convert_torch_to_pytorch](https:\u002F\u002Fgithub.com\u002Fclcarwin\u002Fconvert_torch_to_pytorch): 将Torch t7模型转换为PyTorch模型。\n9. \u003Ckbd>8000+\u003C\u002Fkbd> [pretrained-models.pytorch](https:\u002F\u002Fgithub.com\u002FCadene\u002Fpretrained-models.pytorch): PyTorch 预训练卷积神经网络：NASNet, ResNeXt, ResNet, InceptionV4, InceptionResnetV2, Xception, DPN 等等。该项目的目标是帮助复制研究论文结果。\n10. 
\u003Ckbd>1000-\u003C\u002Fkbd> [pytorch_fft](https:\u002F\u002Fgithub.com\u002Flocarlab\u002Fpytorch_fft): CUDA FFTs的PyTorch包装器。\n11. \u003Ckbd>1000-\u003C\u002Fkbd> [caffe_to_torch_to_pytorch](https:\u002F\u002Fgithub.com\u002Ffanq15\u002Fcaffe_to_torch_to_pytorch): Caffe模型转PyTorch\u002FTorch模型，Torch模型转PyTorch模型。\n12. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-extension](https:\u002F\u002Fgithub.com\u002Fsniklaus\u002Fpytorch-extension): PyTorch的CUDA扩展示例，计算了两个张量的[哈达玛积(Hadamard product)](https:\u002F\u002Fbaike.baidu.com\u002Fitem\u002F哈达玛积\u002F18894493?fr=aladdin)。\n13. \u003Ckbd>7000+\u003C\u002Fkbd> [tensorboard-pytorch](https:\u002F\u002Fgithub.com\u002Flanpa\u002Ftensorboard-pytorch): 该模块以tensorboard格式保存PyTorch张量以供检查。目前支持tensorboard中的标量、图像、音频、直方图等特性。\n14. \u003Ckbd>2400+\u003C\u002Fkbd> [gpytorch](https:\u002F\u002Fgithub.com\u002Fjrg365\u002Fgpytorch): GPyTorch是一个用PyTorch实现的高斯过程库。它可以轻松地创建可伸缩、灵活和模块化的高斯过程模型。\n15. \u003Ckbd>2500+\u003C\u002Fkbd> [spotlight](https:\u002F\u002Fgithub.com\u002Fmaciejkula\u002Fspotlight): 深度推荐模型。\n16. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-cns](https:\u002F\u002Fgithub.com\u002Fawentzonline\u002Fpytorch-cns): 基于PyTorch的广义压缩网络搜索（Generalized [Compressed Network Search](http:\u002F\u002Fpeople.idsia.ch\u002F~juergen\u002Fcompressednetworksearch.html)）。\n17. \u003Ckbd>1000-\u003C\u002Fkbd> [pyinn](https:\u002F\u002Fgithub.com\u002Fszagoruyko\u002Fpyinn): CuPy实现融合PyTorch操作。\n18. \u003Ckbd>1000-\u003C\u002Fkbd> [inferno](https:\u002F\u002Fgithub.com\u002Fnasimrahaman\u002Finferno): 关于PyTorch的实用程序库。\n19. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-fitmodule](https:\u002F\u002Fgithub.com\u002Fhenryre\u002Fpytorch-fitmodule): 一种用于PyTorch模块的超简单拟合方法。\n20. \u003Ckbd>4000+\u003C\u002Fkbd> [inferno-sklearn](https:\u002F\u002Fgithub.com\u002Fdnouri\u002Finferno): 一个基于PyTorch封装且兼容scikit-learn的神经网络库。\n21. 
\u003Ckbd>null\u003C\u002Fkbd> [pytorch-caffe-darknet-convert](https:\u002F\u002Fgithub.com\u002Fmarvis\u002Fpytorch-caffe-darknet-convert): 在 pytorch, caffe prototxt\u002Fweights 和 darknet cfg\u002Fweights 之间转换。\n22. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch2caffe](https:\u002F\u002Fgithub.com\u002Flongcw\u002Fpytorch2caffe): 将PyTorch模型转换成Caffe模型。\n23. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-tools](https:\u002F\u002Fgithub.com\u002Fnearai\u002Fpytorch-tools): PyTorch工具。\n24. \u003Ckbd>1900+\u003C\u002Fkbd> [sru](https:\u002F\u002Fgithub.com\u002Ftaolei87\u002Fsru): 训练RNNs和训练CNNs一样快。 (arxiv.org\u002Fabs\u002F1709.02755)\n25. \u003Ckbd>1000-\u003C\u002Fkbd> [torch2coreml](https:\u002F\u002Fgithub.com\u002Fprisma-ai\u002Ftorch2coreml): Torch7 -> CoreML，该工具可将Torch7模型转换为[Apple CoreML](https:\u002F\u002Fdeveloper.apple.com\u002Fdocumentation\u002Fcoreml)格式以便在Apple设备上运行。\n26. \u003Ckbd>1700+\u003C\u002Fkbd> [PyTorch-Encoding](https:\u002F\u002Fgithub.com\u002Fzhanghang1989\u002FPyTorch-Encoding): PyTorch 深度纹理编码网络 (Deep Texture Encoding Network) http:\u002F\u002Fhangzh.com\u002FPyTorch-Encoding\n27. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-ctc](https:\u002F\u002Fgithub.com\u002Fryanleary\u002Fpytorch-ctc): PyTorch-CTC 实现了CTC(联结主义时间分类，Connectionist Temporal Classification)集束搜索（Beam Search）解码。C++代码借鉴了TensorFlow，并通过一些改进增加了灵活性。\n28. \u003Ckbd>1000-\u003C\u002Fkbd> [candlegp](https:\u002F\u002Fgithub.com\u002Ft-vi\u002Fcandlegp): Pytorch中的高斯过程。\n29. \u003Ckbd>1000-\u003C\u002Fkbd> [dpwa](https:\u002F\u002Fgithub.com\u002Floudinthecloud\u002Fdpwa): 基于成对平均（Pair-Wise Averaging）的分布式学习。\n30. \u003Ckbd>1000-\u003C\u002Fkbd> [dni-pytorch](https:\u002F\u002Fgithub.com\u002Fkoz4k\u002Fdni-pytorch): 基于合成梯度的PyTorch解耦神经接口。\n31. \u003Ckbd>4000+\u003C\u002Fkbd> [skorch](https:\u002F\u002Fgithub.com\u002Fdnouri\u002Fskorch): 一个基于PyTorch封装且兼容scikit-learn的神经网络库。\n32. 
\u003Ckbd>3600+\u003C\u002Fkbd> [ignite](https:\u002F\u002Fgithub.com\u002Fpytorch\u002Fignite): Ignite是一个高级库，帮助你在PyTorch中训练神经网络。\n33. \u003Ckbd>1000-\u003C\u002Fkbd> [Arnold](https:\u002F\u002Fgithub.com\u002Fglample\u002FArnold): Arnold - DOOM 游戏代理。\n34. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-mcn](https:\u002F\u002Fgithub.com\u002Falbanie\u002Fpytorch-mcn): 将MatConvNet模型转换为PyTorch模型。\n35. \u003Ckbd>3200+\u003C\u002Fkbd> [simple-faster-rcnn-pytorch](https:\u002F\u002Fgithub.com\u002Fchenyuntc\u002Fsimple-faster-rcnn-pytorch): Faster R-CNN 的简化实现，性能与原始论文相当。\n36. \u003Ckbd>1000-\u003C\u002Fkbd> [generative_zoo](https:\u002F\u002Fgithub.com\u002FDL-IT\u002Fgenerative_zoo): generative_zoo提供了PyTorch中一些生成模型的工作实现。\n37. \u003Ckbd>1800+\u003C\u002Fkbd> [pytorchviz](https:\u002F\u002Fgithub.com\u002Fszagoruyko\u002Fpytorchviz): 可视化PyTorch的运行图。\n38. \u003Ckbd>1000-\u003C\u002Fkbd> [cogitare](https:\u002F\u002Fgithub.com\u002Fcogitare-ai\u002Fcogitare): Cogitare - 一个现代、快速、模块化的深度学习和机器学习框架。\n39. \u003Ckbd>1000-\u003C\u002Fkbd> [pydlt](https:\u002F\u002Fgithub.com\u002Fdmarnerides\u002Fpydlt): 基于PyTorch的深度学习工具箱。\n40. \u003Ckbd>1000-\u003C\u002Fkbd> [semi-supervised-pytorch](https:\u002F\u002Fgithub.com\u002Fwohlert\u002Fsemi-supervised-pytorch): 各种基于VAE的半监督模型和生成模型的实现。\n41. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch_cluster](https:\u002F\u002Fgithub.com\u002Frusty1s\u002Fpytorch_cluster): 优化图簇算法的PyTorch扩展库。\n42. \u003Ckbd>1000-\u003C\u002Fkbd> [neural-assembly-compiler](https:\u002F\u002Fgithub.com\u002Faditya-khant\u002Fneural-assembly-compiler): 基于自适应神经编译的PyTorch神经汇编编译器。\n43. \u003Ckbd>1000-\u003C\u002Fkbd> [caffemodel2pytorch](https:\u002F\u002Fgithub.com\u002Fvadimkantorov\u002Fcaffemodel2pytorch): 将Caffe模型转换为PyTorch模型。\n44. \u003Ckbd>1000-\u003C\u002Fkbd> [extension-cpp](https:\u002F\u002Fgithub.com\u002Fpytorch\u002Fextension-cpp): PyTorch中的C++扩展。\n45. 
\u003Ckbd>1000-\u003C\u002Fkbd> [pytoune](https:\u002F\u002Fgithub.com\u002FGRAAL-Research\u002Fpytoune): 类Keras框架和实用程序。\n46. \u003Ckbd>1000-\u003C\u002Fkbd> [jetson-reinforcement](https:\u002F\u002Fgithub.com\u002Fdusty-nv\u002Fjetson-reinforcement): 使用PyTorch，OpenAI Gym和Gazebo机器人模拟的NVIDIA Jetson深度强化学习GPU库。\n47. \u003Ckbd>1000-\u003C\u002Fkbd> [matchbox](https:\u002F\u002Fgithub.com\u002Fsalesforce\u002Fmatchbox): 编写单个示例的PyTorch代码，然后小批量地高效运行。\n48. \u003Ckbd>1000-\u003C\u002Fkbd> [torch-two-sample](https:\u002F\u002Fgithub.com\u002Fjosipd\u002Ftorch-two-sample): PyTorch双样本测试库。\n49. \u003Ckbd>3100+\u003C\u002Fkbd> [pytorch-summary](https:\u002F\u002Fgithub.com\u002Fsksq96\u002Fpytorch-summary): PyTorch模型总结，类似于Keras中的`model.summary()`。\n50. \u003Ckbd>1000-\u003C\u002Fkbd> [mpl.pytorch](https:\u002F\u002Fgithub.com\u002FBelBES\u002Fmpl.pytorch): MaxPoolingLoss的PyTorch实现。\n51. \u003Ckbd>null\u003C\u002Fkbd> [scVI-dev](https:\u002F\u002Fgithub.com\u002FYosefLab\u002FscVI-dev): 链接失效。\n52. \u003Ckbd>5500+\u003C\u002Fkbd> [apex](https:\u002F\u002Fgithub.com\u002FNVIDIA\u002Fapex): 一个PyTorch扩展：面向精简混合精度和分布式训练。\n53. \u003Ckbd>3100+\u003C\u002Fkbd> [ELF](https:\u002F\u002Fgithub.com\u002Fpytorch\u002FELF): ELF: 游戏研究平台，复现了AlphaGoZero\u002FAlphaZero。\n54. \u003Ckbd>1000-\u003C\u002Fkbd> [Torchlite](https:\u002F\u002Fgithub.com\u002FEKami\u002FTorchlite): Pytorch建立在sklearn、Pytorch和Tensorflow等流行机器学习框架上的高水平库。\n55. \u003Ckbd>1000-\u003C\u002Fkbd> [joint-vae](https:\u002F\u002Fgithub.com\u002FSchlumberger\u002Fjoint-vae): JointVAE的PyTorch实现，一个面向分离连续和离散变异因素的框架 :star2:。\n56. \u003Ckbd>1000-\u003C\u002Fkbd> [SLM-Lab](https:\u002F\u002Fgithub.com\u002Fkengz\u002FSLM-Lab): PyTorch模块化深度强化学习框架。\n57. \u003Ckbd>1000-\u003C\u002Fkbd> [bindsnet](https:\u002F\u002Fgithub.com\u002FHananel-Hazan\u002Fbindsnet): 一个Python包，可借助PyTorch `Tensor` 功能在CPUs或GPUs上模拟脉冲神经网络(SNNs, Spiking Neural Networks)。\n58. 
\u003Ckbd>1000-\u003C\u002Fkbd> [pro_gan_pytorch](https:\u002F\u002Fgithub.com\u002Fakanimax\u002Fpro_gan_pytorch): 作为 PyTorch nn.Module 扩展的 ProGAN 包。\n59. \u003Ckbd>11500+\u003C\u002Fkbd> [pytorch_geometric](https:\u002F\u002Fgithub.com\u002Frusty1s\u002Fpytorch_geometric): PyTorch几何深度学习扩展库。\n60. \u003Ckbd>1000-\u003C\u002Fkbd> [torchplus](https:\u002F\u002Fgithub.com\u002Fknighton\u002Ftorchplus): 在 PyTorch modules 上实现 + 运算符，返回序列。\n61. \u003Ckbd>1000-\u003C\u002Fkbd> [lagom](https:\u002F\u002Fgithub.com\u002Fzuoxingdong\u002Flagom): lagom: 用于强化学习算法快速原型构建的轻量级PyTorch架构。\n62. \u003Ckbd>1000-\u003C\u002Fkbd> [torchbearer](https:\u002F\u002Fgithub.com\u002Fecs-vlc\u002Ftorchbearer): torchbearer: PyTorch模型拟合库。\n63. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-maml-rl](https:\u002F\u002Fgithub.com\u002Ftristandeleu\u002Fpytorch-maml-rl): 强化学习中的模型不可知元学习(MAML, Model-Agnostic Meta-Learning)。\n64. \u003Ckbd>1000-\u003C\u002Fkbd> [NALU](https:\u002F\u002Fgithub.com\u002Fbharathgs\u002FNALU): 神经算术逻辑单元(Neural Arithmetic Logic Units)的PyTorch基本实现，论文：arxiv.org\u002Fpdf\u002F1808.00508.pdf 。\n65. \u003Ckbd>1000-\u003C\u002Fkbd> [QuCumber](https:\u002F\u002Fgithub.com\u002FPIQuIL\u002FQuCumber): 神经网络多体波函数重构。\n66. \u003Ckbd>1000-\u003C\u002Fkbd> [magnet](https:\u002F\u002Fgithub.com\u002FMagNet-DL\u002Fmagnet): 自我建立的深度学习项目。http:\u002F\u002Fmagnet-dl.readthedocs.io\u002F\n67. \u003Ckbd>1000-\u003C\u002Fkbd> [opencv_transforms](https:\u002F\u002Fgithub.com\u002Fjbohnslav\u002Fopencv_transforms): OpenCV实现Torchvision的图像分割。\n68. \u003Ckbd>21100+\u003C\u002Fkbd> [fastai](https:\u002F\u002Fgithub.com\u002Ffastai\u002Ffastai): fast.ai 深度学习库、课程和教程。\n69. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-dense-correspondence](https:\u002F\u002Fgithub.com\u002FRobotLocomotion\u002Fpytorch-dense-correspondence): [《Dense Object Nets: Learning Dense Visual Object Descriptors By and For Robotic Manipulation》](arxiv.org\u002Fpdf\u002F1806.08756.pdf) 一文的代码。\n70. 
\u003Ckbd>1000-\u003C\u002Fkbd> [colorization-pytorch](https:\u002F\u002Fgithub.com\u002Frichzhang\u002Fcolorization-pytorch): PyTorch实现交互式深度着色(Interactive Deep Colorization)。 richzhang.github.io\u002Fideepcolor\n71. \u003Ckbd>1000-\u003C\u002Fkbd> [beauty-net](https:\u002F\u002Fgithub.com\u002Fcms-flash\u002Fbeauty-net): PyTorch一个简单、灵活、可扩展的PyTorch模板。\n72. \u003Ckbd>1000-\u003C\u002Fkbd> [OpenChem](https:\u002F\u002Fgithub.com\u002FMariewelt\u002FOpenChem): OpenChem: 面向计算化学和药物设计研究的深度学习工具包 mariewelt.github.io\u002FOpenChem 。\n73. \u003Ckbd>1000-\u003C\u002Fkbd> [torchani](https:\u002F\u002Fgithub.com\u002Faiqm\u002Ftorchani): PyTorch精确神经网络电位。 aiqm.github.io\u002Ftorchani\n74. \u003Ckbd>1000-\u003C\u002Fkbd> [PyTorch-LBFGS](https:\u002F\u002Fgithub.com\u002Fhjmshi\u002FPyTorch-LBFGS): PyTorch实现L-BFGS。\n75. \u003Ckbd>2400+\u003C\u002Fkbd> [gpytorch](https:\u002F\u002Fgithub.com\u002Fcornellius-gp\u002Fgpytorch): PyTorch中对高斯过程的高效且模块化的实现。\n76. \u003Ckbd>1000-\u003C\u002Fkbd> [hessian](https:\u002F\u002Fgithub.com\u002Fmariogeiger\u002Fhessian): PyTorch版hessian。\n77. \u003Ckbd>1000-\u003C\u002Fkbd> [vel](https:\u002F\u002Fgithub.com\u002FMillionIntegrals\u002Fvel): 深度学习研究中的速度。\n78. \u003Ckbd>1000-\u003C\u002Fkbd> [nonechucks](https:\u002F\u002Fgithub.com\u002Fmsamogh\u002Fnonechucks): 动态地处理数据集中的坏样本，使用转换作为过滤器。\n79. \u003Ckbd>1000+\u003C\u002Fkbd> [torchstat](https:\u002F\u002Fgithub.com\u002FSwall0w\u002Ftorchstat): PyTorch中的模型分析器。\n80. \u003Ckbd>1400+\u003C\u002Fkbd> [QNNPACK](https:\u002F\u002Fgithub.com\u002Fpytorch\u002FQNNPACK): 量化神经网络包—量化神经网络算子的移动优化实现。\n81. \u003Ckbd>3600+\u003C\u002Fkbd> [torchdiffeq](https:\u002F\u002Fgithub.com\u002Frtqichen\u002Ftorchdiffeq): PyTorch解常微分方程（ODE），使用的是全GPU支持、O(1)内存复杂度的反向传播算法。\n82. \u003Ckbd>1000+\u003C\u002Fkbd> [redner](https:\u002F\u002Fgithub.com\u002FBachiLi\u002Fredner): 可微的 Monte Carlo 路径跟踪器。\n83. \u003Ckbd>1000-\u003C\u002Fkbd> [pixyz](https:\u002F\u002Fgithub.com\u002Fmasa-su\u002Fpixyz): 一个库，用来以更简洁、直观和可扩展的方式开发深层生成模型。\n84. 
\u003Ckbd>1000-\u003C\u002Fkbd> [euclidesdb](https:\u002F\u002Fgithub.com\u002Fperone\u002Feuclidesdb): 一种多模型机器学习特征嵌入数据库。 http:\u002F\u002Feuclidesdb.readthedocs.io\n85. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch2keras](https:\u002F\u002Fgithub.com\u002Fnerox8664\u002Fpytorch2keras): 将PyTorch模型转换为Keras模型。\n86. \u003Ckbd>1000-\u003C\u002Fkbd> [salad](https:\u002F\u002Fgithub.com\u002Fdomainadaptation\u002Fsalad): 域适应和半监督学习工具箱。\n87. \u003Ckbd>1000-\u003C\u002Fkbd> [netharn](https:\u002F\u002Fgithub.com\u002FErotemic\u002Fnetharn): PyTorch的参数化拟合和预测线束（Prediction Harnesses）。\n88. \u003Ckbd>7700+\u003C\u002Fkbd> [dgl](https:\u002F\u002Fgithub.com\u002Fdmlc\u002Fdgl): Python包，基于现有的DL框架，用于简化对图形的深度学习。http:\u002F\u002Fdgl.ai.\n89. \u003Ckbd>1600+\u003C\u002Fkbd> [gandissect](https:\u002F\u002Fgithub.com\u002FCSAILVision\u002Fgandissect): 基于PyTorch的工具，用于可视化和理解GAN的神经元。gandissect.csail.mit.edu\n90. \u003Ckbd>1000-\u003C\u002Fkbd> [delira](https:\u002F\u002Fgithub.com\u002Fjustusschock\u002Fdelira): 基于PyTorch和Tensorlow的快速原型和训练深层神经网络的轻量级框架，用于医疗成像。 delira.rtfd.io\n91. \u003Ckbd>1000-\u003C\u002Fkbd> [mushroom](https:\u002F\u002Fgithub.com\u002FAIRLab-POLIMI\u002Fmushroom): 强化学习实验的Python库。\n92. \u003Ckbd>1000-\u003C\u002Fkbd> [Xlearn](https:\u002F\u002Fgithub.com\u002Fthuml\u002FXlearn): 迁移学习库。\n93. \u003Ckbd>1000-\u003C\u002Fkbd> [geoopt](https:\u002F\u002Fgithub.com\u002Fferrine\u002Fgeoopt): 基于PyTorch优化的黎曼自适应优化方法。\n94. \u003Ckbd>1000-\u003C\u002Fkbd> [vegans](https:\u002F\u002Fgithub.com\u002Funit8co\u002Fvegans): 包含多种现有的GANs。\n95. \u003Ckbd>4200+\u003C\u002Fkbd> [kornia](https:\u002F\u002Fgithub.com\u002Farraiyopensource\u002Fkornia): PyTorch开源可微计算机视觉库。 https:\u002F\u002Fkornia.org\n96. \u003Ckbd>1000-\u003C\u002Fkbd> [AdverTorch](https:\u002F\u002Fgithub.com\u002FBorealisAI\u002Fadvertorch): 研究对抗鲁棒性的工具箱。\n97. \u003Ckbd>2800+\u003C\u002Fkbd> [AdaBound](https:\u002F\u002Fgithub.com\u002FLuolc\u002FAdaBound): 一个优化器，训练速度和Adam一样快，和SGD一样好。\n98. 
\u003Ckbd>1000-\u003C\u002Fkbd> [fenchel-young-losses](https:\u002F\u002Fgithub.com\u002Fmblondel\u002Ffenchel-young-losses): 在PyTorch\u002FTensorFlow\u002Fscikit-learn中使用Fenchel-Young损失作为概率分类的损失函数。\n99. \u003Ckbd>2700+\u003C\u002Fkbd> [pytorch-OpCounter](https:\u002F\u002Fgithub.com\u002FLyken17\u002Fpytorch-OpCounter): 统计PyTorch模型的MACs\u002FFLOPs。\n100. \u003Ckbd>1000-\u003C\u002Fkbd> [Tor10](https:\u002F\u002Fgithub.com\u002Fkaihsin\u002FTor10): 基于PyTorch，为量子模拟设计的通用张量网络库。\n101. \u003Ckbd>2600+\u003C\u002Fkbd> [Catalyst](https:\u002F\u002Fgithub.com\u002Fcatalyst-team\u002Fcatalyst): PyTorch DL&RL 研究的高级实用程序。它的开发重点是可重复性、快速实验和代码\u002F思想重用。能够研究\u002F开发新的东西，而不是编写另一个常规的训练循环。\n102. \u003Ckbd>1500+\u003C\u002Fkbd> [Ax](https:\u002F\u002Fgithub.com\u002Ffacebook\u002FAx): 自适应实验平台。\n103. \u003Ckbd>1000-\u003C\u002Fkbd> [pywick](https:\u002F\u002Fgithub.com\u002Fachaiah\u002Fpywick): 高水平的PyTorch神经网络训练库。\n104. \u003Ckbd>1000-\u003C\u002Fkbd> [torchgpipe](https:\u002F\u002Fgithub.com\u002Fkakaobrain\u002Ftorchgpipe): PyTorch实现GPipe。 torchgpipe.readthedocs.io\n105. \u003Ckbd>1000+\u003C\u002Fkbd> [hub](https:\u002F\u002Fgithub.com\u002Fpytorch\u002Fhub): Pytorch Hub 是一个预训练模型库，用来提升研究的可重复性。\n106. \u003Ckbd>14600+\u003C\u002Fkbd> [pytorch-lightning](https:\u002F\u002Fgithub.com\u002FwilliamFalcon\u002Fpytorch-lightning): 面向ML研究人员的轻量级PyTorch包装器。缩放模型，少写样板。\n107. \u003Ckbd>1000-\u003C\u002Fkbd> [Tor10](https:\u002F\u002Fgithub.com\u002Fkaihsin\u002FTor10): 基于pytorch为量子模拟设计的通用张量网络库。\n108. \u003Ckbd>3100+\u003C\u002Fkbd> [tensorwatch](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002Ftensorwatch): 针对Python机器学习与数据科学的调试、监控与可视化。\n109. \u003Ckbd>1000-\u003C\u002Fkbd> [wavetorch](https:\u002F\u002Fgithub.com\u002Ffancompute\u002Fwavetorch): 波动方程的数值求解与反传播。 arxiv.org\u002Fabs\u002F1904.12831\n110. \u003Ckbd>1000-\u003C\u002Fkbd> [diffdist](https:\u002F\u002Fgithub.com\u002Fag14774\u002Fdiffdist): diffdist是一个面向PyTorch的Python库。它扩展了`torch.autograd`的默认功能，并增加了对进程间可微通信的支持。\n111. 
\u003Ckbd>1000-\u003C\u002Fkbd> [torchprof](https:\u002F\u002Fgithub.com\u002Fawwong1\u002Ftorchprof): 用于Pytorch模型逐层分析的最小依赖库。\n112. \u003Ckbd>1000-\u003C\u002Fkbd> [osqpth](https:\u002F\u002Fgithub.com\u002Foxfordcontrol\u002Fosqpth): PyTorch可微OSQP求解器。\n113. \u003Ckbd>1000-\u003C\u002Fkbd> [mctorch](https:\u002F\u002Fgithub.com\u002Fmctorch\u002Fmctorch): 面向深度学习的流形优化库。\n114. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-hessian-eigenthings](https:\u002F\u002Fgithub.com\u002Fnoahgolmant\u002Fpytorch-hessian-eigenthings): 使用Hessian向量积和随机幂迭代的高效PyTorch Hessian特征分解。\n115. \u003Ckbd>1200+\u003C\u002Fkbd> [MinkowskiEngine](https:\u002F\u002Fgithub.com\u002FStanfordVL\u002FMinkowskiEngine): 闵可夫斯基引擎是一个用于广义稀疏卷积和高维稀疏张量的自动微分方法库。\n116. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-cpp-rl](https:\u002F\u002Fgithub.com\u002FOmegastick\u002Fpytorch-cpp-rl): CppRl是一个强化学习框架，用 PyTorch C++ 前端编写。\n117. \u003Ckbd>1000+\u003C\u002Fkbd> [pytorch-toolbelt](https:\u002F\u002Fgithub.com\u002FBloodAxe\u002Fpytorch-toolbelt): PyTorch扩展，用来进行快速R&D原型开发和Kaggle代码收集。\n118. \u003Ckbd>1000-\u003C\u002Fkbd> [argus-tensor-stream](https:\u002F\u002Fgithub.com\u002FFonbet\u002Fargus-tensor-stream): 一个库，用来将实时视频流解码至CUDA内存。 tensorstream.argus-ai.com\n119. \u003Ckbd>1000-\u003C\u002Fkbd> [macarico](https:\u002F\u002Fgithub.com\u002Fhal3\u002Fmacarico): 在 PyTorch 中学习搜索。\n120. \u003Ckbd>1900+\u003C\u002Fkbd> [rlpyt](https:\u002F\u002Fgithub.com\u002Fastooke\u002Frlpyt): PyTorch 中的强化学习。\n121. \u003Ckbd>1000-\u003C\u002Fkbd> [pywarm](https:\u002F\u002Fgithub.com\u002Fblue-season\u002Fpywarm): 为 PyTorch 建立神经网络的一种更清洁的方法。https:\u002F\u002Fblue-season.github.io\u002Fpywarm\u002F\n122. \u003Ckbd>1300+\u003C\u002Fkbd> [learn2learn](https:\u002F\u002Fgithub.com\u002Flearnables\u002Flearn2learn): PyTorch元学习框架。http:\u002F\u002Flearn2learn.net\n123. \u003Ckbd>1000-\u003C\u002Fkbd> [torchbeast](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Ftorchbeast): 分布式强化学习的PyTorch平台。\n124. 
\u003Ckbd>1100+\u003C\u002Fkbd> [higher](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fhigher): higher 是一个PyTorch库，允许用户获得跨越训练循环而不是单个训练步骤的损失的高阶梯度。\n125. \u003Ckbd>null\u003C\u002Fkbd> [Torchelie](https:\u002F\u002Fgithub.com\u002FVermeille\u002FTorchelie\u002F): Torchélie 是面向PyTorch的一系列工具函数、层、损失、模型、训练器等的合集。 https:\u002F\u002Ftorchelie.readthedocs.org\u002F\n126. \u003Ckbd>1000-\u003C\u002Fkbd> [CrypTen](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FCrypTen): CrypTen 是一个隐私保护机器学习框架，它使用PyTorch编写，允许研究人员和开发人员使用加密数据训练模型。CrypTen目前支持将安全的多方计算（[Secure Multiparty Computation](https:\u002F\u002Fen.wikipedia.org\u002Fwiki\u002FSecure_multi-party_computation)）作为其加密机制。\n127. \u003Ckbd>1000+\u003C\u002Fkbd> [cvxpylayers](https:\u002F\u002Fgithub.com\u002Fcvxgrp\u002Fcvxpylayers): cvxpylayers 是一个 Python 库，用于在PyTorch中构造可微凸优化层。\n128. \u003Ckbd>1000+\u003C\u002Fkbd> [RepDistiller](https:\u002F\u002Fgithub.com\u002FHobbitLong\u002FRepDistiller): 对比表示蒸馏（CRD）和最新知识蒸馏方法的基准。\n129. \u003Ckbd>2500+\u003C\u002Fkbd> [kaolin](https:\u002F\u002Fgithub.com\u002FNVIDIAGameWorks\u002Fkaolin): 一个旨在加速3D深度学习研究的PyTorch库。\n130. \u003Ckbd>1000-\u003C\u002Fkbd> [PySNN](https:\u002F\u002Fgithub.com\u002FBasBuller\u002FPySNN): 高效的尖峰神经网络框架，建立在PyTorch之上，用于GPU加速。\n131. \u003Ckbd>1000-\u003C\u002Fkbd> [sparktorch](https:\u002F\u002Fgithub.com\u002Fdmmiller612\u002Fsparktorch): 在 Apache Spark 上训练和运行 PyTorch 模型。\n132. \u003Ckbd>3400+\u003C\u002Fkbd> [pytorch-metric-learning](https:\u002F\u002Fgithub.com\u002FKevinMusgrave\u002Fpytorch-metric-learning): 在应用程序中使用度量学习的最简单方法。模块化，灵活，可扩展。用 PyTorch 构建。\n133. \u003Ckbd>1000-\u003C\u002Fkbd> [autonomous-learning-library](https:\u002F\u002Fgithub.com\u002Fcpnota\u002Fautonomous-learning-library): 用于建立深度强化学习代理的 PyTorch 库。\n134. \u003Ckbd>1000-\u003C\u002Fkbd> [flambe](https:\u002F\u002Fgithub.com\u002Fasappresearch\u002Fflambe): 一个用于加速研究及其生产路径的ML框架。https:\u002F\u002Fflambe.ai\n135. 
\u003Ckbd>1900+\u003C\u002Fkbd> [pytorch-optimizer](https:\u002F\u002Fgithub.com\u002Fjettify\u002Fpytorch-optimizer): Collections of modern optimization algorithms for PyTorch, includes: AccSGD, AdaBound, AdaMod, DiffGrad, Lamb, RAdam, Yogi.\n136. \u003Ckbd>2200+\u003C\u002Fkbd> [PyTorch-VAE](https:\u002F\u002Fgithub.com\u002FAntixK\u002FPyTorch-VAE): A Collection of Variational Autoencoders (VAE) in PyTorch.\n137. \u003Ckbd>16700+\u003C\u002Fkbd> [ray](https:\u002F\u002Fgithub.com\u002Fray-project\u002Fray): A fast and simple framework for building and running分布式应用。Ray is packaged with RLlib，a scalable reinforcement learning library，and Tune，a scalable hyperparameter tuning library。 ray.io\n138. \u003Ckbd>1000-\u003C\u002Fkbd> [Pytorch Geometric Temporal](https:\u002F\u002Fgithub.com\u002Fbenedekrozemberczki\u002Fpytorch_geometric_temporal): A temporal extension library for PyTorch Geometric。\n139. \u003Ckbd>1000-\u003C\u002Fkbd> [Poutyne](https:\u002F\u002Fgithub.com\u002FGRAAL-Research\u002Fpoutyne): A Keras-like framework for PyTorch that handles much of the boilerplating code needed to train neural networks。\n140. \u003Ckbd>1000-\u003C\u002Fkbd> [Pytorch-Toolbox](https:\u002F\u002Fgithub.com\u002FPistonY\u002Ftorch-toolbox): This is toolbox project for Pytorch。 Aiming to make you write Pytorch code more easier，readable and concise。\n141. \u003Ckbd>1000-\u003C\u002Fkbd> [Pytorch-contrib](https:\u002F\u002Fgithub.com\u002Fpytorch\u002Fcontrib): It contains reviewed implementations of ideas from recent machine learning papers。\n142. \u003Ckbd>6200+\u003C\u002Fkbd> [EfficientNet PyTorch](https:\u002F\u002Fgithub.com\u002Flukemelas\u002FEfficientNet-PyTorch): It contains an op-for-op PyTorch reimplementation of EfficientNet，along with pre-trained models and examples。\n143. 
\u003Ckbd>1300+\u003C\u002Fkbd> [PyTorch\u002FXLA](https:\u002F\u002Fgithub.com\u002Fpytorch\u002Fxla): PyTorch\u002FXLA is a Python package that uses the XLA deep learning compiler to connect the PyTorch deep learning framework and Cloud TPUs。\n144. \u003Ckbd>1000-\u003C\u002Fkbd> [webdataset](https:\u002F\u002Fgithub.com\u002Ftmbdev\u002Fwebdataset): WebDataset is a PyTorch Dataset (IterableDataset) implementation providing efficient access to datasets stored in POSIX tar archives。\n145. \u003Ckbd>1000-\u003C\u002Fkbd> [volksdep](https:\u002F\u002Fgithub.com\u002FMedia-Smart\u002Fvolksdep): volksdep is an open-source toolbox for deploying and accelerating PyTorch，Onnx和 Tensorflow models with TensorRT。\n146. \u003Ckbd>1700+\u003C\u002Fkbd> [PyTorch-StudioGAN](https:\u002F\u002Fgithub.com\u002FPOSTECH-CVLab\u002FPyTorch-StudioGAN): StudioGAN is a Pytorch library providing implementations of representative Generative Adversarial Networks (GANs) for conditional\u002Funconditional image generation。 StudioGAN aims to offer an identical playground for modern GANs so that machine learning researchers can readily compare and analyze a new idea。\n147. \u003Ckbd>null\u003C\u002Fkbd> [torchdrift](https:\u002F\u002Fgithub.com\u002Ftorchdrift\u002Ftorchdrift\u002F): drift detection library\n148. \u003Ckbd>1600+\u003C\u002Fkbd> [accelerate](https:\u002F\u002Fgithub.com\u002Fhuggingface\u002Faccelerate) : A simple way to train and use PyTorch models with multi-GPU，TPU，mixed-precision。\n149. \u003Ckbd>1000-\u003C\u002Fkbd> [lightning-transformers](https:\u002F\u002Fgithub.com\u002FPyTorchLightning\u002Flightning-transformers): Flexible interface for high-performance research using SOTA Transformers leveraging Pytorch Lightning，Transformers，and Hydra。\n\n## 教程、书籍和示例｜教程 & 书籍 & 示例\n\n1. \u003Ckbd>4200+\u003C\u002Fkbd> [Practical Pytorch](https:\u002F\u002Fgithub.com\u002Fspro\u002Fpractical-pytorch)**：该教程对不同的RNN模型进行了解释。\n2. 
[DeepLearningForNLPInPytorch](https:\u002F\u002Fpytorch.org\u002Ftutorials\u002Fbeginner\u002Fdeep_learning_nlp_tutorial.html)：IPython Notebook深度学习教程，包含对自然语言处理的强调。\n3. \u003Ckbd>21300+\u003C\u002Fkbd> [pytorch-tutorial](https:\u002F\u002Fgithub.com\u002Fyunjey\u002Fpytorch-tutorial)：面向研究人员的深度学习教程，其中大部分模型的实现代码都少于30行。\n4. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-exercises](https:\u002F\u002Fgithub.com\u002Fkeon\u002Fpytorch-exercises)：PyTorch练习集合。\n5. \u003Ckbd>5200+\u003C\u002Fkbd> [pytorch tutorials](https:\u002F\u002Fgithub.com\u002Fpytorch\u002Ftutorials)：各种PyTorch教程。\n6. \u003Ckbd>16500+\u003C\u002Fkbd> [pytorch examples](https:\u002F\u002Fgithub.com\u002Fpytorch\u002Fexamples)：PyTorch使用示例，应用场景包括视觉、文本、强化学习等。\n7. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch practice](https:\u002F\u002Fgithub.com\u002Fnapsternxg\u002Fpytorch-practice)：PyTorch示例。  \n8. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch mini tutorials](https:\u002F\u002Fgithub.com\u002Fvinhkhuc\u002FPyTorch-Mini-Tutorials)：PyTorch极简教程，改编自Alec Radford的[Theano教程](https:\u002F\u002Fgithub.com\u002FNewmu\u002FTheano-Tutorials)。\n9. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch text classification](https:\u002F\u002Fgithub.com\u002Fxiayandi\u002FPytorch_text_classification)：PyTorch实现基于CNN的文本分类。\n10. \u003Ckbd>1000-\u003C\u002Fkbd> [cats vs dogs](https:\u002F\u002Fgithub.com\u002Fdesimone\u002Fpytorch-cat-vs-dogs)：Kaggle竞赛Dogs vs. Cats Redux: Kernels Edition的网络微调示例。\n11. \u003Ckbd>1000-\u003C\u002Fkbd> [convnet](https:\u002F\u002Fgithub.com\u002Feladhoffer\u002FconvNet.pytorch)：深度卷积网络在不同数据集(ImageNet、Cifar10、Cifar100、MNIST)上的完整训练示例。\n12. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-generative-adversarial-networks](https:\u002F\u002Fgithub.com\u002Fmailmahee\u002Fpytorch-generative-adversarial-networks)：一个简单的对抗生成网络(GAN)。\n13. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch containers](https:\u002F\u002Fgithub.com\u002Famdegroot\u002Fpytorch-containers)：PyTorch中简化的Torch容器。\n14. 
\u003Ckbd>1000-\u003C\u002Fkbd> [T-SNE in pytorch](https:\u002F\u002Fgithub.com\u002Fcemoody\u002Ftopicsne)：t-SNE实验。\n15. \u003Ckbd>1000-\u003C\u002Fkbd> [AAE_pytorch](https:\u002F\u002Fgithub.com\u002Ffducau\u002FAAE_pytorch)：PyTorch版对抗自编码器。\n16. \u003Ckbd>1000-\u003C\u002Fkbd> [Kind_PyTorch_Tutorial](https:\u002F\u002Fgithub.com\u002FGunhoChoi\u002FKind_PyTorch_Tutorial)：PyTorch新手教程。  \n17. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-poetry-gen](https:\u002F\u002Fgithub.com\u002Fjustdark\u002Fpytorch-poetry-gen)：基于PyTorch的char-RNN（字符级循环神经网络）。  \n18. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-REINFORCE](https:\u002F\u002Fgithub.com\u002FJamesChuanggg\u002Fpytorch-REINFORCE)：PyTorch实现了OpenAI gym下离散和连续控制的REINFORCE。\n19. \u003Ckbd>6100+\u003C\u002Fkbd> [PyTorch-Tutorial](https:\u002F\u002Fgithub.com\u002FMorvanZhou\u002FPyTorch-Tutorial)**：简单而快速地搭建你自己的神经网络。 https:\u002F\u002Fmorvanzhou.github.io\u002Ftutorials\u002F\n20. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-intro](https:\u002F\u002Fgithub.com\u002Fjoansj\u002Fpytorch-intro)：演示如何在PyTorch中实现CNNs和RNNs。\n21. \u003Ckbd>1300+\u003C\u002Fkbd> [pytorch-classification](https:\u002F\u002Fgithub.com\u002Fbearpaw\u002Fpytorch-classification)：一个CIFAR-10\u002F100和ImageNet数据集上的分类框架。\n22. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch_notebooks - hardmaru](https:\u002F\u002Fgithub.com\u002Fhardmaru\u002Fpytorch_notebooks)：用NumPy和PyTorch编写的随机教程。\n23. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch_tutoria-quick](https:\u002F\u002Fgithub.com\u002Fsoravux\u002Fpytorch_tutorial)：PyTorch介绍和教程。面向计算机视觉、图形和机器学习领域的研究人员，要求对神经网络理论知识和常用神经网络框架由基本的了解。\n24. \u003Ckbd>1000-\u003C\u002Fkbd> [Pytorch_fine_tuning_Tutorial](https:\u002F\u002Fgithub.com\u002FSpandan-Madan\u002FPytorch_fine_tuning_Tutorial)：在PyTorch中进行微调或转移学习的简短教程。\n25. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch_exercises](https:\u002F\u002Fgithub.com\u002FKyubyong\u002Fpytorch_exercises)：PyTorch练习。\n26. 
\u003Ckbd>1000-\u003C\u002Fkbd> [traffic-sign-detection](https:\u002F\u002Fgithub.com\u002Fsoumith\u002Ftraffic-sign-detection-homework)：纽约大学2018年计算机视觉秋季课程示例。\n27. \u003Ckbd>1000-\u003C\u002Fkbd> [mss_pytorch](https:\u002F\u002Fgithub.com\u002FJs-Mim\u002Fmss_pytorch)：无需进行滤波后处理，利用循环推断算法实现歌唱语音分离 - PyTorch实现。 演示：js-mim.github.io\u002Fmss_pytorch\n28. \u003Ckbd>2600+\u003C\u002Fkbd> [DeepNLP-models-Pytorch](https:\u002F\u002Fgithub.com\u002FDSKSD\u002FDeepNLP-models-Pytorch) cs-224n课程中的各种深度NLP模型的PyTorch实现。(Stanford Univ：NLP与深度学习)\n29. \u003Ckbd>1000-\u003C\u002Fkbd> [Mila introductory tutorials](https:\u002F\u002Fgithub.com\u002Fmila-udem\u002Fwelcome_tutorials)：面向MILA新生的各种教程。（[MILA：加拿大蒙特利尔人工智能研究中心](https:\u002F\u002Fmila.quebec\u002Fen\u002Fmila\u002F)）\n30. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch.rl.learning](https:\u002F\u002Fgithub.com\u002Fmoskomule\u002Fpytorch.rl.learning)：使用PyTorch学习强化学习。\n31. \u003Ckbd>1000-\u003C\u002Fkbd> [minimal-seq2seq](https:\u002F\u002Fgithub.com\u002Fkeon\u002Fseq2seq)：关注神经机器翻译的最小Seq2Seq模型。\n32. \u003Ckbd>1000-\u003C\u002Fkbd> [tensorly-notebooks](https:\u002F\u002Fgithub.com\u002FJeanKossaifi\u002Ftensorly-notebooks)：利用Python和TensorLy实现张量方法。 tensorly.github.io\u002Fdev\n33. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch_bits](https:\u002F\u002Fgithub.com\u002Fjpeg729\u002Fpytorch_bits)：时序预测的相关示例。\n34. \u003Ckbd>1000-\u003C\u002Fkbd> [skip-thoughts](https:\u002F\u002Fgithub.com\u002Fsanyam5\u002Fskip-thoughts)：PyTorch实现Skip-Thought词向量模型。\n35. \u003Ckbd>1000-\u003C\u002Fkbd> [video-caption-pytorch](https:\u002F\u002Fgithub.com\u002FxiadingZ\u002Fvideo-caption-pytorch)：利用PyTorch为视频添加字幕。\n36. \u003Ckbd>1000-\u003C\u002Fkbd> [Capsule-Network-Tutorial](https:\u002F\u002Fgithub.com\u002Fhiggsfield\u002FCapsule-Network-Tutorial)：简单易学的胶囊网络（Capsule Network）教程。\n37. 
\u003Ckbd>2100+\u003C\u002Fkbd> [code-of-learn-deep-learning-with-pytorch](https:\u002F\u002Fgithub.com\u002FSherlockLiao\u002Fcode-of-learn-deep-learning-with-pytorch)：《深度学习入门之PyTorch》书中代码。 item.jd.com\u002F17915495606.html\n38. \u003Ckbd>2300+\u003C\u002Fkbd> [RL-Adventure](https:\u002F\u002Fgithub.com\u002Fhiggsfield\u002FRL-Adventure)：Pytorch版Deep Q Learning教程，简单、易学、代码可读性强，包含DQN\u002FDDQN\u002FPrioritized replay\u002Fnoisy networks\u002Fdistributional values\u002FRainbow\u002F层次化RL的PyTorch实现。\n39. \u003Ckbd>1000-\u003C\u002Fkbd> [accelerated_dl_pytorch](https:\u002F\u002Fgithub.com\u002Fhpcgarage\u002Faccelerated_dl_pytorch)：Jupyter Day Atlanta II会议上的加速深度学习算法，包含PyTorch教程和会议演讲文稿。\n40. \u003Ckbd>2500+\u003C\u002Fkbd> [RL-Adventure-2](https:\u002F\u002Fgithub.com\u002Fhiggsfield\u002FRL-Adventure-2)：以下内容的PyTorch0.4版本教程：actor critic\u002F近端策略优化\u002Facer\u002FDDPG\u002F双人决斗DDPG\u002F软actor critic\u002F生成式对抗模仿学习\u002F事后经验回放。\n41. [Generative Adversarial Networks (GANs) in 50 lines of code (PyTorch)](https:\u002F\u002Fmedium.com\u002F@devnag\u002Fgenerative-adversarial-networks-gans-in-50-lines-of-code-pytorch-e81b79659e3f)：50行生成对抗网络。\n42. [adversarial-autoencoders-with-pytorch](https:\u002F\u002Fblog.paperspace.com\u002Fadversarial-autoencoders-with-pytorch\u002F)：PyTorch对抗自编码器。\n43. [transfer learning using pytorch](https:\u002F\u002Fmedium.com\u002F@vishnuvig\u002Ftransfer-learning-using-pytorch-4c3475f4495)：PyTorch迁移学习。\n44. [how-to-implement-a-yolo-object-detector-in-pytorch](https:\u002F\u002Fblog.paperspace.com\u002Fhow-to-implement-a-yolo-object-detector-in-pytorch\u002F)：如何使用PyTorch实现一个YOLO (v3)物体检测器。\n45. [pytorch-for-recommenders-101](http:\u002F\u002Fblog.fastforwardlabs.com\u002F2018\u002F04\u002F10\u002Fpytorch-for-recommenders-101.html)：使用PyTorch构建推荐系统。\n46. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-for-numpy-users](https:\u002F\u002Fgithub.com\u002Fwkentaro\u002Fpytorch-for-numpy-users)：面向Numpy用户的PyTorch。\n47. 
[PyTorch Tutorial](http:\u002F\u002Fwww.pytorchtutorial.com\u002F)：PyTorch中文教程（PyTorch中文网）。\n48. \u003Ckbd>1000-\u003C\u002Fkbd> [grokking-pytorch](https:\u002F\u002Fgithub.com\u002FKaixhin\u002Fgrokking-pytorch)：手把手教你学会PyTorch。\n49. \u003Ckbd>5200+\u003C\u002Fkbd> [PyTorch-Deep-Learning-Minicourse](https:\u002F\u002Fgithub.com\u002FAtcold\u002FPyTorch-Deep-Learning-Minicourse)：PyTorch深度学习微型课程。\n50. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-custom-dataset-examples](https:\u002F\u002Fgithub.com\u002Futkuozbulak\u002Fpytorch-custom-dataset-examples)：PyTorch的一些自定义数据集示例。\n51. [Multiplicative LSTM for sequence-based Recommenders](https:\u002F\u002Fflorianwilhelm.info\u002F2018\u002F08\u002Fmultiplicative_LSTM_for_sequence_based_recos\u002F)：面向基于序列的推荐器的乘法LSTM。\u002F基于LSTM的序列推荐实现。\n52. \u003Ckbd>1000-\u003C\u002Fkbd> [deeplearning.ai-pytorch](https:\u002F\u002Fgithub.com\u002Ffurkanu\u002Fdeeplearning.ai-pytorch)：Coursera深度学习课程(deeplearning.ai)任务的PyTorch实现。\n53. \u003Ckbd>1000-\u003C\u002Fkbd> [MNIST_Pytorch_python_and_capi](https:\u002F\u002Fgithub.com\u002Ftobiascz\u002FMNIST_Pytorch_python_and_capi)：示例：如何在Python中训练一个MNIST网络并在C++中用PyTorch1.0运行。\n54. \u003Ckbd>1000-\u003C\u002Fkbd> [torch_light](https:\u002F\u002Fgithub.com\u002Fne7ermore\u002Ftorch_light)：教程和示例，包括强化学习、NLP、CV。Logistic、CNN、RNN、LSTM等神经网络模型由数行代码实现，一些高级示例由复杂模型实现。\n55. \u003Ckbd>1000-\u003C\u002Fkbd> [portrain-gan](https:\u002F\u002Fgithub.com\u002Fdribnet\u002Fportrain-gan)：编码（解码尚未实现）art-DCGAN生成的肖像油画。\n56. \u003Ckbd>1000-\u003C\u002Fkbd> [mri-analysis-pytorch](https:\u002F\u002Fgithub.com\u002Fomarsar\u002Fmri-analysis-pytorch)：使用PyTorch和MedicalTorch进行核磁共振（MRI）分析。\n57. \u003Ckbd>1000-\u003C\u002Fkbd> [cifar10-fast](https:\u002F\u002Fgithub.com\u002Fdavidcpage\u002Fcifar10-fast)：在79秒内完成CIFAR10数据集上的ResNet模型的训练并达到94%的测试准确率，相关内容参见 [blog series](https:\u002F\u002Fwww.myrtle.ai\u002F2018\u002F09\u002F24\u002Fhow_to_train_your_resnet\u002F)。\n58. 
[Intro to Deep Learning with PyTorch](https:\u002F\u002Fin.udacity.com\u002Fcourse\u002Fdeep-learning-pytorch--ud188)：Udacity和Facebook联合推出的免费课程，包括对PyTorch的介绍和对PyTorch作者之一的Soumith Chintala的采访。\n59. \u003Ckbd>2900+\u003C\u002Fkbd> [pytorch-sentiment-analysis](https:\u002F\u002Fgithub.com\u002Fbentrevett\u002Fpytorch-sentiment-analysis)：PyTorch和TorchText语义分析教程。\n60. \u003Ckbd>11800+\u003C\u002Fkbd> [pytorch-image-models](https:\u002F\u002Fgithub.com\u002Frwightman\u002Fpytorch-image-models)：PyTorch图像模型、脚本、与训练权重—— (SE)ResNet\u002FResNeXT、DPN、EfficientNet、MobileNet-V3\u002FV2\u002FV1、MNASNet、Single-Path NAS、FBNet等等。\n61. \u003Ckbd>1000-\u003C\u002Fkbd> [CIFAR-ZOO](https:\u002F\u002Fgithub.com\u002FBIGBALLON\u002FCIFAR-ZOO)：以CIFAR为基准的多种CNN架构的PyTorch实现。\n62. \u003Ckbd>3700+\u003C\u002Fkbd> [d2l-pytorch](https:\u002F\u002Fgithub.com\u002Fdsgiitr\u002Fd2l-pytorch)：本项目尝试复制《动手深度学习（Dive into Deep Learning）》(www.d2l.ai)一书，将MXnet代码改编为PyTorch版。\n63. \u003Ckbd>1000-\u003C\u002Fkbd> [thinking-in-tensors-writing-in-pytorch](https:\u002F\u002Fgithub.com\u002Fstared\u002Fthinking-in-tensors-writing-in-pytorch)：张量思维，PyTorch实践 (深度学习入门)。\n64. \u003Ckbd>1000-\u003C\u002Fkbd> [NER-BERT-pytorch](https:\u002F\u002Fgithub.com\u002Flemonhu\u002FNER-BERT-pytorch)：命名实体识别的PyTorch解决方案，使用了Google AI的预训练BERT模型。\n65. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-sync-batchnorm-example](https:\u002F\u002Fgithub.com\u002Fdougsouza\u002Fpytorch-sync-batchnorm-example)：如何在PyTorch中使用交叉复制（Cross Replica）\u002F同步批标准化（Synchronized Batchnorm）。\n66. \u003Ckbd>1000-\u003C\u002Fkbd> [SentimentAnalysis](https:\u002F\u002Fgithub.com\u002Fbarissayil\u002FSentimentAnalysis)：情绪分析神经网络，在斯坦福情绪树库上用微调BERT训练得到。\n67. \u003Ckbd>1100+\u003C\u002Fkbd> [pytorch-cpp](https:\u002F\u002Fgithub.com\u002Fprabhuomkar\u002Fpytorch-cpp)：为深度学习研究者打造，用C++实现PyTorch教程内容（基于[pytorch-tutorial](https:\u002F\u002Fgithub.com\u002Fyunjey\u002Fpytorch-tutorial)的Python教程）。\n68. 
[Deep Learning with PyTorch: Zero to GANs](https:\u002F\u002Fjovian.ml\u002Faakashns\u002Fcollections\u002Fdeep-learning-with-pytorch)：Deep Learning with PyTorch ([视频](https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=GIsg-ZUy0MY))系列在线课程的相关代码。\n69. [Deep Learning with PyTorch](https:\u002F\u002Fwww.manning.com\u002Fbooks\u002Fdeep-learning-with-pytorch)：Deep Learning with PyTorch教你如何用Python和PyTorch实现深度学习算法，书中包括一个案例研究：构建一个能够通过CT扫描检测恶性肺肿瘤的算法。\n70. [Serverless Machine Learning in Action with PyTorch and AWS](https:\u002F\u002Fwww.manning.com\u002Fbooks\u002Fserverless-machine-learning-in-action)：Serverless Machine Learning in Action是一本指南，教你如何利用AWS、Azure或GCP等主要云服务提供商的无服务器功能，将你的实验性PyTorch机器学习代码投入生产。\n71. \u003Ckbd>3200+\u003C\u002Fkbd> [LabML NN](https:\u002F\u002Fgithub.com\u002Flab-ml\u002Fnn)：一组PyTorch实现的神经网络架构和算法，并附有并排注释。\n\n## 论文实现｜论文实现\n\n1. \u003Ckbd>1000-\u003C\u002Fkbd> [google_evolution](https:\u002F\u002Fgithub.com\u002Fneuralix\u002Fgoogle_evolution): 实现了 [Large-scale evolution of image classifiers](https:\u002F\u002Farxiv.org\u002Fabs\u002F1703.01041) 一文的结果网络之一。\n2. \u003Ckbd>1000-\u003C\u002Fkbd> [pyscatwave](https:\u002F\u002Fgithub.com\u002Fedouardoyallon\u002Fpyscatwave): 基于CuPy\u002FPyTorch的快速散射变换，[Scaling the Scattering Transform: Deep Hybrid Networks](https:\u002F\u002Farxiv.org\u002Fabs\u002F1703.08961)\n3. \u003Ckbd>1000-\u003C\u002Fkbd> [scalingscattering](https:\u002F\u002Fgithub.com\u002Fedouardoyallon\u002Fscalingscattering): 该仓库包含 [Scaling The Scattering Transform : Deep Hybrid Networks](https:\u002F\u002Farxiv.org\u002Fabs\u002F1703.08961) 一文中的实验。  \n4. \u003Ckbd>1000-\u003C\u002Fkbd> [deep-auto-punctuation](https:\u002F\u002Fgithub.com\u002Fepisodeyang\u002Fdeep-auto-punctuation): 通过逐字符学习实现自动添加标点。\n5. 
\u003Ckbd>1100+\u003C\u002Fkbd> [Realtime_Multi-Person_Pose_Estimation](https:\u002F\u002Fgithub.com\u002Ftensorboy\u002Fpytorch_Realtime_Multi-Person_Pose_Estimation): 基于PyTorch的多人人体姿态估计，[原始代码](https:\u002F\u002Fgithub.com\u002FZheC\u002FRealtime_Multi-Person_Pose_Estimation)。\n6. \u003Ckbd>1000-\u003C\u002Fkbd> [PyTorch-value-iteration-networks](https:\u002F\u002Fgithub.com\u002Fonlytailei\u002FPyTorch-value-iteration-networks): PyTorch实现价值迭代网络（[Value Iteration Networks](https:\u002F\u002Farxiv.org\u002Fabs\u002F1602.02867)）（NIPS2016最佳论文奖）。\n7. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch_Highway](https:\u002F\u002Fgithub.com\u002Fanalvikingur\u002Fpytorch_Highway): PyTorch实现高速公路网络（[Highway Networks](https:\u002F\u002Farxiv.org\u002Fabs\u002F1505.00387)）。\n8. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch_NEG_loss](https:\u002F\u002Fgithub.com\u002Fanalvikingur\u002Fpytorch_NEG_loss): PyTorch实现负采样损失（[Negative Sampling Loss](https:\u002F\u002Farxiv.org\u002Fabs\u002F1310.4546)）。  \n9. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch_RVAE](https:\u002F\u002Fgithub.com\u002Fanalvikingur\u002Fpytorch_RVAE): 用PyTorch实现的产生序列数据的递归变分自动编码器，相关论文：[Generating Sentences from a Continuous Space](https:\u002F\u002Farxiv.org\u002Fabs\u002F1511.06349#)，[Character-Aware Neural Language Models](https:\u002F\u002Farxiv.org\u002Fabs\u002F1508.06615)。\n10. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch_TDNN](https:\u002F\u002Fgithub.com\u002Fanalvikingur\u002Fpytorch_TDNN): 用PyTorch实现时间延迟神经网络（Time Delayed NN）。\n11. \u003Ckbd>1000-\u003C\u002Fkbd> [eve.pytorch](https:\u002F\u002Fgithub.com\u002Fmoskomule\u002Feve.pytorch): 一个Eve优化器的实现，相关论文：[Imploving Stochastic Gradient Descent with Feedback](https:\u002F\u002Farxiv.org\u002Fabs\u002F1611.01505)。  \n12. \u003Ckbd>1000-\u003C\u002Fkbd> [e2e-model-learning](https:\u002F\u002Fgithub.com\u002Flocuslab\u002Fe2e-model-learning): 随机优化中的基于任务的端到端模型，https:\u002F\u002Farxiv.org\u002Fabs\u002F1703.04529 。\n13. 
\u003Ckbd>1000-\u003C\u002Fkbd> [pix2pix-pytorch](https:\u002F\u002Fgithub.com\u002Fmrzhu-cool\u002Fpix2pix-pytorch): PyTorch实现“基于条件对抗网络的图像到图像翻译”。 论文：[Image-to-Image Translation Using Conditional Adversarial Networks](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1611.07004v1.pdf)。\n14. \u003Ckbd>4300+\u003C\u002Fkbd> [Single Shot MultiBox Detector](https:\u002F\u002Fgithub.com\u002Famdegroot\u002Fssd.pytorch): 单发多盒探测器，论文：[Single Shot MultiBox Detector](http:\u002F\u002Farxiv.org\u002Fabs\u002F1512.02325)。\n15. \u003Ckbd>1000-\u003C\u002Fkbd> [DiscoGAN](https:\u002F\u002Fgithub.com\u002Fcarpedm20\u002FDiscoGAN-pytorch): 学习利用生成性对抗网络发现跨域关系。论文：[Learning to Discover Cross-Domain Relations with Generative Adversarial Networks](https:\u002F\u002Farxiv.org\u002Fabs\u002F1703.05192)。  \n16. \u003Ckbd>1000-\u003C\u002Fkbd> [official DiscoGAN implementation](https:\u002F\u002Fgithub.com\u002FSKTBrain\u002FDiscoGAN): 官方实现“学习利用生成性对抗网络发现跨域关系”。 论文：[Learning to Discover Cross-Domain Relations with Generative Adversarial Networks](https:\u002F\u002Farxiv.org\u002Fabs\u002F1703.05192)。  \n17. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-es](https:\u002F\u002Fgithub.com\u002Fatgambardella\u002Fpytorch-es): 进化策略。论文：[Evolution Strategies as a Scalable Alternative to Reinforcement Learning](https:\u002F\u002Farxiv.org\u002Fabs\u002F1703.03864) .  \n18. \u003Ckbd>1000-\u003C\u002Fkbd> [piwise](https:\u002F\u002Fgithub.com\u002Fbodokaiser\u002Fpiwise): 使用PyTorch对VOC2012数据集进行像素切割。\n19. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-dqn](https:\u002F\u002Fgithub.com\u002Ftransedward\u002Fpytorch-dqn): 深度Q学习网络。  \n20. \u003Ckbd>1000+\u003C\u002Fkbd> [neuraltalk2-pytorch](https:\u002F\u002Fgithub.com\u002Fruotianluo\u002Fneuraltalk2.pytorch): PyTorch图像字幕代码库(在分支“with_finetune”中有可微调CNN)。\n21. \u003Ckbd>1000-\u003C\u002Fkbd> [vnet.pytorch](https:\u002F\u002Fgithub.com\u002Fmattmacy\u002Fvnet.pytorch): PyTorch实现V-Net：全卷积神经网络在体医学图像分割中的应用。 http:\u002F\u002Fmattmacy.io\u002Fvnet.pytorch\u002F\n22. 
\u003Ckbd>1400+\u003C\u002Fkbd> [pytorch-fcn](https:\u002F\u002Fgithub.com\u002Fwkentaro\u002Fpytorch-fcn): PyTorch 实现完全卷积网络。 \n23. \u003Ckbd>1000-\u003C\u002Fkbd> [WideResNets](https:\u002F\u002Fgithub.com\u002Fxternalz\u002FWideResNet-pytorch): PyTorch实现WideResNets。该实现比官方Torch实现花费更少的GPU内存。实现: https:\u002F\u002Fgithub.com\u002Fszagoruyko\u002Fwide-residual-networks .\n24. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch_highway_networks](https:\u002F\u002Fgithub.com\u002Fc0nn3r\u002Fpytorch_highway_networks): PyTorch实现高速公路网络。  \n25. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-NeuCom](https:\u002F\u002Fgithub.com\u002Fypxie\u002Fpytorch-NeuCom): Pytorch实现DeepMind的可微神经计算机[论文](http:\u002F\u002Fwww.nature.com\u002Farticles\u002Fnature20101.epdf?author_access_token=ImTXBI8aWbYxYQ51Plys8NRgN0jAjWel9jnR3ZoTv0MggmpDmwljGswxVdeocYSurJ3hxupzWuRNeGvvXnoO8o4jTJcnAyhGuZzXJ1GEaD-Z7E6X_a9R-xqJ9TfJWBqz)。\n26. \u003Ckbd>1000-\u003C\u002Fkbd> [captionGen](https:\u002F\u002Fgithub.com\u002Feladhoffer\u002FcaptionGen): 使用PyTorch为图像生成标注。\n27. \u003Ckbd>1100+\u003C\u002Fkbd> [AnimeGAN](https:\u002F\u002Fgithub.com\u002Fjayleicn\u002FanimeGAN): 生成对抗网络的PyTorch简单实现，关注于动漫脸谱绘画。\n28. \u003Ckbd>1000-\u003C\u002Fkbd> [Cnn-text classification](https:\u002F\u002Fgithub.com\u002FShawn1993\u002Fcnn-text-classification-pytorch): PyTorch 实现 [Kim的基于卷积神经网络的句子分类](https:\u002F\u002Farxiv.org\u002Fabs\u002F1408.5882) 论文。\n29. \u003Ckbd>1700+\u003C\u002Fkbd> [deepspeech2](https:\u002F\u002Fgithub.com\u002FSeanNaren\u002Fdeepspeech.pytorch): 使用 Baidu Warp-CTC 实现DeepSpeech2。创造一个基于 DeepSpeech2 架构的网络，用 CTC 激活函数训练。\n30. \u003Ckbd>1000-\u003C\u002Fkbd> [seq2seq](https:\u002F\u002Fgithub.com\u002FMaximumEntropy\u002FSeq2Seq-PyTorch): 包含PyTorch中的Seq2Seq模型。  \n31. 
\u003Ckbd>1000-\u003C\u002Fkbd> [Asynchronous Advantage Actor-Critic in PyTorch](https:\u002F\u002Fgithub.com\u002Frarilurelo\u002Fpytorch_a3c): PyTorch实现A3C(Asynchronous Advantage Actor-Critic)，论文：[Asynchronous Methods for Deep Reinforcement Learning](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1602.01783v1.pdf)。由于 PyTorch 可以轻松地在多进程内控制共享内存，我们可以轻易实现A3C这样的异步算法。  \n32. \u003Ckbd>1000-\u003C\u002Fkbd> [densenet](https:\u002F\u002Fgithub.com\u002Fbamos\u002Fdensenet.pytorch): This is a PyTorch 实现 DenseNet-BC 架构，相关论文 [Densely Connected Convolutional Networks](https:\u002F\u002Farxiv.org\u002Fabs\u002F1608.06993)。该实现的 CIFAR-10+ 100层错误率为 4.77 增长率为 12。官方实现和许多第三方库的链接参见 [liuzhuang13\u002FDenseNet](https:\u002F\u002Fgithub.com\u002Fliuzhuang13\u002FDenseNet)。\n33. \u003Ckbd>1000-\u003C\u002Fkbd> [nninit](https:\u002F\u002Fgithub.com\u002Falykhantejani\u002Fnninit): PyTorch神经网络模块的权值初始化方案，这是 [nninit](https:\u002F\u002Fgithub.com\u002FKaixhin\u002Fnninit) 的流行端口。\n34. \u003Ckbd>1500+\u003C\u002Fkbd> [faster rcnn](https:\u002F\u002Fgithub.com\u002Flongcw\u002Ffaster_rcnn_pytorch): PyTorch 实现 Faster RCNN。该项目主要基于 py-faster-rcnn 和 TFFRCNN。更多关于 R-CNN 的细节请参考论文 Faster R-CNN：[Towards Real-Time Object Detection with Region Proposal Network](https:\u002F\u002Farxiv.org\u002Fabs\u002F1506.01497)。\n35. \u003Ckbd>1000-\u003C\u002Fkbd> [doomnet](https:\u002F\u002Fgithub.com\u002Fakolishchak\u002Fdoom-net-pytorch): PyTorch版Doom-net，实现了ViZDoom环境下的RL模型。  \n36. \u003Ckbd>1000-\u003C\u002Fkbd> [flownet](https:\u002F\u002Fgithub.com\u002FClementPinard\u002FFlowNetPytorch): 通过Dosovitskiy等完成FlowNet的Pytorch实现。\n37. \u003Ckbd>1000-\u003C\u002Fkbd> [sqeezenet](https:\u002F\u002Fgithub.com\u002Fgsp-27\u002Fpytorch_Squeezenet): 在CIFAR10数据集上用PyTorch实现Squeezenet模型，[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1602.07360)。\n38. 
\u003Ckbd>2800+\u003C\u002Fkbd> [WassersteinGAN](https:\u002F\u002Fgithub.com\u002Fmartinarjovsky\u002FWassersteinGAN): PyTorch实现[WassersteinGAN](https:\u002F\u002Farxiv.org\u002Fabs\u002F1701.07875)。\n39. \u003Ckbd>1000-\u003C\u002Fkbd> [optnet](https:\u002F\u002Fgithub.com\u002Flocuslab\u002Foptnet): 该仓库包含PyTorch源码，重现了论文[OptNet: Differentiable Optimization as a Layer in Neural Networks](https:\u002F\u002Farxiv.org\u002Fabs\u002F1703.00443)中的实验。  \n40. \u003Ckbd>1000-\u003C\u002Fkbd> [qp solver](https:\u002F\u002Fgithub.com\u002Flocuslab\u002Fqpth): PyTorch的一个快速和可微分的QP求解器。https:\u002F\u002Flocuslab.github.io\u002Fqpth\u002F\n41. \u003Ckbd>1000-\u003C\u002Fkbd> [Continuous Deep Q-Learning with Model-based Acceleration ](https:\u002F\u002Fgithub.com\u002Fikostrikov\u002Fpytorch-naf): [基于模型加速的连续深度Q学习](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1603.00748v1.pdf)的再实现。\n42. \u003Ckbd>1000-\u003C\u002Fkbd> [Learning to learn by gradient descent by gradient descent](https:\u002F\u002Fgithub.com\u002Fikostrikov\u002Fpytorch-meta-optimizer): PyTorch实现[Learning to learn by gradient descent by gradient descent](https:\u002F\u002Farxiv.org\u002Fabs\u002F1606.04474)。\n43. \u003Ckbd>1000-\u003C\u002Fkbd> [fast-neural-style](https:\u002F\u002Fgithub.com\u002Fdarkstar112358\u002Ffast-neural-style): PyTorch实现fast-neural-style，论文：[Perceptual Losses for Real-Time Style Transfer and Super-Resolution](https:\u002F\u002Farxiv.org\u002Fabs\u002F1603.08155)。\n44. \u003Ckbd>1000-\u003C\u002Fkbd> [PytorchNeuralStyleTransfer](https:\u002F\u002Fgithub.com\u002Fleongatys\u002FPytorchNeuralStyleTransfer): Pytorch中的神经风格转换。\n45. \u003Ckbd>1000-\u003C\u002Fkbd> [Fast Neural Style for Image Style Transform by Pytorch](https:\u002F\u002Fgithub.com\u002Fbengxy\u002FFastNeuralStyle): 使用快速神经风格进行图像风格转换。\n46. 
\u003Ckbd>1000-\u003C\u002Fkbd> [neural style transfer](https:\u002F\u002Fgithub.com\u002Falexis-jacq\u002FPytorch-Tutorials): 通过神经风格算法介绍PyTorch，[Neural-Style algorithm](https:\u002F\u002Farxiv.org\u002Fabs\u002F1508.06576)。\n47. \u003Ckbd>1000-\u003C\u002Fkbd> [VIN_PyTorch_Visdom](https:\u002F\u002Fgithub.com\u002Fzuoxingdong\u002FVIN_PyTorch_Visdom): PyTorch实现价值迭代网络(VIN):干净、简单、模块化。利用Visdom进行可视化。\n48. \u003Ckbd>1400+\u003C\u002Fkbd> [YOLO2](https:\u002F\u002Fgithub.com\u002Flongcw\u002Fyolo2-pytorch): PyTorch中的YOLOv2。\n49. \u003Ckbd>1200+\u003C\u002Fkbd> [attention-transfer](https:\u002F\u002Fgithub.com\u002Fszagoruyko\u002Fattention-transfer): 通过注意转移改善卷积网络，[ICLR2017会议论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1612.03928)。\n50. \u003Ckbd>1000-\u003C\u002Fkbd> [SVHNClassifier](https:\u002F\u002Fgithub.com\u002Fpotterhsu\u002FSVHNClassifier-PyTorch): PyTorch实现[基于深度卷积神经网络的街景图像多位数识别](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1312.6082.pdf)。\n51. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-deform-conv](https:\u002F\u002Fgithub.com\u002Foeway\u002Fpytorch-deform-conv): PyTorch实现可变形卷积(Deformable Convolution)。  \n52. \u003Ckbd>1000-\u003C\u002Fkbd> [BEGAN-pytorch](https:\u002F\u002Fgithub.com\u002Fcarpedm20\u002FBEGAN-pytorch): PyTorch实现[边界均衡生成对抗网络（BEGAN）](https:\u002F\u002Farxiv.org\u002Fabs\u002F1703.10717): Boundary Equilibrium Generative Adversarial Networks.  \n53. \u003Ckbd>1000-\u003C\u002Fkbd> [treelstm.pytorch](https:\u002F\u002Fgithub.com\u002Fdasguptar\u002Ftreelstm.pytorch): PyTorch实现树形结构LSTM。\n54. \u003Ckbd>1000-\u003C\u002Fkbd> [AGE](https:\u002F\u002Fgithub.com\u002FDmitryUlyanov\u002FAGE): 论文代码，原文：对抗生成编码器网络（[Adversarial Generator-Encoder Networks](http:\u002F\u002Fsites.skoltech.ru\u002Fapp\u002Fdata\u002Fuploads\u002Fsites\u002F25\u002F2017\u002F04\u002FAGE.pdf)）。\n55. \u003Ckbd>1000-\u003C\u002Fkbd> [ResNeXt.pytorch](https:\u002F\u002Fgithub.com\u002Fprlz77\u002FResNeXt.pytorch): 再现 ResNet-V3 (深度神经网络的聚集残差变换)。\n56. 
\u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-rl](https:\u002F\u002Fgithub.com\u002Fjingweiz\u002Fpytorch-rl): 基于PyTorch和Visdom的深度强化学习。\n57. \u003Ckbd>1000-\u003C\u002Fkbd> [Deep-Leafsnap](https:\u002F\u002Fgithub.com\u002Fsujithv28\u002FDeep-Leafsnap): 对比传统的计算机视觉方法，使用深度神经网络的[LeafSnap](https:\u002F\u002Fneerajkumar.org\u002Fbase\u002Fpapers\u002Fnk_eccv2012_leafsnap.pdf)能有效提高测试准确率。\n58. \u003Ckbd>15400+\u003C\u002Fkbd> [pytorch-CycleGAN-and-pix2pix](https:\u002F\u002Fgithub.com\u002Fjunyanz\u002Fpytorch-CycleGAN-and-pix2pix): PyTorch 实现图像风格迁移。\n59. \u003Ckbd>1000-\u003C\u002Fkbd> [A3C-PyTorch](https:\u002F\u002Fgithub.com\u002Fonlytailei\u002FA3C-PyTorch):PyTorch 实现 A3C(Advantage async actor-critic)算法。\n60. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-value-iteration-networks](https:\u002F\u002Fgithub.com\u002Fkentsommer\u002Fpytorch-value-iteration-networks): PyTorch实现价值迭代网络Value Iteration Networks (NIPS 2016 最佳论文)。  \n61. \u003Ckbd>1000-\u003C\u002Fkbd> [PyTorch-Style-Transfer](https:\u002F\u002Fgithub.com\u002Fzhanghang1989\u002FPyTorch-Style-Transfer): PyTorch实现实时转换多风格生成网络。\n62. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-deeplab-resnet](https:\u002F\u002Fgithub.com\u002Fisht7\u002Fpytorch-deeplab-resnet): PyTorch实现 [DeepLab resnet v2](https:\u002F\u002Farxiv.org\u002Fabs\u002F1606.00915)。\n63. \u003Ckbd>1100+\u003C\u002Fkbd> [pointnet.pytorch](https:\u002F\u002Fgithub.com\u002Ffxia22\u002Fpointnet.pytorch): PyTorch实现 \"PointNet: 基于深度学习的3D点分类和分割模型\" https:\u002F\u002Farxiv.org\u002Fabs\u002F1612.00593  \n64. \u003Ckbd>2100+\u003C\u002Fkbd> [pytorch-playground](https:\u002F\u002Fgithub.com\u002Faaron-xichen\u002Fpytorch-playground): 包含常见的预训练模型和数据集(MNIST, SVHN, CIFAR10, CIFAR100, STL10, AlexNet, VGG16, VGG19, ResNet, Inception, SqueezeNet)**.\n65. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-dnc](https:\u002F\u002Fgithub.com\u002Fjingweiz\u002Fpytorch-dnc): PyTorch\u002FVisdom实现的神经图灵机(NTM)&可微神经计算机(DNC)。\n66. 
\u003Ckbd>1000-\u003C\u002Fkbd> [pytorch_image_classifier](https:\u002F\u002Fgithub.com\u002Fjinfagang\u002Fpytorch_image_classifier): 使用PyTorch的最小但实用的图像分类器管道，在ResNet18上进行细化，在自己的小型数据集上获得99%的准确率。\n67. \u003Ckbd>1000-\u003C\u002Fkbd> [mnist-svhn-transfer](https:\u002F\u002Fgithub.com\u002Fyunjey\u002Fmnist-svhn-transfer): PyTorch实现CycleGAN和SGAN。\n68. \u003Ckbd>null\u003C\u002Fkbd> [pytorch-yolo2](https:\u002F\u002Fgithub.com\u002Fmarvis\u002Fpytorch-yolo2): pytorch-yolo2\n69. \u003Ckbd>1000-\u003C\u002Fkbd> [dni](https:\u002F\u002Fgithub.com\u002Fandrewliao11\u002Fdni.pytorch): PyTorch实现使用合成梯度的解耦神经接口，论文：[Decoupled Neural Interfaces using Synthetic Gradients](https:\u002F\u002Farxiv.org\u002Fabs\u002F1608.05343)。\n70. \u003Ckbd>1200+\u003C\u002Fkbd> [wgan-gp](https:\u002F\u002Fgithub.com\u002Fcaogang\u002Fwgan-gp): PyTorch实现论文\"[Improved Training of Wasserstein GANs](https:\u002F\u002Farxiv.org\u002Fabs\u002F1704.00028v3)\".\n71. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-seq2seq-intent-parsing](https:\u002F\u002Fgithub.com\u002Fspro\u002Fpytorch-seq2seq-intent-parsing):  PyTorch使用seq2seq和注意力模型进行意图分析和空位填充。\n72. \u003Ckbd>1000-\u003C\u002Fkbd> [pyTorch_NCE](https:\u002F\u002Fgithub.com\u002Fdemelin\u002FpyTorch_NCE): 复现噪音对比估计算法，论文：[Noise-contrastive estimation: A new estimation principle for unnormalized statistical models](http:\u002F\u002Fproceedings.mlr.press\u002Fv9\u002Fgutmann10a\u002Fgutmann10a.pdf)。\n73. \u003Ckbd>1000-\u003C\u002Fkbd> [molencoder](https:\u002F\u002Fgithub.com\u002Fcxhernandez\u002Fmolencoder): 分子自动编码器。\n74. \u003Ckbd>1000-\u003C\u002Fkbd> [GAN-weight-norm](https:\u002F\u002Fgithub.com\u002Fstormraiser\u002FGAN-weight-norm): 论文代码，\"[生成对抗网络中批量和权重归一化的影响](https:\u002F\u002Farxiv.org\u002Fabs\u002F1704.03971)\"\n75. \u003Ckbd>1000-\u003C\u002Fkbd> [lgamma](https:\u002F\u002Fgithub.com\u002Frachtsingh\u002Flgamma): 实现polygamma、lgamma和beta函数。\n76. 
\u003Ckbd>1000-\u003C\u002Fkbd> [bigBatch](https:\u002F\u002Fgithub.com\u002Feladhoffer\u002FbigBatch): 论文代码，论文：“[训练越久，泛化越好：关闭神经网络大批量训练的泛化间隙](https:\u002F\u002Farxiv.org\u002Fabs\u002F1705.08741)”。\n77. \u003Ckbd>1000-\u003C\u002Fkbd> [rl_a3c_pytorch](https:\u002F\u002Fgithub.com\u002Fdgriff777\u002Frl_a3c_pytorch): 针对 Atari 2600 的强化学习，实现了 A3C LSTM 。\n78. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-retraining](https:\u002F\u002Fgithub.com\u002Fahirner\u002Fpytorch-retraining): PyTorch动物园模型转移学习(torchvision)。\n79. \u003Ckbd>1000-\u003C\u002Fkbd> [nmp_qc](https:\u002F\u002Fgithub.com\u002Fpriba\u002Fnmp_qc): 用于计算机视觉的神经消息传递。\n80. \u003Ckbd>2900+\u003C\u002Fkbd> [grad-cam](https:\u002F\u002Fgithub.com\u002Fjacobgil\u002Fpytorch-grad-cam): PyTorch 实现[Grad-CAM](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1610.02391v1.pdf)。\n81. \u003Ckbd>null\u003C\u002Fkbd> [pytorch-trpo](https:\u002F\u002Fgithub.com\u002Fmjacar\u002Fpytorch-trpo): PyTorch s实现置信域策略优化（[Trust Region Policy Optimization (TRPO)](https:\u002F\u002Farxiv.org\u002Fabs\u002F1502.05477)）。\n82. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-explain-black-box](https:\u002F\u002Fgithub.com\u002Fjacobgil\u002Fpytorch-explain-black-box): PyTorch通过有意义扰动实现黑箱的可解释性解释，[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1704.03296)。\n83. \u003Ckbd>1000-\u003C\u002Fkbd> [vae_vpflows](https:\u002F\u002Fgithub.com\u002Fjmtomczak\u002Fvae_vpflows): 凸组合线性IAF与Householder流 https:\u002F\u002Fjmtomczak.github.io\u002Fdeebmed.html 。\n84. \u003Ckbd>1000-\u003C\u002Fkbd> [relational-networks](https:\u002F\u002Fgithub.com\u002Fkimhc6028\u002Frelational-networks): Pytorch实现\"[用一个简单的神经网络模块来做关系推理](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1706.01427.pdf)\"(关系网络)。\n85. \u003Ckbd>1000-\u003C\u002Fkbd> [vqa.pytorch](https:\u002F\u002Fgithub.com\u002FCadene\u002Fvqa.pytorch): 视觉问答。\n86. \u003Ckbd>1300+\u003C\u002Fkbd> [end-to-end-negotiator](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fend-to-end-negotiator): 成交还是不成交？谈判对话的端到端学习。\n87. 
\u003Ckbd>1000-\u003C\u002Fkbd> [odin-pytorch](https:\u002F\u002Fgithub.com\u002FShiyuLiang\u002Fodin-pytorch): 神经网络失配实例的原则性检测。\n88. \u003Ckbd>1000-\u003C\u002Fkbd> [FreezeOut](https:\u002F\u002Fgithub.com\u002Fajbrock\u002FFreezeOut): 一种通过逐步冻结层加速神经网络训练的简单技术。\n89. \u003Ckbd>1000-\u003C\u002Fkbd> [ARAE](https:\u002F\u002Fgithub.com\u002Fjakezhaojb\u002FARAE): 论文代码，\"[对抗性正则化的自动编码器, ARAE](https:\u002F\u002Farxiv.org\u002Fabs\u002F1706.04223)\"。\n90. \u003Ckbd>1000-\u003C\u002Fkbd> [forward-thinking-pytorch](https:\u002F\u002Fgithub.com\u002Fkimhc6028\u002Fforward-thinking-pytorch): PyTorch实现\"[前向思考：一次一层地建立和训练神经网络](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1706.02480.pdf)\"。  \n91. \u003Ckbd>1000-\u003C\u002Fkbd> [context_encoder_pytorch](https:\u002F\u002Fgithub.com\u002FBoyuanJiang\u002Fcontext_encoder_pytorch): PyTorch实现上下文编码器(Context Encoders)，可用于图像修复。\n92. \u003Ckbd>5500+\u003C\u002Fkbd> [attention-is-all-you-need-pytorch](https:\u002F\u002Fgithub.com\u002Fjadore801120\u002Fattention-is-all-you-need-pytorch): PyTorch在\"Attention is All You Need\"中实现转换模型。\n93. \u003Ckbd>1000-\u003C\u002Fkbd> [OpenFacePytorch](https:\u002F\u002Fgithub.com\u002Fthnkim\u002FOpenFacePytorch): 使用 OpenFace's nn4.small2.v1.t7 模型的PyTorch模块。\n94. \u003Ckbd>1000-\u003C\u002Fkbd> [neural-combinatorial-rl-pytorch](https:\u002F\u002Fgithub.com\u002Fpemami4911\u002Fneural-combinatorial-rl-pytorch):  PyTorch 实现\"[通过强化学习实现神经组合优化](https:\u002F\u002Farxiv.org\u002Fabs\u002F1611.09940)\"。\n95. \u003Ckbd>null\u003C\u002Fkbd> [pytorch-nec](https:\u002F\u002Fgithub.com\u002Fmjacar\u002Fpytorch-nec): PyTorch实现神经情景控制([NEC，Neural Episodic Control](https:\u002F\u002Farxiv.org\u002Fabs\u002F1703.01988))。\n96. \u003Ckbd>1000-\u003C\u002Fkbd> [seq2seq.pytorch](https:\u002F\u002Fgithub.com\u002Feladhoffer\u002Fseq2seq.pytorch): 使用PyTorch进行Sequence-to-Sequence学习。\n97. 
\u003Ckbd>1000-\u003C\u002Fkbd> [Pytorch-Sketch-RNN](https:\u002F\u002Fgithub.com\u002Falexis-jacq\u002FPytorch-Sketch-RNN): PyTorch实现 “[A Neural Representation of Sketch Drawings](arxiv.org\u002Fabs\u002F1704.03477)”。\n98. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-pruning](https:\u002F\u002Fgithub.com\u002Fjacobgil\u002Fpytorch-pruning): PyTorch实现 [1611.06440] [用于资源有效推理的剪枝卷积神经网络](https:\u002F\u002Farxiv.org\u002Fabs\u002F1611.06440)\n99. \u003Ckbd>1000-\u003C\u002Fkbd> [DrQA](https:\u002F\u002Fgithub.com\u002Fhitvoice\u002FDrQA): PyTorch实现自动阅读维基百科并回答开放领域问题。\n100. \u003Ckbd>1000-\u003C\u002Fkbd> [YellowFin_Pytorch](https:\u002F\u002Fgithub.com\u002FJianGoForIt\u002FYellowFin_Pytorch): 基于动量梯度下降（momentum SGD）的自动调优优化器，无需手动指定学习速率和动量。\n101. \u003Ckbd>1000-\u003C\u002Fkbd> [samplernn-pytorch](https:\u002F\u002Fgithub.com\u002Fdeepsound-project\u002Fsamplernn-pytorch): PyTorch实现SampleRNN: 一种无条件端到端神经音频生成模型。\n102. \u003Ckbd>1000-\u003C\u002Fkbd> [AEGeAN](https:\u002F\u002Fgithub.com\u002Ftymokvo\u002FAEGeAN): 基于AE稳定的更深的深度卷积生成对抗网络(DCGAN, Deep Convolution Generative Adversarial Networks)。\n103. \u003Ckbd>1000-\u003C\u002Fkbd> [\u002Fpytorch-SRResNet](https:\u002F\u002Fgithub.com\u002Ftwtygqyy\u002Fpytorch-SRResNet): PyTorch实现“[基于生成对抗网络的实感单幅图像超分辨率](https:\u002F\u002Farxiv.org\u002Fabs\u002F1609.04802)”。\n104. \u003Ckbd>1000-\u003C\u002Fkbd> [vsepp](https:\u002F\u002Fgithub.com\u002Ffartashf\u002Fvsepp): 论文代码，\"[VSE++:使用难分样本(Hard Negative)改善视觉语义联合嵌入](https:\u002F\u002Farxiv.org\u002Fabs\u002F1707.05612)\"。\n105. \u003Ckbd>1000-\u003C\u002Fkbd> [Pytorch-DPPO](https:\u002F\u002Fgithub.com\u002Falexis-jacq\u002FPytorch-DPPO): Pytorch实现分布式近端策略优化([Distributed Proximal Policy Optimization](https:\u002F\u002Farxiv.org\u002Fabs\u002F1707.02286))。\n106. \u003Ckbd>1700+\u003C\u002Fkbd> [UNIT](https:\u002F\u002Fgithub.com\u002Fmingyuliutw\u002FUNIT): 无监督的图像到图像转换网络，[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1703.00848)。\n107. 
\u003Ckbd>1300+\u003C\u002Fkbd> [efficient_densenet_pytorch](https:\u002F\u002Fgithub.com\u002Fgpleiss\u002Fefficient_densenet_pytorch): DenseNets的内存高效实现。\n108. \u003Ckbd>1000-\u003C\u002Fkbd> [tsn-pytorch](https:\u002F\u002Fgithub.com\u002Fyjxiong\u002Ftsn-pytorch): PyTorch实现时间分割网络(TSN, Temporal Segment Networks)。\n109. \u003Ckbd>1000-\u003C\u002Fkbd> [SMASH](https:\u002F\u002Fgithub.com\u002Fajbrock\u002FSMASH): [SMASH](https:\u002F\u002Farxiv.org\u002Fabs\u002F1708.05344)，一种高效地探索神经体系结构的实验技术。\n110. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-retinanet](https:\u002F\u002Fgithub.com\u002Fkuangliu\u002Fpytorch-retinanet): RetinaNet。\n111. \u003Ckbd>1000-\u003C\u002Fkbd> [biogans](https:\u002F\u002Fgithub.com\u002Faosokin\u002Fbiogans): 实现 ICCV 2017 论文 \"[利用GANs进行生物图像合成](https:\u002F\u002Farxiv.org\u002Fabs\u002F1708.04692)\"。\n112. \u003Ckbd>null\u003C\u002Fkbd> [Semantic Image Synthesis via Adversarial Learning]( https:\u002F\u002Fgithub.com\u002Fwoozzu\u002Fdong_iccv_2017): PyTorch 实现 ICCV 2017 论文 \"[基于对抗学习的语义图像合成](https:\u002F\u002Farxiv.org\u002Fabs\u002F1707.06873)\"。\n113. \u003Ckbd>1000-\u003C\u002Fkbd> [fmpytorch](https:\u002F\u002Fgithub.com\u002Fjmhessel\u002Ffmpytorch): PyTorch在Cython中实现因子分解机（Factorization Machine）模块。\n114. \u003Ckbd>1000-\u003C\u002Fkbd> [ORN](https:\u002F\u002Fgithub.com\u002FZhouYanzhao\u002FORN): PyTorch 实现 CVPR 2017 论文 \"[Oriented Response Networks](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1701.01833.pdf)\"。\n115. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-maml](https:\u002F\u002Fgithub.com\u002Fkaterakelly\u002Fpytorch-maml): PyTorch实现 [MAML](https:\u002F\u002Farxiv.org\u002Fabs\u002F1703.03400)（Model-Agnostic Meta-Learning，与模型无关的元学习）。\n116. \u003Ckbd>2200+\u003C\u002Fkbd> [pytorch-generative-model-collections](https:\u002F\u002Fgithub.com\u002Fznxlwm\u002Fpytorch-generative-model-collections): PyTorch中的各种生成模型集合。\n117. 
\u003Ckbd>1000-\u003C\u002Fkbd> [vqa-winner-cvprw-2017](https:\u002F\u002Fgithub.com\u002Fmarkdtw\u002Fvqa-winner-cvprw-2017): Pytorch 实现 CVPR'17 VQA( Visual Question Answer，视觉问答) 挑战冠军。\n118. \u003Ckbd>1000-\u003C\u002Fkbd> [tacotron_pytorch](https:\u002F\u002Fgithub.com\u002Fr9y9\u002Ftacotron_pytorch):  PyTorch 实现 Tacotron 语音合成模型。\n119. \u003Ckbd>1000-\u003C\u002Fkbd> [pspnet-pytorch](https:\u002F\u002Fgithub.com\u002FLextal\u002Fpspnet-pytorch): PyTorch 实现 PSPNet 语义分割网络。\n120. \u003Ckbd>1000-\u003C\u002Fkbd> [LM-LSTM-CRF](https:\u002F\u002Fgithub.com\u002FLiyuanLucasLiu\u002FLM-LSTM-CRF): 《Empower Sequence Labeling with Task-Aware Language Model》 http:\u002F\u002Farxiv.org\u002Fabs\u002F1709.04109\n121. \u003Ckbd>5000+\u003C\u002Fkbd> [face-alignment](https:\u002F\u002Fgithub.com\u002F1adrianb\u002Fface-alignment): 使用PyTorch构建2D和3D人脸对齐库。\n122. \u003Ckbd>1000-\u003C\u002Fkbd> [DepthNet](https:\u002F\u002Fgithub.com\u002FClementPinard\u002FDepthNet): PyTorch 在Still Box数据集上训练DepthNet。\n123. \u003Ckbd>1600+\u003C\u002Fkbd> [EDSR-PyTorch](https:\u002F\u002Fgithub.com\u002Fthstkdgus35\u002FEDSR-PyTorch): 论文《Enhanced Deep Residual Networks for Single Image Super-Resolution》的PyTorch实现版本。 (CVPRW 2017)\n124. \u003Ckbd>1000-\u003C\u002Fkbd> [e2c-pytorch](https:\u002F\u002Fgithub.com\u002Fethanluoyc\u002Fe2c-pytorch): E2C，Embed to Control 实现。\n125. \u003Ckbd>2900+\u003C\u002Fkbd> [3D-ResNets-PyTorch](https:\u002F\u002Fgithub.com\u002Fkenshohara\u002F3D-ResNets-PyTorch): 基于3D残差网络的动作识别。\n126. \u003Ckbd>1000-\u003C\u002Fkbd> [bandit-nmt](https:\u002F\u002Fgithub.com\u002Fkhanhptnk\u002Fbandit-nmt): EMNLP 2017 论文《Reinforcement Learning for Bandit Neural Machine Translation with Simulated Human Feedback》的代码,，改论文在神经编解码模型的基础上实现了A2C算法，并在模拟噪声激励下对组合进行了基准测试。\n127. 
\u003Ckbd>2400+\u003C\u002Fkbd> [pytorch-a2c-ppo-acktr](https:\u002F\u002Fgithub.com\u002Fikostrikov\u002Fpytorch-a2c-ppo-acktr): PyTorch 实现 Advantage Actor Critic (A2C), Proximal Policy Optimization (PPO，近端策略优化) 和可扩展信赖域（Trust Region）方法，这些算法使用 Kronecker因子近似（ACKTR）和生成对抗模仿学习（GAIL）实现，可用于深度强化学习。\n128. \u003Ckbd>1000-\u003C\u002Fkbd> [zalando-pytorch](https:\u002F\u002Fgithub.com\u002FbaldassarreFe\u002Fzalando-pytorch): [Fashion-MNIST](https:\u002F\u002Fgithub.com\u002Fzalandoresearch\u002Ffashion-mnist)数据集上的各种实验。\n129. \u003Ckbd>1000-\u003C\u002Fkbd> [sphereface_pytorch](https:\u002F\u002Fgithub.com\u002Fclcarwin\u002Fsphereface_pytorch): PyTorch实现SphereFace，人脸识别相关，https:\u002F\u002Farxiv.org\u002Fabs\u002F1704.08063 。\n130. \u003Ckbd>1000-\u003C\u002Fkbd> [Categorical DQN](https:\u002F\u002Fgithub.com\u002Ffloringogianu\u002Fcategorical-dqn): PyTorch 版 Categorical DQN，该模型来自论文《[A Distributional Perspective on Reinforcement Learning](https:\u002F\u002Farxiv.org\u002Fabs\u002F1707.06887)》。\n131. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-ntm](https:\u002F\u002Fgithub.com\u002Floudinthecloud\u002Fpytorch-ntm): 神经网络图灵机。\n132. \u003Ckbd>null\u003C\u002Fkbd> [mask_rcnn_pytorch](https:\u002F\u002Fgithub.com\u002Ffelixgwu\u002Fmask_rcnn_pytorch): Mask RCNN in PyTorch.\n133. \u003Ckbd>1000-\u003C\u002Fkbd> [graph_convnets_pytorch](https:\u002F\u002Fgithub.com\u002Fxbresson\u002Fgraph_convnets_pytorch): PyTorch 实现图卷积神经网络，NIPS’16。\n134. \u003Ckbd>1700+\u003C\u002Fkbd> [pytorch-faster-rcnn](https:\u002F\u002Fgithub.com\u002Fruotianluo\u002Fpytorch-faster-rcnn): PyTorch实现 faster RCNN 检测框架，基于 Xinlei Chen 的[tf-faster-rcnn](https:\u002F\u002Fgithub.com\u002Fendernewton\u002Ftf-faster-rcnn)，已不再维护。\n135. \u003Ckbd>1000-\u003C\u002Fkbd> [torchMoji](https:\u002F\u002Fgithub.com\u002Fhuggingface\u002FtorchMoji): A pyTorch implementation of the DeepMoji model: state-of-the-art deep learning model for analyzing sentiment, emotion, sarcasm etc.\n136. 
\u003Ckbd>3900+\u003C\u002Fkbd> [semantic-segmentation-pytorch](https:\u002F\u002Fgithub.com\u002Fhangzhaomit\u002Fsemantic-segmentation-pytorch): 在[MIT ADE20K dataset](http:\u002F\u002Fsceneparsing.csail.mit.edu)数据集上实现语义分割\u002F场景解析。\n137. \u003Ckbd>1200+\u003C\u002Fkbd> [pytorch-qrnn](https:\u002F\u002Fgithub.com\u002Fsalesforce\u002Fpytorch-qrnn): PyTorch implementation of the Quasi-Recurrent Neural Network - up to 16 times faster than NVIDIA's cuDNN LSTM\n138. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-sgns](https:\u002F\u002Fgithub.com\u002Ftheeluwin\u002Fpytorch-sgns): Skipgram Negative Sampling in PyTorch.\n139. \u003Ckbd>1000-\u003C\u002Fkbd> [SfmLearner-Pytorch ](https:\u002F\u002Fgithub.com\u002FClementPinard\u002FSfmLearner-Pytorch): Pytorch version of SfmLearner from Tinghui Zhou et al.\n140. \u003Ckbd>1000-\u003C\u002Fkbd> [deformable-convolution-pytorch](https:\u002F\u002Fgithub.com\u002F1zb\u002Fdeformable-convolution-pytorch): PyTorch实现可变形卷积。\n141. \u003Ckbd>1000-\u003C\u002Fkbd> [skip-gram-pytorch](https:\u002F\u002Fgithub.com\u002Ffanglanting\u002Fskip-gram-pytorch): A complete pytorch implementation of skipgram model (with subsampling and negative sampling). The embedding result is tested with Spearman's rank correlation.\n142. \u003Ckbd>1000-\u003C\u002Fkbd> [stackGAN-v2](https:\u002F\u002Fgithub.com\u002Fhanzhanggit\u002FStackGAN-v2): Pytorch implementation for reproducing StackGAN_v2 results in the paper StackGAN++: Realistic Image Synthesis with Stacked Generative Adversarial Networks by Han Zhang*, Tao Xu*, Hongsheng Li, Shaoting Zhang, Xiaogang Wang, Xiaolei Huang, Dimitris Metaxas.\n143. \u003Ckbd>1000-\u003C\u002Fkbd> [self-critical.pytorch](https:\u002F\u002Fgithub.com\u002Fruotianluo\u002Fself-critical.pytorch): 非官方，PyTorch实现基于 self-critical 序列训练的图像标注。\n144. \u003Ckbd>3600+\u003C\u002Fkbd> [pygcn](https:\u002F\u002Fgithub.com\u002Ftkipf\u002Fpygcn): 图卷积网络。\n145. 
\u003Ckbd>1000-\u003C\u002Fkbd> [dnc](https:\u002F\u002Fgithub.com\u002Fixaxaar\u002Fpytorch-dnc): 可微神经计算机、稀疏存取存储器与稀疏可微神经计算机。\n146. \u003Ckbd>1000-\u003C\u002Fkbd> [prog_gans_pytorch_inference](https:\u002F\u002Fgithub.com\u002Fptrblck\u002Fprog_gans_pytorch_inference): PyTorch inference for \"Progressive Growing of GANs\" with CelebA snapshot.\n147. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-capsule](https:\u002F\u002Fgithub.com\u002Ftimomernick\u002Fpytorch-capsule): Pytorch implementation of Hinton's Dynamic Routing Between Capsules.\n148. \u003Ckbd>1000-\u003C\u002Fkbd> [PyramidNet-PyTorch](https:\u002F\u002Fgithub.com\u002Fdyhan0920\u002FPyramidNet-PyTorch): A PyTorch implementation for PyramidNets (Deep Pyramidal Residual Networks, arxiv.org\u002Fabs\u002F1610.02915)\n149. \u003Ckbd>1000-\u003C\u002Fkbd> [radio-transformer-networks](https:\u002F\u002Fgithub.com\u002Fgram-ai\u002Fradio-transformer-networks): A PyTorch implementation of Radio Transformer Networks from the paper \"An Introduction to Deep Learning for the Physical Layer\". arxiv.org\u002Fabs\u002F1702.00832\n150. \u003Ckbd>1000-\u003C\u002Fkbd> [honk](https:\u002F\u002Fgithub.com\u002Fcastorini\u002Fhonk): PyTorch reimplementation of Google's TensorFlow CNNs for keyword spotting.\n151. \u003Ckbd>1000-\u003C\u002Fkbd> [DeepCORAL](https:\u002F\u002Fgithub.com\u002FSSARCandy\u002FDeepCORAL): A PyTorch implementation of 'Deep CORAL: Correlation Alignment for Deep Domain Adaptation.', ECCV 2016\n152. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-pose](https:\u002F\u002Fgithub.com\u002Fbearpaw\u002Fpytorch-pose): PyTorch工具包，用于2D人体姿态估计。\n153. \u003Ckbd>1000-\u003C\u002Fkbd> [lang-emerge-parlai](https:\u002F\u002Fgithub.com\u002Fkarandesai-96\u002Flang-emerge-parlai): Implementation of EMNLP 2017 Paper \"Natural Language Does Not Emerge 'Naturally' in Multi-Agent Dialog\" using PyTorch and ParlAI\n154. 
\u003Ckbd>1200+\u003C\u002Fkbd> [Rainbow](https:\u002F\u002Fgithub.com\u002FKaixhin\u002FRainbow): Rainbow: Combining Improvements in Deep Reinforcement Learning \n155. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch_compact_bilinear_pooling v1](https:\u002F\u002Fgithub.com\u002Fgdlg\u002Fpytorch_compact_bilinear_pooling): This repository has a pure Python implementation of Compact Bilinear Pooling and Count Sketch for PyTorch.\n156. \u003Ckbd>1000-\u003C\u002Fkbd> [CompactBilinearPooling-Pytorch v2](https:\u002F\u002Fgithub.com\u002FDeepInsight-PCALab\u002FCompactBilinearPooling-Pytorch): (Yang Gao, et al.) A Pytorch Implementation for Compact Bilinear Pooling.\n157. \u003Ckbd>1000-\u003C\u002Fkbd> [FewShotLearning](https:\u002F\u002Fgithub.com\u002Fgitabcworld\u002FFewShotLearning): Pytorch implementation of the paper \"Optimization as a Model for Few-Shot Learning\"\n158. \u003Ckbd>1000-\u003C\u002Fkbd> [meProp](https:\u002F\u002Fgithub.com\u002Fjklj077\u002FmeProp): Codes for \"meProp: Sparsified Back Propagation for Accelerated Deep Learning with Reduced Overfitting\".\n159. \u003Ckbd>1000-\u003C\u002Fkbd> [SFD_pytorch](https:\u002F\u002Fgithub.com\u002Fclcarwin\u002FSFD_pytorch): 单镜头尺度不变人脸检测器。\n160. \u003Ckbd>1000-\u003C\u002Fkbd> [GradientEpisodicMemory](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FGradientEpisodicMemory): Continuum Learning with GEM: Gradient Episodic Memory. https:\u002F\u002Farxiv.org\u002Fabs\u002F1706.08840\n161. \u003Ckbd>1900+\u003C\u002Fkbd> [DeblurGAN](https:\u002F\u002Fgithub.com\u002FKupynOrest\u002FDeblurGAN): Pytorch implementation of the paper DeblurGAN: Blind Motion Deblurring Using Conditional Adversarial Networks.\n162. \u003Ckbd>4800+\u003C\u002Fkbd> [StarGAN](https:\u002F\u002Fgithub.com\u002Fyunjey\u002FStarGAN): StarGAN: 多领域图像转换 GAN 网络，https:\u002F\u002Farxiv.org\u002Fabs\u002F1711.09020 。\n163. 
\u003Ckbd>1000-\u003C\u002Fkbd> [CapsNet-pytorch](https:\u002F\u002Fgithub.com\u002Fadambielski\u002FCapsNet-pytorch): PyTorch 实现 NIPS 2017 论文 “[胶囊间的动态路由](https:\u002F\u002Farxiv.org\u002Fabs\u002F1710.09829)”。\n164. \u003Ckbd>1000-\u003C\u002Fkbd> [CondenseNet](https:\u002F\u002Fgithub.com\u002FShichenLiu\u002FCondenseNet): CondenseNet: 面向移动设备的轻量级 CNN。\n165. \u003Ckbd>6700+\u003C\u002Fkbd> [deep-image-prior](https:\u002F\u002Fgithub.com\u002FDmitryUlyanov\u002Fdeep-image-prior): 基于神经网络的图像修复，无学习过程。\n166. \u003Ckbd>1100+\u003C\u002Fkbd> [deep-head-pose](https:\u002F\u002Fgithub.com\u002Fnatanielruiz\u002Fdeep-head-pose): 使用PyTorch进行深度学习头部姿势估计。\n167. \u003Ckbd>1000-\u003C\u002Fkbd> [Random-Erasing](https:\u002F\u002Fgithub.com\u002Fzhunzhong07\u002FRandom-Erasing): 论文代码，论文：\"[随机擦除数据增强](https:\u002F\u002Farxiv.org\u002Fabs\u002F1708.04896)\"。\n168. \u003Ckbd>1000-\u003C\u002Fkbd> [FaderNetworks](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FFaderNetworks): Fader Networks: 通过滑动属性重构图像 - NIPS 2017，https:\u002F\u002Farxiv.org\u002Fpdf\u002F1706.00409.pdf 。\n169. \u003Ckbd>2300+\u003C\u002Fkbd> [FlowNet 2.0](https:\u002F\u002Fgithub.com\u002FNVIDIA\u002Fflownet2-pytorch): FlowNet 2.0: 深度网络中光流估计的演化。\n170. \u003Ckbd>5300+\u003C\u002Fkbd> [pix2pixHD](https:\u002F\u002Fgithub.com\u002FNVIDIA\u002Fpix2pixHD): 利用条件 GANs 合成和处理 HD 高清图像的 PyTorch 实现，https:\u002F\u002Farxiv.org\u002Fpdf\u002F1711.11585.pdf。\n171. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-smoothgrad](https:\u002F\u002Fgithub.com\u002Fpkdn\u002Fpytorch-smoothgrad): SmoothGrad通过增加噪声来去除噪声。\n172. \u003Ckbd>1000-\u003C\u002Fkbd> [RetinaNet](https:\u002F\u002Fgithub.com\u002Fc0nn3r\u002FRetinaNet): RetinaNet实现。\n173. \u003Ckbd>6300+\u003C\u002Fkbd> [faster-rcnn.pytorch](https:\u002F\u002Fgithub.com\u002Fjwyang\u002Ffaster-rcnn.pytorch): This project is a faster faster R-CNN implementation, aimed to accelerating the training of faster R-CNN object detection models. \n174. 
\u003Ckbd>1000-\u003C\u002Fkbd> [mixup_pytorch](https:\u002F\u002Fgithub.com\u002Fleehomyc\u002Fmixup_pytorch): A PyTorch implementation of the paper Mixup: Beyond Empirical Risk Minimization in PyTorch.\n175. \u003Ckbd>1100+\u003C\u002Fkbd> [inplace_abn](https:\u002F\u002Fgithub.com\u002Fmapillary\u002Finplace_abn): In-Place Activated BatchNorm for Memory-Optimized Training of DNNs\n176. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-pose-hg-3d](https:\u002F\u002Fgithub.com\u002Fxingyizhou\u002Fpytorch-pose-hg-3d): PyTorch implementation for 3D human pose estimation\n177. \u003Ckbd>1000-\u003C\u002Fkbd> [nmn-pytorch](https:\u002F\u002Fgithub.com\u002FHarshTrivedi\u002Fnmn-pytorch): Neural Module Network for VQA in Pytorch.\n178. \u003Ckbd>1000-\u003C\u002Fkbd> [bytenet](https:\u002F\u002Fgithub.com\u002Fkefirski\u002Fbytenet): Pytorch implementation of bytenet from \"Neural Machine Translation in Linear Time\" paper\n179. \u003Ckbd>1000-\u003C\u002Fkbd> [bottom-up-attention-vqa](https:\u002F\u002Fgithub.com\u002Fhengyuan-hu\u002Fbottom-up-attention-vqa): vqa, bottom-up-attention, pytorch\n180. \u003Ckbd>1000-\u003C\u002Fkbd> [yolo2-pytorch](https:\u002F\u002Fgithub.com\u002Fruiminshen\u002Fyolo2-pytorch): The YOLOv2 is one of the most popular one-stage object detector. This project adopts PyTorch as the developing framework to increase productivity, and utilize ONNX to convert models into Caffe 2 to benifit engineering deployment.\n181. \u003Ckbd>1000-\u003C\u002Fkbd> [reseg-pytorch](https:\u002F\u002Fgithub.com\u002FWizaron\u002Freseg-pytorch): PyTorch 实现ReSeg。 (https:\u002F\u002Farxiv.org\u002Fpdf\u002F1511.07053.pdf)\n182. \u003Ckbd>1000-\u003C\u002Fkbd> [binary-stochastic-neurons](https:\u002F\u002Fgithub.com\u002FWizaron\u002Fbinary-stochastic-neurons): Binary Stochastic Neurons in PyTorch.\n183. 
\u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-pose-estimation](https:\u002F\u002Fgithub.com\u002FDavexPro\u002Fpytorch-pose-estimation): PyTorch Implementation of Realtime Multi-Person Pose Estimation project.\n184. \u003Ckbd>1000-\u003C\u002Fkbd> [interaction_network_pytorch](https:\u002F\u002Fgithub.com\u002Fhiggsfield\u002Finteraction_network_pytorch): Pytorch Implementation of Interaction Networks for Learning about Objects, Relations and Physics.\n185. \u003Ckbd>1000-\u003C\u002Fkbd> [NoisyNaturalGradient](https:\u002F\u002Fgithub.com\u002Fwlwkgus\u002FNoisyNaturalGradient): Pytorch Implementation of paper \"Noisy Natural Gradient as Variational Inference\". \n186. \u003Ckbd>1000-\u003C\u002Fkbd> [ewc.pytorch](https:\u002F\u002Fgithub.com\u002Fmoskomule\u002Fewc.pytorch): An implementation of Elastic Weight Consolidation (EWC), proposed in James Kirkpatrick et al. Overcoming catastrophic forgetting in neural networks 2016(10.1073\u002Fpnas.1611835114).\n187. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-zssr](https:\u002F\u002Fgithub.com\u002Fjacobgil\u002Fpytorch-zssr): PyTorch implementation of 1712.06087 \"Zero-Shot\" Super-Resolution using Deep Internal Learning\n188. \u003Ckbd>1000-\u003C\u002Fkbd> [deep_image_prior](https:\u002F\u002Fgithub.com\u002Fatiyo\u002Fdeep_image_prior): 基于未训练神经网络的图像重建算法实现。算法：[Deep Image Prior](https:\u002F\u002Farxiv.org\u002Fabs\u002F1711.10925)。\n189. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-transformer](https:\u002F\u002Fgithub.com\u002Fleviswind\u002Fpytorch-transformer): PyTorch实现论文[Attention Is All You Need](https:\u002F\u002Farxiv.org\u002Fabs\u002F1706.03762)。\n190. \u003Ckbd>1000-\u003C\u002Fkbd> [DeepRL-Grounding](https:\u002F\u002Fgithub.com\u002Fdevendrachaplot\u002FDeepRL-Grounding): PyTorch实现AAAI-18论文[Gated-Attention Architectures for Task-Oriented Language Grounding](https:\u002F\u002Farxiv.org\u002Fabs\u002F1706.07230)。\n191. 
\u003Ckbd>1000-\u003C\u002Fkbd> [deep-forecast-pytorch](https:\u002F\u002Fgithub.com\u002FWizaron\u002Fdeep-forecast-pytorch): 使用LSTMs进行风速预测，论文：[Deep Forecast: Deep Learning-based Spatio-Temporal Forecasting](arxiv.org\u002Fpdf\u002F1707.08110.pdf)。\n192. \u003Ckbd>1000-\u003C\u002Fkbd> [cat-net](https:\u002F\u002Fgithub.com\u002FutiasSTARS\u002Fcat-net):  正则外观变换（[Canonical Appearance Transformations](https:\u002F\u002Farxiv.org\u002Fabs\u002F1709.03009)）\n193. \u003Ckbd>1000-\u003C\u002Fkbd> [minimal_glo](https:\u002F\u002Fgithub.com\u002Ftneumann\u002Fminimal_glo): Minimal PyTorch implementation of Generative Latent Optimization from the paper \"Optimizing the Latent Space of Generative Networks\"\n194. \u003Ckbd>1000-\u003C\u002Fkbd> [LearningToCompare-Pytorch](https:\u002F\u002Fgithub.com\u002Fdragen1860\u002FLearningToCompare-Pytorch): Pytorch Implementation for Paper: Learning to Compare: Relation Network for Few-Shot Learning. \n195. \u003Ckbd>1400+\u003C\u002Fkbd> [poincare-embeddings](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fpoincare-embeddings): PyTorch implementation of the NIPS-17 paper \"Poincaré Embeddings for Learning Hierarchical Representations\". \n196. \u003Ckbd>null\u003C\u002Fkbd> [pytorch-trpo(Hessian-vector product version)](https:\u002F\u002Fgithub.com\u002Fikostrikov\u002Fpytorch-trpo): This is a PyTorch implementation of \"Trust Region Policy Optimization (TRPO)\" with exact Hessian-vector product instead of finite differences approximation.\n197. \u003Ckbd>1000-\u003C\u002Fkbd> [ggnn.pytorch](https:\u002F\u002Fgithub.com\u002FJamesChuanggg\u002Fggnn.pytorch): A PyTorch Implementation of Gated Graph Sequence Neural Networks (GGNN). \n198. \u003Ckbd>1000-\u003C\u002Fkbd> [visual-interaction-networks-pytorch](https:\u002F\u002Fgithub.com\u002FMrgemy95\u002Fvisual-interaction-networks-pytorch): This's an implementation of deepmind Visual Interaction Networks paper using pytorch\n199. 
\u003Ckbd>1000-\u003C\u002Fkbd> [adversarial-patch](https:\u002F\u002Fgithub.com\u002Fjhayes14\u002Fadversarial-patch): PyTorch实现对抗补丁。\n200. \u003Ckbd>1000-\u003C\u002Fkbd> [Prototypical-Networks-for-Few-shot-Learning-PyTorch](https:\u002F\u002Fgithub.com\u002Forobix\u002FPrototypical-Networks-for-Few-shot-Learning-PyTorch): Implementation of Prototypical Networks for Few Shot Learning (arxiv.org\u002Fabs\u002F1703.05175) in Pytorch\n201. \u003Ckbd>1000-\u003C\u002Fkbd> [Visual-Feature-Attribution-Using-Wasserstein-GANs-Pytorch](https:\u002F\u002Fgithub.com\u002Forobix\u002FVisual-Feature-Attribution-Using-Wasserstein-GANs-Pytorch): Implementation of Visual Feature Attribution using Wasserstein GANs (arxiv.org\u002Fabs\u002F1711.08998) in PyTorch.\n202. \u003Ckbd>1000-\u003C\u002Fkbd> [PhotographicImageSynthesiswithCascadedRefinementNetworks-Pytorch](https:\u002F\u002Fgithub.com\u002FBlade6570\u002FPhotographicImageSynthesiswithCascadedRefinementNetworks-Pytorch): 用级联优化网络生成照片级图像，https:\u002F\u002Farxiv.org\u002Fabs\u002F1707.09405 。\n203. \u003Ckbd>2400+\u003C\u002Fkbd> [ENAS-pytorch](https:\u002F\u002Fgithub.com\u002Fcarpedm20\u002FENAS-pytorch): PyTorch实现\"[基于参数共享的高效神经网络结构搜索](https:\u002F\u002Farxiv.org\u002Fabs\u002F1802.03268)\"。\n204. \u003Ckbd>1000-\u003C\u002Fkbd> [Neural-IMage-Assessment](https:\u002F\u002Fgithub.com\u002Fkentsyx\u002FNeural-IMage-Assessment): 神经图片评估，https:\u002F\u002Farxiv.org\u002Fabs\u002F1709.05424 。\n205. \u003Ckbd>1000-\u003C\u002Fkbd> [proxprop](https:\u002F\u002Fgithub.com\u002Ftfrerix\u002Fproxprop): 近端回传(Proximal Backpropagation) - 隐式梯度代替显式梯度的神经网络训练算法。\n206. \u003Ckbd>10500+\u003C\u002Fkbd> [FastPhotoStyle](https:\u002F\u002Fgithub.com\u002FNVIDIA\u002FFastPhotoStyle): 照片级逼真的图像风格化的一个封闭解。\n207. \u003Ckbd>1000-\u003C\u002Fkbd> [Deep-Image-Analogy-PyTorch](https:\u002F\u002Fgithub.com\u002FBen-Louis\u002FDeep-Image-Analogy-PyTorch): 基于PyTorch的深度图像模拟的Python实现。\n208. 
\u003Ckbd>2700+\u003C\u002Fkbd> [Person-reID_pytorch](https:\u002F\u002Fgithub.com\u002Flayumi\u002FPerson_reID_baseline_pytorch): 行人再识别Person-reID的PyTorch实现。\n209. \u003Ckbd>1000-\u003C\u002Fkbd> [pt-dilate-rnn](https:\u002F\u002Fgithub.com\u002Fzalandoresearch\u002Fpt-dilate-rnn): 空洞递归神经网络（Dilated RNNs）。\n210. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-i-revnet](https:\u002F\u002Fgithub.com\u002Fjhjacobsen\u002Fpytorch-i-revnet): Pytorch实现i-RevNets。\n211. \u003Ckbd>1000-\u003C\u002Fkbd> [OrthNet](https:\u002F\u002Fgithub.com\u002FOrcuslc\u002FOrthNet): TensorFlow、PyTorch和Numpy层生成正交多项式。\n212. \u003Ckbd>1000-\u003C\u002Fkbd> [DRRN-pytorch](https:\u002F\u002Fgithub.com\u002Fjt827859032\u002FDRRN-pytorch): \"[超分辨率的深递归残差网络(DRRN)](http:\u002F\u002Fcvlab.cse.msu.edu\u002Fpdfs\u002FTai_Yang_Liu_CVPR2017.pdf)\", CVPR 2017\n213. \u003Ckbd>1000-\u003C\u002Fkbd> [shampoo.pytorch](https:\u002F\u002Fgithub.com\u002Fmoskomule\u002Fshampoo.pytorch): Shampoo算法实现。\n214. \u003Ckbd>1000-\u003C\u002Fkbd> [Neural-IMage-Assessment 2](https:\u002F\u002Fgithub.com\u002Ftruskovskiyk\u002Fnima.pytorch): 神经图片评估，https:\u002F\u002Farxiv.org\u002Fabs\u002F1709.05424 。\n215. \u003Ckbd>2900+\u003C\u002Fkbd> [TCN](https:\u002F\u002Fgithub.com\u002Flocuslab\u002FTCN): Sequence modeling benchmarks and temporal convolutional networks locuslab\u002FTCN\n216. \u003Ckbd>1000-\u003C\u002Fkbd> [DCC](https:\u002F\u002Fgithub.com\u002Fshahsohil\u002FDCC): This repository contains the source code and data for reproducing results of Deep Continuous Clustering paper.\n217. \u003Ckbd>1000-\u003C\u002Fkbd> [packnet](https:\u002F\u002Fgithub.com\u002Farunmallya\u002Fpacknet): Code for PackNet: Adding Multiple Tasks to a Single Network by Iterative Pruning arxiv.org\u002Fabs\u002F1711.05769\n218. 
\u003Ckbd>1000-\u003C\u002Fkbd> [PyTorch-progressive_growing_of_gans](https:\u002F\u002Fgithub.com\u002Fgithub-pengge\u002FPyTorch-progressive_growing_of_gans): PyTorch implementation of Progressive Growing of GANs for Improved Quality, Stability, and Variation.\n219. \u003Ckbd>1000-\u003C\u002Fkbd> [nonauto-nmt](https:\u002F\u002Fgithub.com\u002Fsalesforce\u002Fnonauto-nmt): PyTorch Implementation of \"Non-Autoregressive Neural Machine Translation\"\n220. \u003Ckbd>9800+\u003C\u002Fkbd> [PyTorch-GAN](https:\u002F\u002Fgithub.com\u002Feriklindernoren\u002FPyTorch-GAN): PyTorch implementations of Generative Adversarial Networks.\n221. \u003Ckbd>1000-\u003C\u002Fkbd> [PyTorchWavelets](https:\u002F\u002Fgithub.com\u002Ftomrunia\u002FPyTorchWavelets): PyTorch implementation of the wavelet analysis found in Torrence and Compo (1998)\n222. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-made](https:\u002F\u002Fgithub.com\u002Fkarpathy\u002Fpytorch-made): MADE (Masked Autoencoder Density Estimation) implementation in PyTorch\n223. \u003Ckbd>1000-\u003C\u002Fkbd> [VRNN](https:\u002F\u002Fgithub.com\u002Femited\u002FVariationalRecurrentNeuralNetwork): Pytorch implementation of the Variational RNN (VRNN), from A Recurrent Latent Variable Model for Sequential Data.\n224. \u003Ckbd>1000-\u003C\u002Fkbd> [flow](https:\u002F\u002Fgithub.com\u002Femited\u002Fflow): Pytorch implementation of ICLR 2018 paper Deep Learning for Physical Processes: Integrating Prior Scientific Knowledge.\n225. \u003Ckbd>1600+\u003C\u002Fkbd> [deepvoice3_pytorch](https:\u002F\u002Fgithub.com\u002Fr9y9\u002Fdeepvoice3_pytorch): PyTorch实现基于卷积神经网络的语音合成模型。\n226. \u003Ckbd>1000-\u003C\u002Fkbd> [psmm](https:\u002F\u002Fgithub.com\u002Felanmart\u002Fpsmm): implementation of the Pointer Sentinel Mixture Model, as described in the paper by Stephen Merity et al.\n227. 
\u003Ckbd>3000+\u003C\u002Fkbd> [tacotron2](https:\u002F\u002Fgithub.com\u002FNVIDIA\u002Ftacotron2): Tacotron 2 - PyTorch implementation with faster-than-realtime inference.\n228. \u003Ckbd>1000-\u003C\u002Fkbd> [AccSGD](https:\u002F\u002Fgithub.com\u002Frahulkidambi\u002FAccSGD): Implements pytorch code for the Accelerated SGD algorithm.\n229. \u003Ckbd>1000-\u003C\u002Fkbd> [QANet-pytorch](https:\u002F\u002Fgithub.com\u002Fhengruo\u002FQANet-pytorch): an implementation of QANet with PyTorch (EM\u002FF1 = 70.5\u002F77.2 after 20 epochs for about 20 hours on one 1080Ti card.)\n230. \u003Ckbd>1000-\u003C\u002Fkbd> [ConvE](https:\u002F\u002Fgithub.com\u002FTimDettmers\u002FConvE): Convolutional 2D Knowledge Graph Embeddings\n231. \u003Ckbd>1000-\u003C\u002Fkbd> [Structured-Self-Attention](https:\u002F\u002Fgithub.com\u002Fkaushalshetty\u002FStructured-Self-Attention): Implementation for the paper A Structured Self-Attentive Sentence Embedding, which is published in ICLR 2017: arxiv.org\u002Fabs\u002F1703.03130 .\n232. \u003Ckbd>1000-\u003C\u002Fkbd> [graphsage-simple](https:\u002F\u002Fgithub.com\u002Fwilliamleif\u002Fgraphsage-simple): Simple reference implementation of GraphSAGE.\n233. \u003Ckbd>2800+\u003C\u002Fkbd> [Detectron.pytorch](https:\u002F\u002Fgithub.com\u002Froytseng-tw\u002FDetectron.pytorch): A pytorch implementation of Detectron. Both training from scratch and inferring directly from pretrained Detectron weights are available.\n234. \u003Ckbd>1000-\u003C\u002Fkbd> [R2Plus1D-PyTorch](https:\u002F\u002Fgithub.com\u002Firhumshafkat\u002FR2Plus1D-PyTorch): PyTorch implementation of the R2Plus1D convolution based ResNet architecture described in the paper \"A Closer Look at Spatiotemporal Convolutions for Action Recognition\"\n235. \u003Ckbd>1000-\u003C\u002Fkbd> [StackNN](https:\u002F\u002Fgithub.com\u002Fviking-sudo-rm\u002FStackNN): A PyTorch implementation of differentiable stacks for use in neural networks.\n236. 
\u003Ckbd>1000-\u003C\u002Fkbd> [translagent](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Ftranslagent): Code for Emergent Translation in Multi-Agent Communication.\n237. \u003Ckbd>1000-\u003C\u002Fkbd> [ban-vqa](https:\u002F\u002Fgithub.com\u002Fjnhwkim\u002Fban-vqa): Bilinear attention networks for visual question answering. \n238. \u003Ckbd>1200+\u003C\u002Fkbd> [pytorch-openai-transformer-lm](https:\u002F\u002Fgithub.com\u002Fhuggingface\u002Fpytorch-openai-transformer-lm): This is a PyTorch implementation of the TensorFlow code provided with OpenAI's paper \"Improving Language Understanding by Generative Pre-Training\" by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever.\n239. \u003Ckbd>1000-\u003C\u002Fkbd> [T2F](https:\u002F\u002Fgithub.com\u002Fakanimax\u002FT2F): 使用深度学习进行Text-to-Face生成。该项目结合了[StackGAN](https:\u002F\u002Farxiv.org\u002Fabs\u002F1710.10916)和[ProGAN](https:\u002F\u002Farxiv.org\u002Fabs\u002F1710.10196)，这两个模型可以基于文字描述合成人脸。\n240. \u003Ckbd>1300+\u003C\u002Fkbd> [pytorch - fid](https:\u002F\u002Fgithub.com\u002Fmseitzer\u002Fpytorch-fid): A Port of Fréchet Inception Distance (FID score) to PyTorch\n241. \u003Ckbd>1000-\u003C\u002Fkbd> [vae_vpflows](https:\u002F\u002Fgithub.com\u002Fjmtomczak\u002Fvae_vpflows):Code in PyTorch for the convex combination linear IAF and the Householder Flow, J.M. Tomczak & M. Welling jmtomczak.github.io\u002Fdeebmed.html\n242. \u003Ckbd>1000-\u003C\u002Fkbd> [CoordConv-pytorch](https:\u002F\u002Fgithub.com\u002Fmkocabas\u002FCoordConv-pytorch): Pytorch implementation of CoordConv introduced in 'An intriguing failing of convolutional neural networks and the CoordConv solution' paper. (arxiv.org\u002Fpdf\u002F1807.03247.pdf)\n243. \u003Ckbd>1000-\u003C\u002Fkbd> [SDPoint](https:\u002F\u002Fgithub.com\u002Fxternalz\u002FSDPoint): Implementation of \"Stochastic Downsampling for Cost-Adjustable Inference and Improved Regularization in Convolutional Networks\", published in CVPR 2018. \n244. 
\u003Ckbd>1000-\u003C\u002Fkbd> [SRDenseNet-pytorch](https:\u002F\u002Fgithub.com\u002Fwxywhu\u002FSRDenseNet-pytorch): 极深网络，SRDenseNet-pytorch，论文：[基于密集跳跃连接的图像超分辨率（ICCV_2017）](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fpapers\u002FTong_Image_Super-Resolution_Using_ICCV_2017_paper.pdf)。\n245. \u003Ckbd>1000-\u003C\u002Fkbd> [GAN_stability](https:\u002F\u002Fgithub.com\u002FLMescheder\u002FGAN_stability): Code for paper \"Which Training Methods for GANs do actually Converge? (ICML 2018)\"\n246. \u003Ckbd>1000-\u003C\u002Fkbd> [Mask-RCNN](https:\u002F\u002Fgithub.com\u002FwannabeOG\u002FMask-RCNN): A PyTorch implementation of the architecture of Mask RCNN, serves as an introduction to working with PyTorch\n247. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-coviar](https:\u002F\u002Fgithub.com\u002Fchaoyuaw\u002Fpytorch-coviar): Compressed Video Action Recognition\n248. \u003Ckbd>1000-\u003C\u002Fkbd> [PNASNet.pytorch](https:\u002F\u002Fgithub.com\u002Fchenxi116\u002FPNASNet.pytorch): PyTorch implementation of PNASNet-5 on ImageNet. \n249. \u003Ckbd>1000-\u003C\u002Fkbd> [NALU-pytorch](https:\u002F\u002Fgithub.com\u002Fkevinzakka\u002FNALU-pytorch): Basic pytorch implementation of NAC\u002FNALU from Neural Arithmetic Logic Units arxiv.org\u002Fpdf\u002F1808.00508.pdf\n250. \u003Ckbd>1000-\u003C\u002Fkbd> [LOLA_DiCE](https:\u002F\u002Fgithub.com\u002Falexis-jacq\u002FLOLA_DiCE): Pytorch 使用[DiCE](arxiv.org\u002Fabs\u002F1802.05098)实现[LOLA](arxiv.org\u002Fabs\u002F1709.04326)。\n251. \u003Ckbd>1000-\u003C\u002Fkbd> [generative-query-network-pytorch](https:\u002F\u002Fgithub.com\u002Fwohlert\u002Fgenerative-query-network-pytorch): Generative Query Network (GQN) in PyTorch as described in \"Neural Scene Representation and Rendering\"\n252. 
\u003Ckbd>1000-\u003C\u002Fkbd> [pytorch_hmax](https:\u002F\u002Fgithub.com\u002Fwmvanvliet\u002Fpytorch_hmax): 在PyTorch中实现[HMAX(Hierarchical Model and X)](https:\u002F\u002Fmaxlab.neuro.georgetown.edu\u002Fhmax.html#inside)视觉模型。\n253. \u003Ckbd>1000-\u003C\u002Fkbd> [FCN-pytorch-easiest](https:\u002F\u002Fgithub.com\u002Fyunlongdong\u002FFCN-pytorch-easiest): trying to be the most easiest and just get-to-use pytorch implementation of FCN (Fully Convolotional Networks)\n254. \u003Ckbd>1000-\u003C\u002Fkbd> [transducer](https:\u002F\u002Fgithub.com\u002Fawni\u002Ftransducer): A Fast Sequence Transducer Implementation with PyTorch Bindings.\n255. \u003Ckbd>1000-\u003C\u002Fkbd> [AVO-pytorch](https:\u002F\u002Fgithub.com\u002Fartix41\u002FAVO-pytorch): Implementation of Adversarial Variational Optimization in PyTorch.\n256. \u003Ckbd>1000-\u003C\u002Fkbd> [HCN-pytorch](https:\u002F\u002Fgithub.com\u002Fhuguyuehuhu\u002FHCN-pytorch): A pytorch reimplementation of { Co-occurrence Feature Learning from Skeleton Data for Action Recognition and Detection with Hierarchical Aggregation }.\n257. \u003Ckbd>1000-\u003C\u002Fkbd> [binary-wide-resnet](https:\u002F\u002Fgithub.com\u002Fszagoruyko\u002Fbinary-wide-resnet): PyTorch implementation of Wide Residual Networks with 1-bit weights by McDonnel (ICLR 2018)\n258. \u003Ckbd>1000-\u003C\u002Fkbd> [piggyback](https:\u002F\u002Fgithub.com\u002Farunmallya\u002Fpiggyback): Code for Piggyback: Adapting a Single Network to Multiple Tasks by Learning to Mask Weights arxiv.org\u002Fabs\u002F1801.06519\n259. \u003Ckbd>7700+\u003C\u002Fkbd> [vid2vid](https:\u002F\u002Fgithub.com\u002FNVIDIA\u002Fvid2vid): Pytorch implementation of our method for high-resolution (e.g. 2048x1024) photorealistic video-to-video translation.\n260. \u003Ckbd>1000-\u003C\u002Fkbd> [poisson-convolution-sum](https:\u002F\u002Fgithub.com\u002Fcranmer\u002Fpoisson-convolution-sum): Implements an infinite sum of poisson-weighted convolutions\n261. 
\u003Ckbd>1000-\u003C\u002Fkbd> [tbd-nets](https:\u002F\u002Fgithub.com\u002Fdavidmascharka\u002Ftbd-nets): PyTorch implementation of \"Transparency by Design: Closing the Gap Between Performance and Interpretability in Visual Reasoning\" arxiv.org\u002Fabs\u002F1803.05268 \n262. \u003Ckbd>1000-\u003C\u002Fkbd> [attn2d](https:\u002F\u002Fgithub.com\u002Felbayadm\u002Fattn2d): Pervasive Attention: 2D Convolutional Networks for Sequence-to-Sequence Prediction\n263. \u003Ckbd>7500+\u003C\u002Fkbd> [yolov3](https:\u002F\u002Fgithub.com\u002Fultralytics\u002Fyolov3): YOLOv3: 训练和推断，https:\u002F\u002Fwww.ultralytics.com 。\n264. \u003Ckbd>1000-\u003C\u002Fkbd> [deep-dream-in-pytorch](https:\u002F\u002Fgithub.com\u002Fduc0\u002Fdeep-dream-in-pytorch): Pytorch implementation of the DeepDream computer vision algorithm. \n265. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-flows](https:\u002F\u002Fgithub.com\u002Fikostrikov\u002Fpytorch-flows): PyTorch implementations of algorithms for density estimation\n266. \u003Ckbd>1000-\u003C\u002Fkbd> [quantile-regression-dqn-pytorch](https:\u002F\u002Fgithub.com\u002Fars-ashuha\u002Fquantile-regression-dqn-pytorch): Quantile Regression DQN a Minimal Working Example\n267. \u003Ckbd>1000-\u003C\u002Fkbd> [relational-rnn-pytorch](https:\u002F\u002Fgithub.com\u002FL0SG\u002Frelational-rnn-pytorch): An implementation of DeepMind's Relational Recurrent Neural Networks in PyTorch.\n268. \u003Ckbd>1000-\u003C\u002Fkbd> [DEXTR-PyTorch](https:\u002F\u002Fgithub.com\u002Fscaelles\u002FDEXTR-PyTorch): 深度极端切割，http:\u002F\u002Fwww.vision.ee.ethz.ch\u002F~cvlsegmentation\u002Fdextr 。\n269. \u003Ckbd>1000-\u003C\u002Fkbd> [PyTorch_GBW_LM](https:\u002F\u002Fgithub.com\u002Frdspring1\u002FPyTorch_GBW_LM): PyTorch Language Model for Google Billion Word Dataset.\n270. \u003Ckbd>1000-\u003C\u002Fkbd> [Pytorch-NCE](https:\u002F\u002Fgithub.com\u002FStonesjtu\u002FPytorch-NCE): The Noise Contrastive Estimation for softmax output written in Pytorch\n271. 
\u003Ckbd>1000-\u003C\u002Fkbd> [generative-models](https:\u002F\u002Fgithub.com\u002Fshayneobrien\u002Fgenerative-models): Annotated, understandable, and visually interpretable PyTorch implementations of: VAE, BIRVAE, NSGAN, MMGAN, WGAN, WGANGP, LSGAN, DRAGAN, BEGAN, RaGAN, InfoGAN, fGAN, FisherGAN. \n272. \u003Ckbd>1000-\u003C\u002Fkbd> [convnet-aig](https:\u002F\u002Fgithub.com\u002Fandreasveit\u002Fconvnet-aig): PyTorch implementation for Convolutional Networks with Adaptive Inference Graphs.\n273. \u003Ckbd>1000-\u003C\u002Fkbd> [integrated-gradient-pytorch](https:\u002F\u002Fgithub.com\u002FTianhongDai\u002Fintegrated-gradient-pytorch): This is the pytorch implementation of the paper - Axiomatic Attribution for Deep Networks.\n274. \u003Ckbd>1000-\u003C\u002Fkbd> [MalConv-Pytorch](https:\u002F\u002Fgithub.com\u002FAlexander-H-Liu\u002FMalConv-Pytorch): Pytorch implementation of MalConv. \n275. \u003Ckbd>1000-\u003C\u002Fkbd> [trellisnet](https:\u002F\u002Fgithub.com\u002Flocuslab\u002Ftrellisnet): Trellis Networks for Sequence Modeling\n276. \u003Ckbd>1000-\u003C\u002Fkbd> [Learning to Communicate with Deep Multi-Agent Reinforcement Learning](https:\u002F\u002Fgithub.com\u002Fminqi\u002Flearning-to-communicate-pytorch): pytorch implementation of  Learning to Communicate with Deep Multi-Agent Reinforcement Learning paper.\n277. \u003Ckbd>1000-\u003C\u002Fkbd> [pnn.pytorch](https:\u002F\u002Fgithub.com\u002Fmichaelklachko\u002Fpnn.pytorch): PyTorch implementation of CVPR'18 - Perturbative Neural Networks http:\u002F\u002Fxujuefei.com\u002Fpnn.html.\n278. \u003Ckbd>1000-\u003C\u002Fkbd> [Face_Attention_Network](https:\u002F\u002Fgithub.com\u002Frainofmine\u002FFace_Attention_Network): Pytorch implementation of face attention network as described in Face Attention Network: An Effective Face Detector for the Occluded Faces.\n279. \u003Ckbd>1800+\u003C\u002Fkbd> [waveglow](https:\u002F\u002Fgithub.com\u002FNVIDIA\u002Fwaveglow): 基于流的语音合成生成网络。\n280. 
\u003Ckbd>1000-\u003C\u002Fkbd> [deepfloat](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fdeepfloat): This repository contains the SystemVerilog RTL, C++, HLS (Intel FPGA OpenCL to wrap RTL code) and Python needed to reproduce the numerical results in \"Rethinking floating point for deep learning\" \n281. \u003Ckbd>1000-\u003C\u002Fkbd> [EPSR](https:\u002F\u002Fgithub.com\u002Fsubeeshvasu\u002F2018_subeesh_epsr_eccvw): Pytorch implementation of [Analyzing Perception-Distortion Tradeoff using Enhanced Perceptual Super-resolution Network](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1811.00344.pdf). This work has won the first place in PIRM2018-SR competition (region 1) held as part of the ECCV 2018.\n282. \u003Ckbd>1000-\u003C\u002Fkbd> [ClariNet](https:\u002F\u002Fgithub.com\u002Fksw0306\u002FClariNet): Pytorch实现[ClariNet](https:\u002F\u002Farxiv.org\u002Fabs\u002F1807.07281)。\n283. \u003Ckbd>48900+\u003C\u002Fkbd> [pytorch-pretrained-BERT](https:\u002F\u002Fgithub.com\u002Fhuggingface\u002Fpytorch-pretrained-BERT): PyTorch version of Google AI's BERT model with script to load Google's pre-trained models\n284. \u003Ckbd>1000-\u003C\u002Fkbd> [torch_waveglow](https:\u002F\u002Fgithub.com\u002Fnpuichigo\u002Fwaveglow): PyTorch实现WaveGlow: 基于流的语音合成生成网络。\n285. \u003Ckbd>3000+\u003C\u002Fkbd> [3DDFA](https:\u002F\u002Fgithub.com\u002Fcleardusk\u002F3DDFA): The pytorch improved re-implementation of TPAMI 2017 paper: Face Alignment in Full Pose Range: A 3D Total Solution.\n286. \u003Ckbd>1600+\u003C\u002Fkbd> [loss-landscape](https:\u002F\u002Fgithub.com\u002Ftomgoldstein\u002Floss-landscape): loss-landscape Code for visualizing the loss landscape of neural nets.\n287. \u003Ckbd>1000-\u003C\u002Fkbd> [famos](https:\u002F\u002Fgithub.com\u002Fzalandoresearch\u002Ffamos):（非）参数图像风格化马赛克的对抗性框架。论文：http:\u002F\u002Farxiv.org\u002Fabs\u002F1811.09236 。\n288. 
\u003Ckbd>1000-\u003C\u002Fkbd> [back2future.pytorch](https:\u002F\u002Fgithub.com\u002Fanuragranj\u002Fback2future.pytorch): This is a Pytorch implementation of\nJanai, J., Güney, F., Ranjan, A., Black, M. and Geiger, A., Unsupervised Learning of Multi-Frame Optical Flow with Occlusions. ECCV 2018.\n289. \u003Ckbd>1000-\u003C\u002Fkbd> [FFTNet](https:\u002F\u002Fgithub.com\u002Fmozilla\u002FFFTNet): Unofficial Implementation of FFTNet vocoder paper.\n290. \u003Ckbd>1000-\u003C\u002Fkbd> [FaceBoxes.PyTorch](https:\u002F\u002Fgithub.com\u002Fzisianw\u002FFaceBoxes.PyTorch): PyTorch实现[FaceBoxes](https:\u002F\u002Farxiv.org\u002Fabs\u002F1708.05234)。\n291. \u003Ckbd>2900+\u003C\u002Fkbd> [Transformer-XL](https:\u002F\u002Fgithub.com\u002Fkimiyoung\u002Ftransformer-xl): Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context https:\u002F\u002Fgithub.com\u002Fkimiyoung\u002Ftransformer-xl\n292. \u003Ckbd>1000-\u003C\u002Fkbd> [associative_compression_networks](https:\u002F\u002Fgithub.com\u002Fjalexvig\u002Fassociative_compression_networks): Associative Compression Networks for Representation Learning. \n293. \u003Ckbd>1000-\u003C\u002Fkbd> [fluidnet_cxx](https:\u002F\u002Fgithub.com\u002Fjolibrain\u002Ffluidnet_cxx): FluidNet re-written with ATen tensor lib. \n294. \u003Ckbd>3700+\u003C\u002Fkbd> [Deep-Reinforcement-Learning-Algorithms-with-PyTorch](https:\u002F\u002Fgithub.com\u002Fp-christ\u002FDeep-Reinforcement-Learning-Algorithms-with-PyTorch): This repository contains PyTorch implementations of deep reinforcement learning algorithms.\n295. \u003Ckbd>1000-\u003C\u002Fkbd> [Shufflenet-v2-Pytorch](https:\u002F\u002Fgithub.com\u002Fericsun99\u002FShufflenet-v2-Pytorch): This is a Pytorch implementation of faceplusplus's ShuffleNet-v2. \n296. \u003Ckbd>1000-\u003C\u002Fkbd> [GraphWaveletNeuralNetwork](https:\u002F\u002Fgithub.com\u002Fbenedekrozemberczki\u002FGraphWaveletNeuralNetwork): This is a Pytorch implementation of Graph Wavelet Neural Network. 
ICLR 2019. \n297. \u003Ckbd>1000-\u003C\u002Fkbd> [AttentionWalk](https:\u002F\u002Fgithub.com\u002Fbenedekrozemberczki\u002FAttentionWalk): This is a Pytorch implementation of Watch Your Step: Learning Node Embeddings via Graph Attention. NIPS 2018.\n298. \u003Ckbd>1000-\u003C\u002Fkbd> [SGCN](https:\u002F\u002Fgithub.com\u002Fbenedekrozemberczki\u002FSGCN): This is a Pytorch implementation of Signed Graph Convolutional Network. ICDM 2018.\n299. \u003Ckbd>1000-\u003C\u002Fkbd> [SINE](https:\u002F\u002Fgithub.com\u002Fbenedekrozemberczki\u002FSINE): This is a Pytorch implementation of SINE: Scalable Incomplete Network Embedding. ICDM 2018.\n300. \u003Ckbd>1000-\u003C\u002Fkbd> [GAM](https:\u002F\u002Fgithub.com\u002Fbenedekrozemberczki\u002FGAM): This is a Pytorch implementation of Graph Classification using Structural Attention. KDD 2018.\n301. \u003Ckbd>1000-\u003C\u002Fkbd> [neural-style-pt](https:\u002F\u002Fgithub.com\u002FProGamerGov\u002Fneural-style-pt): PyTorch 实现 Justin Johnson 的神经风格算法。论文：[A Neural Algorithm of Artistic Style](https:\u002F\u002Farxiv.org\u002Fabs\u002F1508.06576)。\n302. \u003Ckbd>1000-\u003C\u002Fkbd> [TuckER](https:\u002F\u002Fgithub.com\u002Fibalazevic\u002FTuckER): TuckER: Tensor Factorization for Knowledge Graph Completion.\n303. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-prunes](https:\u002F\u002Fgithub.com\u002FBayesWatch\u002Fpytorch-prunes): Pruning neural networks: is it time to nip it in the bud?\n304. \u003Ckbd>1000-\u003C\u002Fkbd> [SimGNN](https:\u002F\u002Fgithub.com\u002Fbenedekrozemberczki\u002FSimGNN): SimGNN: 一个快速图形相似度计算的神经网络方法。论文：A Neural Network Approach to Fast Graph Similarity Computation.\n305. \u003Ckbd>1000-\u003C\u002Fkbd> [Character CNN](https:\u002F\u002Fgithub.com\u002Fahmedbesbes\u002Fcharacter-based-cnn): PyTorch implementation of the Character-level Convolutional Networks for Text Classification paper. \n306. 
\u003Ckbd>2400+\u003C\u002Fkbd> [XLM](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FXLM): PyTorch original implementation of Cross-lingual Language Model Pretraining.\n307. \u003Ckbd>1000-\u003C\u002Fkbd> [DiffAI](https:\u002F\u002Fgithub.com\u002Feth-sri\u002Fdiffai): A provable defense against adversarial examples and library for building compatible PyTorch models.\n308. \u003Ckbd>1000-\u003C\u002Fkbd> [APPNP](https:\u002F\u002Fgithub.com\u002Fbenedekrozemberczki\u002FAPPNP): Combining Neural Networks with Personalized PageRank for Classification on Graphs. ICLR 2019.\n309. \u003Ckbd>1000-\u003C\u002Fkbd> [NGCN](https:\u002F\u002Fgithub.com\u002Fbenedekrozemberczki\u002FMixHop-and-N-GCN): A Higher-Order Graph Convolutional Layer. NeurIPS 2018.\n310. \u003Ckbd>1000-\u003C\u002Fkbd> [gpt-2-Pytorch](https:\u002F\u002Fgithub.com\u002Fgraykode\u002Fgpt-2-Pytorch): Simple Text-Generator with OpenAI gpt-2 Pytorch Implementation\n311. \u003Ckbd>1000-\u003C\u002Fkbd> [Splitter](https:\u002F\u002Fgithub.com\u002Fbenedekrozemberczki\u002FSplitter): Splitter: Learning Node Representations that Capture Multiple Social Contexts. (WWW 2019).\n312. \u003Ckbd>1000+\u003C\u002Fkbd> [CapsGNN](https:\u002F\u002Fgithub.com\u002Fbenedekrozemberczki\u002FCapsGNN): 胶囊图神经网络，[Capsule Graph Neural Network](https:\u002F\u002Fopenreview.net\u002Fforum?id=Byl8BnRcYm)。\n313. \u003Ckbd>2300+\u003C\u002Fkbd> [BigGAN-PyTorch](https:\u002F\u002Fgithub.com\u002Fajbrock\u002FBigGAN-PyTorch): PyTorch实现BigGAN（非官方）。\n314. \u003Ckbd>1000-\u003C\u002Fkbd> [ppo_pytorch_cpp](https:\u002F\u002Fgithub.com\u002Fmhubii\u002Fppo_pytorch_cpp): 近端策略优化算法的C++ API。\n315. \u003Ckbd>1000-\u003C\u002Fkbd> [RandWireNN](https:\u002F\u002Fgithub.com\u002Fseungwonpark\u002FRandWireNN): 基于随机连接神经网络性能的图像识别。\n316. 
\u003Ckbd>1000-\u003C\u002Fkbd> [Zero-shot Intent CapsNet](https:\u002F\u002Fgithub.com\u002Fjoel-huang\u002Fzeroshot-capsnet-pytorch): GPU-accelerated PyTorch implementation of \"Zero-shot User Intent Detection via Capsule Neural Networks\".\n317. \u003Ckbd>1000-\u003C\u002Fkbd> [SEAL-CI](https:\u002F\u002Fgithub.com\u002Fbenedekrozemberczki\u002FSEAL-CI) 半监督图分类：层次图视角，Semi-Supervised Graph Classification: A Hierarchical Graph Perspective. (WWW 2019)。\n318. \u003Ckbd>1000-\u003C\u002Fkbd> [MixHop](https:\u002F\u002Fgithub.com\u002Fbenedekrozemberczki\u002FMixHop-and-N-GCN): MixHop: Higher-Order Graph Convolutional Architectures via Sparsified Neighborhood Mixing. ICML 2019.\n319. \u003Ckbd>1000-\u003C\u002Fkbd> [densebody_pytorch](https:\u002F\u002Fgithub.com\u002FLotayou\u002Fdensebody_pytorch): PyTorch implementation of CloudWalk's recent paper DenseBody.\n320. \u003Ckbd>1000-\u003C\u002Fkbd> [voicefilter](https:\u002F\u002Fgithub.com\u002Fmindslab-ai\u002Fvoicefilter): Unofficial PyTorch implementation of Google AI's VoiceFilter system http:\u002F\u002Fswpark.me\u002Fvoicefilter. \n321. \u003Ckbd>1300+\u003C\u002Fkbd> [NVIDIA\u002Fsemantic-segmentation](https:\u002F\u002Fgithub.com\u002FNVIDIA\u002Fsemantic-segmentation): PyTorch实现“利用视频传播和标签松弛改进语义分割”。论文：[Improving Semantic Segmentation via Video Propagation and Label Relaxation](https:\u002F\u002Farxiv.org\u002Fabs\u002F1812.01593), In CVPR2019.\n322. \u003Ckbd>1000-\u003C\u002Fkbd> [ClusterGCN](https:\u002F\u002Fgithub.com\u002Fbenedekrozemberczki\u002FClusterGCN): A PyTorch implementation of \"Cluster-GCN: An Efficient Algorithm for Training Deep and Large Graph Convolutional Networks\" (KDD 2019).\n323. \u003Ckbd>1000+\u003C\u002Fkbd> [NVlabs\u002FDG-Net](https:\u002F\u002Fgithub.com\u002FNVlabs\u002FDG-Net): A PyTorch implementation of \"Joint Discriminative and Generative Learning for Person Re-identification\" (CVPR19 Oral). \n324. 
\u003Ckbd>1000-\u003C\u002Fkbd> [NCRF](https:\u002F\u002Fgithub.com\u002Fbaidu-research\u002FNCRF): 基于神经网络条件随机场(NCRF)的肿瘤转移检测，相关论文：https:\u002F\u002Fopenreview.net\u002Fforum?id=S1aY66iiM。\n325. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-sift](https:\u002F\u002Fgithub.com\u002Fducha-aiki\u002Fpytorch-sift): PyTorch实现SIFT（尺度不变特征变换匹配算法，Scale Invariant Feature Transform）描述子。\n326. \u003Ckbd>1000-\u003C\u002Fkbd> [brain-segmentation-pytorch](https:\u002F\u002Fgithub.com\u002Fmateuszbuda\u002Fbrain-segmentation-pytorch): 深度学习分割网络U-Net的PyTorch模型实现，用于脑核磁共振中FLAIR异常的分割。\n327. \u003Ckbd>1000-\u003C\u002Fkbd> [glow-pytorch](https:\u002F\u002Fgithub.com\u002Frosinality\u002Fglow-pytorch): PyTorch 实现 \"[Glow, Generative Flow with Invertible 1x1 Convolutions](https:\u002F\u002Farxiv.org\u002Fabs\u002F1807.03039)\"。\n328. \u003Ckbd>1000-\u003C\u002Fkbd> [EfficientNets-PyTorch](https:\u002F\u002Fgithub.com\u002Fzsef123\u002FEfficientNets-PyTorch): PyTorch实现EfficientNet: 卷积神经网络模型尺度的再思考。\n329. \u003Ckbd>1000-\u003C\u002Fkbd> [STEAL](https:\u002F\u002Fgithub.com\u002Fnv-tlabs\u002FSTEAL): STEAL - 从噪声标注中学习语义边界，https:\u002F\u002Fnv-tlabs.github.io\u002FSTEAL\u002F 。\n330. \u003Ckbd>1000-\u003C\u002Fkbd> [EigenDamage-Pytorch](https:\u002F\u002Fgithub.com\u002Falecwangcq\u002FEigenDamage-Pytorch): 官方实现 ICML'19 论文 \"[特征损伤：克罗内克分解特征基中的结构剪枝](https:\u002F\u002Farxiv.org\u002Fabs\u002F1905.05934)\"。\n331. \u003Ckbd>1000-\u003C\u002Fkbd> [Aspect-level-sentiment](https:\u002F\u002Fgithub.com\u002Fruidan\u002FAspect-level-sentiment): 论文代码和数据集，ACL2018论文：\"[利用文档知识进行体层情感分类](https:\u002F\u002Farxiv.org\u002Fabs\u002F1806.04346)\"。\n332. \u003Ckbd>1000-\u003C\u002Fkbd> [breast_cancer_classifier](https:\u002F\u002Fgithub.com\u002Fnyukat\u002Fbreast_cancer_classifier): 深层神经网络提高放射科医生乳腺癌筛查的效果，https:\u002F\u002Farxiv.org\u002Fabs\u002F1903.08297 。\n333. 
\u003Ckbd>1000-\u003C\u002Fkbd> [DGC-Net](https:\u002F\u002Fgithub.com\u002FAaltoVision\u002FDGC-Net): PyTorch实现\"[DGC-Net: 密集几何对应网络](https:\u002F\u002Farxiv.org\u002Fabs\u002F1810.08393)\".\n334. \u003Ckbd>1000-\u003C\u002Fkbd> [universal-triggers](https:\u002F\u002Fgithub.com\u002FEric-Wallace\u002Funiversal-triggers): Universal Adversarial Triggers for Attacking and Analyzing NLP (EMNLP 2019)\n335. \u003Ckbd>3700+\u003C\u002Fkbd> [Deep-Reinforcement-Learning-Algorithms-with-PyTorch](https:\u002F\u002Fgithub.com\u002Fp-christ\u002FDeep-Reinforcement-Learning-Algorithms-with-PyTorch): PyTorch implementations of deep reinforcement learning algorithms and environments.\n336. \u003Ckbd>1000-\u003C\u002Fkbd> [simple-effective-text-matching-pytorch](https:\u002F\u002Fgithub.com\u002Falibaba-edu\u002Fsimple-effective-text-matching-pytorch): A pytorch implementation of the ACL2019 paper \"Simple and Effective Text Matching with Richer Alignment Features\".\n337. \u003Ckbd>null\u003C\u002Fkbd> [Adaptive-segmentation-mask-attack (ASMA)](https:\u002F\u002Fgithub.com\u002Futkuozbulak\u002Fadaptive-segmentation-mask-attack): A pytorch implementation of the MICCAI2019 paper \"Impact of Adversarial Examples on Deep Learning Models for Biomedical Image Segmentation\".\n338. \u003Ckbd>1000-\u003C\u002Fkbd> [NVIDIA\u002Funsupervised-video-interpolation](https:\u002F\u002Fgithub.com\u002FNVIDIA\u002Funsupervised-video-interpolation): A PyTorch Implementation of [Unsupervised Video Interpolation Using Cycle Consistency](https:\u002F\u002Farxiv.org\u002Fabs\u002F1906.05928), In ICCV 2019.\n339. \u003Ckbd>1000-\u003C\u002Fkbd> [Seg-Uncertainty](https:\u002F\u002Fgithub.com\u002Flayumi\u002FSeg-Uncertainty): Unsupervised Scene Adaptation with Memory Regularization in vivo, In IJCAI 2020.\n340. \u003Ckbd>5700+\u003C\u002Fkbd> [pulse](https:\u002F\u002Fgithub.com\u002Fadamian98\u002Fpulse): Self-Supervised Photo Upsampling via Latent Space Exploration of Generative Models\n341. 
\u003Ckbd>1000-\u003C\u002Fkbd> [distance-encoding](https:\u002F\u002Fgithub.com\u002Fsnap-stanford\u002Fdistance-encoding): Distance-Encoding - Design Provably More PowerfulGNNs for Structural Representation Learning.\n342. \u003Ckbd>1000-\u003C\u002Fkbd> [Pathfinder Discovery Networks](https:\u002F\u002Fgithub.com\u002Fbenedekrozemberczki\u002FPDN): Pathfinder Discovery Networks for Neural Message Passing.\n343. \u003Ckbd>1000-\u003C\u002Fkbd> [PyKEEN](https:\u002F\u002Fgithub.com\u002Fpykeen\u002Fpykeen): A Python library for learning and evaluating knowledge graph embeddings\n\n## 演讲与会议｜报告 & 会议\n\n1. [PyTorch Conference 2018](https:\u002F\u002Fdevelopers.facebook.com\u002Fvideos\u002F2018\u002Fpytorch-developer-conference\u002F): 2018年首届PyTorch开发者大会。\n\n## Pytorch相关｜Pytorch相关\n\n1. \u003Ckbd>8300+\u003C\u002Fkbd> [the-incredible-pytorch](https:\u002F\u002Fgithub.com\u002Fritchieng\u002Fthe-incredible-pytorch)**: 不可思议的Pythorch：一份PyTorch相关的教程、论文、项目、社区等的清单。\n2. \u003Ckbd>6500+\u003C\u002Fkbd> [generative models](https:\u002F\u002Fgithub.com\u002Fwiseodd\u002Fgenerative-models): 各种生成模型，例如基于Pytorch和Tensorflow的GAN、VAE。 http:\u002F\u002Fwiseodd.github.io  \n3. [pytorch vs tensorflow](https:\u002F\u002Fwww.reddit.com\u002Fr\u002FMachineLearning\u002Fcomments\u002F5w3q74\u002Fd_so_pytorch_vs_tensorflow_whats_the_verdict_on\u002F): Reddit上的PyTorch和TensorFlow的比较文章。\n4. [Pytorch discussion forum](https:\u002F\u002Fdiscuss.pytorch.org\u002F): PyTorch论坛。\n5. \u003Ckbd>null\u003C\u002Fkbd> [pytorch notebook: docker-stack](https:\u002F\u002Fhub.docker.com\u002Fr\u002Fescong\u002Fpytorch-notebook\u002F): 类似于 [Jupyter Notebook Scientific Python Stack](https:\u002F\u002Fgithub.com\u002Fjupyter\u002Fdocker-stacks\u002Ftree\u002Fmaster\u002Fscipy-notebook)\n6. \u003Ckbd>1000-\u003C\u002Fkbd> [drawlikebobross](https:\u002F\u002Fgithub.com\u002Fkendricktan\u002Fdrawlikebobross): 使用神经网络作画！\n7. 
\u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-tvmisc](https:\u002F\u002Fgithub.com\u002Ft-vi\u002Fpytorch-tvmisc): 该仓库收集了作者用PyTorch实现的各种玩意儿。\n8. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-a3c-mujoco](https:\u002F\u002Fgithub.com\u002Fandrewliao11\u002Fpytorch-a3c-mujoco): 该项目旨在解决Mujoco中的控制问题，高度基于pytorch-a3c。\n9. [PyTorch in 5 Minutes](https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=nbJ-2G2GXL0&list=WL&index=9).\n10. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch_chatbot](https:\u002F\u002Fgithub.com\u002Fjinfagang\u002Fpytorch_chatbot): 用PyTorch实现的聊天机器人。\n11. \u003Ckbd>1000-\u003C\u002Fkbd> [malmo-challenge](https:\u002F\u002Fgithub.com\u002FKaixhin\u002Fmalmo-challenge): Malmo协作人工智能挑战-Pig Catcher团队。\n12. \u003Ckbd>1000-\u003C\u002Fkbd> [sketchnet](https:\u002F\u002Fgithub.com\u002Fjtoy\u002Fsketchnet): 指导计算机作画。http:\u002F\u002Fwww.jtoy.net\u002Fprojects\u002Fsketchnet\u002F\n13. \u003Ckbd>1200+\u003C\u002Fkbd> [Deep-Learning-Boot-Camp](https:\u002F\u002Fgithub.com\u002FQuantScientist\u002FDeep-Learning-Boot-Camp): 非盈利社区运营的5天深度学习训练营。 http:\u002F\u002Fdeep-ml.com.\n14. \u003Ckbd>1000-\u003C\u002Fkbd> [Amazon_Forest_Computer_Vision](https:\u002F\u002Fgithub.com\u002Fmratsim\u002FAmazon_Forest_Computer_Vision): 亚马逊森林计算机视觉：使用PyTorch标记卫星图像标记\u002FKeras中的PyTorch技巧。\n15. \u003Ckbd>2400+\u003C\u002Fkbd> [AlphaZero_Gomoku](https:\u002F\u002Fgithub.com\u002Fjunxiaosong\u002FAlphaZero_Gomoku): 用AlphaZero算法玩五子棋。\n16. \u003Ckbd>null\u003C\u002Fkbd> [pytorch-cv](https:\u002F\u002Fgithub.com\u002Fyouansheng\u002Fpytorch-cv): null。\n17. \u003Ckbd>2800+\u003C\u002Fkbd> [deep-person-reid](https:\u002F\u002Fgithub.com\u002FKaiyangZhou\u002Fdeep-person-reid): Pytorch实现深度学习行人重新识别方法。\n18. \u003Ckbd>2700+\u003C\u002Fkbd> [pytorch-template](https:\u002F\u002Fgithub.com\u002Fvictoresque\u002Fpytorch-template): PyTorch深度学习模版。\n19. 
\u003Ckbd>1000-\u003C\u002Fkbd> [Deep Learning With Pytorch](https:\u002F\u002Fgithub.com\u002Fsvishnu88\u002FDLwithPyTorch): 随书代码《[Deep Learning With Pytorch TextBook](https:\u002F\u002Fwww.packtpub.com\u002Fbig-data-and-business-intelligence\u002Fdeep-learning-pytorch)》 PyTorch实用指南：使用PyTorch建立文本和视觉神经网络模型。[亚马逊中国电子版](https:\u002F\u002Fwww.amazon.cn\u002Fdp\u002FB078THDX3J\u002Fref=sr_1_1?__mk_zh_CN=亚马逊网站&keywords=Deep+Learning+with+PyTorch&qid=1568007543&s=gateway&sr=8-1)\n20. \u003Ckbd>1000-\u003C\u002Fkbd> [compare-tensorflow-pytorch](https:\u002F\u002Fgithub.com\u002Fjalola\u002Fcompare-tensorflow-pytorch): 比较用Tensorflow编写的层和用Pytorch编写的层之间的输出。\n21. \u003Ckbd>1000-\u003C\u002Fkbd> [hasktorch](https:\u002F\u002Fgithub.com\u002Fhasktorch\u002Fhasktorch): Haskell中的张量与神经网络。\n22. [Deep Learning With Pytorch](https:\u002F\u002Fwww.manning.com\u002Fbooks\u002Fdeep-learning-with-pytorch) Deep Learning with PyTorch 教你如何用Python和PyTorch实现深度学习算法。\n23. \u003Ckbd>1000-\u003C\u002Fkbd> [nimtorch](https:\u002F\u002Fgithub.com\u002Ffragcolor-xyz\u002Fnimtorch): PyTorch - Python + Nim，PyTorch的Nim前端。\n24. \u003Ckbd>1000-\u003C\u002Fkbd> [derplearning](https:\u002F\u002Fgithub.com\u002FJohn-Ellis\u002Fderplearning): 自动驾驶遥控车代码。\n25. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-saltnet](https:\u002F\u002Fgithub.com\u002Ftugstugi\u002Fpytorch-saltnet): Kaggle | TGS Salt Identification Challenge 第9名解决方案。\n26. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-scripts](https:\u002F\u002Fgithub.com\u002Fpeterjc123\u002Fpytorch-scripts): 一些脚本，使在Windows上使用PyTorch更加容易。\n27. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch_misc](https:\u002F\u002Fgithub.com\u002Fptrblck\u002Fpytorch_misc): 为PyTorch讨论板创建的代码片段。\n28. \u003Ckbd>1000-\u003C\u002Fkbd> [awesome-pytorch-scholarship](https:\u002F\u002Fgithub.com\u002Farnas\u002Fawesome-pytorch-scholarship): 收集了一系列优秀的PyTorch学术文章、指南、博客、课程和其他资源。\n29. 
\u003Ckbd>1000-\u003C\u002Fkbd> [MentisOculi](https:\u002F\u002Fgithub.com\u002Fmmirman\u002FMentisOculi): PyTorch版raytracer。(raynet?)\n30. \u003Ckbd>2400+\u003C\u002Fkbd> [DoodleMaster](https:\u002F\u002Fgithub.com\u002Fkaranchahal\u002FDoodleMaster): “画出UI！”(\"Don't code your UI, Draw it !\")\n31. \u003Ckbd>1000-\u003C\u002Fkbd> [ocaml-torch](https:\u002F\u002Fgithub.com\u002FLaurentMazare\u002Focaml-torch): ocaml-torch为PyTorch张量库提供一些ocaml绑定。\n32. \u003Ckbd>1000-\u003C\u002Fkbd> [extension-script](https:\u002F\u002Fgithub.com\u002Fpytorch\u002Fextension-script): TorchScript自定义C++\u002FCUDA运算符的示例。\n33. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-inference](https:\u002F\u002Fgithub.com\u002Fzccyman\u002Fpytorch-inference):  Windows10 平台上 Pytorch 1.0在 C++ 中的推断。\n34. \u003Ckbd>1000-\u003C\u002Fkbd> [pytorch-cpp-inference](https:\u002F\u002Fgithub.com\u002FWizaron\u002Fpytorch-cpp-inference): 包含使用PyTorch C++ API执行推断的各种示例。\n35. \u003Ckbd>1100+\u003C\u002Fkbd> [tch-rs](https:\u002F\u002Fgithub.com\u002FLaurentMazare\u002Ftch-rs): PyTorch的Rust绑定。\n36. \u003Ckbd>1000-\u003C\u002Fkbd> [TorchSharp](https:\u002F\u002Fgithub.com\u002Finteresaaat\u002FTorchSharp): Pytorch引擎的.NET绑定。\n37. \u003Ckbd>2000+\u003C\u002Fkbd> [ML Workspace](https:\u002F\u002Fgithub.com\u002Fml-tooling\u002Fml-workspace): 面向机器学习和数据科学的一体化Web IDE。包含Jupyter, VS Code, PyTorch 和许多其他工具或库，这些都集合在一个Docker映像中。\n38. \u003Ckbd>1100+\u003C\u002Fkbd> [PyTorch Style Guide](https:\u002F\u002Fgithub.com\u002FIgorSusmelj\u002Fpytorch-styleguide) Style guide for PyTorch code. 
Consistent and good code style helps collaboration and prevents errors!\n\n**反馈：如果您有任何想法，或者希望在此列表中添加其他内容，请随时贡献。**","# Awesome-pytorch-list-CNVersion 快速上手指南\n\n本指南旨在帮助中国开发者快速利用 **Awesome-pytorch-list** 资源库，找到并上手优秀的 PyTorch 开源项目。该列表汇总了自然语言处理（NLP）、计算机视觉（CV）、概率生成模型等领域的高质量工具、教程及论文实现。\n\n## 环境准备\n\n在开始使用前，请确保您的开发环境满足以下基本要求：\n\n*   **操作系统**：Linux (推荐 Ubuntu\u002FCentOS), macOS, 或 Windows (WSL2 推荐)。\n*   **Python 版本**：建议 Python 3.8 - 3.11（具体版本需参考所选子项目的要求）。\n*   **硬件加速**：推荐使用 NVIDIA GPU 以发挥 PyTorch 的加速优势，需安装对应的 CUDA Toolkit 和 cuDNN。\n*   **前置依赖**：\n    *   Git (用于克隆仓库)\n    *   pip 或 conda (包管理工具)\n\n> **国内加速建议**：\n> 推荐使用 **清华大学开源软件镜像站** 或 **阿里云镜像站** 加速 Python 包和 Git 仓库的下载。\n\n## 安装步骤\n\n由于本仓库是一个资源列表而非单一软件包，\"安装\"通常指克隆仓库以便查阅，以及安装您感兴趣的具体子项目。\n\n### 1. 克隆资源列表仓库\n首先将本列表克隆到本地，方便随时查阅分类好的项目链接。\n\n```bash\ngit clone https:\u002F\u002Fgithub.com\u002Fbharathgs\u002FAwesome-pytorch-list.git\ncd Awesome-pytorch-list\n```\n\n*(国内加速替代方案)*\n```bash\ngit clone https:\u002F\u002Fgitee.com\u002Fmirrors\u002FAwesome-pytorch-list.git\n# 或者使用国内代理\ngit clone https:\u002F\u002Fghproxy.com\u002Fhttps:\u002F\u002Fgithub.com\u002Fbharathgs\u002FAwesome-pytorch-list.git\n```\n\n### 2. 安装基础 PyTorch 环境\n在使用列表中的任何项目前，您需要先安装 PyTorch 核心库。推荐使用国内镜像源安装。\n\n**使用 pip (推荐清华源):**\n```bash\npip install torch torchvision torchaudio --index-url https:\u002F\u002Fpypi.tuna.tsinghua.edu.cn\u002Fsimple\n```\n\n**使用 conda (推荐清华源):**\n```bash\nconda install pytorch torchvision torchaudio pytorch-cuda=11.8 -c pytorch -c nvidia\n# 若连接官方源慢，可配置 conda 使用清华源后执行上述命令\n```\n\n### 3. 
安装具体子项目\n浏览仓库中的 `README.md` 文件，找到您需要的领域（如 NLP 或 CV），选择对应项目（例如 `transformers` 或 `detectron2`），进入其官方 GitHub 页面按照该项目特有的 `Installation` 说明进行安装。\n\n通用安装模式示例（以 HuggingFace Transformers 为例）：\n```bash\npip install transformers -i https:\u002F\u002Fpypi.tuna.tsinghua.edu.cn\u002Fsimple\n```\n\n## 基本使用\n\n本列表的核心用法是**检索**与**集成**。以下是基于列表中热门项目的基本使用流程示例。\n\n### 场景一：快速开始 NLP 任务 (基于 Transformers)\n列表中高星项目 [transformers](https:\u002F\u002Fgithub.com\u002Fhuggingface\u002Ftransformers) 是最常用的 NLP 工具。\n\n1.  **导入库并加载模型**：\n    ```python\n    from transformers import pipeline\n\n    # 创建一个情感分析管道，自动下载预训练模型\n    classifier = pipeline(\"sentiment-analysis\")\n\n    # 进行预测\n    result = classifier(\"I love using PyTorch for deep learning projects!\")\n    print(result)\n    # 输出示例: [{'label': 'POSITIVE', 'score': 0.9998}]\n    ```\n\n### 场景二：计算机视觉数据增强 (基于 Albumentations)\n列表中推荐的 [albumentations](https:\u002F\u002Fgithub.com\u002Falbu\u002Falbumentations) 是高效的图像增强库。\n\n1.  **定义增强流程并应用**：\n    ```python\n    import albumentations as A\n    import cv2\n\n    # 定义增强变换\n    transform = A.Compose([\n        A.RandomCrop(width=256, height=256),\n        A.HorizontalFlip(p=0.5),\n        A.Rotate(limit=45, p=0.5),\n    ])\n\n    # 读取图像 (BGR 格式)\n    image = cv2.imread(\"example.jpg\")\n    \n    # 应用变换\n    transformed = transform(image=image)\n    transformed_image = transformed[\"image\"]\n    ```\n\n### 场景三：查找特定领域的论文实现\n如果您需要复现某篇论文（如语音识别或目标检测）：\n1.  打开本地克隆的 `README.md` 文件。\n2.  搜索关键词（如 \"Speech\", \"Detection\", \"BERT\"）。\n3.  点击对应的项目链接（如 `espnet`, `MMDetection`）。\n4.  
进入项目主页，复制其提供的 Quick Start 代码片段即可运行。\n\n---\n*提示：列表中每个项目旁的数字（如 \u003Ckbd>48900+\u003C\u002Fkbd>）代表该项目的 Star 数量，可作为项目流行度和稳定性的参考指标。*","某国内高校的自然语言处理实验室团队正计划复现一篇关于多语言情感分析的最新论文，并需要快速搭建基于 PyTorch 的实验环境。\n\n### 没有 Awesome-pytorch-list-CNVersion 时\n- **语言障碍严重**：团队成员需反复查阅英文原版列表，对 \"Probabilistic\u002FGenerative Libraries\" 等专业分类术语理解耗时，非英语母语成员难以快速定位所需资源。\n- **筛选效率低下**：面对 GitHub 上海量的 PyTorch 项目，无法区分哪些是成熟的工业级库（如 AllenNLP），哪些是过时的个人练习，导致在无效代码上浪费数天时间。\n- **领域匹配困难**：寻找特定的“语音转文字”或“共指消解”工具时，缺乏清晰的中文分类指引，容易遗漏像 `fairseq` 或 `neuralcoref` 这样的高星关键项目。\n- **环境配置迷茫**：新手成员不知道从何入手学习，缺乏经过筛选的中文教程和书籍指引，导致前期调研周期被无限拉长。\n\n### 使用 Awesome-pytorch-list-CNVersion 后\n- **无障碍快速检索**：借助精准的中文翻译，团队能立即理解“自然语言处理 & 语音处理”等分类含义，直接锁定目标领域，消除了语言隔阂。\n- **精准锁定高质资源**：通过列表中明确标注的星标数（如 10300+ 的 AllenNLP）和中文简介，迅速甄别出社区认可度高、维护活跃的核心库，避免踩坑。\n- **按需直达具体工具**：利用详细的中文子分类，几分钟内就找到了用于多语言嵌入的 `LASER` 和用于端到端语音处理的 `espnet`，大幅缩短选型时间。\n- **系统化学习路径**：新成员依据“教程 & 书籍 & 示例”板块提供的中文指引，快速掌握了 PyTorch 基础，将原本两周的环境搭建期压缩至两天。\n\nAwesome-pytorch-list-CNVersion 通过本地化的高质量整理，将国内开发者在 PyTorch 生态中的探索成本降低了 80%，让技术选型从“大海捞针”变为“按图索骥”。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fxavier-zy_Awesome-pytorch-list-CNVersion_ee4141e0.png","xavier-zy","Zhaoyu Zhang","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002Fxavier-zy_5b5742c5.jpg","undergraduate student",null,"mid_earth123@outlook.com","https:\u002F\u002Fgithub.com\u002Fxavier-zy",[80],{"name":81,"color":82,"percentage":83},"Jupyter Notebook","#DA5B0B",100,1786,405,"2026-04-04T20:12:05",1,"","未说明（列表中包含多个项目，部分项目如 pytorch 提到强 GPU 加速，但具体型号、显存及 CUDA 版本需参考各子项目文档）","未说明",{"notes":92,"python":90,"dependencies":93},"该仓库是一个 PyTorch 相关项目和库的精选列表（Awesome List），而非单一软件工具。因此没有统一的运行环境需求。列表中每个项目（如 transformers, detectron2, fairseq 等）都有独立的环境配置要求，请根据您具体想要运行的子项目查阅其各自的 README 
或文档。",[94,95,96],"torch","torchvision","torchaudio",[15,35,16,14],[99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117],"pytorch","python","machine-learning","deep-learning","tutorials","papers","pytorch-tutorials","data-sicence","nlp","nlp-library","cv","computer-vision","facebook","probabilistic-programming","utility-library","neural-network","pytorch-models","awsome-pytorch-list","cnversion","2026-03-27T02:49:30.150509","2026-04-07T13:32:18.705434",[],[]]