[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-weiaicunzai--awesome-image-classification":3,"tool-weiaicunzai--awesome-image-classification":64},[4,17,27,35,43,56],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":16},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,3,"2026-04-05T11:01:52",[13,14,15],"开发框架","图像","Agent","ready",{"id":18,"name":19,"github_repo":20,"description_zh":21,"stars":22,"difficulty_score":23,"last_commit_at":24,"category_tags":25,"status":16},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",138956,2,"2026-04-05T11:33:21",[13,15,26],"语言模型",{"id":28,"name":29,"github_repo":30,"description_zh":31,"stars":32,"difficulty_score":23,"last_commit_at":33,"category_tags":34,"status":16},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 
AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",107662,"2026-04-03T11:11:01",[13,14,15],{"id":36,"name":37,"github_repo":38,"description_zh":39,"stars":40,"difficulty_score":23,"last_commit_at":41,"category_tags":42,"status":16},3704,"NextChat","ChatGPTNextWeb\u002FNextChat","NextChat 是一款轻量且极速的 AI 助手，旨在为用户提供流畅、跨平台的大模型交互体验。它完美解决了用户在多设备间切换时难以保持对话连续性，以及面对众多 AI 模型不知如何统一管理的痛点。无论是日常办公、学习辅助还是创意激发，NextChat 都能让用户随时随地通过网页、iOS、Android、Windows、MacOS 或 Linux 端无缝接入智能服务。\n\n这款工具非常适合普通用户、学生、职场人士以及需要私有化部署的企业团队使用。对于开发者而言，它也提供了便捷的自托管方案，支持一键部署到 Vercel 或 Zeabur 等平台。\n\nNextChat 的核心亮点在于其广泛的模型兼容性，原生支持 Claude、DeepSeek、GPT-4 及 Gemini Pro 等主流大模型，让用户在一个界面即可自由切换不同 AI 能力。此外，它还率先支持 MCP（Model Context Protocol）协议，增强了上下文处理能力。针对企业用户，NextChat 提供专业版解决方案，具备品牌定制、细粒度权限控制、内部知识库整合及安全审计等功能，满足公司对数据隐私和个性化管理的高标准要求。",87618,"2026-04-05T07:20:52",[13,26],{"id":44,"name":45,"github_repo":46,"description_zh":47,"stars":48,"difficulty_score":23,"last_commit_at":49,"category_tags":50,"status":16},2268,"ML-For-Beginners","microsoft\u002FML-For-Beginners","ML-For-Beginners 是由微软推出的一套系统化机器学习入门课程，旨在帮助零基础用户轻松掌握经典机器学习知识。这套课程将学习路径规划为 12 周，包含 26 节精炼课程和 52 道配套测验，内容涵盖从基础概念到实际应用的完整流程，有效解决了初学者面对庞大知识体系时无从下手、缺乏结构化指导的痛点。\n\n无论是希望转型的开发者、需要补充算法背景的研究人员，还是对人工智能充满好奇的普通爱好者，都能从中受益。课程不仅提供了清晰的理论讲解，还强调动手实践，让用户在循序渐进中建立扎实的技能基础。其独特的亮点在于强大的多语言支持，通过自动化机制提供了包括简体中文在内的 50 多种语言版本，极大地降低了全球不同背景用户的学习门槛。此外，项目采用开源协作模式，社区活跃且内容持续更新，确保学习者能获取前沿且准确的技术资讯。如果你正寻找一条清晰、友好且专业的机器学习入门之路，ML-For-Beginners 将是理想的起点。",84991,"2026-04-05T10:45:23",[14,51,52,53,15,54,26,13,55],"数据工具","视频","插件","其他","音频",{"id":57,"name":58,"github_repo":59,"description_zh":60,"stars":61,"difficulty_score":10,"last_commit_at":62,"category_tags":63,"status":16},3128,"ragflow","infiniflow\u002Fragflow","RAGFlow 
是一款领先的开源检索增强生成（RAG）引擎，旨在为大语言模型构建更精准、可靠的上下文层。它巧妙地将前沿的 RAG 技术与智能体（Agent）能力相结合，不仅支持从各类文档中高效提取知识，还能让模型基于这些知识进行逻辑推理和任务执行。\n\n在大模型应用中，幻觉问题和知识滞后是常见痛点。RAGFlow 通过深度解析复杂文档结构（如表格、图表及混合排版），显著提升了信息检索的准确度，从而有效减少模型“胡编乱造”的现象，确保回答既有据可依又具备时效性。其内置的智能体机制更进一步，使系统不仅能回答问题，还能自主规划步骤解决复杂问题。\n\n这款工具特别适合开发者、企业技术团队以及 AI 研究人员使用。无论是希望快速搭建私有知识库问答系统，还是致力于探索大模型在垂直领域落地的创新者，都能从中受益。RAGFlow 提供了可视化的工作流编排界面和灵活的 API 接口，既降低了非算法背景用户的上手门槛，也满足了专业开发者对系统深度定制的需求。作为基于 Apache 2.0 协议开源的项目，它正成为连接通用大模型与行业专有知识之间的重要桥梁。",77062,"2026-04-04T04:44:48",[15,14,13,26,54],{"id":65,"github_repo":66,"name":67,"description_en":68,"description_zh":69,"ai_summary_zh":70,"readme_en":71,"readme_zh":72,"quickstart_zh":73,"use_case_zh":74,"hero_image_url":75,"owner_login":76,"owner_name":77,"owner_avatar_url":78,"owner_bio":79,"owner_company":80,"owner_location":81,"owner_email":77,"owner_twitter":77,"owner_website":77,"owner_url":82,"languages":77,"stars":83,"forks":84,"last_commit_at":85,"license":77,"difficulty_score":86,"env_os":87,"env_gpu":88,"env_ram":88,"env_deps":89,"category_tags":92,"github_topics":93,"view_count":23,"oss_zip_url":77,"oss_zip_packed_at":77,"status":16,"created_at":100,"updated_at":101,"faqs":102,"releases":103},2177,"weiaicunzai\u002Fawesome-image-classification","awesome-image-classification","A curated list of deep learning image classification papers and codes","awesome-image-classification 是一份精心整理的深度学习图像分类资源清单，收录了自 2014 年以来该领域的重要学术论文与对应代码实现。它主要解决了初学者和研究者在进入计算机视觉领域时面临的痛点：面对海量文献不知从何下手，或缺乏系统性的学习路径指引。通过梳理从经典的 VGG、GoogleNet 到 ResNet、Inception 系列等主流模型的发展脉络，这份清单为用户提供了清晰的技术演进视图。\n\n该项目特别适合深度学习入门者、高校研究人员以及需要快速复现基准模型的开发者使用。对于零基础用户，作者还贴心地给出了建议的学习顺序，帮助其稳步建立知识体系。其独特亮点在于不仅罗列论文，还提供了详细的性能对比表，直观展示各模型在 ImageNet 数据集上的 Top-1 和 Top-5 准确率、发表会议等关键信息，方便用户根据精度需求或模型复杂度进行选型。此外，项目还关联了部分网络的 PyTorch 实现仓库，进一步降低了代码复现的门槛。无论是希望夯实基础的学生，还是寻求最新研究灵感的专家，awesome-image-classification 都是一份极具价值的参","awesome-image-classification 是一份精心整理的深度学习图像分类资源清单，收录了自 2014 年以来该领域的重要学术论文与对应代码实现。它主要解决了初学者和研究者在进入计算机视觉领域时面临的痛点：面对海量文献不知从何下手，或缺乏系统性的学习路径指引。通过梳理从经典的 
VGG、GoogleNet 到 ResNet、Inception 系列等主流模型的发展脉络，这份清单为用户提供了清晰的技术演进视图。\n\n该项目特别适合深度学习入门者、高校研究人员以及需要快速复现基准模型的开发者使用。对于零基础用户，作者还贴心地给出了建议的学习顺序，帮助其稳步建立知识体系。其独特亮点在于不仅罗列论文，还提供了详细的性能对比表，直观展示各模型在 ImageNet 数据集上的 Top-1 和 Top-5 准确率、发表会议等关键信息，方便用户根据精度需求或模型复杂度进行选型。此外，项目还关联了部分网络的 PyTorch 实现仓库，进一步降低了代码复现的门槛。无论是希望夯实基础的学生，还是寻求最新研究灵感的专家，awesome-image-classification 都是一份极具价值的参考指南。","\n\n# Awesome - Image Classification\n\n[![Awesome](https:\u002F\u002Fcdn.rawgit.com\u002Fsindresorhus\u002Fawesome\u002Fd7305f38d29fed78fa85652e3a63e154dd8e8829\u002Fmedia\u002Fbadge.svg)](https:\u002F\u002Fgithub.com\u002Fsindresorhus\u002Fawesome)\n\nA curated list of deep learning image classification papers and codes since 2014, Inspired by [awesome-object-detection](https:\u002F\u002Fgithub.com\u002Famusi\u002Fawesome-object-detection), [deep_learning_object_detection](https:\u002F\u002Fgithub.com\u002Fhoya012\u002Fdeep_learning_object_detection) and [awesome-deep-learning-papers](https:\u002F\u002Fgithub.com\u002Fterryum\u002Fawesome-deep-learning-papers).\n\n## Background\n\nI believe image classification is a great start point before diving into other computer vision fields, espacially\nfor begginers who know nothing about deep learning. When I started to learn computer vision, I've made a lot of mistakes, I wish someone could have told me that which paper I should start with back then. There doesn't seem to have a repository to have a list of image classification papers like [deep_learning_object_detection](https:\u002F\u002Fgithub.com\u002Fhoya012\u002Fdeep_learning_object_detection) until now. Therefore, I decided to make a repository\nof a list of deep learning image classification papers and codes to help others. 
My personal advice for people who\nknow nothing about deep learning, try to start with vgg, then googlenet, resnet, feel free to continue reading other listed papers or switch to other fields after you are finished.\n\n**Note: I also have a repository of pytorch implementation of some of the image classification networks, you can check out [here](https:\u002F\u002Fgithub.com\u002Fweiaicunzai\u002Fpytorch-cifar100).**\n\n## Performance Table\n\nFor simplicity reason, I only listed the best top1 and top5 accuracy on ImageNet from the papers. Note that this does not necessarily mean one network is better than another when the acc is higher, cause some networks are focused on reducing the model complexity instead of improving accuracy, or some papers only give the single crop results on ImageNet, but others give the model fusion or multicrop results.\n\n- ConvNet: name of the covolution network\n- ImageNet top1 acc: best top1 accuracy on ImageNet from the Paper\n- ImageNet top5 acc: best top5 accuracy on ImageNet from the Paper\n- Published In: which conference or journal the paper was published in.\n\n|         ConvNet            | ImageNet top1 acc | ImageNet top5 acc |   Published In     |\n|:--------------------------:|:-----------------:|:-----------------:|:------------------:|\n|           Vgg              |      76.3         |       93.2        |      ICLR2015      |\n|        GoogleNet           |       -           |       93.33       |      CVPR2015      |\n|        PReLU-nets          |       -           |       95.06       |      ICCV2015      |\n|          ResNet            |       -           |       96.43       |      CVPR2015      |\n|       PreActResNet         |      79.9         |       95.2        |      CVPR2016      |\n|       Inceptionv3          |      82.8         |       96.42       |      CVPR2016      |\n|       Inceptionv4          |      82.3         |       96.2        |      AAAI2016      |\n|    Inception-ResNet-v2     |      82.4       
  |       96.3        |      AAAI2016      |\n|Inceptionv4 + Inception-ResNet-v2|      83.5         |       96.92       |      AAAI2016      |\n|           RiR              |       -           |         -         |  ICLR Workshop2016 |\n|  Stochastic Depth ResNet   |      78.02        |         -         |      ECCV2016      |\n|           WRN              |      78.1         |       94.21       |      BMVC2016      |\n|       SqueezeNet           |      60.4         |       82.5        |      arXiv2017([rejected by ICLR2017](https:\u002F\u002Fopenreview.net\u002Fforum?id=S1xh5sYgx))     |\n|          GeNet             |      72.13        |       90.26       |      ICCV2017      |\n|         MetaQNN            |       -           |         -         |      ICLR2017      |\n|        PyramidNet          |      80.8         |       95.3        |      CVPR2017      |\n|         DenseNet           |      79.2         |       94.71       |      ECCV2017      |\n|        FractalNet          |      75.8         |       92.61       |      ICLR2017      |\n|         ResNext            |       -           |       96.97       |      CVPR2017      |\n|         IGCV1              |      73.05        |       91.08       |      ICCV2017      |\n| Residual Attention Network |      80.5         |       95.2        |      CVPR2017      |\n|        Xception            |       79          |       94.5        |      CVPR2017      |\n|        MobileNet           |      70.6         |         -         |      arXiv2017     |\n|         PolyNet            |      82.64        |       96.55       |      CVPR2017      |\n|           DPN              |       79          |       94.5        |      NIPS2017      |\n|        Block-QNN           |      77.4         |       93.54       |      CVPR2018      |\n|         CRU-Net            |      79.7         |       94.7        |      IJCAI2018     |\n|       DLA                  |      75.3         |         -         |      CVPR2018      |\n|      
 ShuffleNet           |      75.3         |         -         |      CVPR2018      |\n|       CondenseNet          |      73.8         |       91.7        |      CVPR2018      |\n|          NasNet            |      82.7         |       96.2        |      CVPR2018      |\n|       MobileNetV2          |      74.7         |         -         |      CVPR2018      |\n|         IGCV2              |      70.07        |         -         |      CVPR2018      |\n|          hier              |      79.7         |       94.8        |      ICLR2018      |\n|         PNasNet            |      82.9         |       96.2        |      ECCV2018      |\n|        AmoebaNet           |      83.9         |       96.6        |      AAAI2018      |\n|          SENet             |       -           |       97.749      |      CVPR2018      |\n|       ShuffleNetV2         |      81.44        |         -         |      ECCV2018      |\n|       CBAM                 |      79.93        |         94.41     |      ECCV2018      |\n|          IGCV3             |      72.2         |         -         |      BMVC2018      |\n|          BAM               |      77.56        |       93.71       |      BMVC2018      |\n|         MnasNet            |      76.13        |       92.85       |      CVPR2018      |\n|          SKNet             |      80.60        |         -         |      CVPR2019      |\n|          DARTS             |      73.3         |       91.3        |      ICLR2019      |\n|       ProxylessNAS         |      75.1         |       92.5        |      ICLR2019      |\n|       MobileNetV3          |      75.2         |         -         |      CVPR2019      |\n|          Res2Net           |      79.2         |       94.37       |      PAMI2019      |\n|       LIP-ResNet           |      79.33        |       94.6        |      ICCV2019      |\n|       EfficientNet         |      84.3         |       97.0        |      ICML2019      |\n|       FixResNeXt           |      86.4         |    
   98.0        |      NIPS2019      |\n|       BiT                  |      87.5         |         -         |      ECCV2020      |\n|       PSConv + ResNext101  |      80.502       |       95.276      |      ECCV2020      |\n|       NoisyStudent         |      88.4         |       98.7        |      CVPR2020      |\n|       RegNet               |      79.9         |       -           |      CVPR2020      |\n|       GhostNet             |      75.7         |       -           |      CVPR2020      |\n|       ViT                  |      88.55        |       -           |      ICLR2021      |\n|       DeiT                 |      85.2         |       -           |      ICML2021      |\n|       PVT                  |      81.7         |       -           |      ICCV2021      |\n|       T2T-Vit              |      83.3         |       -           |      ICCV2021      |\n|       DeepVit              |      80.9         |       -           |      Arvix2021     |\n|       ViL                  |      83.7         |       -           |      ICCV2021      |\n|       TNT                  |      83.9         |       -           |      Arvix2021     |\n|       CvT                  |      87.7         |       -           |      ICCV2021      |\n|       CViT                 |      84.1         |       -           |      ICCV2021      |\n|       Focal-T              |      84.0         |       -           |      NIPS2021      |\n|       Twins                |      83.7         |       -           |      NIPS2021      |\n|       PVTv2                |      81.7         |       -           |      CVM2022       |\n\n\n## Papers&Codes\n\n### VGG\n**Very Deep Convolutional Networks for Large-Scale Image Recognition.**\nKaren Simonyan, Andrew Zisserman\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1409.1556](https:\u002F\u002Farxiv.org\u002Fabs\u002F1409.1556)\n- code: [torchvision : 
https:\u002F\u002Fgithub.com\u002Fpytorch\u002Fvision\u002Fblob\u002Fmaster\u002Ftorchvision\u002Fmodels\u002Fvgg.py](https:\u002F\u002Fgithub.com\u002Fpytorch\u002Fvision\u002Fblob\u002Fmaster\u002Ftorchvision\u002Fmodels\u002Fvgg.py)\n- code: [keras-applications : https:\u002F\u002Fgithub.com\u002Fkeras-team\u002Fkeras-applications\u002Fblob\u002Fmaster\u002Fkeras_applications\u002Fvgg16.py](https:\u002F\u002Fgithub.com\u002Fkeras-team\u002Fkeras-applications\u002Fblob\u002Fmaster\u002Fkeras_applications\u002Fvgg16.py)\n- code: [keras-applications : https:\u002F\u002Fgithub.com\u002Fkeras-team\u002Fkeras-applications\u002Fblob\u002Fmaster\u002Fkeras_applications\u002Fvgg19.py](https:\u002F\u002Fgithub.com\u002Fkeras-team\u002Fkeras-applications\u002Fblob\u002Fmaster\u002Fkeras_applications\u002Fvgg19.py)\n\n### GoogleNet\n**Going Deeper with Convolutions**\nChristian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed, Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1409.4842](https:\u002F\u002Farxiv.org\u002Fabs\u002F1409.4842)\n- code: [unofficial-tensorflow : https:\u002F\u002Fgithub.com\u002Fconan7882\u002FGoogLeNet-Inception](https:\u002F\u002Fgithub.com\u002Fconan7882\u002FGoogLeNet-Inception)\n- code: [unofficial-caffe : https:\u002F\u002Fgithub.com\u002Flim0606\u002Fcaffe-googlenet-bn](https:\u002F\u002Fgithub.com\u002Flim0606\u002Fcaffe-googlenet-bn)\n\n### PReLU-nets\n**Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification**\nKaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1502.01852](https:\u002F\u002Farxiv.org\u002Fabs\u002F1502.01852)\n- code: [unofficial-chainer : https:\u002F\u002Fgithub.com\u002Fnutszebra\u002Fprelu_net](https:\u002F\u002Fgithub.com\u002Fnutszebra\u002Fprelu_net)\n\n### ResNet\n**Deep Residual Learning for Image Recognition**\nKaiming He, Xiangyu Zhang, 
Shaoqing Ren, Jian Sun\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1512.03385](https:\u002F\u002Farxiv.org\u002Fabs\u002F1512.03385)\n- code: [facebook-torch : https:\u002F\u002Fgithub.com\u002Ffacebook\u002Ffb.resnet.torch](https:\u002F\u002Fgithub.com\u002Ffacebook\u002Ffb.resnet.torch)\n- code: [torchvision : https:\u002F\u002Fgithub.com\u002Fpytorch\u002Fvision\u002Fblob\u002Fmaster\u002Ftorchvision\u002Fmodels\u002Fresnet.py](https:\u002F\u002Fgithub.com\u002Fpytorch\u002Fvision\u002Fblob\u002Fmaster\u002Ftorchvision\u002Fmodels\u002Fresnet.py)\n- code: [keras-applications : https:\u002F\u002Fgithub.com\u002Fkeras-team\u002Fkeras-applications\u002Fblob\u002Fmaster\u002Fkeras_applications\u002Fresnet.py](https:\u002F\u002Fgithub.com\u002Fkeras-team\u002Fkeras-applications\u002Fblob\u002Fmaster\u002Fkeras_applications\u002Fresnet.py)\n- code: [unofficial-keras : https:\u002F\u002Fgithub.com\u002Fraghakot\u002Fkeras-resnet](https:\u002F\u002Fgithub.com\u002Fraghakot\u002Fkeras-resnet)\n- code: [unofficial-tensorflow : https:\u002F\u002Fgithub.com\u002Fry\u002Ftensorflow-resnet](https:\u002F\u002Fgithub.com\u002Fry\u002Ftensorflow-resnet)\n\n### PreActResNet\n**Identity Mappings in Deep Residual Networks**\nKaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1603.05027](https:\u002F\u002Farxiv.org\u002Fabs\u002F1603.05027)\n- code: [facebook-torch : https:\u002F\u002Fgithub.com\u002Ffacebook\u002Ffb.resnet.torch\u002Fblob\u002Fmaster\u002Fmodels\u002Fpreresnet.lua](https:\u002F\u002Fgithub.com\u002Ffacebook\u002Ffb.resnet.torch\u002Fblob\u002Fmaster\u002Fmodels\u002Fpreresnet.lua)\n- code: [official : https:\u002F\u002Fgithub.com\u002FKaimingHe\u002Fresnet-1k-layers](https:\u002F\u002Fgithub.com\u002FKaimingHe\u002Fresnet-1k-layers)\n- code: [unoffical-pytorch : 
https:\u002F\u002Fgithub.com\u002Fkuangliu\u002Fpytorch-cifar\u002Fblob\u002Fmaster\u002Fmodels\u002Fpreact_resnet.py](https:\u002F\u002Fgithub.com\u002Fkuangliu\u002Fpytorch-cifar\u002Fblob\u002Fmaster\u002Fmodels\u002Fpreact_resnet.py)\n- code: [unoffical-mxnet : https:\u002F\u002Fgithub.com\u002Ftornadomeet\u002FResNet](https:\u002F\u002Fgithub.com\u002Ftornadomeet\u002FResNet)\n\n### Inceptionv3\n**Rethinking the Inception Architecture for Computer Vision**\nChristian Szegedy, Vincent Vanhoucke, Sergey Ioffe, Jonathon Shlens, Zbigniew Wojna\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1512.00567](https:\u002F\u002Farxiv.org\u002Fabs\u002F1512.00567)\n- code: [torchvision : https:\u002F\u002Fgithub.com\u002Fpytorch\u002Fvision\u002Fblob\u002Fmaster\u002Ftorchvision\u002Fmodels\u002Finception.py](https:\u002F\u002Fgithub.com\u002Fpytorch\u002Fvision\u002Fblob\u002Fmaster\u002Ftorchvision\u002Fmodels\u002Finception.py)\n- code: [keras-applications : https:\u002F\u002Fgithub.com\u002Fkeras-team\u002Fkeras-applications\u002Fblob\u002Fmaster\u002Fkeras_applications\u002Finception_v3.py](https:\u002F\u002Fgithub.com\u002Fkeras-team\u002Fkeras-applications\u002Fblob\u002Fmaster\u002Fkeras_applications\u002Finception_v3.py)\n\n### Inceptionv4 && Inception-ResNetv2\n**Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning**\nChristian Szegedy, Sergey Ioffe, Vincent Vanhoucke, Alex Alemi\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1602.07261](https:\u002F\u002Farxiv.org\u002Fabs\u002F1602.07261)\n- code: [unofficial-keras : https:\u002F\u002Fgithub.com\u002Fkentsommer\u002Fkeras-inceptionV4](https:\u002F\u002Fgithub.com\u002Fkentsommer\u002Fkeras-inceptionV4)\n- code: [unofficial-keras : https:\u002F\u002Fgithub.com\u002Ftitu1994\u002FInception-v4](https:\u002F\u002Fgithub.com\u002Ftitu1994\u002FInception-v4)\n- code: [unofficial-keras : 
https:\u002F\u002Fgithub.com\u002Fyuyang-huang\u002Fkeras-inception-resnet-v2](https:\u002F\u002Fgithub.com\u002Fyuyang-huang\u002Fkeras-inception-resnet-v2)\n\n### RiR\n**Resnet in Resnet: Generalizing Residual Architectures**\nSasha Targ, Diogo Almeida, Kevin Lyman\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1603.08029](https:\u002F\u002Farxiv.org\u002Fabs\u002F1603.08029)\n- code: [unofficial-tensorflow : https:\u002F\u002Fgithub.com\u002FSunnerLi\u002FRiR-Tensorflow](https:\u002F\u002Fgithub.com\u002FSunnerLi\u002FRiR-Tensorflow)\n- code: [unofficial-chainer : https:\u002F\u002Fgithub.com\u002Fnutszebra\u002Fresnet_in_resnet](https:\u002F\u002Fgithub.com\u002Fnutszebra\u002Fresnet_in_resnet)\n\n### Stochastic Depth ResNet\n**Deep Networks with Stochastic Depth**\nGao Huang, Yu Sun, Zhuang Liu, Daniel Sedra, Kilian Weinberger\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1603.09382](https:\u002F\u002Farxiv.org\u002Fabs\u002F1603.09382)\n- code: [unofficial-torch : https:\u002F\u002Fgithub.com\u002Fyueatsprograms\u002FStochastic_Depth](https:\u002F\u002Fgithub.com\u002Fyueatsprograms\u002FStochastic_Depth)\n- code: [unofficial-chainer : https:\u002F\u002Fgithub.com\u002Fyasunorikudo\u002Fchainer-ResDrop](https:\u002F\u002Fgithub.com\u002Fyasunorikudo\u002Fchainer-ResDrop)\n- code: [unofficial-keras : https:\u002F\u002Fgithub.com\u002FdblN\u002Fstochastic_depth_keras](https:\u002F\u002Fgithub.com\u002FdblN\u002Fstochastic_depth_keras)\n\n### WRN\n**Wide Residual Networks**\nSergey Zagoruyko, Nikos Komodakis\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1605.07146](https:\u002F\u002Farxiv.org\u002Fabs\u002F1605.07146)\n- code: [official : https:\u002F\u002Fgithub.com\u002Fszagoruyko\u002Fwide-residual-networks](https:\u002F\u002Fgithub.com\u002Fszagoruyko\u002Fwide-residual-networks)\n- code: [unofficial-pytorch : 
https:\u002F\u002Fgithub.com\u002Fxternalz\u002FWideResNet-pytorch](https:\u002F\u002Fgithub.com\u002Fxternalz\u002FWideResNet-pytorch)\n- code: [unofficial-keras : https:\u002F\u002Fgithub.com\u002Fasmith26\u002Fwide_resnets_keras](https:\u002F\u002Fgithub.com\u002Fasmith26\u002Fwide_resnets_keras)\n- code: [unofficial-pytorch : https:\u002F\u002Fgithub.com\u002Fmeliketoy\u002Fwide-resnet.pytorch](https:\u002F\u002Fgithub.com\u002Fmeliketoy\u002Fwide-resnet.pytorch)\n\n### SqueezeNet\n**SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and \u003C0.5MB model size**\nForrest N. Iandola, Song Han, Matthew W. Moskewicz, Khalid Ashraf, William J. Dally, Kurt Keutzer\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1602.07360](https:\u002F\u002Farxiv.org\u002Fabs\u002F1602.07360)\n- code: [torchvision : https:\u002F\u002Fgithub.com\u002Fpytorch\u002Fvision\u002Fblob\u002Fmaster\u002Ftorchvision\u002Fmodels\u002Fsqueezenet.py](https:\u002F\u002Fgithub.com\u002Fpytorch\u002Fvision\u002Fblob\u002Fmaster\u002Ftorchvision\u002Fmodels\u002Fsqueezenet.py)\n- code: [unofficial-caffe : https:\u002F\u002Fgithub.com\u002FDeepScale\u002FSqueezeNet](https:\u002F\u002Fgithub.com\u002FDeepScale\u002FSqueezeNet)\n- code: [unofficial-keras : https:\u002F\u002Fgithub.com\u002Frcmalli\u002Fkeras-squeezenet](https:\u002F\u002Fgithub.com\u002Frcmalli\u002Fkeras-squeezenet)\n- code: [unofficial-caffe : https:\u002F\u002Fgithub.com\u002Fsonghan\u002FSqueezeNet-Residual](https:\u002F\u002Fgithub.com\u002Fsonghan\u002FSqueezeNet-Residual)\n\n### GeNet\n**Genetic CNN**\nLingxi Xie, Alan Yuille\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1703.01513](https:\u002F\u002Farxiv.org\u002Fabs\u002F1703.01513)\n- code: [unofficial-tensorflow : https:\u002F\u002Fgithub.com\u002Faqibsaeed\u002FGenetic-CNN](https:\u002F\u002Fgithub.com\u002Faqibsaeed\u002FGenetic-CNN)\n\n### MetaQNN\n**Designing Neural Network Architectures using Reinforcement Learning**\nBowen Baker, Otkrist Gupta, 
Nikhil Naik, Ramesh Raskar\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1611.02167](https:\u002F\u002Farxiv.org\u002Fabs\u002F1611.02167)\n- code: [official : https:\u002F\u002Fgithub.com\u002Fbowenbaker\u002Fmetaqnn](https:\u002F\u002Fgithub.com\u002Fbowenbaker\u002Fmetaqnn)\n\n### PyramidNet\n**Deep Pyramidal Residual Networks**\nDongyoon Han, Jiwhan Kim, Junmo Kim\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1610.02915](https:\u002F\u002Farxiv.org\u002Fabs\u002F1610.02915)\n- code: [official : https:\u002F\u002Fgithub.com\u002Fjhkim89\u002FPyramidNet](https:\u002F\u002Fgithub.com\u002Fjhkim89\u002FPyramidNet)\n- code: [unofficial-pytorch : https:\u002F\u002Fgithub.com\u002Fdyhan0920\u002FPyramidNet-PyTorch](https:\u002F\u002Fgithub.com\u002Fdyhan0920\u002FPyramidNet-PyTorch)\n\n### DenseNet\n**Densely Connected Convolutional Networks**\nGao Huang, Zhuang Liu, Laurens van der Maaten, Kilian Q. Weinberger\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1608.06993](https:\u002F\u002Farxiv.org\u002Fabs\u002F1608.06993)\n- code: [official : https:\u002F\u002Fgithub.com\u002Fliuzhuang13\u002FDenseNet](https:\u002F\u002Fgithub.com\u002Fliuzhuang13\u002FDenseNet)\n- code: [unofficial-keras : https:\u002F\u002Fgithub.com\u002Ftitu1994\u002FDenseNet](https:\u002F\u002Fgithub.com\u002Ftitu1994\u002FDenseNet)\n- code: [unofficial-caffe : https:\u002F\u002Fgithub.com\u002Fshicai\u002FDenseNet-Caffe](https:\u002F\u002Fgithub.com\u002Fshicai\u002FDenseNet-Caffe)\n- code: [unofficial-tensorflow : https:\u002F\u002Fgithub.com\u002FYixuanLi\u002Fdensenet-tensorflow](https:\u002F\u002Fgithub.com\u002FYixuanLi\u002Fdensenet-tensorflow)\n- code: [unofficial-pytorch : https:\u002F\u002Fgithub.com\u002FYixuanLi\u002Fdensenet-tensorflow](https:\u002F\u002Fgithub.com\u002FYixuanLi\u002Fdensenet-tensorflow)\n- code: [unofficial-pytorch : https:\u002F\u002Fgithub.com\u002Fbamos\u002Fdensenet.pytorch](https:\u002F\u002Fgithub.com\u002Fbamos\u002Fdensenet.pytorch)\n- code: 
[unofficial-keras : https:\u002F\u002Fgithub.com\u002Fflyyufelix\u002FDenseNet-Keras](https:\u002F\u002Fgithub.com\u002Fflyyufelix\u002FDenseNet-Keras)\n\n### FractalNet\n**FractalNet: Ultra-Deep Neural Networks without Residuals**\nGustav Larsson, Michael Maire, Gregory Shakhnarovich\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1605.07648](https:\u002F\u002Farxiv.org\u002Fabs\u002F1605.07648)\n- code: [unofficial-caffe : https:\u002F\u002Fgithub.com\u002Fgustavla\u002Ffractalnet](https:\u002F\u002Fgithub.com\u002Fgustavla\u002Ffractalnet)\n- code: [unofficial-keras : https:\u002F\u002Fgithub.com\u002Fsnf\u002Fkeras-fractalnet](https:\u002F\u002Fgithub.com\u002Fsnf\u002Fkeras-fractalnet)\n- code: [unofficial-tensorflow : https:\u002F\u002Fgithub.com\u002Ftensorpro\u002FFractalNet](https:\u002F\u002Fgithub.com\u002Ftensorpro\u002FFractalNet)\n\n### ResNext\n**Aggregated Residual Transformations for Deep Neural Networks**\nSaining Xie, Ross Girshick, Piotr Dollár, Zhuowen Tu, Kaiming He\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1611.05431](https:\u002F\u002Farxiv.org\u002Fabs\u002F1611.05431)\n- code: [official : https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FResNeXt](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FResNeXt)\n- code: [keras-applications : https:\u002F\u002Fgithub.com\u002Fkeras-team\u002Fkeras-applications\u002Fblob\u002Fmaster\u002Fkeras_applications\u002Fresnext.py](https:\u002F\u002Fgithub.com\u002Fkeras-team\u002Fkeras-applications\u002Fblob\u002Fmaster\u002Fkeras_applications\u002Fresnext.py)\n- code: [unofficial-pytorch : https:\u002F\u002Fgithub.com\u002Fprlz77\u002FResNeXt.pytorch](https:\u002F\u002Fgithub.com\u002Fprlz77\u002FResNeXt.pytorch)\n- code: [unofficial-keras : https:\u002F\u002Fgithub.com\u002Ftitu1994\u002FKeras-ResNeXt](https:\u002F\u002Fgithub.com\u002Ftitu1994\u002FKeras-ResNeXt)\n- code: [unofficial-tensorflow : 
https:\u002F\u002Fgithub.com\u002Ftaki0112\u002FResNeXt-Tensorflow](https:\u002F\u002Fgithub.com\u002Ftaki0112\u002FResNeXt-Tensorflow)\n- code: [unofficial-tensorflow : https:\u002F\u002Fgithub.com\u002Fwenxinxu\u002FResNeXt-in-tensorflow](https:\u002F\u002Fgithub.com\u002Fwenxinxu\u002FResNeXt-in-tensorflow)\n\n### IGCV1\n**Interleaved Group Convolutions for Deep Neural Networks**\nTing Zhang, Guo-Jun Qi, Bin Xiao, Jingdong Wang\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1707.02725](https:\u002F\u002Farxiv.org\u002Fabs\u002F1707.02725)\n- code [official : https:\u002F\u002Fgithub.com\u002Fhellozting\u002FInterleavedGroupConvolutions](https:\u002F\u002Fgithub.com\u002Fhellozting\u002FInterleavedGroupConvolutions)\n\n### Residual Attention Network\n**Residual Attention Network for Image Classification**\nFei Wang, Mengqing Jiang, Chen Qian, Shuo Yang, Cheng Li, Honggang Zhang, Xiaogang Wang, Xiaoou Tang\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1704.06904](https:\u002F\u002Farxiv.org\u002Fabs\u002F1704.06904)\n- code: [official : https:\u002F\u002Fgithub.com\u002Ffwang91\u002Fresidual-attention-network](https:\u002F\u002Fgithub.com\u002Ffwang91\u002Fresidual-attention-network)\n- code: [unofficial-pytorch : https:\u002F\u002Fgithub.com\u002Ftengshaofeng\u002FResidualAttentionNetwork-pytorch](https:\u002F\u002Fgithub.com\u002Ftengshaofeng\u002FResidualAttentionNetwork-pytorch)\n- code: [unofficial-gluon : https:\u002F\u002Fgithub.com\u002FPistonY\u002FResidualAttentionNetwork](https:\u002F\u002Fgithub.com\u002FPistonY\u002FResidualAttentionNetwork)\n- code: [unofficial-keras : https:\u002F\u002Fgithub.com\u002Fkoichiro11\u002Fresidual-attention-network](https:\u002F\u002Fgithub.com\u002Fkoichiro11\u002Fresidual-attention-network)\n\n### Xception\n**Xception: Deep Learning with Depthwise Separable Convolutions**\nFrançois Chollet\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1610.02357](https:\u002F\u002Farxiv.org\u002Fabs\u002F1610.02357)\n- 
code: [unofficial-pytorch : https:\u002F\u002Fgithub.com\u002Fjfzhang95\u002Fpytorch-deeplab-xception\u002Fblob\u002Fmaster\u002Fmodeling\u002Fbackbone\u002Fxception.py](https:\u002F\u002Fgithub.com\u002Fjfzhang95\u002Fpytorch-deeplab-xception\u002Fblob\u002Fmaster\u002Fmodeling\u002Fbackbone\u002Fxception.py)\n- code: [unofficial-tensorflow : https:\u002F\u002Fgithub.com\u002Fkwotsin\u002FTensorFlow-Xception](https:\u002F\u002Fgithub.com\u002Fkwotsin\u002FTensorFlow-Xception)\n- code: [unofficial-caffe : https:\u002F\u002Fgithub.com\u002Fyihui-he\u002FXception-caffe](https:\u002F\u002Fgithub.com\u002Fyihui-he\u002FXception-caffe)\n- code: [unofficial-pytorch : https:\u002F\u002Fgithub.com\u002Ftstandley\u002FXception-PyTorch](https:\u002F\u002Fgithub.com\u002Ftstandley\u002FXception-PyTorch)\n- code: [keras-applications : https:\u002F\u002Fgithub.com\u002Fkeras-team\u002Fkeras-applications\u002Fblob\u002Fmaster\u002Fkeras_applications\u002Fxception.py](https:\u002F\u002Fgithub.com\u002Fkeras-team\u002Fkeras-applications\u002Fblob\u002Fmaster\u002Fkeras_applications\u002Fxception.py)\n\n### MobileNet\n**MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications**\nAndrew G. 
Howard, Menglong Zhu, Bo Chen, Dmitry Kalenichenko, Weijun Wang, Tobias Weyand, Marco Andreetto, Hartwig Adam\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1704.04861](https:\u002F\u002Farxiv.org\u002Fabs\u002F1704.04861)\n- code: [unofficial-tensorflow : https:\u002F\u002Fgithub.com\u002FZehaos\u002FMobileNet](https:\u002F\u002Fgithub.com\u002FZehaos\u002FMobileNet)\n- code: [unofficial-caffe : https:\u002F\u002Fgithub.com\u002Fshicai\u002FMobileNet-Caffe](https:\u002F\u002Fgithub.com\u002Fshicai\u002FMobileNet-Caffe)\n- code: [unofficial-pytorch : https:\u002F\u002Fgithub.com\u002Fmarvis\u002Fpytorch-mobilenet](https:\u002F\u002Fgithub.com\u002Fmarvis\u002Fpytorch-mobilenet)\n- code: [keras-applications : https:\u002F\u002Fgithub.com\u002Fkeras-team\u002Fkeras-applications\u002Fblob\u002Fmaster\u002Fkeras_applications\u002Fmobilenet.py](https:\u002F\u002Fgithub.com\u002Fkeras-team\u002Fkeras-applications\u002Fblob\u002Fmaster\u002Fkeras_applications\u002Fmobilenet.py)\n\n### PolyNet\n**PolyNet: A Pursuit of Structural Diversity in Very Deep Networks**\nXingcheng Zhang, Zhizhong Li, Chen Change Loy, Dahua Lin\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1611.05725](https:\u002F\u002Farxiv.org\u002Fabs\u002F1611.05725)\n- code: [official : https:\u002F\u002Fgithub.com\u002Fopen-mmlab\u002Fpolynet](https:\u002F\u002Fgithub.com\u002Fopen-mmlab\u002Fpolynet)\n\n### DPN\n**Dual Path Networks**\nYunpeng Chen, Jianan Li, Huaxin Xiao, Xiaojie Jin, Shuicheng Yan, Jiashi Feng\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1707.01629](https:\u002F\u002Farxiv.org\u002Fabs\u002F1707.01629)\n- code: [official : https:\u002F\u002Fgithub.com\u002Fcypw\u002FDPNs](https:\u002F\u002Fgithub.com\u002Fcypw\u002FDPNs)\n- code: [unoffical-keras : https:\u002F\u002Fgithub.com\u002Ftitu1994\u002FKeras-DualPathNetworks](https:\u002F\u002Fgithub.com\u002Ftitu1994\u002FKeras-DualPathNetworks)\n- code: [unofficial-pytorch : 
https:\u002F\u002Fgithub.com\u002Foyam\u002Fpytorch-DPNs](https:\u002F\u002Fgithub.com\u002Foyam\u002Fpytorch-DPNs)\n- code: [unofficial-pytorch : https:\u002F\u002Fgithub.com\u002Frwightman\u002Fpytorch-dpn-pretrained](https:\u002F\u002Fgithub.com\u002Frwightman\u002Fpytorch-dpn-pretrained)\n\n### Block-QNN\n**Practical Block-wise Neural Network Architecture Generation**\nZhao Zhong, Junjie Yan, Wei Wu, Jing Shao, Cheng-Lin Liu\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1708.05552](https:\u002F\u002Farxiv.org\u002Fabs\u002F1708.05552)\n\n### CRU-Net\n**Sharing Residual Units Through Collective Tensor Factorization in Deep Neural Networks**\nChen Yunpeng, Jin Xiaojie, Kang Bingyi, Feng Jiashi, Yan Shuicheng\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1703.02180](https:\u002F\u002Farxiv.org\u002Fabs\u002F1703.02180)\n- code [official : https:\u002F\u002Fgithub.com\u002Fcypw\u002FCRU-Net](https:\u002F\u002Fgithub.com\u002Fcypw\u002FCRU-Net)\n- code [unofficial-mxnet : https:\u002F\u002Fgithub.com\u002Fbruinxiong\u002FModified-CRUNet-and-Residual-Attention-Network.mxnet](https:\u002F\u002Fgithub.com\u002Fbruinxiong\u002FModified-CRUNet-and-Residual-Attention-Network.mxnet)\n\n## DLA\n**Deep Layer Aggregation**\nFisher Yu, Dequan Wang, Evan Shelhamer, Trevor Darrell\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1707.06484](https:\u002F\u002Farxiv.org\u002Fabs\u002F1707.06484)\n- code: [official-pytorch: https:\u002F\u002Fgithub.com\u002Fucbdrive\u002Fdla](https:\u002F\u002Fgithub.com\u002Fucbdrive\u002Fdla)\n\n### ShuffleNet\n**ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices**\nXiangyu Zhang, Xinyu Zhou, Mengxiao Lin, Jian Sun\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1707.01083](https:\u002F\u002Farxiv.org\u002Fabs\u002F1707.01083)\n- code: [unofficial-tensorflow : https:\u002F\u002Fgithub.com\u002FMG2033\u002FShuffleNet](https:\u002F\u002Fgithub.com\u002FMG2033\u002FShuffleNet)\n- code: 
[unofficial-pytorch : https:\u002F\u002Fgithub.com\u002Fjaxony\u002FShuffleNet](https:\u002F\u002Fgithub.com\u002Fjaxony\u002FShuffleNet)\n- code: [unofficial-caffe : https:\u002F\u002Fgithub.com\u002Ffarmingyard\u002FShuffleNet](https:\u002F\u002Fgithub.com\u002Ffarmingyard\u002FShuffleNet)\n- code: [unofficial-keras : https:\u002F\u002Fgithub.com\u002Fscheckmedia\u002Fkeras-shufflenet](https:\u002F\u002Fgithub.com\u002Fscheckmedia\u002Fkeras-shufflenet)\n\n### CondenseNet\n**CondenseNet: An Efficient DenseNet using Learned Group Convolutions**\nGao Huang, Shichen Liu, Laurens van der Maaten, Kilian Q. Weinberger\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1711.09224](https:\u002F\u002Farxiv.org\u002Fabs\u002F1711.09224)\n- code: [official : https:\u002F\u002Fgithub.com\u002FShichenLiu\u002FCondenseNet](https:\u002F\u002Fgithub.com\u002FShichenLiu\u002FCondenseNet)\n- code: [unofficial-tensorflow : https:\u002F\u002Fgithub.com\u002Fmarkdtw\u002Fcondensenet-tensorflow](https:\u002F\u002Fgithub.com\u002Fmarkdtw\u002Fcondensenet-tensorflow)\n\n### NasNet\n**Learning Transferable Architectures for Scalable Image Recognition**\nBarret Zoph, Vijay Vasudevan, Jonathon Shlens, Quoc V. 
Le\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1707.07012](https:\u002F\u002Farxiv.org\u002Fabs\u002F1707.07012)\n- code: [unofficial-keras : https:\u002F\u002Fgithub.com\u002Ftitu1994\u002FKeras-NASNet](https:\u002F\u002Fgithub.com\u002Ftitu1994\u002FKeras-NASNet)\n- code: [keras-applications : https:\u002F\u002Fgithub.com\u002Fkeras-team\u002Fkeras-applications\u002Fblob\u002Fmaster\u002Fkeras_applications\u002Fnasnet.py](https:\u002F\u002Fgithub.com\u002Fkeras-team\u002Fkeras-applications\u002Fblob\u002Fmaster\u002Fkeras_applications\u002Fnasnet.py)\n- code: [unofficial-pytorch : https:\u002F\u002Fgithub.com\u002Fwandering007\u002Fnasnet-pytorch](https:\u002F\u002Fgithub.com\u002Fwandering007\u002Fnasnet-pytorch)\n- code: [unofficial-tensorflow : https:\u002F\u002Fgithub.com\u002Fyeephycho\u002Fnasnet-tensorflow](https:\u002F\u002Fgithub.com\u002Fyeephycho\u002Fnasnet-tensorflow)\n\n### MobileNetV2\n**MobileNetV2: Inverted Residuals and Linear Bottlenecks**\nMark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, Liang-Chieh Chen\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1801.04381](https:\u002F\u002Farxiv.org\u002Fabs\u002F1801.04381)\n- code: [unofficial-keras : https:\u002F\u002Fgithub.com\u002Fxiaochus\u002FMobileNetV2](https:\u002F\u002Fgithub.com\u002Fxiaochus\u002FMobileNetV2)\n- code: [unofficial-pytorch : https:\u002F\u002Fgithub.com\u002FRandl\u002FMobileNetV2-pytorch](https:\u002F\u002Fgithub.com\u002FRandl\u002FMobileNetV2-pytorch)\n- code: [unofficial-tensorflow : https:\u002F\u002Fgithub.com\u002Fneuleaf\u002FMobileNetV2](https:\u002F\u002Fgithub.com\u002Fneuleaf\u002FMobileNetV2)\n\n### IGCV2\n**IGCV2: Interleaved Structured Sparse Convolutional Neural Networks**\nGuotian Xie, Jingdong Wang, Ting Zhang, Jianhuang Lai, Richang Hong, Guo-Jun Qi\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1804.06202](https:\u002F\u002Farxiv.org\u002Fabs\u002F1804.06202)\n\n### hier\n**Hierarchical Representations for Efficient Architecture 
Search**\nHanxiao Liu, Karen Simonyan, Oriol Vinyals, Chrisantha Fernando, Koray Kavukcuoglu\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1711.00436](https:\u002F\u002Farxiv.org\u002Fabs\u002F1711.00436)\n\n### PNasNet\n**Progressive Neural Architecture Search**\nChenxi Liu, Barret Zoph, Maxim Neumann, Jonathon Shlens, Wei Hua, Li-Jia Li, Li Fei-Fei, Alan Yuille, Jonathan Huang, Kevin Murphy\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1712.00559](https:\u002F\u002Farxiv.org\u002Fabs\u002F1712.00559)\n- code: [tensorflow-slim : https:\u002F\u002Fgithub.com\u002Ftensorflow\u002Fmodels\u002Fblob\u002Fmaster\u002Fresearch\u002Fslim\u002Fnets\u002Fnasnet\u002Fpnasnet.py](https:\u002F\u002Fgithub.com\u002Ftensorflow\u002Fmodels\u002Fblob\u002Fmaster\u002Fresearch\u002Fslim\u002Fnets\u002Fnasnet\u002Fpnasnet.py)\n- code: [unofficial-pytorch : https:\u002F\u002Fgithub.com\u002Fchenxi116\u002FPNASNet.pytorch](https:\u002F\u002Fgithub.com\u002Fchenxi116\u002FPNASNet.pytorch)\n- code: [unofficial-tensorflow : https:\u002F\u002Fgithub.com\u002Fchenxi116\u002FPNASNet.TF](https:\u002F\u002Fgithub.com\u002Fchenxi116\u002FPNASNet.TF)\n\n### AmoebaNet\n**Regularized Evolution for Image Classifier Architecture Search**\nEsteban Real, Alok Aggarwal, Yanping Huang, Quoc V Le\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1802.01548](https:\u002F\u002Farxiv.org\u002Fabs\u002F1802.01548)\n- code: [tensorflow-tpu : https:\u002F\u002Fgithub.com\u002Ftensorflow\u002Ftpu\u002Ftree\u002Fmaster\u002Fmodels\u002Fofficial\u002Famoeba_net](https:\u002F\u002Fgithub.com\u002Ftensorflow\u002Ftpu\u002Ftree\u002Fmaster\u002Fmodels\u002Fofficial\u002Famoeba_net)\n\n### SENet\n**Squeeze-and-Excitation Networks**\nJie Hu, Li Shen, Samuel Albanie, Gang Sun, Enhua Wu\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1709.01507](https:\u002F\u002Farxiv.org\u002Fabs\u002F1709.01507)\n- code: [official : 
https:\u002F\u002Fgithub.com\u002Fhujie-frank\u002FSENet](https:\u002F\u002Fgithub.com\u002Fhujie-frank\u002FSENet)\n- code: [unofficial-pytorch : https:\u002F\u002Fgithub.com\u002Fmoskomule\u002Fsenet.pytorch](https:\u002F\u002Fgithub.com\u002Fmoskomule\u002Fsenet.pytorch)\n- code: [unofficial-tensorflow : https:\u002F\u002Fgithub.com\u002Ftaki0112\u002FSENet-Tensorflow](https:\u002F\u002Fgithub.com\u002Ftaki0112\u002FSENet-Tensorflow)\n- code: [unofficial-caffe : https:\u002F\u002Fgithub.com\u002Fshicai\u002FSENet-Caffe](https:\u002F\u002Fgithub.com\u002Fshicai\u002FSENet-Caffe)\n- code: [unofficial-mxnet : https:\u002F\u002Fgithub.com\u002Fbruinxiong\u002FSENet.mxnet](https:\u002F\u002Fgithub.com\u002Fbruinxiong\u002FSENet.mxnet)\n\n### ShuffleNetV2\n**ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design**\nNingning Ma, Xiangyu Zhang, Hai-Tao Zheng, Jian Sun\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1807.11164](https:\u002F\u002Farxiv.org\u002Fabs\u002F1807.11164)\n- code: [unofficial-pytorch : https:\u002F\u002Fgithub.com\u002FRandl\u002FShuffleNetV2-pytorch](https:\u002F\u002Fgithub.com\u002FRandl\u002FShuffleNetV2-pytorch)\n- code: [unofficial-keras : https:\u002F\u002Fgithub.com\u002Fopconty\u002Fkeras-shufflenetV2](https:\u002F\u002Fgithub.com\u002Fopconty\u002Fkeras-shufflenetV2)\n- code: [unofficial-pytorch : https:\u002F\u002Fgithub.com\u002FBugdragon\u002FShuffleNet_v2_PyTorch](https:\u002F\u002Fgithub.com\u002FBugdragon\u002FShuffleNet_v2_PyTorch)\n- code: [unofficial-caff2: https:\u002F\u002Fgithub.com\u002Fwolegechu\u002FShuffleNetV2.Caffe2](https:\u002F\u002Fgithub.com\u002Fwolegechu\u002FShuffleNetV2.Caffe2)\n\n### CBAM\nCBAM: Convolutional Block Attention Module\nSanghyun Woo, Jongchan Park, Joon-Young Lee, In So Kweon\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1807.06521](https:\u002F\u002Farxiv.org\u002Fabs\u002F1807.06521)\n- code: [official-pytorch : 
https:\u002F\u002Fgithub.com\u002FJongchan\u002Fattention-module](https:\u002F\u002Fgithub.com\u002FJongchan\u002Fattention-module)\n- code: [unofficial-pytorch : https:\u002F\u002Fgithub.com\u002Fluuuyi\u002FCBAM.PyTorch](https:\u002F\u002Fgithub.com\u002Fluuuyi\u002FCBAM.PyTorch)\n- code: [unofficial-pytorch : https:\u002F\u002Fgithub.com\u002Felbuco1\u002FCBAM](https:\u002F\u002Fgithub.com\u002Felbuco1\u002FCBAM)\n- code: [unofficial-keras : https:\u002F\u002Fgithub.com\u002Fkobiso\u002FCBAM-keras](https:\u002F\u002Fgithub.com\u002Fkobiso\u002FCBAM-keras)\n\n\n### IGCV3\n**IGCV3: Interleaved Low-Rank Group Convolutions for Efficient Deep Neural Networks**\nKe Sun, Mingjie Li, Dong Liu, Jingdong Wang\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1806.00178](https:\u002F\u002Farxiv.org\u002Fabs\u002F1806.00178)\n- code: [official : https:\u002F\u002Fgithub.com\u002Fhomles11\u002FIGCV3](https:\u002F\u002Fgithub.com\u002Fhomles11\u002FIGCV3)\n- code: [unofficial-pytorch : https:\u002F\u002Fgithub.com\u002Fxxradon\u002FIGCV3-pytorch](https:\u002F\u002Fgithub.com\u002Fxxradon\u002FIGCV3-pytorch)\n- code: [unofficial-tensorflow : https:\u002F\u002Fgithub.com\u002FZHANG-SHI-CHANG\u002FIGCV3](https:\u002F\u002Fgithub.com\u002FZHANG-SHI-CHANG\u002FIGCV3)\n\n### BAM\n**BAM: Bottleneck Attention Module**\nJongchan Park, Sanghyun Woo, Joon-Young Lee, In So Kweon\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1807.06514](https:\u002F\u002Farxiv.org\u002Fabs\u002F1807.06514)\n- code: [official-pytorch : https:\u002F\u002Fgithub.com\u002FJongchan\u002Fattention-module](https:\u002F\u002Fgithub.com\u002FJongchan\u002Fattention-module)\n- code: [unofficial-tensorflow : https:\u002F\u002Fgithub.com\u002Fhuyz1117\u002FBAM](https:\u002F\u002Fgithub.com\u002Fhuyz1117\u002FBAM)\n\n### MNasNet\n**MnasNet: Platform-Aware Neural Architecture Search for Mobile**\nMingxing Tan, Bo Chen, Ruoming Pang, Vijay Vasudevan, Quoc V. 
Le\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1807.11626](https:\u002F\u002Farxiv.org\u002Fabs\u002F1807.11626)\n- code: [unofficial-pytorch : https:\u002F\u002Fgithub.com\u002FAnjieZheng\u002FMnasNet-PyTorch](https:\u002F\u002Fgithub.com\u002FAnjieZheng\u002FMnasNet-PyTorch)\n- code: [unofficial-caffe : https:\u002F\u002Fgithub.com\u002FLiJianfei06\u002FMnasNet-caffe](https:\u002F\u002Fgithub.com\u002FLiJianfei06\u002FMnasNet-caffe)\n- code: [unofficial-MxNet : https:\u002F\u002Fgithub.com\u002Fchinakook\u002FMnasnet.MXNet](https:\u002F\u002Fgithub.com\u002Fchinakook\u002FMnasnet.MXNet)\n- code: [unofficial-keras : https:\u002F\u002Fgithub.com\u002FShathe\u002FMNasNet-Keras-Tensorflow](https:\u002F\u002Fgithub.com\u002FShathe\u002FMNasNet-Keras-Tensorflow)\n\n### SKNet\n**Selective Kernel Networks**\nXiang Li, Wenhai Wang, Xiaolin Hu, Jian Yang\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1903.06586](https:\u002F\u002Farxiv.org\u002Fabs\u002F1903.06586)\n- code: [official : https:\u002F\u002Fgithub.com\u002Fimplus\u002FSKNet](https:\u002F\u002Fgithub.com\u002Fimplus\u002FSKNet)\n\n### DARTS\n**DARTS: Differentiable Architecture Search**\nHanxiao Liu, Karen Simonyan, Yiming Yang\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1806.09055](https:\u002F\u002Farxiv.org\u002Fabs\u002F1806.09055)\n- code: [official : https:\u002F\u002Fgithub.com\u002Fquark0\u002Fdarts](https:\u002F\u002Fgithub.com\u002Fquark0\u002Fdarts)\n- code: [unofficial-pytorch : https:\u002F\u002Fgithub.com\u002Fkhanrc\u002Fpt.darts](https:\u002F\u002Fgithub.com\u002Fkhanrc\u002Fpt.darts)\n- code: [unofficial-tensorflow : https:\u002F\u002Fgithub.com\u002FNeroLoh\u002Fdarts-tensorflow](https:\u002F\u002Fgithub.com\u002FNeroLoh\u002Fdarts-tensorflow)\n\n### ProxylessNAS\n**ProxylessNAS: Direct Neural Architecture Search on Target Task and Hardware**\nHan Cai, Ligeng Zhu, Song Han\n- pdf: 
[https:\u002F\u002Farxiv.org\u002Fabs\u002F1812.00332](https:\u002F\u002Farxiv.org\u002Fabs\u002F1812.00332)\n- code: [official : https:\u002F\u002Fgithub.com\u002Fmit-han-lab\u002FProxylessNAS](https:\u002F\u002Fgithub.com\u002Fmit-han-lab\u002FProxylessNAS)\n\n### MobileNetV3\n**Searching for MobileNetV3**\nAndrew Howard, Mark Sandler, Grace Chu, Liang-Chieh Chen, Bo Chen, Mingxing Tan, Weijun Wang, Yukun Zhu, Ruoming Pang, Vijay Vasudevan, Quoc V. Le, Hartwig Adam\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1905.02244](https:\u002F\u002Farxiv.org\u002Fabs\u002F1905.02244)\n- code: [unofficial-pytorch : https:\u002F\u002Fgithub.com\u002Fxiaolai-sqlai\u002Fmobilenetv3](https:\u002F\u002Fgithub.com\u002Fxiaolai-sqlai\u002Fmobilenetv3)\n- code: [unofficial-pytorch : https:\u002F\u002Fgithub.com\u002Fkuan-wang\u002Fpytorch-mobilenet-v3](https:\u002F\u002Fgithub.com\u002Fkuan-wang\u002Fpytorch-mobilenet-v3)\n- code: [unofficial-pytorch : https:\u002F\u002Fgithub.com\u002Fleaderj1001\u002FMobileNetV3-Pytorch](https:\u002F\u002Fgithub.com\u002Fleaderj1001\u002FMobileNetV3-Pytorch)\n- code: [unofficial-pytorch : https:\u002F\u002Fgithub.com\u002Fd-li14\u002Fmobilenetv3.pytorch](https:\u002F\u002Fgithub.com\u002Fd-li14\u002Fmobilenetv3.pytorch)\n- code: [unofficial-caffe : https:\u002F\u002Fgithub.com\u002Fjixing0415\u002Fcaffe-mobilenet-v3](https:\u002F\u002Fgithub.com\u002Fjixing0415\u002Fcaffe-mobilenet-v3)\n- code: [unofficial-keras : https:\u002F\u002Fgithub.com\u002Fxiaochus\u002FMobileNetV3](https:\u002F\u002Fgithub.com\u002Fxiaochus\u002FMobileNetV3)\n\n### Res2Net\n**Res2Net: A New Multi-scale Backbone Architecture**\nShang-Hua Gao, Ming-Ming Cheng, Kai Zhao, Xin-Yu Zhang, Ming-Hsuan Yang, Philip Torr\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1904.01169](https:\u002F\u002Farxiv.org\u002Fabs\u002F1904.01169)\n- code: [unofficial-pytorch : 
https:\u002F\u002Fgithub.com\u002F4uiiurz1\u002Fpytorch-res2net](https:\u002F\u002Fgithub.com\u002F4uiiurz1\u002Fpytorch-res2net)\n- code: [unofficial-keras : https:\u002F\u002Fgithub.com\u002Ffupiao1998\u002Fres2net-keras](https:\u002F\u002Fgithub.com\u002Ffupiao1998\u002Fres2net-keras)\n- code: [official-pytorch : https:\u002F\u002Fgithub.com\u002FRes2Net](https:\u002F\u002Fgithub.com\u002FRes2Net)\n\n### LIP-ResNet\n**LIP: Local Importance-based Pooling**\nZiteng Gao, Limin Wang, Gangshan Wu\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1908.04156](https:\u002F\u002Farxiv.org\u002Fabs\u002F1908.04156)\n- code: [official-pytorch : https:\u002F\u002Fgithub.com\u002Fsebgao\u002FLIP](https:\u002F\u002Fgithub.com\u002Fsebgao\u002FLIP)\n\n### EfficientNet\n\n**EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks**\nMingxing Tan, Quoc V. Le\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1905.11946](https:\u002F\u002Farxiv.org\u002Fabs\u002F1905.11946)\n- code: [unofficial-pytorch : https:\u002F\u002Fgithub.com\u002Flukemelas\u002FEfficientNet-PyTorch](https:\u002F\u002Fgithub.com\u002Flukemelas\u002FEfficientNet-PyTorch)\n- code: [official-tensorflow : https:\u002F\u002Fgithub.com\u002Ftensorflow\u002Ftpu\u002Ftree\u002Fmaster\u002Fmodels\u002Fofficial\u002Fefficientnet](https:\u002F\u002Fgithub.com\u002Ftensorflow\u002Ftpu\u002Ftree\u002Fmaster\u002Fmodels\u002Fofficial\u002Fefficientnet)\n\n\n### FixResNeXt \n**Fixing the train-test resolution discrepancy**\nHugo Touvron, Andrea Vedaldi, Matthijs Douze, Hervé Jégou\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1906.06423](https:\u002F\u002Farxiv.org\u002Fabs\u002F1906.06423)\n- code: [official-pytorch : https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FFixRes](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FFixRes)\n\n\n### BiT\n**Big Transfer (BiT): General Visual Representation Learning**\nAlexander Kolesnikov, Lucas Beyer, Xiaohua Zhai, Joan Puigcerver, Jessica Yung, 
Sylvain Gelly, Neil Houlsby\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1912.11370](https:\u002F\u002Farxiv.org\u002Fabs\u002F1912.11370)\n- code: [official-tensorflow: https:\u002F\u002Fgithub.com\u002Fgoogle-research\u002Fbig_transfer](https:\u002F\u002Fgithub.com\u002Fgoogle-research\u002Fbig_transfer)\n\n### PSConv + ResNext101\n**PSConv: Squeezing Feature Pyramid into One Compact Poly-Scale Convolutional Layer**\nDuo Li1, Anbang Yao2B, and Qifeng Chen1B\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F2007.06191](https:\u002F\u002Farxiv.org\u002Fabs\u002F2007.06191)\n- code: [https:\u002F\u002Fgithub.com\u002Fd-li14\u002FPSConv](https:\u002F\u002Fgithub.com\u002Fd-li14\u002FPSConv)\n\n\n### NoisyStudent\n**Self-training with Noisy Student improves ImageNet classification**\nQizhe Xie, Minh-Thang Luong, Eduard Hovy, Quoc V. Le\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F1911.04252](https:\u002F\u002Farxiv.org\u002Fabs\u002F1911.04252)\n- code: [official-tensorflow: https:\u002F\u002Fgithub.com\u002Fgoogle-research\u002Fnoisystudent](https:\u002F\u002Fgithub.com\u002Fgoogle-research\u002Fnoisystudent)\n- code: [unofficial-pytorch: https:\u002F\u002Fgithub.com\u002Fsally20921\u002FNoisyStudent](https:\u002F\u002Fgithub.com\u002Fsally20921\u002FNoisyStudent)\n\n### RegNet\n**Designing Network Design Spaces**\nIlija Radosavovic, Raj Prateek Kosaraju, Ross Girshick, Kaiming He, Piotr Dollár\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F2003.13678](https:\u002F\u002Farxiv.org\u002Fabs\u002F2003.13678)\n- code: [official-pytorch: https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fpycls](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fpycls)\n- code: [unofficial-pytorch: https:\u002F\u002Fgithub.com\u002Fd-li14\u002Fregnet.pytorch](https:\u002F\u002Fgithub.com\u002Fd-li14\u002Fregnet.pytorch)\n\n### GhostNet\n**GhostNet: More Features from Cheap Operations**\nKai Han, Yunhe Wang, Qi Tian, Jianyuan Guo, Chunjing Xu, Chang Xu\n- pdf: 
[https:\u002F\u002Farxiv.org\u002Fabs\u002F1911.11907](https:\u002F\u002Farxiv.org\u002Fabs\u002F1911.11907)\n- code: [official-pytorch: https:\u002F\u002Fgithub.com\u002Fhuawei-noah\u002Fghostnet](https:\u002F\u002Fgithub.com\u002Fhuawei-noah\u002Fghostnet)\n\n### ViT\n**An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale**\nAlexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F2010.11929](https:\u002F\u002Farxiv.org\u002Fabs\u002F2010.11929)\n- code: [official-tensorflow: https:\u002F\u002Fgithub.com\u002Fgoogle-research\u002Fvision_transformer](https:\u002F\u002Fgithub.com\u002Fgoogle-research\u002Fvision_transformer)\n- code: [unofficial-pytorch: https:\u002F\u002Fgithub.com\u002Fjeonsworld\u002FViT-pytorch](https:\u002F\u002Fgithub.com\u002Fjeonsworld\u002FViT-pytorch)\n\n### DeiT\n**Training data-efficient image transformers & distillation through attention**\nHugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Hervé Jégou\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F2012.12877](https:\u002F\u002Farxiv.org\u002Fabs\u002F2012.12877)\n- code: [official-pytorch: https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fdeit](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fdeit)\n\n### PVT\n**Pyramid Vision Transformer: A Versatile Backbone for Dense Prediction without Convolutions**\nWenhai Wang, Enze Xie, Xiang Li, Deng-Ping Fan, Kaitao Song, Ding Liang, Tong Lu, Ping Luo, Ling Shao\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F2102.12122](https:\u002F\u002Farxiv.org\u002Fabs\u002F2102.12122)\n- code: [official-pytorch: https:\u002F\u002Fgithub.com\u002Fwhai362\u002FPVT](https:\u002F\u002Fgithub.com\u002Fwhai362\u002FPVT)\n\n### T2T\n**Tokens-to-Token ViT: Training Vision Transformers from 
Scratch on ImageNet**\nLi Yuan, Yunpeng Chen, Tao Wang, Weihao Yu, Yujun Shi, Zihang Jiang, Francis EH Tay, Jiashi Feng, Shuicheng Yan\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F2101.11986](https:\u002F\u002Farxiv.org\u002Fabs\u002F2101.11986)\n- code: [official-pytorch: https:\u002F\u002Fgithub.com\u002Fyitu-opensource\u002FT2T-ViT](https:\u002F\u002Fgithub.com\u002Fyitu-opensource\u002FT2T-ViT)\n\n### DeepVit\n**DeepViT: Towards Deeper Vision Transformer**\nDaquan Zhou, Bingyi Kang, Xiaojie Jin, Linjie Yang, Xiaochen Lian, Zihang Jiang, Qibin Hou, and Jiashi Feng.\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.11886](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.11886)\n- code: [official-pytorch: https:\u002F\u002Fgithub.com\u002Fzhoudaquan\u002Fdvit_repo](https:\u002F\u002Fgithub.com\u002Fzhoudaquan\u002Fdvit_repo)\n\n### ViL\n**Multi-Scale Vision Longformer: A New Vision Transformer for High-Resolution Image Encoding**\nPengchuan Zhang, Xiyang Dai, Jianwei Yang, Bin Xiao, Lu Yuan, Lei Zhang, Jianfeng Gao\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.15358](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.15358)\n- code: [official-pytorch: https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002Fvision-longformer](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002Fvision-longformer)\n\n### TNT\n**Transformer in Transformer**\nKai Han, An Xiao, Enhua Wu, Jianyuan Guo, Chunjing Xu, Yunhe Wang\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.00112](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.00112)\n- code: [https:\u002F\u002Fgithub.com\u002Fhuawei-noah\u002FCV-Backbones](https:\u002F\u002Fgithub.com\u002Fhuawei-noah\u002FCV-Backbones)\n\n### CvT\n**CvT: Introducing Convolutions to Vision Transformers**\nHaiping Wu, Bin Xiao, Noel Codella, Mengchen Liu, Xiyang Dai, Lu Yuan, Lei Zhang\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.15808](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.15808)\n- code: 
[https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002FCvT](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002FCvT)\n\n### CViT\n**CrossViT: Cross-Attention Multi-Scale Vision Transformer for Image Classification**\nChun-Fu (Richard) Chen, Quanfu Fan, Rameswar Panda\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.14899](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.14899)\n- code: [https:\u002F\u002Fgithub.com\u002FIBM\u002FCrossViT](https:\u002F\u002Fgithub.com\u002FIBM\u002FCrossViT)\n\n### Focal-T\n**Focal Attention for Long-Range Interactions in Vision Transformers**\nJianwei Yang, Chunyuan Li, Pengchuan Zhang, Xiyang Dai, Bin Xiao, Lu Yuan, Jianfeng Gao\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F2107.00641](https:\u002F\u002Farxiv.org\u002Fabs\u002F2107.00641)\n- code: [ https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002FFocal-Transformer](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002FFocal-Transformer)\n\n### Twins\n**Twins: Revisiting the Design of Spatial Attention in Vision Transformers**\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.13840](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.13840)\n- code: [https:\u002F\u002Fgit.io\u002FTwins]( https:\u002F\u002Fgit.io\u002FTwins)\n\n### PVTv2\n**Wenhai Wang, Enze Xie, Xiang Li, Deng-Ping Fan, Kaitao Song, Ding Liang, Tong Lu, Ping Luo, Ling Shao**\n- pdf: [https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.13797](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.13797)\n- code: [official-pytorch: https:\u002F\u002Fgithub.com\u002Fwhai362\u002FPVT](https:\u002F\u002Fgithub.com\u002Fwhai362\u002FPVT)","# 令人惊叹 - 图像分类\n\n[![Awesome](https:\u002F\u002Fcdn.rawgit.com\u002Fsindresorhus\u002Fawesome\u002Fd7305f38d29fed78fa85652e3a63e154dd8e8829\u002Fmedia\u002Fbadge.svg)](https:\u002F\u002Fgithub.com\u002Fsindresorhus\u002Fawesome)\n\n自2014年以来，深度学习图像分类领域的论文与代码精选列表。受 
[awesome-object-detection](https:\u002F\u002Fgithub.com\u002Famusi\u002Fawesome-object-detection)、[deep_learning_object_detection](https:\u002F\u002Fgithub.com\u002Fhoya012\u002Fdeep_learning_object_detection) 和 [awesome-deep-learning-papers](https:\u002F\u002Fgithub.com\u002Fterryum\u002Fawesome-deep-learning-papers) 的启发而创建。\n\n## 背景\n\n我认为，在深入其他计算机视觉领域之前，图像分类是一个非常好的起点，尤其是对于完全不了解深度学习的初学者而言。当我刚开始学习计算机视觉时，犯过许多错误。当时如果有人能告诉我应该从哪篇论文入手就好了。直到现在，似乎还没有一个像 [deep_learning_object_detection](https:\u002F\u002Fgithub.com\u002Fhoya012\u002Fdeep_learning_object_detection) 那样专门列出图像分类论文的仓库。因此，我决定创建这样一个包含深度学习图像分类论文和代码的仓库，以帮助他人。我个人建议那些对深度学习一无所知的人，可以从 VGG 开始，然后是 GoogLeNet、ResNet，读完这些之后再继续阅读列表中的其他论文，或者转到其他领域。\n\n**注：我还维护了一个关于部分图像分类网络的 PyTorch 实现仓库，你可以在这里查看：[pytorch-cifar100](https:\u002F\u002Fgithub.com\u002Fweiaicunzai\u002Fpytorch-cifar100)。**\n\n## 性能表格\n\n为简化起见，我仅列出了各论文在 ImageNet 数据集上取得的最佳 Top-1 和 Top-5 准确率。需要注意的是，即使某个模型的准确率更高，也不一定意味着它就比另一个更好。这是因为有些模型更注重降低模型复杂度而非提升准确率，或者有些论文只提供了单裁剪的结果，而另一些则给出了模型融合或多裁剪的结果。\n\n- ConvNet：卷积网络名称\n- ImageNet Top-1 Acc：该论文在 ImageNet 上取得的最佳 Top-1 准确率\n- ImageNet Top-5 Acc：该论文在 ImageNet 上取得的最佳 Top-5 准确率\n- Published In：论文发表的会议或期刊名称\n\n|         卷积神经网络            | ImageNet Top-1准确率 | ImageNet Top-5准确率 |   发表时间     |\n|:--------------------------:|:-----------------:|:-----------------:|:------------------:|\n|           Vgg              |      76.3         |       93.2        |      ICLR2015      |\n|        GoogleNet           |       -           |       93.33       |      CVPR2015      |\n|        PReLU-nets          |       -           |       95.06       |      ICCV2015      |\n|          ResNet            |       -           |       96.43       |      CVPR2015      |\n|       PreActResNet         |      79.9         |       95.2        |      CVPR2016      |\n|       Inceptionv3          |      82.8         |       96.42       |      CVPR2016      |\n|       Inceptionv4          |      82.3         |       96.2        |      AAAI2016      |\n|    
Inception-ResNet-v2     |      82.4         |       96.3        |      AAAI2016      |\n|Inceptionv4 + Inception-ResNet-v2|      83.5         |       96.92       |      AAAI2016      |\n|           RiR              |       -           |         -         |  ICLR Workshop2016 |\n|  Stochastic Depth ResNet   |      78.02        |         -         |      ECCV2016      |\n|           WRN              |      78.1         |       94.21       |      BMVC2016      |\n|       SqueezeNet           |      60.4         |       82.5        |      arXiv2017([被ICLR2017拒绝](https:\u002F\u002Fopenreview.net\u002Fforum?id=S1xh5sYgx))     |\n|          GeNet             |      72.13        |       90.26       |      ICCV2017      |\n|         MetaQNN            |       -           |         -         |      ICLR2017      |\n|        PyramidNet          |      80.8         |       95.3        |      CVPR2017      |\n|         DenseNet           |      79.2         |       94.71       |      ECCV2017      |\n|        FractalNet          |      75.8         |       92.61       |      ICLR2017      |\n|         ResNext            |       -           |       96.97       |      CVPR2017      |\n|         IGCV1              |      73.05        |       91.08       |      ICCV2017      |\n| Residual Attention Network |      80.5         |       95.2        |      CVPR2017      |\n|        Xception            |       79          |       94.5        |      CVPR2017      |\n|        MobileNet           |      70.6         |         -         |      arXiv2017     |\n|         PolyNet            |      82.64        |       96.55       |      CVPR2017      |\n|           DPN              |       79          |       94.5        |      NIPS2017      |\n|        Block-QNN           |      77.4         |       93.54       |      CVPR2018      |\n|         CRU-Net            |      79.7         |       94.7        |      IJCAI2018     |\n|       DLA                  |      75.3         |         -       
  |      CVPR2018      |\n|       ShuffleNet           |      75.3         |         -         |      CVPR2018      |\n|       CondenseNet          |      73.8         |       91.7        |      CVPR2018      |\n|          NasNet            |      82.7         |       96.2        |      CVPR2018      |\n|       MobileNetV2          |      74.7         |         -         |      CVPR2018      |\n|         IGCV2              |      70.07        |         -         |      CVPR2018      |\n|          hier              |      79.7         |       94.8        |      ICLR2018      |\n|         PNasNet            |      82.9         |       96.2        |      ECCV2018      |\n|        AmoebaNet           |      83.9         |       96.6        |      AAAI2018      |\n|          SENet             |       -           |       97.749      |      CVPR2018      |\n|       ShuffleNetV2         |      81.44        |         -         |      ECCV2018      |\n|       CBAM                 |      79.93        |       94.41       |      ECCV2018      |\n|          IGCV3             |      72.2         |         -         |      BMVC2018      |\n|          BAM               |      77.56        |       93.71       |      BMVC2018      |\n|         MnasNet            |      76.13        |       92.85       |      CVPR2018      |\n|          SKNet             |      80.60        |         -         |      CVPR2019      |\n|          DARTS             |      73.3         |       91.3        |      ICLR2019      |\n|       ProxylessNAS         |      75.1         |       92.5        |      ICLR2019      |\n|       MobileNetV3          |      75.2         |         -         |      CVPR2019      |\n|          Res2Net           |      79.2         |       94.37       |      PAMI2019      |\n|       LIP-ResNet           |      79.33        |       94.6        |      ICCV2019      |\n|       EfficientNet         |      84.3         |       97.0        |      ICML2019      |\n|       FixResNeXt   
        |      86.4         |       98.0        |      NIPS2019      |\n|       BiT                  |      87.5         |         -         |      ECCV2020      |\n|       PSConv + ResNext101  |      80.502       |       95.276      |      ECCV2020      |\n|       NoisyStudent         |      88.4         |       98.7        |      CVPR2020      |\n|       RegNet               |      79.9         |       -           |      CVPR2020      |\n|       GhostNet             |      75.7         |       -           |      CVPR2020      |\n|       ViT                  |      88.55        |       -           |      ICLR2021      |\n|       DeiT                 |      85.2         |       -           |      ICML2021      |\n|       PVT                  |      81.7         |       -           |      ICCV2021      |\n|       T2T-Vit              |      83.3         |       -           |      ICCV2021      |\n|       DeepVit              |      80.9         |       -           |      arXiv2021     |\n|       ViL                  |      83.7         |       -           |      ICCV2021      |\n|       TNT                  |      83.9         |       -           |      arXiv2021     |\n|       CvT                  |      87.7         |       -           |      ICCV2021      |\n|       CViT                 |      84.1         |       -           |      ICCV2021      |\n|       Focal-T              |      84.0         |       -           |      NIPS2021      |\n|       Twins                |      83.7         |       -           |      NIPS2021      |\n|       PVTv2                |      81.7         |       -           |      CVM2022       |\n\n\n\n\n## 论文与代码\n\n### VGG\n**用于大规模图像识别的超深卷积网络。**\n卡伦·西蒙尼扬，安德鲁·齐瑟曼\n- 论文：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1409.1556](https:\u002F\u002Farxiv.org\u002Fabs\u002F1409.1556)\n- 
代码：[torchvision：https:\u002F\u002Fgithub.com\u002Fpytorch\u002Fvision\u002Fblob\u002Fmaster\u002Ftorchvision\u002Fmodels\u002Fvgg.py](https:\u002F\u002Fgithub.com\u002Fpytorch\u002Fvision\u002Fblob\u002Fmaster\u002Ftorchvision\u002Fmodels\u002Fvgg.py)\n- 代码：[keras-applications：https:\u002F\u002Fgithub.com\u002Fkeras-team\u002Fkeras-applications\u002Fblob\u002Fmaster\u002Fkeras_applications\u002Fvgg16.py](https:\u002F\u002Fgithub.com\u002Fkeras-team\u002Fkeras-applications\u002Fblob\u002Fmaster\u002Fkeras_applications\u002Fvgg16.py)\n- 代码：[keras-applications：https:\u002F\u002Fgithub.com\u002Fkeras-team\u002Fkeras-applications\u002Fblob\u002Fmaster\u002Fkeras_applications\u002Fvgg19.py](https:\u002F\u002Fgithub.com\u002Fkeras-team\u002Fkeras-applications\u002Fblob\u002Fmaster\u002Fkeras_applications\u002Fvgg19.py)\n\n### GoogleNet\n**通过卷积更深入地学习**\n克里斯蒂安·塞格迪，魏刘，杨青·贾，皮埃尔·塞尔马内，斯科特·里德，德拉戈米尔·安古洛夫，杜米特鲁·埃尔汉，文森特·范胡克，安德鲁·拉比诺维奇\n- 论文：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1409.4842](https:\u002F\u002Farxiv.org\u002Fabs\u002F1409.4842)\n- 代码：[非官方TensorFlow实现：https:\u002F\u002Fgithub.com\u002Fconan7882\u002FGoogLeNet-Inception](https:\u002F\u002Fgithub.com\u002Fconan7882\u002FGoogLeNet-Inception)\n- 代码：[非官方Caffe实现：https:\u002F\u002Fgithub.com\u002Flim0606\u002Fcaffe-googlenet-bn](https:\u002F\u002Fgithub.com\u002Flim0606\u002Fcaffe-googlenet-bn)\n\n### PReLU-nets\n**深入研究修正线性单元：在ImageNet分类任务上超越人类水平的表现**\n凯明·何，张祥宇，任少卿，孙健\n- 论文：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1502.01852](https:\u002F\u002Farxiv.org\u002Fabs\u002F1502.01852)\n- 代码：[非官方Chainer实现：https:\u002F\u002Fgithub.com\u002Fnutszebra\u002Fprelu_net](https:\u002F\u002Fgithub.com\u002Fnutszebra\u002Fprelu_net)\n\n### ResNet\n**用于图像识别的深度残差学习**\n凯明·何，张祥宇，任少卿，孙健\n- 论文：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1512.03385](https:\u002F\u002Farxiv.org\u002Fabs\u002F1512.03385)\n- 代码：[Facebook 
Torch实现：https:\u002F\u002Fgithub.com\u002Ffacebook\u002Ffb.resnet.torch](https:\u002F\u002Fgithub.com\u002Ffacebook\u002Ffb.resnet.torch)\n- 代码：[torchvision：https:\u002F\u002Fgithub.com\u002Fpytorch\u002Fvision\u002Fblob\u002Fmaster\u002Ftorchvision\u002Fmodels\u002Fresnet.py](https:\u002F\u002Fgithub.com\u002Fpytorch\u002Fvision\u002Fblob\u002Fmaster\u002Ftorchvision\u002Fmodels\u002Fresnet.py)\n- 代码：[keras-applications：https:\u002F\u002Fgithub.com\u002Fkeras-team\u002Fkeras-applications\u002Fblob\u002Fmaster\u002Fkeras_applications\u002Fresnet.py](https:\u002F\u002Fgithub.com\u002Fkeras-team\u002Fkeras-applications\u002Fblob\u002Fmaster\u002Fkeras_applications\u002Fresnet.py)\n- 代码：[非官方Keras实现：https:\u002F\u002Fgithub.com\u002Fraghakot\u002Fkeras-resnet](https:\u002F\u002Fgithub.com\u002Fraghakot\u002Fkeras-resnet)\n- 代码：[非官方TensorFlow实现：https:\u002F\u002Fgithub.com\u002Fry\u002Ftensorflow-resnet](https:\u002F\u002Fgithub.com\u002Fry\u002Ftensorflow-resnet)\n\n### PreActResNet\n**深度残差网络中的恒等映射**\n凯明·何，张祥宇，任少卿，孙健\n- 论文：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1603.05027](https:\u002F\u002Farxiv.org\u002Fabs\u002F1603.05027)\n- 代码：[Facebook Torch实现：https:\u002F\u002Fgithub.com\u002Ffacebook\u002Ffb.resnet.torch\u002Fblob\u002Fmaster\u002Fmodels\u002Fpreresnet.lua](https:\u002F\u002Fgithub.com\u002Ffacebook\u002Ffb.resnet.torch\u002Fblob\u002Fmaster\u002Fmodels\u002Fpreresnet.lua)\n- 代码：[官方实现：https:\u002F\u002Fgithub.com\u002FKaimingHe\u002Fresnet-1k-layers](https:\u002F\u002Fgithub.com\u002FKaimingHe\u002Fresnet-1k-layers)\n- 代码：[非官方PyTorch实现：https:\u002F\u002Fgithub.com\u002Fkuangliu\u002Fpytorch-cifar\u002Fblob\u002Fmaster\u002Fmodels\u002Fpreact_resnet.py](https:\u002F\u002Fgithub.com\u002Fkuangliu\u002Fpytorch-cifar\u002Fblob\u002Fmaster\u002Fmodels\u002Fpreact_resnet.py)\n- 代码：[非官方MXNet实现：https:\u002F\u002Fgithub.com\u002Ftornadomeet\u002FResNet](https:\u002F\u002Fgithub.com\u002Ftornadomeet\u002FResNet)\n\n### 
Inceptionv3\n**重新思考计算机视觉中的Inception架构**\n克里斯蒂安·塞格迪，文森特·范胡克，谢尔盖·伊奥费，乔纳森·施伦斯，兹比格涅夫·沃伊纳\n- 论文：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1512.00567](https:\u002F\u002Farxiv.org\u002Fabs\u002F1512.00567)\n- 代码：[torchvision：https:\u002F\u002Fgithub.com\u002Fpytorch\u002Fvision\u002Fblob\u002Fmaster\u002Ftorchvision\u002Fmodels\u002Finception.py](https:\u002F\u002Fgithub.com\u002Fpytorch\u002Fvision\u002Fblob\u002Fmaster\u002Ftorchvision\u002Fmodels\u002Finception.py)\n- 代码：[keras-applications：https:\u002F\u002Fgithub.com\u002Fkeras-team\u002Fkeras-applications\u002Fblob\u002Fmaster\u002Fkeras_applications\u002Finception_v3.py](https:\u002F\u002Fgithub.com\u002Fkeras-team\u002Fkeras-applications\u002Fblob\u002Fmaster\u002Fkeras_applications\u002Finception_v3.py)\n\n### Inceptionv4 && Inception-ResNetv2\n**Inception-v4、Inception-ResNet以及残差连接对学习的影响**\n克里斯蒂安·塞格迪，谢尔盖·伊奥费，文森特·范胡克，亚历克斯·阿莱米\n- 论文：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1602.07261](https:\u002F\u002Farxiv.org\u002Fabs\u002F1602.07261)\n- 代码：[非官方Keras实现：https:\u002F\u002Fgithub.com\u002Fkentsommer\u002Fkeras-inceptionV4](https:\u002F\u002Fgithub.com\u002Fkentsommer\u002Fkeras-inceptionV4)\n- 代码：[非官方Keras实现：https:\u002F\u002Fgithub.com\u002Ftitu1994\u002FInception-v4](https:\u002F\u002Fgithub.com\u002Ftitu1994\u002FInception-v4)\n- 代码：[非官方Keras实现：https:\u002F\u002Fgithub.com\u002Fyuyang-huang\u002Fkeras-inception-resnet-v2](https:\u002F\u002Fgithub.com\u002Fyuyang-huang\u002Fkeras-inception-resnet-v2)\n\n### RiR\n**残差网络中的残差网络：泛化残差架构**\n萨莎·塔格，迪奥戈·阿尔梅达，凯文·莱曼\n- 论文：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1603.08029](https:\u002F\u002Farxiv.org\u002Fabs\u002F1603.08029)\n- 代码：[非官方TensorFlow实现：https:\u002F\u002Fgithub.com\u002FSunnerLi\u002FRiR-Tensorflow](https:\u002F\u002Fgithub.com\u002FSunnerLi\u002FRiR-Tensorflow)\n- 代码：[非官方Chainer实现：https:\u002F\u002Fgithub.com\u002Fnutszebra\u002Fresnet_in_resnet](https:\u002F\u002Fgithub.com\u002Fnutszebra\u002Fresnet_in_resnet)\n\n### 
随机深度ResNet\n**具有随机深度的深度网络**\n高黄，于孙，庄刘，丹尼尔·塞德拉，基利安·温伯格\n- 论文：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1603.09382](https:\u002F\u002Farxiv.org\u002Fabs\u002F1603.09382)\n- 代码：[非官方Torch实现：https:\u002F\u002Fgithub.com\u002Fyueatsprograms\u002FStochastic_Depth](https:\u002F\u002Fgithub.com\u002Fyueatsprograms\u002FStochastic_Depth)\n- 代码：[非官方Chainer实现：https:\u002F\u002Fgithub.com\u002Fyasunorikudo\u002Fchainer-ResDrop](https:\u002F\u002Fgithub.com\u002Fyasunorikudo\u002Fchainer-ResDrop)\n- 代码：[非官方Keras实现：https:\u002F\u002Fgithub.com\u002FdblN\u002Fstochastic_depth_keras](https:\u002F\u002Fgithub.com\u002FdblN\u002Fstochastic_depth_keras)\n\n### WRN\n**宽残差网络**\n谢尔盖·扎戈鲁伊科，尼科斯·科莫达基斯\n- 论文：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1605.07146](https:\u002F\u002Farxiv.org\u002Fabs\u002F1605.07146)\n- 代码：[官方实现：https:\u002F\u002Fgithub.com\u002Fszagoruyko\u002Fwide-residual-networks](https:\u002F\u002Fgithub.com\u002Fszagoruyko\u002Fwide-residual-networks)\n- 代码：[非官方PyTorch实现：https:\u002F\u002Fgithub.com\u002Fxternalz\u002FWideResNet-pytorch](https:\u002F\u002Fgithub.com\u002Fxternalz\u002FWideResNet-pytorch)\n- 代码：[非官方Keras实现：https:\u002F\u002Fgithub.com\u002Fasmith26\u002Fwide_resnets_keras](https:\u002F\u002Fgithub.com\u002Fasmith26\u002Fwide_resnets_keras)\n- 代码：[非官方PyTorch实现：https:\u002F\u002Fgithub.com\u002Fmeliketoy\u002Fwide-resnet.pytorch](https:\u002F\u002Fgithub.com\u002Fmeliketoy\u002Fwide-resnet.pytorch)\n\n### SqueezeNet\n**SqueezeNet：参数量减少50倍、模型大小小于0.5MB，同时达到AlexNet级别的精度**\nForrest N. Iandola, Song Han, Matthew W. Moskewicz, Khalid Ashraf, William J. 
Dally, Kurt Keutzer\n- 论文：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1602.07360](https:\u002F\u002Farxiv.org\u002Fabs\u002F1602.07360)\n- 代码：[torchvision：https:\u002F\u002Fgithub.com\u002Fpytorch\u002Fvision\u002Fblob\u002Fmaster\u002Ftorchvision\u002Fmodels\u002Fsqueezenet.py](https:\u002F\u002Fgithub.com\u002Fpytorch\u002Fvision\u002Fblob\u002Fmaster\u002Ftorchvision\u002Fmodels\u002Fsqueezenet.py)\n- 代码：[非官方-Caffe：https:\u002F\u002Fgithub.com\u002FDeepScale\u002FSqueezeNet](https:\u002F\u002Fgithub.com\u002FDeepScale\u002FSqueezeNet)\n- 代码：[非官方-Keras：https:\u002F\u002Fgithub.com\u002Frcmalli\u002Fkeras-squeezenet](https:\u002F\u002Fgithub.com\u002Frcmalli\u002Fkeras-squeezenet)\n- 代码：[非官方-Caffe：https:\u002F\u002Fgithub.com\u002Fsonghan\u002FSqueezeNet-Residual](https:\u002F\u002Fgithub.com\u002Fsonghan\u002FSqueezeNet-Residual)\n\n### GeNet\n**遗传CNN**\nLingxi Xie, Alan Yuille\n- 论文：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1703.01513](https:\u002F\u002Farxiv.org\u002Fabs\u002F1703.01513)\n- 代码：[非官方-TensorFlow：https:\u002F\u002Fgithub.com\u002Faqibsaeed\u002FGenetic-CNN](https:\u002F\u002Fgithub.com\u002Faqibsaeed\u002FGenetic-CNN)\n\n### MetaQNN\n**利用强化学习设计神经网络架构**\nBowen Baker, Otkrist Gupta, Nikhil Naik, Ramesh Raskar\n- 论文：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1611.02167](https:\u002F\u002Farxiv.org\u002Fabs\u002F1611.02167)\n- 代码：[官方：https:\u002F\u002Fgithub.com\u002Fbowenbaker\u002Fmetaqnn](https:\u002F\u002Fgithub.com\u002Fbowenbaker\u002Fmetaqnn)\n\n### PyramidNet\n**深度金字塔残差网络**\nDongyoon Han, Jiwhan Kim, Junmo Kim\n- 论文：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1610.02915](https:\u002F\u002Farxiv.org\u002Fabs\u002F1610.02915)\n- 代码：[官方：https:\u002F\u002Fgithub.com\u002Fjhkim89\u002FPyramidNet](https:\u002F\u002Fgithub.com\u002Fjhkim89\u002FPyramidNet)\n- 代码：[非官方-PyTorch：https:\u002F\u002Fgithub.com\u002Fdyhan0920\u002FPyramidNet-PyTorch](https:\u002F\u002Fgithub.com\u002Fdyhan0920\u002FPyramidNet-PyTorch)\n\n### DenseNet\n**密集连接的卷积网络**\nGao Huang, 
Zhuang Liu, Laurens van der Maaten, Kilian Q. Weinberger\n- 论文：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1608.06993](https:\u002F\u002Farxiv.org\u002Fabs\u002F1608.06993)\n- 代码：[官方：https:\u002F\u002Fgithub.com\u002Fliuzhuang13\u002FDenseNet](https:\u002F\u002Fgithub.com\u002Fliuzhuang13\u002FDenseNet)\n- 代码：[非官方-Keras：https:\u002F\u002Fgithub.com\u002Ftitu1994\u002FDenseNet](https:\u002F\u002Fgithub.com\u002Ftitu1994\u002FDenseNet)\n- 代码：[非官方-Caffe：https:\u002F\u002Fgithub.com\u002Fshicai\u002FDenseNet-Caffe](https:\u002F\u002Fgithub.com\u002Fshicai\u002FDenseNet-Caffe)\n- 代码：[非官方-TensorFlow：https:\u002F\u002Fgithub.com\u002FYixuanLi\u002Fdensenet-tensorflow](https:\u002F\u002Fgithub.com\u002FYixuanLi\u002Fdensenet-tensorflow)\n- 代码：[非官方-PyTorch：https:\u002F\u002Fgithub.com\u002FYixuanLi\u002Fdensenet-tensorflow](https:\u002F\u002Fgithub.com\u002FYixuanLi\u002Fdensenet-tensorflow)\n- 代码：[非官方-PyTorch：https:\u002F\u002Fgithub.com\u002Fbamos\u002Fdensenet.pytorch](https:\u002F\u002Fgithub.com\u002Fbamos\u002Fdensenet.pytorch)\n- 代码：[非官方-Keras：https:\u002F\u002Fgithub.com\u002Fflyyufelix\u002FDenseNet-Keras](https:\u002F\u002Fgithub.com\u002Fflyyufelix\u002FDenseNet-Keras)\n\n### FractalNet\n**FractalNet：无残差连接的超深层神经网络**\nGustav Larsson, Michael Maire, Gregory Shakhnarovich\n- 论文：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1605.07648](https:\u002F\u002Farxiv.org\u002Fabs\u002F1605.07648)\n- 代码：[非官方-Caffe：https:\u002F\u002Fgithub.com\u002Fgustavla\u002Ffractalnet](https:\u002F\u002Fgithub.com\u002Fgustavla\u002Ffractalnet)\n- 代码：[非官方-Keras：https:\u002F\u002Fgithub.com\u002Fsnf\u002Fkeras-fractalnet](https:\u002F\u002Fgithub.com\u002Fsnf\u002Fkeras-fractalnet)\n- 代码：[非官方-TensorFlow：https:\u002F\u002Fgithub.com\u002Ftensorpro\u002FFractalNet](https:\u002F\u002Fgithub.com\u002Ftensorpro\u002FFractalNet)\n\n### ResNext\n**用于深度神经网络的聚合残差变换**\nSaining Xie, Ross Girshick, Piotr Dollár, Zhuowen Tu, Kaiming He\n- 
论文：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1611.05431](https:\u002F\u002Farxiv.org\u002Fabs\u002F1611.05431)\n- 代码：[官方：https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FResNeXt](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FResNeXt)\n- 代码：[Keras Applications：https:\u002F\u002Fgithub.com\u002Fkeras-team\u002Fkeras-applications\u002Fblob\u002Fmaster\u002Fkeras_applications\u002Fresnext.py](https:\u002F\u002Fgithub.com\u002Fkeras-team\u002Fkeras-applications\u002Fblob\u002Fmaster\u002Fkeras_applications\u002Fresnext.py)\n- 代码：[非官方-PyTorch：https:\u002F\u002Fgithub.com\u002Fprlz77\u002FResNeXt.pytorch](https:\u002F\u002Fgithub.com\u002Fprlz77\u002FResNeXt.pytorch)\n- 代码：[非官方-Keras：https:\u002F\u002Fgithub.com\u002Ftitu1994\u002FKeras-ResNeXt](https:\u002F\u002Fgithub.com\u002Ftitu1994\u002FKeras-ResNeXt)\n- 代码：[非官方-TensorFlow：https:\u002F\u002Fgithub.com\u002Ftaki0112\u002FResNeXt-Tensorflow](https:\u002F\u002Fgithub.com\u002Ftaki0112\u002FResNeXt-Tensorflow)\n- 代码：[非官方-TensorFlow：https:\u002F\u002Fgithub.com\u002Fwenxinxu\u002FResNeXt-in-tensorflow](https:\u002F\u002Fgithub.com\u002Fwenxinxu\u002FResNeXt-in-tensorflow)\n\n### IGCV1\n**用于深度神经网络的交错分组卷积**\nTing Zhang, Guo-Jun Qi, Bin Xiao, Jingdong Wang\n- 论文：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1707.02725](https:\u002F\u002Farxiv.org\u002Fabs\u002F1707.02725)\n- 代码：[官方：https:\u002F\u002Fgithub.com\u002Fhellozting\u002FInterleavedGroupConvolutions](https:\u002F\u002Fgithub.com\u002Fhellozting\u002FInterleavedGroupConvolutions)\n\n### 残差注意力网络\n**用于图像分类的残差注意力网络**\nFei Wang, Mengqing Jiang, Chen Qian, Shuo Yang, Cheng Li, Honggang Zhang, Xiaogang Wang, Xiaoou Tang\n- 论文：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1704.06904](https:\u002F\u002Farxiv.org\u002Fabs\u002F1704.06904)\n- 代码：[官方：https:\u002F\u002Fgithub.com\u002Ffwang91\u002Fresidual-attention-network](https:\u002F\u002Fgithub.com\u002Ffwang91\u002Fresidual-attention-network)\n- 
代码：[非官方-PyTorch：https:\u002F\u002Fgithub.com\u002Ftengshaofeng\u002FResidualAttentionNetwork-pytorch](https:\u002F\u002Fgithub.com\u002Ftengshaofeng\u002FResidualAttentionNetwork-pytorch)\n- 代码：[非官方-Gluon：https:\u002F\u002Fgithub.com\u002FPistonY\u002FResidualAttentionNetwork](https:\u002F\u002Fgithub.com\u002FPistonY\u002FResidualAttentionNetwork)\n- 代码：[非官方-Keras：https:\u002F\u002Fgithub.com\u002Fkoichiro11\u002Fresidual-attention-network](https:\u002F\u002Fgithub.com\u002Fkoichiro11\u002Fresidual-attention-network)\n\n### Xception\n**Xception：基于深度可分离卷积的深度学习**\nFrançois Chollet\n- 论文：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1610.02357](https:\u002F\u002Farxiv.org\u002Fabs\u002F1610.02357)\n- 代码：[非官方-PyTorch：https:\u002F\u002Fgithub.com\u002Fjfzhang95\u002Fpytorch-deeplab-xception\u002Fblob\u002Fmaster\u002Fmodeling\u002Fbackbone\u002Fxception.py](https:\u002F\u002Fgithub.com\u002Fjfzhang95\u002Fpytorch-deeplab-xception\u002Fblob\u002Fmaster\u002Fmodeling\u002Fbackbone\u002Fxception.py)\n- 代码：[非官方-TensorFlow：https:\u002F\u002Fgithub.com\u002Fkwotsin\u002FTensorFlow-Xception](https:\u002F\u002Fgithub.com\u002Fkwotsin\u002FTensorFlow-Xception)\n- 代码：[非官方-Caffe：https:\u002F\u002Fgithub.com\u002Fyihui-he\u002FXception-caffe](https:\u002F\u002Fgithub.com\u002Fyihui-he\u002FXception-caffe)\n- 代码：[非官方-PyTorch：https:\u002F\u002Fgithub.com\u002Ftstandley\u002FXception-PyTorch](https:\u002F\u002Fgithub.com\u002Ftstandley\u002FXception-PyTorch)\n- 代码：[Keras Applications：https:\u002F\u002Fgithub.com\u002Fkeras-team\u002Fkeras-applications\u002Fblob\u002Fmaster\u002Fkeras_applications\u002Fxception.py](https:\u002F\u002Fgithub.com\u002Fkeras-team\u002Fkeras-applications\u002Fblob\u002Fmaster\u002Fkeras_applications\u002Fxception.py)\n\n### MobileNet\n**MobileNets：面向移动视觉应用的高效卷积神经网络**\nAndrew G. 
Howard、Menglong Zhu、Bo Chen、Dmitry Kalenichenko、Weijun Wang、Tobias Weyand、Marco Andreetto、Hartwig Adam\n- 论文PDF：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1704.04861](https:\u002F\u002Farxiv.org\u002Fabs\u002F1704.04861)\n- 代码（TensorFlow非官方实现）：[https:\u002F\u002Fgithub.com\u002FZehaos\u002FMobileNet](https:\u002F\u002Fgithub.com\u002FZehaos\u002FMobileNet)\n- 代码（Caffe非官方实现）：[https:\u002F\u002Fgithub.com\u002Fshicai\u002FMobileNet-Caffe](https:\u002F\u002Fgithub.com\u002Fshicai\u002FMobileNet-Caffe)\n- 代码（PyTorch非官方实现）：[https:\u002F\u002Fgithub.com\u002Fmarvis\u002Fpytorch-mobilenet](https:\u002F\u002Fgithub.com\u002Fmarvis\u002Fpytorch-mobilenet)\n- 代码（Keras应用库）：[https:\u002F\u002Fgithub.com\u002Fkeras-team\u002Fkeras-applications\u002Fblob\u002Fmaster\u002Fkeras_applications\u002Fmobilenet.py](https:\u002F\u002Fgithub.com\u002Fkeras-team\u002Fkeras-applications\u002Fblob\u002Fmaster\u002Fkeras_applications\u002Fmobilenet.py)\n\n### PolyNet\n**PolyNet：在超深度网络中追求结构多样性**\nXingcheng Zhang、Zhizhong Li、Chen Change Loy、Dahua Lin\n- 论文PDF：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1611.05725](https:\u002F\u002Farxiv.org\u002Fabs\u002F1611.05725)\n- 代码（官方实现）：[https:\u002F\u002Fgithub.com\u002Fopen-mmlab\u002Fpolynet](https:\u002F\u002Fgithub.com\u002Fopen-mmlab\u002Fpolynet)\n\n### DPN\n**双路径网络**\nYunpeng Chen、Jianan Li、Huaxin Xiao、Xiaojie Jin、Shuicheng Yan、Jiashi Feng\n- 论文PDF：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1707.01629](https:\u002F\u002Farxiv.org\u002Fabs\u002F1707.01629)\n- 代码（官方实现）：[https:\u002F\u002Fgithub.com\u002Fcypw\u002FDPNs](https:\u002F\u002Fgithub.com\u002Fcypw\u002FDPNs)\n- 代码（Keras非官方实现）：[https:\u002F\u002Fgithub.com\u002Ftitu1994\u002FKeras-DualPathNetworks](https:\u002F\u002Fgithub.com\u002Ftitu1994\u002FKeras-DualPathNetworks)\n- 代码（PyTorch非官方实现）：[https:\u002F\u002Fgithub.com\u002Foyam\u002Fpytorch-DPNs](https:\u002F\u002Fgithub.com\u002Foyam\u002Fpytorch-DPNs)\n- 
代码（PyTorch非官方实现）：[https:\u002F\u002Fgithub.com\u002Frwightman\u002Fpytorch-dpn-pretrained](https:\u002F\u002Fgithub.com\u002Frwightman\u002Fpytorch-dpn-pretrained)\n\n### Block-QNN\n**实用的分块式神经网络架构生成**\nZhao Zhong、Junjie Yan、Wei Wu、Jing Shao、Cheng-Lin Liu\n- 论文PDF：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1708.05552](https:\u002F\u002Farxiv.org\u002Fabs\u002F1708.05552)\n\n### CRU-Net\n**通过深度神经网络中的集体张量分解共享残差单元**\nChen Yunpeng、Jin Xiaojie、Kang Bingyi、Feng Jiashi、Yan Shuicheng\n- 论文PDF：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1703.02180](https:\u002F\u002Farxiv.org\u002Fabs\u002F1703.02180)\n- 代码（官方实现）：[https:\u002F\u002Fgithub.com\u002Fcypw\u002FCRU-Net](https:\u002F\u002Fgithub.com\u002Fcypw\u002FCRU-Net)\n- 代码（MXNet非官方实现）：[https:\u002F\u002Fgithub.com\u002Fbruinxiong\u002FModified-CRUNet-and-Residual-Attention-Network.mxnet](https:\u002F\u002Fgithub.com\u002Fbruinxiong\u002FModified-CRUNet-and-Residual-Attention-Network.mxnet)\n\n### DLA\n**深度层聚合**\nFisher Yu、Dequan Wang、Evan Shelhamer、Trevor Darrell\n- 论文PDF：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1707.06484](https:\u002F\u002Farxiv.org\u002Fabs\u002F1707.06484)\n- 代码（PyTorch官方实现）：[https:\u002F\u002Fgithub.com\u002Fucbdrive\u002Fdla](https:\u002F\u002Fgithub.com\u002Fucbdrive\u002Fdla)\n\n### ShuffleNet\n**ShuffleNet：一种极其高效的移动端卷积神经网络**\nXiangyu Zhang、Xinyu Zhou、Mengxiao Lin、Jian Sun\n- 论文PDF：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1707.01083](https:\u002F\u002Farxiv.org\u002Fabs\u002F1707.01083)\n- 代码（TensorFlow非官方实现）：[https:\u002F\u002Fgithub.com\u002FMG2033\u002FShuffleNet](https:\u002F\u002Fgithub.com\u002FMG2033\u002FShuffleNet)\n- 代码（PyTorch非官方实现）：[https:\u002F\u002Fgithub.com\u002Fjaxony\u002FShuffleNet](https:\u002F\u002Fgithub.com\u002Fjaxony\u002FShuffleNet)\n- 代码（Caffe非官方实现）：[https:\u002F\u002Fgithub.com\u002Ffarmingyard\u002FShuffleNet](https:\u002F\u002Fgithub.com\u002Ffarmingyard\u002FShuffleNet)\n- 
代码（Keras非官方实现）：[https:\u002F\u002Fgithub.com\u002Fscheckmedia\u002Fkeras-shufflenet](https:\u002F\u002Fgithub.com\u002Fscheckmedia\u002Fkeras-shufflenet)\n\n### CondenseNet\n**CondenseNet：一种使用学习型组卷积的高效DenseNet**\nGao Huang、Shichen Liu、Laurens van der Maaten、Kilian Q. Weinberger\n- 论文PDF：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1711.09224](https:\u002F\u002Farxiv.org\u002Fabs\u002F1711.09224)\n- 代码（官方实现）：[https:\u002F\u002Fgithub.com\u002FShichenLiu\u002FCondenseNet](https:\u002F\u002Fgithub.com\u002FShichenLiu\u002FCondenseNet)\n- 代码（TensorFlow非官方实现）：[https:\u002F\u002Fgithub.com\u002Fmarkdtw\u002Fcondensenet-tensorflow](https:\u002F\u002Fgithub.com\u002Fmarkdtw\u002Fcondensenet-tensorflow)\n\n### NasNet\n**学习可迁移的架构以实现可扩展的图像识别**\nBarret Zoph、Vijay Vasudevan、Jonathon Shlens、Quoc V. Le\n- 论文PDF：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1707.07012](https:\u002F\u002Farxiv.org\u002Fabs\u002F1707.07012)\n- 代码（Keras非官方实现）：[https:\u002F\u002Fgithub.com\u002Ftitu1994\u002FKeras-NASNet](https:\u002F\u002Fgithub.com\u002Ftitu1994\u002FKeras-NASNet)\n- 代码（Keras应用库）：[https:\u002F\u002Fgithub.com\u002Fkeras-team\u002Fkeras-applications\u002Fblob\u002Fmaster\u002Fkeras_applications\u002Fnasnet.py](https:\u002F\u002Fgithub.com\u002Fkeras-team\u002Fkeras-applications\u002Fblob\u002Fmaster\u002Fkeras_applications\u002Fnasnet.py)\n- 代码（PyTorch非官方实现）：[https:\u002F\u002Fgithub.com\u002Fwandering007\u002Fnasnet-pytorch](https:\u002F\u002Fgithub.com\u002Fwandering007\u002Fnasnet-pytorch)\n- 代码（TensorFlow非官方实现）：[https:\u002F\u002Fgithub.com\u002Fyeephycho\u002Fnasnet-tensorflow](https:\u002F\u002Fgithub.com\u002Fyeephycho\u002Fnasnet-tensorflow)\n\n### MobileNetV2\n**MobileNetV2：倒置残差与线性瓶颈**\nMark Sandler、Andrew Howard、Menglong Zhu、Andrey Zhmoginov、Liang-Chieh Chen\n- 论文PDF：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1801.04381](https:\u002F\u002Farxiv.org\u002Fabs\u002F1801.04381)\n- 
代码（Keras非官方实现）：[https:\u002F\u002Fgithub.com\u002Fxiaochus\u002FMobileNetV2](https:\u002F\u002Fgithub.com\u002Fxiaochus\u002FMobileNetV2)\n- 代码（PyTorch非官方实现）：[https:\u002F\u002Fgithub.com\u002FRandl\u002FMobileNetV2-pytorch](https:\u002F\u002Fgithub.com\u002FRandl\u002FMobileNetV2-pytorch)\n- 代码（TensorFlow非官方实现）：[https:\u002F\u002Fgithub.com\u002Fneuleaf\u002FMobileNetV2](https:\u002F\u002Fgithub.com\u002Fneuleaf\u002FMobileNetV2)\n\n### IGCV2\n**IGCV2：交错式结构化稀疏卷积神经网络**\nGuotian Xie、Jingdong Wang、Ting Zhang、Jianhuang Lai、Richang Hong、Guo-Jun Qi\n- 论文PDF：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1804.06202](https:\u002F\u002Farxiv.org\u002Fabs\u002F1804.06202)\n\n### hier\n**用于高效架构搜索的层次化表示**\nHanxiao Liu、Karen Simonyan、Oriol Vinyals、Chrisantha Fernando、Koray Kavukcuoglu\n- 论文PDF：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1711.00436](https:\u002F\u002Farxiv.org\u002Fabs\u002F1711.00436)\n\n### PNasNet\n**渐进式神经架构搜索**\nChenxi Liu、Barret Zoph、Maxim Neumann、Jonathon Shlens、Wei Hua、Li-Jia Li、Li Fei-Fei、Alan Yuille、Jonathan Huang、Kevin Murphy\n- 论文PDF：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1712.00559](https:\u002F\u002Farxiv.org\u002Fabs\u002F1712.00559)\n- 代码（TensorFlow-Slim实现）：[https:\u002F\u002Fgithub.com\u002Ftensorflow\u002Fmodels\u002Fblob\u002Fmaster\u002Fresearch\u002Fslim\u002Fnets\u002Fnasnet\u002Fpnasnet.py](https:\u002F\u002Fgithub.com\u002Ftensorflow\u002Fmodels\u002Fblob\u002Fmaster\u002Fresearch\u002Fslim\u002Fnets\u002Fnasnet\u002Fpnasnet.py)\n- 代码（PyTorch非官方实现）：[https:\u002F\u002Fgithub.com\u002Fchenxi116\u002FPNASNet.pytorch](https:\u002F\u002Fgithub.com\u002Fchenxi116\u002FPNASNet.pytorch)\n- 代码（TensorFlow非官方实现）：[https:\u002F\u002Fgithub.com\u002Fchenxi116\u002FPNASNet.TF](https:\u002F\u002Fgithub.com\u002Fchenxi116\u002FPNASNet.TF)\n\n### AmoebaNet\n**基于正则化进化的图像分类器架构搜索**\nEsteban Real、Alok Aggarwal、Yanping Huang、Quoc V Le\n- 论文PDF：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1802.01548](https:\u002F\u002Farxiv.org\u002Fabs\u002F1802.01548)\n- 
代码（TensorFlow TPU实现）：[https:\u002F\u002Fgithub.com\u002Ftensorflow\u002Ftpu\u002Ftree\u002Fmaster\u002Fmodels\u002Fofficial\u002Famoeba_net](https:\u002F\u002Fgithub.com\u002Ftensorflow\u002Ftpu\u002Ftree\u002Fmaster\u002Fmodels\u002Fofficial\u002Famoeba_net)\n\n### SENet\n**挤压与激励网络**\n胡杰、沈力、塞缪尔·阿尔巴尼、孙刚、吴恩华\n- 论文：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1709.01507](https:\u002F\u002Farxiv.org\u002Fabs\u002F1709.01507)\n- 代码：[官方：https:\u002F\u002Fgithub.com\u002Fhujie-frank\u002FSENet](https:\u002F\u002Fgithub.com\u002Fhujie-frank\u002FSENet)\n- 代码：[非官方-PyTorch：https:\u002F\u002Fgithub.com\u002Fmoskomule\u002Fsenet.pytorch](https:\u002F\u002Fgithub.com\u002Fmoskomule\u002Fsenet.pytorch)\n- 代码：[非官方-TensorFlow：https:\u002F\u002Fgithub.com\u002Ftaki0112\u002FSENet-Tensorflow](https:\u002F\u002Fgithub.com\u002Ftaki0112\u002FSENet-Tensorflow)\n- 代码：[非官方-Caffe：https:\u002F\u002Fgithub.com\u002Fshicai\u002FSENet-Caffe](https:\u002F\u002Fgithub.com\u002Fshicai\u002FSENet-Caffe)\n- 代码：[非官方-MXNet：https:\u002F\u002Fgithub.com\u002Fbruinxiong\u002FSENet.mxnet](https:\u002F\u002Fgithub.com\u002Fbruinxiong\u002FSENet.mxnet)\n\n### ShuffleNetV2\n**ShuffleNet V2：高效CNN架构设计的实用指南**\n马宁宁、张翔宇、郑海涛、孙剑\n- 论文：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1807.11164](https:\u002F\u002Farxiv.org\u002Fabs\u002F1807.11164)\n- 代码：[非官方-PyTorch：https:\u002F\u002Fgithub.com\u002FRandl\u002FShuffleNetV2-pytorch](https:\u002F\u002Fgithub.com\u002FRandl\u002FShuffleNetV2-pytorch)\n- 代码：[非官方-Keras：https:\u002F\u002Fgithub.com\u002Fopconty\u002Fkeras-shufflenetV2](https:\u002F\u002Fgithub.com\u002Fopconty\u002Fkeras-shufflenetV2)\n- 代码：[非官方-PyTorch：https:\u002F\u002Fgithub.com\u002FBugdragon\u002FShuffleNet_v2_PyTorch](https:\u002F\u002Fgithub.com\u002FBugdragon\u002FShuffleNet_v2_PyTorch)\n- 代码：[非官方-Caffe2：https:\u002F\u002Fgithub.com\u002Fwolegechu\u002FShuffleNetV2.Caffe2](https:\u002F\u002Fgithub.com\u002Fwolegechu\u002FShuffleNetV2.Caffe2)\n\n### CBAM\n**CBAM：卷积块注意力模块**\n桑贤宇、朴宗灿、李俊英、权仁昭\n- 
论文：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1807.06521](https:\u002F\u002Farxiv.org\u002Fabs\u002F1807.06521)\n- 代码：[官方-PyTorch：https:\u002F\u002Fgithub.com\u002FJongchan\u002Fattention-module](https:\u002F\u002Fgithub.com\u002FJongchan\u002Fattention-module)\n- 代码：[非官方-PyTorch：https:\u002F\u002Fgithub.com\u002Fluuuyi\u002FCBAM.PyTorch](https:\u002F\u002Fgithub.com\u002Fluuuyi\u002FCBAM.PyTorch)\n- 代码：[非官方-PyTorch：https:\u002F\u002Fgithub.com\u002Felbuco1\u002FCBAM](https:\u002F\u002Fgithub.com\u002Felbuco1\u002FCBAM)\n- 代码：[非官方-Keras：https:\u002F\u002Fgithub.com\u002Fkobiso\u002FCBAM-keras](https:\u002F\u002Fgithub.com\u002Fkobiso\u002FCBAM-keras)\n\n\n### IGCV3\n**IGCV3：用于高效深度神经网络的交错低秩分组卷积**\n孙柯、李明杰、刘东、王井东\n- 论文：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1806.00178](https:\u002F\u002Farxiv.org\u002Fabs\u002F1806.00178)\n- 代码：[官方：https:\u002F\u002Fgithub.com\u002Fhomles11\u002FIGCV3](https:\u002F\u002Fgithub.com\u002Fhomles11\u002FIGCV3)\n- 代码：[非官方-PyTorch：https:\u002F\u002Fgithub.com\u002Fxxradon\u002FIGCV3-pytorch](https:\u002F\u002Fgithub.com\u002Fxxradon\u002FIGCV3-pytorch)\n- 代码：[非官方-TensorFlow：https:\u002F\u002Fgithub.com\u002FZHANG-SHI-CHANG\u002FIGCV3](https:\u002F\u002Fgithub.com\u002FZHANG-SHI-CHANG\u002FIGCV3)\n\n### BAM\n**BAM：瓶颈注意力模块**\n朴宗灿、桑贤宇、李俊英、权仁昭\n- 论文：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1807.06514](https:\u002F\u002Farxiv.org\u002Fabs\u002F1807.06514)\n- 代码：[官方-PyTorch：https:\u002F\u002Fgithub.com\u002FJongchan\u002Fattention-module](https:\u002F\u002Fgithub.com\u002FJongchan\u002Fattention-module)\n- 代码：[非官方-TensorFlow：https:\u002F\u002Fgithub.com\u002Fhuyz1117\u002FBAM](https:\u002F\u002Fgithub.com\u002Fhuyz1117\u002FBAM)\n\n### MNasNet\n**MnasNet：面向移动设备的平台感知神经架构搜索**\n谭铭星、陈博、庞若明、瓦苏德万、黎魁\n- 论文：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1807.11626](https:\u002F\u002Farxiv.org\u002Fabs\u002F1807.11626)\n- 
代码：[非官方-PyTorch：https:\u002F\u002Fgithub.com\u002FAnjieZheng\u002FMnasNet-PyTorch](https:\u002F\u002Fgithub.com\u002FAnjieZheng\u002FMnasNet-PyTorch)\n- 代码：[非官方-Caffe：https:\u002F\u002Fgithub.com\u002FLiJianfei06\u002FMnasNet-caffe](https:\u002F\u002Fgithub.com\u002FLiJianfei06\u002FMnasNet-caffe)\n- 代码：[非官方-MXNet：https:\u002F\u002Fgithub.com\u002Fchinakook\u002FMnasnet.MXNet](https:\u002F\u002Fgithub.com\u002Fchinakook\u002FMnasnet.MXNet)\n- 代码：[非官方-Keras：https:\u002F\u002Fgithub.com\u002FShathe\u002FMNasNet-Keras-Tensorflow](https:\u002F\u002Fgithub.com\u002FShathe\u002FMNasNet-Keras-Tensorflow)\n\n### SKNet\n**选择性卷积网络**\n李翔、王文海、胡晓林、杨健\n- 论文：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1903.06586](https:\u002F\u002Farxiv.org\u002Fabs\u002F1903.06586)\n- 代码：[官方：https:\u002F\u002Fgithub.com\u002Fimplus\u002FSKNet](https:\u002F\u002Fgithub.com\u002Fimplus\u002FSKNet)\n\n### DARTS\n**DARTS：可微架构搜索**\n刘瀚霄、卡伦·西蒙扬、杨一鸣\n- 论文：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1806.09055](https:\u002F\u002Farxiv.org\u002Fabs\u002F1806.09055)\n- 代码：[官方：https:\u002F\u002Fgithub.com\u002Fquark0\u002Fdarts](https:\u002F\u002Fgithub.com\u002Fquark0\u002Fdarts)\n- 代码：[非官方-PyTorch：https:\u002F\u002Fgithub.com\u002Fkhanrc\u002Fpt.darts](https:\u002F\u002Fgithub.com\u002Fkhanrc\u002Fpt.darts)\n- 代码：[非官方-TensorFlow：https:\u002F\u002Fgithub.com\u002FNeroLoh\u002Fdarts-tensorflow](https:\u002F\u002Fgithub.com\u002FNeroLoh\u002Fdarts-tensorflow)\n\n### ProxylessNAS\n**ProxylessNAS：直接在目标任务和硬件上进行神经架构搜索**\n蔡汉、朱立耕、韩松\n- 论文：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1812.00332](https:\u002F\u002Farxiv.org\u002Fabs\u002F1812.00332)\n- 代码：[官方：https:\u002F\u002Fgithub.com\u002Fmit-han-lab\u002FProxylessNAS](https:\u002F\u002Fgithub.com\u002Fmit-han-lab\u002FProxylessNAS)\n\n### MobileNetV3\n**MobileNetV3的搜索**\n安德鲁·霍华德、马克·桑德勒、格蕾丝·楚、梁哲·陈、陈博、谭铭星、王伟军、朱玉坤、庞若明、瓦苏德万、黎魁、哈特维格·亚当\n- 论文：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1905.02244](https:\u002F\u002Farxiv.org\u002Fabs\u002F1905.02244)\n- 
代码：[非官方-PyTorch：https:\u002F\u002Fgithub.com\u002Fxiaolai-sqlai\u002Fmobilenetv3](https:\u002F\u002Fgithub.com\u002Fxiaolai-sqlai\u002Fmobilenetv3)\n- 代码：[非官方-PyTorch：https:\u002F\u002Fgithub.com\u002Fkuan-wang\u002Fpytorch-mobilenet-v3](https:\u002F\u002Fgithub.com\u002Fkuan-wang\u002Fpytorch-mobilenet-v3)\n- 代码：[非官方-PyTorch：https:\u002F\u002Fgithub.com\u002Fleaderj1001\u002FMobileNetV3-Pytorch](https:\u002F\u002Fgithub.com\u002Fleaderj1001\u002FMobileNetV3-Pytorch)\n- 代码：[非官方-PyTorch：https:\u002F\u002Fgithub.com\u002Fd-li14\u002Fmobilenetv3.pytorch](https:\u002F\u002Fgithub.com\u002Fd-li14\u002Fmobilenetv3.pytorch)\n- 代码：[非官方-Caffe：https:\u002F\u002Fgithub.com\u002Fjixing0415\u002Fcaffe-mobilenet-v3](https:\u002F\u002Fgithub.com\u002Fjixing0415\u002Fcaffe-mobilenet-v3)\n- 代码：[非官方-Keras：https:\u002F\u002Fgithub.com\u002Fxiaochus\u002FMobileNetV3](https:\u002F\u002Fgithub.com\u002Fxiaochus\u002FMobileNetV3)\n\n### Res2Net\n**Res2Net：一种新的多尺度骨干网络架构**\n高尚华、程明明、赵凯、张鑫宇、杨明轩、托尔\n- 论文：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1904.01169](https:\u002F\u002Farxiv.org\u002Fabs\u002F1904.01169)\n- 代码：[非官方-PyTorch：https:\u002F\u002Fgithub.com\u002F4uiiurz1\u002Fpytorch-res2net](https:\u002F\u002Fgithub.com\u002F4uiiurz1\u002Fpytorch-res2net)\n- 代码：[非官方-Keras：https:\u002F\u002Fgithub.com\u002Ffupiao1998\u002Fres2net-keras](https:\u002F\u002Fgithub.com\u002Ffupiao1998\u002Fres2net-keras)\n- 代码：[官方-PyTorch：https:\u002F\u002Fgithub.com\u002FRes2Net](https:\u002F\u002Fgithub.com\u002FRes2Net)\n\n### LIP-ResNet\n**LIP：基于局部重要性的池化**\n高子腾、王利民、吴刚山\n- 论文：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1908.04156](https:\u002F\u002Farxiv.org\u002Fabs\u002F1908.04156)\n- 代码：[官方-PyTorch：https:\u002F\u002Fgithub.com\u002Fsebgao\u002FLIP](https:\u002F\u002Fgithub.com\u002Fsebgao\u002FLIP)\n\n### EfficientNet\n\n**EfficientNet：重新思考卷积神经网络的模型缩放**\nMingxing Tan, Quoc V. 
Le\n- pdf：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1905.11946](https:\u002F\u002Farxiv.org\u002Fabs\u002F1905.11946)\n- 代码：[非官方 PyTorch 版本：https:\u002F\u002Fgithub.com\u002Flukemelas\u002FEfficientNet-PyTorch](https:\u002F\u002Fgithub.com\u002Flukemelas\u002FEfficientNet-PyTorch)\n- 代码：[官方 TensorFlow 版本：https:\u002F\u002Fgithub.com\u002Ftensorflow\u002Ftpu\u002Ftree\u002Fmaster\u002Fmodels\u002Fofficial\u002Fefficientnet](https:\u002F\u002Fgithub.com\u002Ftensorflow\u002Ftpu\u002Ftree\u002Fmaster\u002Fmodels\u002Fofficial\u002Fefficientnet)\n\n\n### FixResNeXt \n**解决训练与测试分辨率不一致的问题**\nHugo Touvron, Andrea Vedaldi, Matthijs Douze, Hervé Jégou\n- pdf：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1906.06423](https:\u002F\u002Farxiv.org\u002Fabs\u002F1906.06423)\n- 代码：[官方 PyTorch 版本：https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FFixRes](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FFixRes)\n\n\n### BiT\n**大迁移（BiT）：通用视觉表征学习**\nAlexander Kolesnikov, Lucas Beyer, Xiaohua Zhai, Joan Puigcerver, Jessica Yung, Sylvain Gelly, Neil Houlsby\n- pdf：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1912.11370](https:\u002F\u002Farxiv.org\u002Fabs\u002F1912.11370)\n- 代码：[官方 TensorFlow 版本：https:\u002F\u002Fgithub.com\u002Fgoogle-research\u002Fbig_transfer](https:\u002F\u002Fgithub.com\u002Fgoogle-research\u002Fbig_transfer)\n\n### PSConv + ResNext101\n**PSConv：将特征金字塔压缩进一个紧凑的多尺度卷积层**\nDuo Li1, Anbang Yao2B, 和 Qifeng Chen1B\n- pdf：[https:\u002F\u002Farxiv.org\u002Fabs\u002F2007.06191](https:\u002F\u002Farxiv.org\u002Fabs\u002F2007.06191)\n- 代码：[https:\u002F\u002Fgithub.com\u002Fd-li14\u002FPSConv](https:\u002F\u002Fgithub.com\u002Fd-li14\u002FPSConv)\n\n\n### NoisyStudent\n**带有噪声的学生自训练提升 ImageNet 分类性能**\nQizhe Xie, Minh-Thang Luong, Eduard Hovy, Quoc V. 
Le\n- pdf：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1911.04252](https:\u002F\u002Farxiv.org\u002Fabs\u002F1911.04252)\n- 代码：[官方 TensorFlow 版本：https:\u002F\u002Fgithub.com\u002Fgoogle-research\u002Fnoisystudent](https:\u002F\u002Fgithub.com\u002Fgoogle-research\u002Fnoisystudent)\n- 代码：[非官方 PyTorch 版本：https:\u002F\u002Fgithub.com\u002Fsally20921\u002FNoisyStudent](https:\u002F\u002Fgithub.com\u002Fsally20921\u002FNoisyStudent)\n\n### RegNet\n**设计网络设计空间**\nIlija Radosavovic, Raj Prateek Kosaraju, Ross Girshick, Kaiming He, Piotr Dollár\n- pdf：[https:\u002F\u002Farxiv.org\u002Fabs\u002F2003.13678](https:\u002F\u002Farxiv.org\u002Fabs\u002F2003.13678)\n- 代码：[官方 PyTorch 版本：https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fpycls](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fpycls)\n- 代码：[非官方 PyTorch 版本：https:\u002F\u002Fgithub.com\u002Fd-li14\u002Fregnet.pytorch](https:\u002F\u002Fgithub.com\u002Fd-li14\u002Fregnet.pytorch)\n\n### GhostNet\n**GhostNet：以低成本操作获得更多特征**\nKai Han, Yunhe Wang, Qi Tian, Jianyuan Guo, Chunjing Xu, Chang Xu\n- pdf：[https:\u002F\u002Farxiv.org\u002Fabs\u002F1911.11907](https:\u002F\u002Farxiv.org\u002Fabs\u002F1911.11907)\n- 代码：[官方 PyTorch 版本：https:\u002F\u002Fgithub.com\u002Fhuawei-noah\u002Fghostnet](https:\u002F\u002Fgithub.com\u002Fhuawei-noah\u002Fghostnet)\n\n### ViT\n**一张图胜过 16×16 个词：大规模图像识别中的 Transformer**\nAlexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby\n- pdf：[https:\u002F\u002Farxiv.org\u002Fabs\u002F2010.11929](https:\u002F\u002Farxiv.org\u002Fabs\u002F2010.11929)\n- 代码：[官方 TensorFlow 版本：https:\u002F\u002Fgithub.com\u002Fgoogle-research\u002Fvision_transformer](https:\u002F\u002Fgithub.com\u002Fgoogle-research\u002Fvision_transformer)\n- 代码：[非官方 PyTorch 
版本：https:\u002F\u002Fgithub.com\u002Fjeonsworld\u002FViT-pytorch](https:\u002F\u002Fgithub.com\u002Fjeonsworld\u002FViT-pytorch)\n\n### DeiT\n**数据高效训练的视觉 Transformer 及通过注意力进行蒸馏**\nHugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Hervé Jégou\n- pdf：[https:\u002F\u002Farxiv.org\u002Fabs\u002F2012.12877](https:\u002F\u002Farxiv.org\u002Fabs\u002F2012.12877)\n- 代码：[官方 PyTorch 版本：https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fdeit](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fdeit)\n\n### PVT\n**金字塔视觉 Transformer：无需卷积的密集预测通用骨干网络**\nWenhai Wang, Enze Xie, Xiang Li, Deng-Ping Fan, Kaitao Song, Ding Liang, Tong Lu, Ping Luo, Ling Shao\n- pdf：[https:\u002F\u002Farxiv.org\u002Fabs\u002F2102.12122](https:\u002F\u002Farxiv.org\u002Fabs\u002F2102.12122)\n- 代码：[官方 PyTorch 版本：https:\u002F\u002Fgithub.com\u002Fwhai362\u002FPVT](https:\u002F\u002Fgithub.com\u002Fwhai362\u002FPVT)\n\n### T2T\n**Token-to-Token ViT：从头开始在 ImageNet 上训练视觉 Transformer**\nLi Yuan, Yunpeng Chen, Tao Wang, Weihao Yu, Yujun Shi, Zihang Jiang, Francis EH Tay, Jiashi Feng, Shuicheng Yan\n- pdf：[https:\u002F\u002Farxiv.org\u002Fabs\u002F2101.11986](https:\u002F\u002Farxiv.org\u002Fabs\u002F2101.11986)\n- 代码：[官方 PyTorch 版本：https:\u002F\u002Fgithub.com\u002Fyitu-opensource\u002FT2T-ViT](https:\u002F\u002Fgithub.com\u002Fyitu-opensource\u002FT2T-ViT)\n\n### DeepVit\n**DeepViT：迈向更深的视觉 Transformer**\nDaquan Zhou, Bingyi Kang, Xiaojie Jin, Linjie Yang, Xiaochen Lian, Zihang Jiang, Qibin Hou，以及 Jiashi Feng。\n- pdf：[https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.11886](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.11886)\n- 代码：[官方 PyTorch 版本：https:\u002F\u002Fgithub.com\u002Fzhoudaquan\u002Fdvit_repo](https:\u002F\u002Fgithub.com\u002Fzhoudaquan\u002Fdvit_repo)\n\n### ViL\n**多尺度视觉 Longformer：一种用于高分辨率图像编码的新视觉 Transformer**\nPengchuan Zhang, Xiyang Dai, Jianwei Yang, Bin Xiao, Lu Yuan, Lei Zhang，以及 Jianfeng Gao\n- 
pdf：[https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.15358](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.15358)\n- 代码：[官方 PyTorch 版本：https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002Fvision-longformer](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002Fvision-longformer)\n\n### TNT\n**Transformer in Transformer**\nKai Han, An Xiao, Enhua Wu, Jianyuan Guo, Chunjing Xu，以及 Yunhe Wang\n- pdf：[https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.00112](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.00112)\n- 代码：[https:\u002F\u002Fgithub.com\u002Fhuawei-noah\u002FCV-Backbones](https:\u002F\u002Fgithub.com\u002Fhuawei-noah\u002FCV-Backbones)\n\n### CvT\n**CvT：将卷积引入视觉 Transformer**\nHaiping Wu, Bin Xiao, Noel Codella，以及 Mengchen Liu、Xiyang Dai、Lu Yuan、Lei Zhang\n- pdf：[https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.15808](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.15808)\n- 代码：[https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002FCvT](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002FCvT)\n\n### CViT\n**CrossViT：用于图像分类的交叉注意力多尺度视觉 Transformer**\nChun-Fu (Richard) Chen, Quanfu Fan，以及 Rameswar Panda\n- pdf：[https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.14899](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.14899)\n- 代码：[https:\u002F\u002Fgithub.com\u002FIBM\u002FCrossViT](https:\u002F\u002Fgithub.com\u002FIBM\u002FCrossViT)\n\n### Focal-T\n**视觉 Transformer 中用于长距离交互的焦点注意力**\nJianwei Yang, Chunyuan Li, Pengchuan Zhang，以及 Xiyang Dai、Bin Xiao、Lu Yuan、Jianfeng Gao\n- pdf：[https:\u002F\u002Farxiv.org\u002Fabs\u002F2107.00641](https:\u002F\u002Farxiv.org\u002Fabs\u002F2107.00641)\n- 代码：[https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002FFocal-Transformer](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002FFocal-Transformer)\n\n### Twins\n**Twins：重新审视视觉 Transformer 中的空间注意力设计**\n- pdf：[https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.13840](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.13840)\n- 代码：[https:\u002F\u002Fgit.io\u002FTwins](https:\u002F\u002Fgit.io\u002FTwins)\n\n### PVTv2\n**Wenhai 
Wang, Enze Xie, Xiang Li，以及 Deng-Ping Fan、Kaitao Song、Ding Liang、Tong Lu、Ping Luo、Ling Shao**\n- pdf：[https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.13797](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.13797)\n- 代码：[官方 PyTorch 版本：https:\u002F\u002Fgithub.com\u002Fwhai362\u002FPVT](https:\u002F\u002Fgithub.com\u002Fwhai362\u002FPVT)","# Awesome-Image-Classification 快速上手指南\n\n`awesome-image-classification` 并非一个可直接安装的单一软件包，而是一个精选的深度学习图像分类论文与代码实现清单。本指南将指导你如何利用该清单中的资源，快速搭建环境并运行经典的图像分类模型（以 ResNet 为例）。\n\n## 环境准备\n\n在开始之前，请确保你的开发环境满足以下要求：\n\n*   **操作系统**：Linux (推荐 Ubuntu 18.04+), macOS, 或 Windows (需配置 WSL2)。\n*   **硬件要求**：建议使用配备 NVIDIA GPU 的机器以加速训练和推理（CUDA 支持）。\n*   **前置依赖**：\n    *   Python 3.8 或更高版本\n    *   pip 包管理工具\n    *   Git\n\n## 安装步骤\n\n由于清单中包含了多种框架（PyTorch, TensorFlow, Keras 等）的实现，以下步骤以目前最主流的 **PyTorch** 框架为例，演示如何安装依赖并获取代码。\n\n### 1. 创建虚拟环境（推荐）\n\n```bash\npython -m venv img_cls_env\nsource img_cls_env\u002Fbin\u002Factivate  # Windows 用户请使用: img_cls_env\\Scripts\\activate\n```\n\n### 2. 安装 PyTorch\n\n推荐使用国内镜像源（如清华大学开源软件镜像站）加速安装。以下命令安装支持 CUDA 11.8 的稳定版 PyTorch：\n\n```bash\npip install torch torchvision torchaudio --index-url https:\u002F\u002Fdownload.pytorch.org\u002Fwhl\u002Fcu118\n```\n\n*注：若无 NVIDIA GPU，请使用 CPU 版本：`pip install torch torchvision torchaudio --index-url https:\u002F\u002Fdownload.pytorch.org\u002Fwhl\u002Fcpu`*\n\n### 3. 获取参考代码\n\n清单中推荐了多个代码库。你可以直接克隆作者提供的 PyTorch 实现仓库，其中包含了 VGG, ResNet, DenseNet 等多种模型的复现代码：\n\n```bash\ngit clone https:\u002F\u002Fgithub.com\u002Fweiaicunzai\u002Fpytorch-cifar100.git\ncd pytorch-cifar100\n```\n\n同时安装该项目所需的额外依赖：\n\n```bash\npip install -r requirements.txt\n```\n\n## 基本使用\n\n以下示例展示如何使用已安装的 `pytorch-cifar100` 仓库运行一个经典的 **ResNet-18** 模型进行简单的推理测试。\n\n### 1. 加载预定义模型\n\n在项目根目录下，创建一个名为 `quick_start.py` 的文件，填入以下代码：\n\n```python\nimport torch\nfrom models.resnet import resnet18\nfrom torchvision import transforms\nfrom PIL import Image\n\n# 1. 
初始化模型\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nmodel = resnet18(num_classes=100) # CIFAR-100 有 100 个类别\nmodel.to(device)\nmodel.eval()\n\n# 2. 定义图像预处理流程 (需与训练时保持一致)\ntransform = transforms.Compose([\n    transforms.Resize((32, 32)), # CIFAR 数据集图像大小为 32x32\n    transforms.ToTensor(),\n    transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761))\n])\n\n# 3. 加载示例图像 (请替换为你本地的图片路径)\n# 如果没有图片，可以使用随机张量模拟输入\n# image = Image.open(\"your_image.jpg\").convert('RGB')\ninput_tensor = torch.rand(1, 3, 32, 32).to(device) \n\n# 4. 执行推理\nwith torch.no_grad():\n    output = model(input_tensor)\n    probabilities = torch.nn.functional.softmax(output[0], dim=0)\n    \n    # 获取预测类别索引\n    predicted_class = torch.argmax(probabilities).item()\n    confidence = probabilities[predicted_class].item()\n\nprint(f\"Predicted Class Index: {predicted_class}\")\nprint(f\"Confidence: {confidence:.4f}\")\n```\n\n### 2. 运行脚本\n\n在终端执行以下命令运行脚本：\n\n```bash\npython quick_start.py\n```\n\n若输出类似 `Predicted Class Index: 45` 和 `Confidence: 0.0123`，则说明环境配置成功，模型已可正常调用。\n\n> **提示**：若要复现清单中其他论文（如 EfficientNet, ViT 等）的效果，请访问 [awesome-image-classification](https:\u002F\u002Fgithub.com\u002Fweiaicunzai\u002Fawesome-image-classification) 原文查找对应的论文链接和官方\u002F非官方代码仓库地址，参照上述步骤进行克隆和运行。","某初创公司的计算机视觉算法工程师小李，正负责为一款新的工业质检系统搭建图像分类基线模型，需要在短时间内复现并对比多种主流网络架构的性能。\n\n### 没有 awesome-image-classification 时\n- **文献检索效率低下**：需要在 Google Scholar、arXiv 和各大会议官网间反复跳转搜索，难以系统性梳理从 VGG 到 Inception 系列的演进脉络。\n- **代码复现门槛高**：找到的论文往往缺乏官方开源代码，或实现的框架版本过旧（如仅支持 TensorFlow 1.x），导致环境配置耗时数天且报错频发。\n- **选型依据模糊**：缺乏统一的性能对比表格，难以区分哪些模型是追求极致精度（如 Inception-ResNet-v2），哪些是侧重轻量化（如 SqueezeNet），容易选错技术路线。\n- **学习路径混乱**：作为深度学习新手，面对海量论文不知从何入手，极易在复杂的数学推导中迷失，忽略了作者推荐的\"VGG -> GoogLeNet -> ResNet\"最佳入门顺序。\n\n### 使用 awesome-image-classification 后\n- **资源一站式获取**：直接查阅按时间排序的精选列表，快速定位到 2014 年至今的关键论文及其对应的 PyTorch\u002FTensorFlow 实现链接，检索时间从几天缩短至半小时。\n- **复现成功率提升**：利用列表中关联的高质量开源代码库（如作者提供的 pytorch-cifar100 
实现），迅速跑通基准模型，将精力集中在业务数据适配而非调试基础网络。\n- **科学决策模型架构**：参考清晰的 ImageNet Top-1\u002FTop-5 准确率对比表，结合项目对推理速度和精度的双重需求，果断选择适合的预训练模型作为起点。\n- **建立清晰认知体系**：遵循仓库作者建议的学习路径，由浅入深理解卷积神经网络设计思想，避免了盲目阅读高难度论文带来的挫败感。\n\nawesome-image-classification 通过整合分散的学术资源与工程代码，将图像分类领域的入门与研发周期大幅压缩，让开发者能专注于核心业务创新而非重复造轮子。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fweiaicunzai_awesome-image-classification_0bf7d0a6.png","weiaicunzai",null,"https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002Fweiaicunzai_52ce7d43.png","email：by@bupt.edu.cn\r\n\r\n","Beijing University of Posts and Telecommunications","Beijing, China","https:\u002F\u002Fgithub.com\u002Fweiaicunzai",3056,611,"2026-04-02T15:51:04",5,"","未说明",{"notes":90,"python":88,"dependencies":91},"该仓库是一个深度学习图像分类论文和代码的精选列表（Awesome List），本身不是一个可直接运行的单一软件工具。它列出了多种网络架构（如 VGG, ResNet, EfficientNet 等）及其对应的原始论文链接和第三方实现代码（涵盖 PyTorch, TensorFlow, Keras, Caffe, Chainer, MXNet 等多种框架）。具体的运行环境需求取决于用户选择实现的特定模型和所使用的深度学习框架版本。",[],[13,14],[94,95,96,97,98,99],"deep-learning","computer-vision","image-classification","awesome","awesome-list","papers","2026-03-27T02:49:30.150509","2026-04-06T07:12:36.467996",[],[]]