[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-ashishpatel26--365-Days-Computer-Vision-Learning-Linkedin-Post":3,"tool-ashishpatel26--365-Days-Computer-Vision-Learning-Linkedin-Post":61},[4,18,26,36,44,53],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":17},4358,"openclaw","openclaw\u002Fopenclaw","OpenClaw 是一款专为个人打造的本地化 AI 助手，旨在让你在自己的设备上拥有完全可控的智能伙伴。它打破了传统 AI 助手局限于特定网页或应用的束缚，能够直接接入你日常使用的各类通讯渠道，包括微信、WhatsApp、Telegram、Discord、iMessage 等数十种平台。无论你在哪个聊天软件中发送消息，OpenClaw 都能即时响应，甚至支持在 macOS、iOS 和 Android 设备上进行语音交互，并提供实时的画布渲染功能供你操控。\n\n这款工具主要解决了用户对数据隐私、响应速度以及“始终在线”体验的需求。通过将 AI 部署在本地，用户无需依赖云端服务即可享受快速、私密的智能辅助，真正实现了“你的数据，你做主”。其独特的技术亮点在于强大的网关架构，将控制平面与核心助手分离，确保跨平台通信的流畅性与扩展性。\n\nOpenClaw 非常适合希望构建个性化工作流的技术爱好者、开发者，以及注重隐私保护且不愿被单一生态绑定的普通用户。只要具备基础的终端操作能力（支持 macOS、Linux 及 Windows WSL2），即可通过简单的命令行引导完成部署。如果你渴望拥有一个懂你",349277,3,"2026-04-06T06:32:30",[13,14,15,16],"Agent","开发框架","图像","数据工具","ready",{"id":19,"name":20,"github_repo":21,"description_zh":22,"stars":23,"difficulty_score":10,"last_commit_at":24,"category_tags":25,"status":17},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,"2026-04-05T11:01:52",[14,15,13],{"id":27,"name":28,"github_repo":29,"description_zh":30,"stars":31,"difficulty_score":32,"last_commit_at":33,"category_tags":34,"status":17},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",147882,2,"2026-04-09T11:32:47",[14,13,35],"语言模型",{"id":37,"name":38,"github_repo":39,"description_zh":40,"stars":41,"difficulty_score":32,"last_commit_at":42,"category_tags":43,"status":17},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",108111,"2026-04-08T11:23:26",[14,15,13],{"id":45,"name":46,"github_repo":47,"description_zh":48,"stars":49,"difficulty_score":32,"last_commit_at":50,"category_tags":51,"status":17},4721,"markitdown","microsoft\u002Fmarkitdown","MarkItDown 是一款由微软 AutoGen 团队打造的轻量级 Python 工具，专为将各类文件高效转换为 Markdown 格式而设计。它支持 PDF、Word、Excel、PPT、图片（含 OCR）、音频（含语音转录）、HTML 乃至 YouTube 
",93400,"2026-04-06T19:52:38",[52,14],"Plugins",{"id":54,"name":55,"github_repo":56,"description_zh":57,"stars":58,"difficulty_score":10,"last_commit_at":59,"category_tags":60,"status":17},4487,"LLMs-from-scratch","rasbt\u002FLLMs-from-scratch","LLMs-from-scratch is an open-source educational project built on PyTorch that guides you through constructing a ChatGPT-style large language model (LLM) from scratch, step by step. It is the official code repository for the book of the same name, and it lays out a complete hands-on path covering model development, pretraining, and finetuning.\n\nThe project tackles the “black box” problem in learning about large models: many developers can call ready-made models yet struggle to understand the internal architecture and training machinery. By writing every line of core code yourself, you come to thoroughly grasp key principles such as the Transformer architecture and the attention mechanism, and to genuinely understand how a large model “thinks.” The project also includes code for loading large pretrained weights for finetuning, carrying the theory into practical use.\n\nLLMs-from-scratch is ideal for AI developers, researchers, and computer science students who want to dig into the underlying principles. For engineers who are not satisfied with merely consuming APIs and want to see how a model is actually built, it is an excellent learning resource. Its distinctive strength is its step-by-step pedagogy: a complex piece of systems engineering is broken into clear stages, with detailed diagrams and examples, making it feasible to build a small but fully functional large model. Whether you want to solidify your theoretical foundations or prepare to develop larger models in the future",90106,"2026-04-06T11:19:32",[35,15,13,14],{"id":62,"github_repo":63,"name":64,"description_en":65,"description_zh":66,"ai_summary_zh":67,"readme_en":68,"readme_zh":69,"quickstart_zh":70,"use_case_zh":71,"hero_image_url":72,"owner_login":73,"owner_name":74,"owner_avatar_url":75,"owner_bio":76,"owner_company":77,"owner_location":78,"owner_email":79,"owner_twitter":69,"owner_website":80,"owner_url":81,"languages":69,"stars":82,"forks":83,"last_commit_at":84,"license":69,"difficulty_score":85,"env_os":86,"env_gpu":87,"env_ram":87,"env_deps":88,"category_tags":91,"github_topics":92,"view_count":32,"oss_zip_url":69,"oss_zip_packed_at":69,"status":110,"created_at":111,"updated_at":112,"faqs":113,"releases":114},5967,"ashishpatel26\u002F365-Days-Computer-Vision-Learning-Linkedin-Post","365-Days-Computer-Vision-Learning-Linkedin-Post","365 Days Computer Vision Learning Linkedin Post","365-Days-Computer-Vision-Learning-Linkedin-Post is a systematically organized learning resource for computer vision enthusiasts. Serialized over 365 consecutive days, it dissects one core algorithm or technical topic per day, ranging from the classic YOLO series and Faster R-CNN variants to cutting-edge Vision Transformers and DeiT, a range of semantic segmentation networks (such as UNet and the DeepLab family), and attention-based models.\n\nIn computer vision, techniques iterate quickly and the knowledge is scattered, so learners often struggle to build a complete picture or track the latest progress. This project addresses that pain point by organizing fragmented academic results into a clear, progressive learning path, helping users efficiently master skills from object detection and image segmentation to model compression.\n\nThe resource suits AI developers, algorithm researchers, and students who want to understand deep learning principles in depth. Senior engineers needing a quick refresher on classic architectures and beginners entering the field alike will find clear theoretical guidance. Its distinctive feature is the companion-style daily-post format: it covers industry favorites such as EfficientDet and RepVGG along with interpretability techniques like Grad-CAM, and its detailed link index makes working through complex papers orderly and manageable, an ideal guide for building solid foundations in vision algorithms.","365-Days-Computer-Vision-Learning-Linkedin-Post is a systematically organized learning resource for computer vision enthusiasts. Serialized over 365 consecutive days, it dissects one core algorithm or technical topic per day, ranging from the classic YOLO series and Faster R-CNN variants to cutting-edge Vision Transformers and DeiT, a range of semantic segmentation networks (such as UNet and the DeepLab family), and attention-based models.\n\nIn computer vision, techniques iterate quickly and the knowledge is scattered, so learners often struggle to build a complete picture or track the latest progress. This project addresses that pain point by organizing fragmented academic results into a clear, progressive learning path, helping users efficiently master skills from object detection and image segmentation to model compression.\n\nThe resource suits AI developers, algorithm researchers, and students who want to understand deep learning principles in depth. Senior engineers needing a quick refresher on classic architectures and beginners entering the field alike will find clear theoretical guidance. Its distinctive feature is the companion-style daily-post format: it covers industry favorites such as EfficientDet and RepVGG along with interpretability techniques like Grad-CAM, and its detailed link index makes working through complex papers orderly and manageable, an ideal guide for building solid foundations in vision algorithms.","## 365 Days Computer Vision Learning LinkedIn Post\r\n\r\nFollow me on LinkedIn: https:\u002F\u002Fwww.linkedin.com\u002Fin\u002Fashishpatel2604\u002F\r\n\r\n![](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fashishpatel26_365-Days-Computer-Vision-Learning-Linkedin-Post_readme_22bfb6743e72.gif)\r\n\r\n| Days | Topic                                        | Post Link              |\r\n| ---- | -------------------------------------------- | ---------------------- |\r\n| 1    | 
**EfficientDet**                             | https:\u002F\u002Fbit.ly\u002F362NWHa |\r\n| 2    | **Yolact++**                                 | https:\u002F\u002Fbit.ly\u002F3o5OaU3 |\r\n| 3    | **YOLO Series**                              | https:\u002F\u002Fbit.ly\u002F3650LAJ |\r\n| 4    | **Detr**                                     | https:\u002F\u002Fbit.ly\u002F39S5F57 |\r\n| 5    | **Vision Transformer**                       | https:\u002F\u002Fbit.ly\u002F39UMHLd |\r\n| 6    | **Dynamic RCNN**                             | https:\u002F\u002Fbit.ly\u002F3939gy5 |\r\n| 7    | **DeiT: (Data-efficient image Transformer)** | https:\u002F\u002Fbit.ly\u002F363ZABt |\r\n| 8    | **Yolov5**                                   | https:\u002F\u002Fbit.ly\u002F39QHTXq |\r\n| 9    | **DropBlock**                                | https:\u002F\u002Fbit.ly\u002F3sM4TiG |\r\n| 10   | **FCN**                                      | https:\u002F\u002Fbit.ly\u002F3iE9U8C |\r\n| 11   | **Unet**                                     | https:\u002F\u002Fbit.ly\u002F3izdbG2 |\r\n| 12   | **RetinaNet**                                | https:\u002F\u002Fbit.ly\u002F3o5NrlN |\r\n| 13   | **SegNet**                                   | https:\u002F\u002Fbit.ly\u002F3qIauVz |\r\n| 14   | **CAM**                                      | https:\u002F\u002Fbit.ly\u002F2Y2I8ZR |\r\n| 15   | **R-FCN**                                    | https:\u002F\u002Fbit.ly\u002F3iCKsQL |\r\n| 16   | **RepVGG**                                   | https:\u002F\u002Fbit.ly\u002F2Y2pGjV |\r\n| 17   | **Graph Convolution Network**                | https:\u002F\u002Fbit.ly\u002F2LS9RK8 |\r\n| 18   | **DeconvNet**                                | https:\u002F\u002Fbit.ly\u002F2Mhwzes |\r\n| 19   | **ENet**                                     | https:\u002F\u002Fbit.ly\u002F2Y2HgEz |\r\n| 20   | **Deeplabv1**                                | https:\u002F\u002Fbit.ly\u002F3o7Utqn |\r\n| 21   | **CRF-RNN**                                  | https:\u002F\u002Fbit.ly\u002F2Y5nsR4 |\r\n| 22   | **Deeplabv2**                                | https:\u002F\u002Fbit.ly\u002F2Y9DgSx |\r\n| 23   | **DPN**                                      | https:\u002F\u002Fbit.ly\u002F363Cye2 |\r\n| 24   | **Grad-CAM**                                 | https:\u002F\u002Fbit.ly\u002F3iF006q |\r\n| 25   | **ParseNet**                                 | https:\u002F\u002Fbit.ly\u002F3oesFk5 |\r\n| 26   | **ResNeXt**                                  | https:\u002F\u002Fbit.ly\u002F2M2sXxe |\r\n| 27   | **AmoebaNet**                                | https:\u002F\u002Fbit.ly\u002F2YgRIbN |\r\n| 28   | **DilatedNet**                               | https:\u002F\u002Fbit.ly\u002F2M9fuDS |\r\n| 29   | **DRN**                                      | https:\u002F\u002Fbit.ly\u002F2KXVmUH |\r\n| 30   | **RefineNet**                                | https:\u002F\u002Fbit.ly\u002F3cpCBVq |\r\n| 31   | **Preactivation-Resnet**                     | https:\u002F\u002Fbit.ly\u002F2MJtgwQ |\r\n| 32   | **SqueezeNet**                               | https:\u002F\u002Fbit.ly\u002F3cv3Ca0 |\r\n| 33   | **FractalNet**                               | https:\u002F\u002Fbit.ly\u002F3pSv712 |\r\n| 34   | **PolyNet**                                  | https:\u002F\u002Fbit.ly\u002F3atCQfJ |\r\n| 35   | **DeepSim(Image Quality Assessment)**        | https:\u002F\u002Fbit.ly\u002F3oKJGTi |\r\n| 36   | **Residual Attention Network**               | https:\u002F\u002Fbit.ly\u002F3cIjupL 
|\r\n| 37   | **IGCNet \u002F IGCV**                            | https:\u002F\u002Fbit.ly\u002F36LRfTo |\r\n| 38   | **Resnet38**                                 | https:\u002F\u002Fbit.ly\u002F2N7tpKL |\r\n| 39   | **SqueezeNext**                              | https:\u002F\u002Fbit.ly\u002F3cSev5W |\r\n| 40   | **Group Normalization**                      | https:\u002F\u002Fbit.ly\u002F3ryNxEI |\r\n| 41   | **ENAS**                                     | https:\u002F\u002Fbit.ly\u002F2LB6pDC |\r\n| 42   | **PNASNet**                                  | https:\u002F\u002Fbit.ly\u002F3tIX6mx |\r\n| 43   | **ShuffleNetV2**                             | https:\u002F\u002Fbit.ly\u002F2Zb3xAM |\r\n| 44   | **BAM**                                      | https:\u002F\u002Fbit.ly\u002F3b67xb2 |\r\n| 45   | **CBAM**                                     | https:\u002F\u002Fbit.ly\u002F3plxHvJ |\r\n| 46   | **MorphNet**                                 | https:\u002F\u002Fbit.ly\u002F3rWzcSM |\r\n| 47   | **NetAdapt**                                 | https:\u002F\u002Fbit.ly\u002F2NtlFmE |\r\n| 48   | **ESPNetv2**                                 | https:\u002F\u002Fbit.ly\u002F3jWVoJv |\r\n| 49   | **FBNet**                                    | https:\u002F\u002Fbit.ly\u002F3k1PXZL |\r\n| 50   | **HideandSeek**                              | https:\u002F\u002Fbit.ly\u002F3qELCP0 |\r\n| 51   | **MR-CNN & S-CNN**                           | https:\u002F\u002Fbit.ly\u002F2Zw6QTf |\r\n| 52   | **ACoL: Adversarial Complementary Learning** | https:\u002F\u002Fbit.ly\u002F3qKFNiU |\r\n| 53   | **CutMix**                                   | https:\u002F\u002Fbit.ly\u002F2Nt5shI |\r\n| 54   | **ADL**                                      | https:\u002F\u002Fbit.ly\u002F3qNeFQm |\r\n| 55   | **SAOL**                                     | https:\u002F\u002Fbit.ly\u002F2NVuBBs |\r\n| 56   | **SSD**                                      | https:\u002F\u002Fbit.ly\u002F37PWpyo |\r\n| 57   | **NOC**                                      | https:\u002F\u002Fbit.ly\u002F3uBrZJJ |\r\n| 58   | **G-RMI**                                    | https:\u002F\u002Fbit.ly\u002F3kJDlap |\r\n| 59   | **TDM**                                      | https:\u002F\u002Fbit.ly\u002F3dV5zgN |\r\n| 60   | **DSSD**                                     | https:\u002F\u002Fbit.ly\u002F3q6EHg8 |\r\n| 61   | **FPN**                                      | https:\u002F\u002Fbit.ly\u002F2OewZn0 |\r\n| 62   | **DCN**                                      | https:\u002F\u002Fbit.ly\u002F3e3G4Kg |\r\n| 63   | **Light-Head-RCNN**                          | https:\u002F\u002Fbit.ly\u002F388rtcT |\r\n| 64   | **Cascade RCNN**                             | https:\u002F\u002Fbit.ly\u002F3uUDlZz |\r\n| 65   | **MegNet**                                   | https:\u002F\u002Fbit.ly\u002F3bkNvuM |\r\n| 66   | **StairNet**                                 | https:\u002F\u002Fbit.ly\u002F3bluE2P |\r\n| 67   | **ImageNet Rethinking**                      | https:\u002F\u002Fbit.ly\u002F3bqBfZZ |\r\n| 68   | **ERFNet**                                   | https:\u002F\u002Fbit.ly\u002F2OxgC5c |\r\n| 69   | **LayerCascade**                             | https:\u002F\u002Fbit.ly\u002F3qzWdd8 |\r\n| 70   | **IDW-CNN**                                  | https:\u002F\u002Fbit.ly\u002F3letEAY |\r\n| 71   | **DIS**                                      | https:\u002F\u002Fbit.ly\u002F3vi3xh3 |\r\n| 72   | **SDN**                                      | 
https:\u002F\u002Fbit.ly\u002F3lftn0k |\r\n| 73   | **ResNet-DUC-HDC**                           | https:\u002F\u002Fbit.ly\u002F3lmdhlN |\r\n| 74   | **Deeplabv3+**                               | https:\u002F\u002Fbit.ly\u002F3lfSRuR |\r\n| 75   | **AutoDeeplab**                              | https:\u002F\u002Fbit.ly\u002F2P14kSF |\r\n| 76   | **c3**                                       | https:\u002F\u002Fbit.ly\u002F3qX0yqK |\r\n| 77   | **DRRN**                                     | https:\u002F\u002Fbit.ly\u002F3ltkWP9 |\r\n| 78   | **BR²Net**                                   | https:\u002F\u002Fbit.ly\u002F3f0jGlI |\r\n| 79   | **SDS**                                      | https:\u002F\u002Fbit.ly\u002F3f0CZLw |\r\n| 80   | **AdderNet**                                 | https:\u002F\u002Fbit.ly\u002F3sfMdYa |\r\n| 81   | **HyperColumn**                              | https:\u002F\u002Fbit.ly\u002F3vV7Jn5 |\r\n| 82   | **DeepMask**                                 | https:\u002F\u002Fbit.ly\u002F3cY2RVR |\r\n| 83   | **SharpMask**                                | https:\u002F\u002Fbit.ly\u002F3rg0h2r |\r\n| 84   | **MultipathNet**                             | https:\u002F\u002Fbit.ly\u002F31fcTMR |\r\n| 85   | **MNC**                                      | https:\u002F\u002Fbit.ly\u002F39rRXqj |\r\n| 86   | **InstanceFCN**                              | https:\u002F\u002Fbit.ly\u002F3wbQuy8 |\r\n| 87   | **FCIS**                                     | https:\u002F\u002Fbit.ly\u002F3dhPz6B |\r\n| 88   | **MaskLab**                                  | https:\u002F\u002Fbit.ly\u002F3wb3Vya |\r\n| 89   | **PANet**                                    | https:\u002F\u002Fbit.ly\u002F2PmQTNs |\r\n| 90   | **CUDMedVision1**                            | https:\u002F\u002Fbit.ly\u002F3rETZd1 |\r\n| 91   | **CUDMedVision2**                            | https:\u002F\u002Fbit.ly\u002F3mago0q |\r\n| 92   | **CFS-FCN**                                  | https:\u002F\u002Fbit.ly\u002F3cXP0zX |\r\n| 93   | **U-net+Res-net**                            | https:\u002F\u002Fbit.ly\u002F3mpKD3P |\r\n| 94   | **Multi-Channel**                            | https:\u002F\u002Fbit.ly\u002F2Q1WCbN |\r\n| 95   | **V-Net**                                    | https:\u002F\u002Fbit.ly\u002F3sYxGAt |\r\n| 96   | **3D-Unet**                                  | https:\u002F\u002Fbit.ly\u002F3uvNOcS |\r\n| 97   | **M²FCN**                                    | https:\u002F\u002Fbit.ly\u002F3cXSlPG |\r\n| 98   | **Suggestive Annotation**                    | https:\u002F\u002Fbit.ly\u002F3t1UbV8 |\r\n| 99   | **3D Unet + Resnet**                         | https:\u002F\u002Fbit.ly\u002F3wRu3i9 |\r\n| 100  | **Cascade 3D-Unet**                          | https:\u002F\u002Fbit.ly\u002F3siNsEX |\r\n| 101  | **DenseVoxNet**                              | https:\u002F\u002Fbit.ly\u002F2RGliYd |\r\n| 102  | **QSA + QNT**                                | https:\u002F\u002Fbit.ly\u002F3wWtyDf |\r\n| 103  | **Attention-Unet**                           | https:\u002F\u002Fbit.ly\u002F3eaMNAK |\r\n| 104  | **RUNet + R2Unet**                           | https:\u002F\u002Fbit.ly\u002F2Q4bIxG |\r\n| 105  | **VoxResNet**                                | https:\u002F\u002Fbit.ly\u002F32gLBWN |\r\n| 106  | **Unet++**                                   | https:\u002F\u002Fbit.ly\u002F3esShGV |\r\n| 107  | **H-DenseUnet**                              | https:\u002F\u002Fbit.ly\u002F3dN53kn |\r\n| 108  | **DUnet**                             
       | https:\u002F\u002Fbit.ly\u002F3sPYrWS |\r\n| 109  | **MultiResUnet**                             | https:\u002F\u002Fbit.ly\u002F32J7Epr |\r\n| 110  | **Unet3+**                                   | https:\u002F\u002Fbit.ly\u002F3vj4lRX |\r\n| 111  | **VGGNet For Covid19**                       | https:\u002F\u002Fbit.ly\u002F3ewquW6 |\r\n| 112  | **Dense-Gated U-Net (DGNet)**                | https:\u002F\u002Fbit.ly\u002F3tR67cM |\r\n| 113  | **Ki-Unet**                                  | https:\u002F\u002Fbit.ly\u002F3gD4wDK |\r\n| 114  | **Medical Transformer**                      | https:\u002F\u002Fbit.ly\u002F3dLw9Zf |\r\n| 115  | **Deep Snake- Instance Segmentation**        | https:\u002F\u002Fbit.ly\u002F3dQmdhm |\r\n| 116  | **BlendMask**                                | https:\u002F\u002Fbit.ly\u002F32LVXyf |\r\n| 117  | **CenterNet**                                | https:\u002F\u002Fbit.ly\u002F3aJrJQD |\r\n| 118  | **SRCNN**                                    | https:\u002F\u002Fbit.ly\u002F3t82eie |\r\n| 119  | **Swin Transformer**                         | https:\u002F\u002Fbit.ly\u002F2QMWxct |\r\n| 120  | **Polygon-RNN**                              | https:\u002F\u002Fbit.ly\u002F3ujEJ7D |\r\n| 121  | **PolyTransform**                            | https:\u002F\u002Fbit.ly\u002F3gT11ZZ |\r\n| 122  | **D2Det**                                    | https:\u002F\u002Fbit.ly\u002F3b2EDJL |\r\n| 123  | **PolarMask**                                | https:\u002F\u002Fbit.ly\u002F3uklSsO |\r\n| 124  | **FGN**                                      | https:\u002F\u002Fbit.ly\u002F3uiyyAl |\r\n| 125  | **Meta-SR**                                  | https:\u002F\u002Fbit.ly\u002F3ekFyr9 |\r\n| 126  | **Iterative Kernel Correlation**             | https:\u002F\u002Fbit.ly\u002F3xPGZp6 |\r\n| 127  | **SRFBN**                                    | https:\u002F\u002Fbit.ly\u002F2Qc1c7z |\r\n| 128  | **ODE**                                      | https:\u002F\u002Fbit.ly\u002F3w1K8k4 |\r\n| 129  | **SRNTT**                                    | https:\u002F\u002Fbit.ly\u002F2RNT9hS |\r\n| 130  | **Parallax Attention**                       | https:\u002F\u002Fbit.ly\u002F3tIr74x |\r\n| 131  | **3D Super Resolution**                      | https:\u002F\u002Fbit.ly\u002F3bliXJa |\r\n| 132  | **FSTRN**                                    | https:\u002F\u002Fbit.ly\u002F3uWJ8h7 |\r\n| 133  | **PointGroup**                               | https:\u002F\u002Fbit.ly\u002F2QfeKPP |\r\n| 134  | **3D-MPA**                                   | https:\u002F\u002Fbit.ly\u002F3bqz9J6 |\r\n| 135  | **Saliency Propagation**                     | https:\u002F\u002Fbit.ly\u002F3tXTvj4 |\r\n| 136  | **Libra R-CNN**                              | https:\u002F\u002Fbit.ly\u002F3hDytnt |\r\n| 137  | **SiamRPN++**                                | https:\u002F\u002Fbit.ly\u002F33TNjyi |\r\n| 138 | **LoFTR** | https:\u002F\u002Fbit.ly\u002F3eUtlJS |\r\n| 139 | **MZSR** | https:\u002F\u002Fbit.ly\u002F3ul5gAs |\r\n| 140 | **UCTGAN** | https:\u002F\u002Fbit.ly\u002F3fQg9ox |\r\n| 141 | **OccuSeg** | https:\u002F\u002Fbit.ly\u002F3bUJtta |\r\n| 142 | **LAPGAN** | https:\u002F\u002Fbit.ly\u002F3unOjW1 |\r\n| 143 | **TPN** | https:\u002F\u002Fbit.ly\u002F3vvyIoW |\r\n| 144 | **GTAD** | https:\u002F\u002Fbit.ly\u002F3c09yqK |\r\n| 145 | **SlowFast** | https:\u002F\u002Fbit.ly\u002F3fMrI0d |\r\n| 146 | **IDU** | https:\u002F\u002Fbit.ly\u002F2ROcIa5 |\r\n| 147 | **ATSS** | 
https:\u002F\u002Fbit.ly\u002F3hTIflC |\r\n| 148 | **Attention-RPN** | https:\u002F\u002Fbit.ly\u002F3oYescY |\r\n| 149 | **Aug-FPN**                                  | https:\u002F\u002Fbit.ly\u002F3fUbdzi |\r\n| 150 | **Hit-Detector** | https:\u002F\u002Fbit.ly\u002F3uGCLgB |\r\n| 151 | **MCN** | https:\u002F\u002Fbit.ly\u002F3ySpjtq |\r\n| 152 | **CentripetalNet** | https:\u002F\u002Fbit.ly\u002F2S1WNVB |\r\n| 153 | **ROAM** | https:\u002F\u002Fbit.ly\u002F34Ft8Ex |\r\n| 154 | **PF-NET(3D)** | https:\u002F\u002Fbit.ly\u002F2TzQiK9 |\r\n| 155 | **PointAugment** | https:\u002F\u002Fbit.ly\u002F3uMc8Hr |\r\n| 156 | **C-Flow** | https:\u002F\u002Fbit.ly\u002F3xgDlUn |\r\n| 157 | **RandLA-Net** | https:\u002F\u002Fbit.ly\u002F3fYajD9 |\r\n| 158 | **Total3DUnderStanding** | https:\u002F\u002Fbit.ly\u002F3v3jy9c |\r\n| 159 | **IF-Nets** | https:\u002F\u002Fbit.ly\u002F3v7XjPj |\r\n| 160 | **PerfectShape** | https:\u002F\u002Fbit.ly\u002F3za20vk |\r\n| 161 | **ACNe** | https:\u002F\u002Fbit.ly\u002F3gaJQSN |\r\n| 162 | **PQ-Net** | https:\u002F\u002Fbit.ly\u002F35dVPsm |\r\n| 163 | **SG-NN** | https:\u002F\u002Fbit.ly\u002F3iQ4yca |\r\n| 164 | **Cascade Cost Volume** | https:\u002F\u002Fbit.ly\u002F3gyZHtt |\r\n| 165 | **SketchGCN** | https:\u002F\u002Fbit.ly\u002F3pVoxI8 |\r\n| 166 | **Spektral (Graph Neural Network)** | https:\u002F\u002Fbit.ly\u002F3q2T079 |\r\n| 167 | **Graph Convolution Neural Network** | https:\u002F\u002Fbit.ly\u002F3gAkiNX |\r\n| 168 | **Fast Localized Spectral Filtering(Graph Kernel)** | https:\u002F\u002Fbit.ly\u002F3iRUEa0 |\r\n| 169 | **GraphSAGE** | https:\u002F\u002Fbit.ly\u002F3gCj9Xx |\r\n| 170 | **ARMA Convolution** | https:\u002F\u002Fbit.ly\u002F3qcubpC |\r\n| 171 | **Graph Attention Networks** | https:\u002F\u002Fbit.ly\u002F3h1gfKy |\r\n| 172 | **Axial-Deeplab** | https:\u002F\u002Fbit.ly\u002F3qiIF7l |\r\n| 173 | **Tide** | https:\u002F\u002Fbit.ly\u002F3j5evmh |\r\n| 174 | **SipMask** | https:\u002F\u002Fbit.ly\u002F3gMBoJE |\r\n| 175 | **UFO²** | https:\u002F\u002Fbit.ly\u002F2SVS2xA |\r\n| 176 | **SCAN** | https:\u002F\u002Fbit.ly\u002F2ThBv70 |\r\n| 177 | **AABO** : **Adaptive Anchor Box Optimization** | https:\u002F\u002Fbit.ly\u002F3qCSRaP |\r\n| 178 | **SimAug** | https:\u002F\u002Fbit.ly\u002F3dlV6tK |\r\n| 179 | **Instant-teaching** | https:\u002F\u002Fbit.ly\u002F3h0E2LU |\r\n| 180 | **Refinement Network for RGB-D** | https:\u002F\u002Fbit.ly\u002F3dtRh5O |\r\n| 181 | **Polka Lines** | https:\u002F\u002Fbit.ly\u002F3hlNbhd |\r\n| 182 | **HOTR** | https:\u002F\u002Fbit.ly\u002F3hsV44i |\r\n| 183 | **Soft-IntroVAE** | https:\u002F\u002Fbit.ly\u002F3jFozTk |\r\n| 184 | **ReXNet** | https:\u002F\u002Fbit.ly\u002F3r42WO9 |\r\n| 185 | **DiNTS** | https:\u002F\u002Fbit.ly\u002F3AQibii |\r\n| 186 | **Pose2Mesh** | https:\u002F\u002Fbit.ly\u002F3wFTORi |\r\n| 187 | **Keep Eyes on the Lane** | https:\u002F\u002Fbit.ly\u002F3wxs4hl |\r\n| 188 | **AssembleNet++** | https:\u002F\u002Fbit.ly\u002F3xAHhjf |\r\n| 189 | **SNE-RoadSeg** | https:\u002F\u002Fbit.ly\u002F3hyCEAL |\r\n| 190 | **AdvPC** | https:\u002F\u002Fbit.ly\u002F3i3dGrV |\r\n| 191 | **Eagle eye** | https:\u002F\u002Fbit.ly\u002F3e5Iqaz |\r\n| 192 | **Deep Hough Transform** | https:\u002F\u002Fbit.ly\u002F2UEFbAm |\r\n| 193 | **WeightNet** | https:\u002F\u002Fbit.ly\u002F3rfDSUL |\r\n| 194 | **StyleMAPGAN** | https:\u002F\u002Fbit.ly\u002F2URgPTO |\r\n| 195 | **PD-GAN** | https:\u002F\u002Fbit.ly\u002F3xQMCmM |\r\n| 196 | **Non-Local Sparse Attention** | https:\u002F\u002Fbit.ly\u002F3xJZbAd 
|\r\n| 197 | **TediGAN** | https:\u002F\u002Fbit.ly\u002F3wH67MZ |\r\n| 198 | **FedDG** | https:\u002F\u002Fbit.ly\u002F3zfKiGe |\r\n| 199 | **Auto-Exposure Fusion** | https:\u002F\u002Fbit.ly\u002F3y3F2W1 |\r\n| 200 | **Involution** | https:\u002F\u002Fbit.ly\u002F36Ksiaz |\r\n| 201 | **MutualNet** | https:\u002F\u002Fbit.ly\u002F3zhfd4N |\r\n| 202 | **Teachers do more than teach - Image to Image translation** | https:\u002F\u002Fbit.ly\u002F36RP28K |\r\n| 203 | **VideoMoCo** | https:\u002F\u002Fbit.ly\u002F3f6Pq7Z |\r\n| 204 | **ArtGAN** | https:\u002F\u002Fbit.ly\u002F3rvDCB9 |\r\n| 205 | **Vip-DeepLab** | https:\u002F\u002Fbit.ly\u002F3xmzmVX |\r\n| 206 | **PSConvolution** | https:\u002F\u002Fbit.ly\u002F3rEIgMY |\r\n| 207 | **Deep learning technique on Semantic Segmentation** | https:\u002F\u002Fbit.ly\u002F375hrID |\r\n| 208 | **Synthetic to Real** | https:\u002F\u002Fbit.ly\u002F3yfZSRO |\r\n| 209 | **Panoptic Segmentation** | https:\u002F\u002Fbit.ly\u002F376tbdA |\r\n| 210 | **HistoGAN** | https:\u002F\u002Fbit.ly\u002F3zSYyVD |\r\n| 211 | **Semantic Image Matting** | https:\u002F\u002Fbit.ly\u002F3s5ZD9F |\r\n| 212 | **Anchor-Free Person Search** | https:\u002F\u002Fbit.ly\u002F2VI0KAD |\r\n| 213 | **Spatial-Phase-Shallow-Learning** | https:\u002F\u002Fbit.ly\u002F3CDAl82 |\r\n| 214 | **LiteFlowNet3** | https:\u002F\u002Fbit.ly\u002F3yDILcO |\r\n| 215 | **EfficientNetv2** | https:\u002F\u002Fbit.ly\u002F3xAQsiE |\r\n| 216 | **CBNETv2** | https:\u002F\u002Fbit.ly\u002F3s3ptvb |\r\n| 217 | **PerPixel Classification** | https:\u002F\u002Fbit.ly\u002F3lOomyg |\r\n| 218 | **Kaleido-BERT** | https:\u002F\u002Fbit.ly\u002F3ywh2Lf |\r\n| 219 | **DARKGAN** | https:\u002F\u002Fbit.ly\u002F3lTW05J |\r\n| 220 | **PPDM** | https:\u002F\u002Fbit.ly\u002F3lPgjBt |\r\n| 221 | **SEAN** | https:\u002F\u002Fbit.ly\u002F3yOUJ3L |\r\n| 222 | **Closed-Loop Matters** | https:\u002F\u002Fbit.ly\u002F3CzBnlq |\r\n| 223 | **Elastic Graph Neural Network** | https:\u002F\u002Fbit.ly\u002F3jket9S |\r\n| 224 | **Deep Imbalance Regression** | https:\u002F\u002Fbit.ly\u002F3yn0Ue3 |\r\n| 225 | **PIPAL** - Image Quality Assessment | https:\u002F\u002Fbit.ly\u002F3gCliSx |\r\n| 226 | **Mobile-Former** | https:\u002F\u002Fbit.ly\u002F3kxCSbm |\r\n| 227 | **Rank and Sort Loss** | https:\u002F\u002Fbit.ly\u002F3sPQt1s |\r\n| 228 | **Room Classification using Graph Neural Network** | https:\u002F\u002Fbit.ly\u002F3gD8Odv |\r\n| 229 | **Pyramid Vision Transformer** | https:\u002F\u002Fbit.ly\u002F3zmod9h |\r\n| 230 | **EigenGAN** | https:\u002F\u002Fbit.ly\u002F3BfdIVO |\r\n| 231 | **GNeRF** | https:\u002F\u002Fbit.ly\u002F3mD3kTR |\r\n| 232 | **DetCo** | https:\u002F\u002Fbit.ly\u002F3sQiRk9 |\r\n| 233 | **DETR with Spatially Modulated Co-Attention** | https:\u002F\u002Fbit.ly\u002F3sPQ5jw |\r\n| 234 | **Residual Attention** | https:\u002F\u002Fbit.ly\u002F3yni4bJ |\r\n| 235 | **MG-GAN** | https:\u002F\u002Fbit.ly\u002F3mD30o7 |\r\n| 236 | **Adaptable GAN Encoders** | https:\u002F\u002Fbit.ly\u002F3yh4XJ3 |\r\n| 237 | **AdaAttN** | https:\u002F\u002Fbit.ly\u002F3BepKPa |\r\n| 238 | **Conformer** | https:\u002F\u002Fbit.ly\u002F3gCkj4N |\r\n| 239 | **YOLOP** | https:\u002F\u002Fbit.ly\u002F3BicysB |\r\n| 240 | **VMNet** | https:\u002F\u002Fbit.ly\u002F3k73jFZ |\r\n| 241 | **Airbert** | https:\u002F\u002Fbit.ly\u002F3nvcrGs |\r\n| 242 | **Oriented R-CNN** | https:\u002F\u002Fbit.ly\u002F397Zius |\r\n| 243 | **Battle of Network Structure** | https:\u002F\u002Fbit.ly\u002F2XcHbB0 |\r\n| 244 | **InSeGAN** | 
https:\u002F\u002Fbit.ly\u002F3z9wyMF |\r\n| 245 | **Efficient Person Search** | https:\u002F\u002Fbit.ly\u002F3CpbZOr |\r\n| 246 | **DeepGCNs** | https:\u002F\u002Fbit.ly\u002F3AevSHg |\r\n| 247 | **GroupFormer** | https:\u002F\u002Fbit.ly\u002F3lqzm2Y |\r\n| 248 | **SLIDE** | https:\u002F\u002Fbit.ly\u002F3hwpiEp |\r\n| 249 | **Super Neuron** | https:\u002F\u002Fbit.ly\u002F3zkXE3D |\r\n| 250 | **SOTR** | https:\u002F\u002Fbit.ly\u002F3hvqCYl |\r\n| 251 | **Survey: Instance Segmentation** | https:\u002F\u002Fbit.ly\u002F3k90xQB |\r\n| 252 | **SO-Pose** | https:\u002F\u002Fbit.ly\u002F3C56KD8 |\r\n| 253 | **CANet** | https:\u002F\u002Fbit.ly\u002F2XlDKZ2 |\r\n| 254 | **XVFI** | https:\u002F\u002Fbit.ly\u002F3lrOpcZ |\r\n| 255 | **TxT** | https:\u002F\u002Fbit.ly\u002F3tGFlEH |\r\n| 256 | **ConvMLP** | https:\u002F\u002Fbit.ly\u002F2XlE8Xu |\r\n| 257 | **Cross Domain Contrastive Learning** | https:\u002F\u002Fbit.ly\u002F3tDb2id |\r\n| 258 | **OS2D: One Stage Object Detection** | https:\u002F\u002Fbit.ly\u002F3ufnEMD |\r\n| 259 | **PointManifoldCut** | https:\u002F\u002Fbit.ly\u002F3CKvAIL |\r\n| 260 | **Large Scale Facial Expression Dataset** | https:\u002F\u002Fbit.ly\u002F2ZqtT4V |\r\n| 261 | **Graph-FPN** | https:\u002F\u002Fbit.ly\u002F2XH8T9f |\r\n| 262 | **3D Shape Reconstruction** | https:\u002F\u002Fbit.ly\u002F2XTe9aq |\r\n| 263 | **Open Graph Benchmark Dataset** | https:\u002F\u002Fbit.ly\u002F3ET2Lfl |\r\n| 264 | **ShiftAddNet** | https:\u002F\u002Fbit.ly\u002F3i6eb5C |\r\n| 265 | **WatchOut! Motion Blurring the vision of your DNN** | https:\u002F\u002Fbit.ly\u002F3CKTzrw |\r\n| 266 | **Rethinking Learnable Tree Filter** | https:\u002F\u002Fbit.ly\u002F3zHfPAC |\r\n| 267 | **Neuron Merging** | https:\u002F\u002Fbit.ly\u002F39DwLNS |\r\n| 268 | **Distance IOU Loss** | https:\u002F\u002Fbit.ly\u002F3i7Zj6z |\r\n| 269 | **Deep Imitation learning** | https:\u002F\u002Fbit.ly\u002F3AzGVd6 |\r\n| 270 | **Pixel Level Cycle Association** | https:\u002F\u002Fbit.ly\u002F3iTZMK6 |\r\n| 271 | **Deep Model Fusion** | https:\u002F\u002Fbit.ly\u002F2YK45kl |\r\n| 272 | **Object Representation Network** | https:\u002F\u002Fbit.ly\u002F3BA0mnE |\r\n| 273 | **HOI Analysis** | https:\u002F\u002Fbit.ly\u002F3FH2Key |\r\n| 274 | **Deep Equilibrium Models** | https:\u002F\u002Fbit.ly\u002F3FDH2IB |\r\n| 275 | **Sampling from k-DPP** | https:\u002F\u002Fbit.ly\u002F3BAyRuc |\r\n| 276 | **Rotated Binary Neural Network** | https:\u002F\u002Fbit.ly\u002F3mIuYx3 |\r\n| 277 | **PP-LCNet** - **LightCNN** | https:\u002F\u002Fbit.ly\u002F3v1Zh5H |\r\n| 278 | **MC-Net+** | https:\u002F\u002Fbit.ly\u002F3v5tYqk |\r\n| 279 | **Fake it till you make it** | https:\u002F\u002Fbit.ly\u002F3AyGTSQ |\r\n| 280 | **Enformer** | https:\u002F\u002Fbit.ly\u002F3AAdCr9 |\r\n| 281 | **VideoClip** | https:\u002F\u002Fbit.ly\u002F3mOueGu |\r\n| 282 | **Moving Fashion** | https:\u002F\u002Fbit.ly\u002F3jdvAtN |\r\n| 283 | **Convolution to Transformer** | https:\u002F\u002Fbit.ly\u002F3v5yy8f |\r\n| 284 | **HeadGAN** | https:\u002F\u002Fbit.ly\u002F3BLzRvm |\r\n| 285 | **Focal Transformer** | https:\u002F\u002Fbit.ly\u002F3lvCYSI |\r\n| 286 | **StyleGAN3** | https:\u002F\u002Fbit.ly\u002F3kvFPKw |\r\n| 287 | **3Detr: 3D Object Detection** | https:\u002F\u002Fbit.ly\u002F3Hfk6A8 |\r\n| 288 | **Do Self-Supervised and Supervised Methods Learn Similar Visual Representations?** | https:\u002F\u002Fbit.ly\u002F3kyWM6H |\r\n| 289 | **Back to the Features** | https:\u002F\u002Fbit.ly\u002F3kvsxh3 |\r\n| 290 | **Anticipative Video 
Transformer** | https:\u002F\u002Fbit.ly\u002F30mADl2 |\r\n| 291 | **Attention Meets Geometry** | https:\u002F\u002Fbit.ly\u002F3kweSpZ |\r\n| 292 | **DeepMoCaP:** Deep Optical Motion Capture | https:\u002F\u002Fbit.ly\u002F30mjTdT |\r\n| 293 | **TrOCR: Transformer-based Optical Character Recognition** | https:\u002F\u002Fbit.ly\u002F3DqenW5 |\r\n| 294 | **Moving Fashion** | https:\u002F\u002Fbit.ly\u002F2YGtjA1 |\r\n| 295 | **StyleNeRF** | https:\u002F\u002Fbit.ly\u002F31W4Mbz |\r\n| 296 | **ECA-Net: Efficient Channel Attention** | https:\u002F\u002Fbit.ly\u002F3n92i1s |\r\n| 297 | **Inferring High Resolution Traffic Accident risk maps** | https:\u002F\u002Fbit.ly\u002F3HgovD6 |\r\n| 298 | **Bias Loss: For Mobile Neural Network** | https:\u002F\u002Fbit.ly\u002F3qvBPNO |\r\n| 299 | **ByteTrack: Multi-Object Tracking** | https:\u002F\u002Fbit.ly\u002F3c3l7wQ |\r\n| 300 | **Non-Deep Network** | https:\u002F\u002Fbit.ly\u002F3qwZwoV |\r\n| 301 | **Temporal Attentive Covariance** | https:\u002F\u002Fbit.ly\u002F3ontCbP |\r\n| 302 | **Plan-then-generate: Controlled Data to Text Generation** | https:\u002F\u002Fbit.ly\u002F3DcbsA6 |\r\n| 303 | **Dynamic Visual Reasoning** | https:\u002F\u002Fbit.ly\u002F31Q4BhP |\r\n| 304 | **MedMNIST: Medical MNIST Dataset** | https:\u002F\u002Fbit.ly\u002F3qxuqxq |\r\n| 305 | **Colossal-AI: A PyTorch-Based Deep Learning System For Large-Scale Parallel Training** | https:\u002F\u002Fbit.ly\u002F3wG6Xv8 |\r\n| 306 | **Recursively Embedded Atom Neural Network(REANN)** | https:\u002F\u002Fbit.ly\u002F3F1JKqe |\r\n| 307 | **PolyTrack: for fast multi-object tracking and segmentation** | https:\u002F\u002Fbit.ly\u002F3DeBmmS |\r\n| 308 | **Can contrastive learning avoid shortcut solutions?** | https:\u002F\u002Fbit.ly\u002F3wHJIk9 |\r\n| 309 | **ProjectedGAN: To Improve Image Quality** | https:\u002F\u002Fbit.ly\u002F30hw8Zm |\r\n| 310 | **Arch-Net: A Family Of Neural Networks Built With Operators To Bridge The Gap** | https:\u002F\u002Fbit.ly\u002F3oFOCef |\r\n| 311 | **PP-ShiTu: A Practical Lightweight Image Recognition System** | https:\u002F\u002Fbit.ly\u002F3naurFw |\r\n| 312 | **EditGAN** | https:\u002F\u002Fbit.ly\u002F30gYd2Z |\r\n| 313 | **Panoptic 3D Scene Segmentation** | https:\u002F\u002Fbit.ly\u002F3caSvla |\r\n| 314 | **PARP: Improve the Efficiency of NN** | https:\u002F\u002Fbit.ly\u002F3DakTjt |\r\n| 315 | **WORD: Organ Segmentation Dataset** | https:\u002F\u002Fbit.ly\u002F3qv5OW2 |\r\n| 316 | **DenseULearn** | https:\u002F\u002Fbit.ly\u002F3ohRiyi |\r\n| 317 | **Does Thermal data make the detection systems more reliable?** | https:\u002F\u002Fbit.ly\u002F3sQgTSO |\r\n| 318 | **MADDNESS: Approximate Matrix Multiplication (AMM)** | https:\u002F\u002Fbit.ly\u002F3zgVIL4 |\r\n| 319 | **Deceive D: Adaptive Pseudo Augmentation** | https:\u002F\u002Fbit.ly\u002F3sIG6yA |\r\n| 320 | **OadTR** | https:\u002F\u002Fbit.ly\u002F3JsUHUF |\r\n| 321 | **OnePassImageNet** | https:\u002F\u002Fbit.ly\u002F3sKL6Ti |\r\n| 322 | **Image-specific Convolutional Kernel Modulation for Single Image Super-resolution** | https:\u002F\u002Fbit.ly\u002F3FUpA20 |\r\n| 323 | **TransMix** | https:\u002F\u002Fbit.ly\u002F3EH93gH |\r\n| 324 | **PytorchVideo** | https:\u002F\u002Fbit.ly\u002F3JvgDP7 |\r\n| 325 | **MetNet-2** | https:\u002F\u002Fbit.ly\u002F3sMZb2M |\r\n| 326 | **Unsupervised deep learning identifies semantic disentanglement** | https:\u002F\u002Fbit.ly\u002F3JyAwVi |\r\n| 327 | **Story Visualization** | https:\u002F\u002Fbit.ly\u002F3qB554i |\r\n| 328 | 
**MetaFormer** | https:\u002F\u002Fbit.ly\u002F3sLBebP |\r\n| 329 | **GauGAN2** | https:\u002F\u002Fbit.ly\u002F3pGrIVH |\r\n| 330 | **SciGAP** | https:\u002F\u002Fbit.ly\u002F3EB7e4U |\r\n| 331 | **Generative Flow Networks (GFlowNets)** | https:\u002F\u002Fbit.ly\u002F3Jv9YEz |\r\n| 332 | **Ensemble Inversion** | https:\u002F\u002Fbit.ly\u002F3ECwbg9 |\r\n| 333 | **SAVi** | https:\u002F\u002Fbit.ly\u002F3eF6txe |\r\n| 334 | **Digital Optical Neural Network** | https:\u002F\u002Fbit.ly\u002F3EI07rh |\r\n| 335 | **Image-Generation Research With Manifold Matching Via Metric Learning** | https:\u002F\u002Fbit.ly\u002F3FUomnq |\r\n| 336 | **GHN-2(Graph HyperNetworks)** | https:\u002F\u002Fbit.ly\u002F3qzc5yB |\r\n| 337 | **NeatNet** | https:\u002F\u002Fbit.ly\u002F3sLY17r |\r\n| 338 | **NeuralProphet** | https:\u002F\u002Fbit.ly\u002F3JrUK38 |\r\n| 339 | **Background Activation Suppression for Weakly Supervised Object Detection** | https:\u002F\u002Fbit.ly\u002F3Jvyzt2 |\r\n| 340 | **Learning to Detect Every Thing in an Open World** | https:\u002F\u002Fbit.ly\u002F3mKxOTc |\r\n| 341 | **PoolFormer** | https:\u002F\u002Fbit.ly\u002F3qFHNtS |\r\n| 342 | **GLIP** | https:\u002F\u002Fbit.ly\u002F3mK3bgx |\r\n| 343 | **PHALP** | https:\u002F\u002Fbit.ly\u002F3eJJvEV |\r\n| 344 | **PixMix** | https:\u002F\u002Fbit.ly\u002F3Hqh77m |\r\n| 345 | **CodeNet** | https:\u002F\u002Fbit.ly\u002F32RPx3X |\r\n| 346 | **GANgealing** | https:\u002F\u002Fbit.ly\u002F3EIkO6k |\r\n| 347 | **Semantic Diffusion Guidance** | https:\u002F\u002Fbit.ly\u002F3JsNzI3 |\r\n| 348 | **TokenLearner** | https:\u002F\u002Fbit.ly\u002F3mLG4lM |\r\n| 349 | **Temporal Fusion Transformer (TFT)** | https:\u002F\u002Fbit.ly\u002F3JuHcno |\r\n| 350 | **HiClass: Evaluation Metrics for Local Hierarchical Classification** | https:\u002F\u002Fbit.ly\u002F3JHmn8H |\r\n| 351 | **Stable Long Term Recurrent Video Super Resolution** | https:\u002F\u002Fbit.ly\u002F3qFlPHl |\r\n| 352 | **AdaViT** | https:\u002F\u002Fbit.ly\u002F3eDASMj |\r\n| 353 | **Few-Shot Learner (FSL)** | https:\u002F\u002Fbit.ly\u002F3ELOOym |\r\n| 354 | **Exemplar Transformers** | https:\u002F\u002Fbit.ly\u002F3qzJE3C |\r\n| 355 | **StyleSwin** | https:\u002F\u002Fbit.ly\u002F3HqkCe4 |\r\n| 356 | **RepMLNet** | https:\u002F\u002Fbit.ly\u002F32DxbUu |\r\n| 357 | **2 Stage Unet** | https:\u002F\u002Fbit.ly\u002F3JGjIMq |\r\n| 358 | **Untrained Deep NN** | https:\u002F\u002Fbit.ly\u002F3JplL7r |\r\n| 359 | **SeMask** | https:\u002F\u002Fbit.ly\u002F3zfouM8 |\r\n| 360 | **JoJoGAN** | https:\u002F\u002Fbit.ly\u002F31gl9Qi |\r\n| 361 | **ELSA** | https:\u002F\u002Fbit.ly\u002F3mLWScb |\r\n| 362 | **PRIME** | https:\u002F\u002Fbit.ly\u002F3FI14RZ |\r\n| 363 | **GLIDE** | https:\u002F\u002Fbit.ly\u002F31ixB20 |\r\n| 364 | **StyleGAN-V** | https:\u002F\u002Fbit.ly\u002F3Jvx91G |\r\n| 365 | **SLIP: Self-supervision meets Language-Image Pre-training** | https:\u002F\u002Fbit.ly\u002F3qAjL3r |\r\n| 366 | **SmoothNet: A Plug-and-Play Network for Refining Human Poses in Videos** | https:\u002F\u002Fbit.ly\u002F3tYNxlp |\r\n| 367 | **Multi-View Partial (MVP) Point Cloud Challenge 2021 on Completion and Registration: Methods and Results** | https:\u002F\u002Fbit.ly\u002F3tZFyEQ |\r\n| 368 | **PCACE: A Statistical Approach to Ranking Neurons for CNN Interpretability** | https:\u002F\u002Fbit.ly\u002F3LCKENk |\r\n| 369 | **Vision Transformer with Deformable Attention** | https:\u002F\u002Fbit.ly\u002F3tY3s3k |\r\n| 370 | **A Transformer-Based Siamese Network for Change Detection** | 
https:\u002F\u002Fbit.ly\u002F3DxPYP5 |\r\n| 371 | **Lawin Transformer: Improving Semantic Segmentation Transformer with Multi-Scale Representations via Large Window Attention** | https:\u002F\u002Fbit.ly\u002F3qRsTle |\r\n| 372 | **SASA: Semantics-Augmented Set Abstraction for Point-based 3D Object Detection** | https:\u002F\u002Fbit.ly\u002F3tXduls |\r\n| 373 | **HyperionSolarNet: Solar Panel Detection from Aerial Images** | https:\u002F\u002Fbit.ly\u002F35v2rX6 |\r\n| 374 | **Realistic Full-Body Anonymization with Surface-Guided GANs** | https:\u002F\u002Fbit.ly\u002F3DwBNd4 |\r\n| 375 | **Generalized Category Discovery** | https:\u002F\u002Fbit.ly\u002F3IZ1HaC |\r\n| 376 | **KerGNNs: Interpretable Graph Neural Networks with Graph Kernels** | https:\u002F\u002Fbit.ly\u002F3DtWtlU |\r\n| 377 | **Optimization Planning for 3D ConvNets** | https:\u002F\u002Fbit.ly\u002F3K38e5p |\r\n| 378 | **gDNA: Towards Generative Detailed Neural Avatars** | https:\u002F\u002Fbit.ly\u002F3DEtFHC |\r\n| 379 | **SeamlessGAN: Self-Supervised Synthesis of Tileable Texture Maps** | https:\u002F\u002Fbit.ly\u002F3NIieTA |\r\n| 380 | **HYDLA: Domain Adaptation in LiDAR Semantic Segmentation via Alternating Skip Connections and Hybrid Learning** | https:\u002F\u002Fbit.ly\u002F379dy8v |\r\n| 381 | **HardBoost: Boosting Zero-Shot Learning with Hard Classes** | https:\u002F\u002Fbit.ly\u002F379diX5 |\r\n| 382 | **DDU-Net: Dual-Decoder-U-Net for Road Extraction Using High-Resolution Remote Sensing Images** | https:\u002F\u002Fbit.ly\u002F3Lu0UzU |\r\n| 383 | **Q-ViT: Fully Differentiable Quantization for Vision Transformer** | https:\u002F\u002Fbit.ly\u002F3qXv9Ym |\r\n| 384 | **SPAMs: Structured Implicit Parametric Models** | https:\u002F\u002Fbit.ly\u002F3iU95cL |\r\n| 385 | **GeoFill: Reference-Based Image Inpainting of Scenes with Complex Geometry** | https:\u002F\u002Fbit.ly\u002F3qUwCP6 |\r\n| 386 | **Improving language models by retrieving from trillions of tokens** | https:\u002F\u002Fbit.ly\u002F37aKsG5 |\r\n| 387 | **StylEx finds and visualizes disentangled attributes that affect a classifier automatically.** | https:\u002F\u002Fbit.ly\u002F3qYwYEf |\r\n| 388 | **‘ReLICv2’: Pushing The Limits of Self-Supervised ResNet** | https:\u002F\u002Fbit.ly\u002F3JZXy7C |\r\n| 389 | **‘Detic’: A Method to Detect Twenty-Thousand Classes using Image-Level Supervision** | https:\u002F\u002Fbit.ly\u002F3iRtsqZ |\r\n| 390 | **Momentum Capsule Networks** | https:\u002F\u002Fbit.ly\u002F3NFDv0j |\r\n| 391 | **RelTR: Relation Transformer for Scene Graph Generation** | https:\u002F\u002Fbit.ly\u002F3iVBWgB |\r\n| 392 | **Transformer based SAR Images Despeckling** | https:\u002F\u002Fbit.ly\u002F3qWeILH |\r\n| 393 | **ResiDualGAN: Resize-Residual DualGAN for Cross-Domain Remote Sensing Images Semantic Segmentation** | https:\u002F\u002Fbit.ly\u002F3wWGY4T |\r\n| 394 | **VRT: A Video Restoration Transformer** | https:\u002F\u002Fbit.ly\u002F3K44YXw |\r\n| 395 | **You Only Cut Once: Boosting Data Augmentation with a Single Cut** | https:\u002F\u002Fbit.ly\u002F36L8pDW |\r\n| 396 | **StyleGAN-XL: Scaling StyleGAN to Large Diverse Datasets** | https:\u002F\u002Fbit.ly\u002F3iRlEp8 |\r\n| 397 | **The KFIoU Loss for Rotated Object Detection** | https:\u002F\u002Fbit.ly\u002F3NHUL5e |\r\n| 398 | **The Met Dataset: Instance Level Recognition** | https:\u002F\u002Fbit.ly\u002F3K7lPJ2 |\r\n| 399 | **Alphacode: a System that can compete at average human level** | https:\u002F\u002Fbit.ly\u002F3qXIIH5 |\r\n| 400 | **Third Time's the 
Charm? Image and Video Editing with StyleGAN3** | https:\u002F\u002Fbit.ly\u002F35vAoqx |\r\n| 401 | **NeuralFusion: Online Depth Fusion in Latent Space** | https:\u002F\u002Fbit.ly\u002F3uFaysA |\r\n| 402 | **VOS: Learning what you don't know by VIRTUAL OUTLIER SYNTHESIS** | https:\u002F\u002Fbit.ly\u002F3uPG9rG |\r\n| 403 | **Self-Conditioned Generative Adversarial Networks for Image Editing** | https:\u002F\u002Fbit.ly\u002F3tX8m0u |\r\n| 404 | **TransformNet: Self-supervised representation learning through predicting geometric transformations** | https:\u002F\u002Fbit.ly\u002F3uOCfPM |\r\n| 405 | **YOLOv7 - Framework Beyond Detection** | https:\u002F\u002Fbit.ly\u002F3wXU81y |\r\n| 406 | **F8Net: Fixed-Point 8-bit Only Multiplication for Network Quantization** | https:\u002F\u002Fbit.ly\u002F3DzhFXU |\r\n| 407 | **Block-NeRF: Scalable Large Scene Neural View Synthesis** | https:\u002F\u002Fbit.ly\u002F3LyELk5 |\r\n| 408 | **Patch-NetVLAD+: Learned patch descriptor and weighted matching strategy for place recognition** | https:\u002F\u002Fbit.ly\u002F375C76y |\r\n| 409 | **COLA: COarse LAbel pre-training for 3D semantic segmentation of sparse LiDAR datasets** | https:\u002F\u002Fbit.ly\u002F3NCK6bZ |\r\n| 410 | **ScoreNet: Learning Non-Uniform Attention and Augmentation for Transformer-Based Histopathological Image Classification** | https:\u002F\u002Fbit.ly\u002F3uJuMBz |\r\n| 411 | **Geometric Deep Learning: Grids, Groups, Graphs, Geodesics, and Gauges** | https:\u002F\u002Fbit.ly\u002F388imeT |\r\n| 412 | **How Do Vision Transformers Work?** | https:\u002F\u002Fbit.ly\u002F3NE1mO2 |\r\n| 413 | **Mirror-Yolo: An attention-based instance segmentation and detection model for mirrors** | https:\u002F\u002Fbit.ly\u002F3LBS96P |\r\n| 414 | **PENCIL: Deep Learning with Noisy Labels** | https:\u002F\u002Fbit.ly\u002F3iXvHc4 |\r\n| 415 | **VLP: A Survey on Vision-Language Pre-training** | https:\u002F\u002Fbit.ly\u002F3J0v2RZ |\r\n| 416 | **Visual Attention Network** | https:\u002F\u002Fbit.ly\u002F3Dt7rbv |\r\n| 417 | **GroupViT: Semantic Segmentation Emerges from Text Supervision** | https:\u002F\u002Fbit.ly\u002F3NQv7eG |\r\n| 418 | **Paying U-Attention to Textures: Multi-Stage Hourglass Vision Transformer for Universal Texture Synthesis** | https:\u002F\u002Fbit.ly\u002F373xs4T |\r\n| 419 | **End to End Cascaded Image De-raining and Object Detection NN** | https:\u002F\u002Fbit.ly\u002F375PLGw |\r\n| 420 | **Level-K to Nash Equilibrium** | https:\u002F\u002Fbit.ly\u002F3NFRX8t |\r\n| 421 | **Machine Learning for Mechanical Ventilation Control** | https:\u002F\u002Fbit.ly\u002F3JZCMEV |\r\n| 422 | **The effect of fatigue on the performance of online writer recognition** | https:\u002F\u002Fbit.ly\u002F3wXSSLS |\r\n| 423 | **State-of-the-Art in the Architecture, Methods and Applications of StyleGAN** | https:\u002F\u002Fbit.ly\u002F3iRjl5s |\r\n| 424 | **Long-Tailed Classification with Gradual Balanced Loss and Adaptive Feature Generation** | https:\u002F\u002Fbit.ly\u002F3v5XZXR |\r\n| 425 | **Self-supervised Transformer for Deepfake Detection** | https:\u002F\u002Fbit.ly\u002F3tXtUdk |\r\n| 426 | **CenterSnap: Single-Shot Multi-Object 3D Shape Reconstruction and Categorical 6D Pose and Size** | https:\u002F\u002Fbit.ly\u002F3LxkrQa |\r\n| 427 | **TCTrack: Temporal Contexts for Aerial Tracking** | https:\u002F\u002Fbit.ly\u002F3uM5O4B |\r\n| 428 | **LatentFormer: Multi-Agent Transformer-Based Interaction Modeling and Trajectory Prediction** | https:\u002F\u002Fbit.ly\u002F3uOfKe0 |\r\n| 429 
| **HyperTransformer: A Textural and Spectral Feature Fusion Transformer for Pansharpening** | https:\u002F\u002Fbit.ly\u002F35tRV2j |\r\n| 430 | **ZippyPoint: Fast Interest Point Detection, Description, and Matching through Mixed Precision Discretization** | https:\u002F\u002Fbit.ly\u002F3LwoMmy |\r\n| 431 | **MLSeg: Image and Video Segmentation** | https:\u002F\u002Fbit.ly\u002F38p9iCN |\r\n| 432 | **Image Steganography based on Style Transfer** | https:\u002F\u002Fbit.ly\u002F3DJHLaN |\r\n| 433 | **GrainSpace: A Large-scale Dataset for Fine-grained and Domain-adaptive Recognition of Cereal Grains** | https:\u002F\u002Fbit.ly\u002F3JYPrIg |\r\n| 434 | **AGCN: Augmented Graph Convolutional Network** | https:\u002F\u002Fbit.ly\u002F3DwZrWN |\r\n| 435 | **StyleBabel: Artistic Style Tagging and Captioning** | https:\u002F\u002Fbit.ly\u002F3j1Klit |\r\n| 436 | **ROOD-MRI: Benchmarking the robustness of deep learning segmentation models to out-of-distribution and corrupted data in MRI** | https:\u002F\u002Fbit.ly\u002F38maN4z |\r\n| 437 | **InsetGAN for Full-Body Image Generation** | https:\u002F\u002Fbit.ly\u002F3Dsu9At |\r\n| 438 | **Implicit Feature Decoupling with Depthwise Quantization** | https:\u002F\u002Fbit.ly\u002F3K1mxaA |\r\n| 439 | **Bamboo: Building Mega-Scale Vision Dataset** | https:\u002F\u002Fbit.ly\u002F3wVPalD |\r\n| 440 | **TensoRF: Tensorial Radiance Fields** | https:\u002F\u002Fbit.ly\u002F3iWAFWI |\r\n| 441 | **FERV39k: A Large-Scale Multi-Scene Dataset for Facial Expression Recognition** | https:\u002F\u002Fbit.ly\u002F3NCHTxd |\r\n| 442 | **One-Shot Adaptation of GAN in Just One CLIP** | https:\u002F\u002Fbit.ly\u002F36NOPab |\r\n| 443 | **SHREC 2021: Classification in cryo-electron tomograms** | https:\u002F\u002Fbit.ly\u002F3iSXpqv |\r\n| 444 | **MaskGIT: Masked Generative Image Transformer** | https:\u002F\u002Fbit.ly\u002F3qSQz8I |\r\n| 445 | **Detection, Recognition, and Tracking: A Survey** | https:\u002F\u002Fbit.ly\u002F378G8qw |\r\n| 446 | **Mixed Differential Privacy** | https:\u002F\u002Fbit.ly\u002F3IZ0MGU |\r\n| 447 | **Mixed DualStyleGAN** | https:\u002F\u002Fbit.ly\u002F3wTyAmD |\r\n| 448 | **BigDetection** | https:\u002F\u002Fbit.ly\u002F3DuZSRk |\r\n| 449 | **Feature visualization for convolutional neural network** | https:\u002F\u002Fbit.ly\u002F3Dwf6FJ |\r\n| 450 | **AutoAvatar** | https:\u002F\u002Fbit.ly\u002F38m9ClF |\r\n| 451 | **A Long Short-term Memory Based Recurrent Neural Network for Interventional MRI Reconstruction** | https:\u002F\u002Fbit.ly\u002F3Dz1idF |\r\n| 452 | **StyleT2I** | https:\u002F\u002Fbit.ly\u002F35u5Wx0 |\r\n| 453 | **L^3U-net** | https:\u002F\u002Fbit.ly\u002F3iTOq8r |\r\n| 454 | **Balanced MSE** | https:\u002F\u002Fbit.ly\u002F3rxt7yo |\r\n| 455 | **BEVFormer: Learning Bird's-Eye-View Representation from Multi-Camera Images via Spatiotemporal Transformers** | https:\u002F\u002Fbit.ly\u002F36m3HfC |\r\n| 456 | **TransEditor: Transformer-Based Dual-Space GAN for Highly Controllable Facial Editing** | https:\u002F\u002Fbit.ly\u002F3JQKZKS |\r\n| 457 | **On the Importance of Asymmetry for Siamese Representation Learning** | https:\u002F\u002Fbit.ly\u002F3JNgcyt |\r\n| 458 | **On One-Class Graph Neural Networks for Anomaly Detection in Attributed Networks** | https:\u002F\u002Fbit.ly\u002F3uQTC3P |\r\n| 459 | **Pyramid Frequency Network with Spatial Attention Residual Refinement Module for Monocular Depth** | https:\u002F\u002Fbit.ly\u002F3KWT6a4 |\r\n| 460 | **Unleashing Vanilla Vision Transformer with Masked Image 
Modeling for Object Detection** | https:\u002F\u002Fbit.ly\u002F3L8a59H |\r\n| 461 | **DaViT: Dual Attention Vision Transformers** | https:\u002F\u002Fbit.ly\u002F3Engc7e |\r\n| 462 | **SPAct: Self-supervised Privacy Preservation for Action Recognition** | https:\u002F\u002Fbit.ly\u002F3KTNvRW |\r\n| 463 | **Class-Incremental Learning with Strong Pre-trained Models** | https:\u002F\u002Fbit.ly\u002F3MdlcOq |\r\n| 464 | **RBGNet: Ray-based Grouping for 3D Object Detection by Center for Data Science** | https:\u002F\u002Fbit.ly\u002F3EqkydH |\r\n| 465 | **Event Transformer** | https:\u002F\u002Fbit.ly\u002F3KUsMxc |\r\n| 466 | **ReCLIP: A Strong Zero-Shot Baseline for Referring Expression Comprehension** | https:\u002F\u002Fbit.ly\u002F3M6RgDE |\r\n| 467 | **A9-Dataset: Multi-Sensor Infrastructure-Based Dataset for Mobility Research** | https:\u002F\u002Fbit.ly\u002F3xAyqRj |\r\n| 468 | **Simple Baselines for Image Restoration** | https:\u002F\u002Fbit.ly\u002F3vt4tjB |\r\n| 469 | **Masked Siamese Networks for Label-Efficient Learning** | https:\u002F\u002Fbit.ly\u002F3viEs6s |\r\n| 470 | **Neighborhood Attention Transformer** | https:\u002F\u002Fbit.ly\u002F3jNExK3 |\r\n| 471 | **TopFormer: Token Pyramid Transformer for Mobile Semantic Segmentation** | https:\u002F\u002Fbit.ly\u002F3M3EA0K |\r\n| 472 | **MVSTER: Epipolar Transformer for Efficient Multi-View Stereo** | https:\u002F\u002Fbit.ly\u002F3MaDTCR |\r\n| 473 | **Temporally Efficient Vision Transformer for Video Instance Segmentation** | https:\u002F\u002Fbit.ly\u002F3w6xkf3 |\r\n| 474 | **EditGAN: High-Precision Semantic Image Editing** | https:\u002F\u002Fbit.ly\u002F3yx2JJ2 |\r\n| 475 | **CenterNet++ for Object Detection** | https:\u002F\u002Fbit.ly\u002F3woxrBG |\r\n| 476 | **A case for using rotation invariant features in state of the art feature matchers** | https:\u002F\u002Fbit.ly\u002F3kZ1x9A |\r\n| 477 | **WebFace260M: A Benchmark for Million-Scale Deep Face Recognition** | https:\u002F\u002Fbit.ly\u002F3w2T3Vd |\r\n| 478 | **JIFF: Jointly-aligned Implicit Face Function for High-Quality Single View Clothed Human Reconstruction** | https:\u002F\u002Fbit.ly\u002F3N9Me9U |\r\n| 479 | **Image Data Augmentation for Deep Learning: A Survey** | https:\u002F\u002Fbit.ly\u002F3PfC1uA |\r\n| 480 | **StyleGAN-Human: A Data-Centric Odyssey of Human Generation** | https:\u002F\u002Fbit.ly\u002F3PqV710 |\r\n| 481 | **Few-shot Head Swapping In The Wild Secrets Revealed By Department Of Computer Vision Technology (vis)** | https:\u002F\u002Fbit.ly\u002F3w7xm6c |\r\n| 482 | **CLIP-GEN: Language-Free Training of a Text-to-Image Generator with CLIP** | https:\u002F\u002Fbit.ly\u002F3N3cEKu |\r\n| 483 | **HuMMan: Multi-Modal 4D Human Dataset for Versatile Sensing and Modeling** | https:\u002F\u002Fbit.ly\u002F3Nqnevx |\r\n| 484 | **Generative Adversarial Networks for Image Super-Resolution: A Survey** | https:\u002F\u002Fbit.ly\u002F39jyL0U |\r\n| 485 | **CLIP-Art: Contrastive Pre-training for Fine-Grained Art Classification** | https:\u002F\u002Fbit.ly\u002F3N7Qd6V |\r\n| 486 | **C3-STISR: Scene Text Image Super-resolution with Triple Clues** | https:\u002F\u002Fbit.ly\u002F3l1352C |\r\n| 487 | **Barbershop: GAN-based Image Compositing using Segmentation Masks** | https:\u002F\u002Fbit.ly\u002F39hus6d |\r\n| 488 | **DANBO: Disentangled Articulated Neural Body Representations** | https:\u002F\u002Fbit.ly\u002F3LkqWp3 |\r\n| 489 | **BlobGAN: Spatially Disentangled Scene Representations** | https:\u002F\u002Fbit.ly\u002F3sufEYz |\r\n| 490 | 
**Text to artistic image generation** | https:\u002F\u002Fbit.ly\u002F3w6wzmd |\r\n| 491 | **Sequencer: Deep LSTM for Image Classification** | https:\u002F\u002Fbit.ly\u002F3sulPvT |\r\n| 492 | **IVY: An Open-Source Tool To Make Deep Learning Code Compatible Across Frameworks** | https:\u002F\u002Fbit.ly\u002F3M6MbvJ |\r\n| 493 | **Introspective Deep Metric Learning** | https:\u002F\u002Fbit.ly\u002F3w2pZ02 |\r\n| 494 | **KeypointNeRF: Generalizing Image-based Volumetric Avatars using Relative Spatial Encoding of Keypoints** | https:\u002F\u002Fbit.ly\u002F3wnRhwF |\r\n| 495 | **GraphWorld: A Methodology For Analyzing The Performance Of GNN Architectures On Millions Of Synthetic Benchmark Datasets** | https:\u002F\u002Fbit.ly\u002F3PUQexk |\r\n| 496 | **Group R-CNN for Weakly Semi-supervised Object Detection with Points** | https:\u002F\u002Fbit.ly\u002F3zfvU3W |\r\n| 497 | **Few-Shot Head Swapping in the Wild** | https:\u002F\u002Fbit.ly\u002F3xapGkn |\r\n| 498 | **StyLandGAN: A StyleGAN based Landscape Image Synthesis using Depth-map** | https:\u002F\u002Fbit.ly\u002F3GKX4Bi |\r\n| 499 | **Spiking Approximations of the MaxPooling Operation in Deep SNNs** | https:\u002F\u002Fbit.ly\u002F3GLp7AG |\r\n| 500 | **Deep Spectral Methods: A Surprisingly Strong Baseline for Unsupervised Semantic Segmentation and Localization** | https:\u002F\u002Fbit.ly\u002F3NTGsJQ |\r\n\r\n***Thanks for Reading🎉🎉🎉🎉***\r\n\r\n----\r\n\r\n",null,"# 365 Days Computer Vision Learning Guide: Quick Start\n\nThis project is not an installable library or framework; it is a **computer vision learning index** curated by Ashish Patel. It collects 365 LinkedIn post links on classic CV algorithms, model architectures, and paper walkthroughs, spanning object detection, image segmentation, Transformers, medical image analysis, and other frontier areas.\n\nThis guide shows how to study systematically from the resource list.\n\n## Prerequisites\n\nSince the project is essentially a resource list, no particular system environment or dependency installation is required. The following basics are enough to start learning:\n\n*   **Operating system**: Windows \u002F macOS \u002F Linux all work.\n*   **Browser**: a modern browser (Chrome, Edge, Firefox, etc.) for opening the LinkedIn articles and linked code repositories.\n*   **Account**: registering and signing in to **LinkedIn** is recommended, so you can read full articles and interact with the author.\n    *   Users in mainland China may need a network environment that complies with local regulations to reach LinkedIn.\n*   **Development environment (optional)**: to reproduce the algorithms covered in the posts, prepare a Python environment and a deep learning framework:\n    *   Python 3.8+\n    *   PyTorch or TensorFlow\n    *   CUDA (for GPU acceleration)\n\n## Installation\n\nNo `pip install` or build step is needed. You can get the study list as follows:\n\n1.  **Browse online (recommended)**\n    Visit the GitHub repository page for the full daily topic table:\n    ```bash\n    # Open this URL in a browser\n    https:\u002F\u002Fgithub.com\u002Fashishpatel26\u002F365-Days-Computer-Vision-Learning-Linkedin-Post\n    ```\n\n2.  **Clone the repository (offline reading)**\n    To read the Markdown table offline or keep local notes, clone the repository:\n    ```bash\n    git clone https:\u002F\u002Fgithub.com\u002Fashishpatel26\u002F365-Days-Computer-Vision-Learning-Linkedin-Post.git\n    cd 365-Days-Computer-Vision-Learning-Linkedin-Post\n    ```\n\n3.  **Follow the author**\n    For updates and detailed commentary, follow the author on LinkedIn:\n    *   Author profile: https:\u002F\u002Fwww.linkedin.com\u002Fin\u002Fashishpatel2604\u002F\n\n## Basic Usage\n\nThe core workflow is to **navigate by the index**: pick a topic that interests you, follow its link to the in-depth post, then go to the corresponding official code repository to practice.\n\n### Example workflow\n\nSuppose you want to study **YOLOv5** or **Vision Transformer**:\n\n1.  **Find the topic**\n    Locate the day and topic in the README table:\n    *   Day 5: **Vision Transformer**\n    *   Day 8: **Yolov5**\n\n2.  **Read the write-up**\n    Click the matching `Post Link` in the table (for example, the YOLOv5 link `https:\u002F\u002Fbit.ly\u002F39QHTXq`).\n    *   This opens the LinkedIn article, which typically includes diagrams of the core ideas, an analysis of strengths and weaknesses, and the key formulas.\n\n3.  **Practice with code**\n    The articles usually link to the original paper or the official GitHub repository. Taking YOLOv5 as an example, after reading the write-up, pull the code and run a training job:\n    ```bash\n    # Change into your deep learning workspace\n    cd ~\u002Fprojects\u002Fcv-study\n\n    # Clone the official YOLOv5 repository (example)\n    git clone https:\u002F\u002Fgithub.com\u002Fultralytics\u002Fyolov5.git\n    cd yolov5\n\n    # Install dependencies\n    pip install -r requirements.txt\n\n    # Run a small training example (dataset required)\n    python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt\n    ```
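\n\n    To sanity-check the result, you can also run inference from Python. The sketch below assumes the standard torch.hub entry point published by ultralytics\u002Fyolov5; the exact API may differ between releases:\n    ```python\n    import torch\n\n    # Load the small pretrained YOLOv5 checkpoint via torch.hub\n    model = torch.hub.load('ultralytics\u002Fyolov5', 'yolov5s', pretrained=True)\n\n    # Detect objects in a sample image (URL from the YOLOv5 docs) and print the summary\n    results = model('https:\u002F\u002Fultralytics.com\u002Fimages\u002Fzidane.jpg')\n    results.print()\n    ```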
\n\n### Suggested learning path\n\nThe table is already roughly grouped by theme; a staged plan works well:\n\n*   **Classic foundational networks (Days 10-40)**: start with FCN, Unet, ResNet, and SqueezeNet to understand the building blocks of convolutional networks.\n*   **Object detection, deepened (Days 1-9, 56-64)**: dig into the YOLO series, Faster R-CNN, RetinaNet, DETR, and related detectors.\n*   **Attention and Transformers (Days 5, 7, 119)**: study ViT, DeiT, Swin Transformer, and the CBAM\u002FBAM modules.\n*   **Medical imaging (Days 90-114)**: focus on 3D-Unet, V-Net, Attention-Unet, and other models applied in healthcare.\n*   **Super-resolution and generative models (Days 118-131)**: explore SRCNN, GANs, and their variants.\n\nBy studying one topic a day and reproducing the code alongside, you can build a systematic computer vision knowledge base within a year.","A computer vision engineer is selecting a state-of-the-art segmentation model for a medical imaging project, while also trying to build a personal technical brand by publishing high-quality professional content on LinkedIn.\n\n### Without 365-Days-Computer-Vision-Learning-Linkedin-Post\n- **Fragmented knowledge**: faced with dozens of recent models such as EfficientDet, DeiT, and SegNet, they spend hours searching blindly across arXiv and GitHub, never quite seeing how the techniques evolved.\n- **High content-creation overhead**: they want to share technical insights daily, but without a clear topic plan and distilled source material each post takes far too long to write, and the daily cadence collapses.\n- **Narrow technical horizons**: it is easy to stay inside familiar frameworks like YOLO or ResNet and miss newer architectures such as Dynamic RCNN or Graph Convolution Networks, leading to suboptimal model selection.\n- **No clear learning path**: lacking a progressive guide from basic FCN up to Vision Transformers, self-study stalls midway and never coheres into a complete body of knowledge.\n\n### With 365-Days-Computer-Vision-Learning-Linkedin-Post\n- **A systematic knowledge map**: follow the planned 365-day syllabus, unlocking one topic per day from Unet to AmoebaNet, and quickly assemble a well-structured map of computer vision.\n- **Efficient content output**: each day supplies one distilled topic (DropBlock or Grad-CAM, say) plus a curated link, compressing hours of research into minutes and making a daily publishing rhythm sustainable.\n- **Broader model selection**: exposure to diverse models such as RepVGG and ShuffleNetV2 quickly surfaces lighter-weight options suited to medical imaging deployment, improving project performance and efficiency.\n- **A clear growth path**: the classic-to-frontier ordering makes it straightforward to fill theoretical gaps in sequence, keeping skills in step with the field and avoiding drift in direction.\n\n365-Days-Computer-Vision-Learning-Linkedin-Post turns scattered frontier papers into an actionable daily plan: a systematic study guide for the engineer and a content engine for building personal technical influence.","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fashishpatel26_365-Days-Computer-Vision-Learning-Linkedin-Post_6092b1f4.png","ashishpatel26","Ashish Patel","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002Fashishpatel26_9e4e7549.jpg","AI Researcher & Principal Architect AI\u002FML & Data Science at Oracle\r\n| xIBMers | Rank 3 Kaggle Kernel Master","Oracle | xIBMers","Ahmedabad","shriganesh.patel@gmail.com","https:\u002F\u002Fmedium.com\u002Fml-research-lab","https:\u002F\u002Fgithub.com\u002Fashishpatel26",798,205,"2026-04-09T11:52:50",1,"","Not specified",{"notes":89,"python":87,"dependencies":90},"This repository is not a single runnable AI tool; it is a directory of LinkedIn post links covering 365 days of computer vision learning topics. Each entry in the table (EfficientDet, YOLO, Vision Transformer, and so on) points to an external technical article or tutorial, so the repository itself has no particular runtime, GPU, memory, or dependency requirements. To set up an environment for a specific algorithm, follow its link and locate the original code repository for configuration details.",[],[15,14],[93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109],"linkedin","computer-vision","deep-learning","iclr","iclr2020","iclr2019","iclr2021","eccv","eccv2020","eccv-2018","eccv2019","cvpr","cvpr2020","cvpr2019","cvpr2018","jmlr","iclr2018","ready_partial","2026-03-27T02:49:30.150509","2026-04-10T02:43:10.091087",[],[]]