[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-xxxnell--how-do-vits-work":3,"tool-xxxnell--how-do-vits-work":61},[4,18,26,36,44,53],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":17},4358,"openclaw","openclaw\u002Fopenclaw","OpenClaw 是一款专为个人打造的本地化 AI 助手，旨在让你在自己的设备上拥有完全可控的智能伙伴。它打破了传统 AI 助手局限于特定网页或应用的束缚，能够直接接入你日常使用的各类通讯渠道，包括微信、WhatsApp、Telegram、Discord、iMessage 等数十种平台。无论你在哪个聊天软件中发送消息，OpenClaw 都能即时响应，甚至支持在 macOS、iOS 和 Android 设备上进行语音交互，并提供实时的画布渲染功能供你操控。\n\n这款工具主要解决了用户对数据隐私、响应速度以及“始终在线”体验的需求。通过将 AI 部署在本地，用户无需依赖云端服务即可享受快速、私密的智能辅助，真正实现了“你的数据，你做主”。其独特的技术亮点在于强大的网关架构，将控制平面与核心助手分离，确保跨平台通信的流畅性与扩展性。\n\nOpenClaw 非常适合希望构建个性化工作流的技术爱好者、开发者，以及注重隐私保护且不愿被单一生态绑定的普通用户。只要具备基础的终端操作能力（支持 macOS、Linux 及 Windows WSL2），即可通过简单的命令行引导完成部署。如果你渴望拥有一个懂你",349277,3,"2026-04-06T06:32:30",[13,14,15,16],"Agent","开发框架","图像","数据工具","ready",{"id":19,"name":20,"github_repo":21,"description_zh":22,"stars":23,"difficulty_score":10,"last_commit_at":24,"category_tags":25,"status":17},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,"2026-04-05T11:01:52",[14,15,13],{"id":27,"name":28,"github_repo":29,"description_zh":30,"stars":31,"difficulty_score":32,"last_commit_at":33,"category_tags":34,"status":17},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",150037,2,"2026-04-10T23:33:47",[14,13,35],"语言模型",{"id":37,"name":38,"github_repo":39,"description_zh":40,"stars":41,"difficulty_score":32,"last_commit_at":42,"category_tags":43,"status":17},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",108322,"2026-04-10T11:39:34",[14,15,13],{"id":45,"name":46,"github_repo":47,"description_zh":48,"stars":49,"difficulty_score":32,"last_commit_at":50,"category_tags":51,"status":17},6121,"gemini-cli","google-gemini\u002Fgemini-cli","gemini-cli 是一款由谷歌推出的开源 AI 命令行工具，它将强大的 Gemini 大模型能力直接集成到用户的终端环境中。对于习惯在命令行工作的开发者而言，它提供了一条从输入提示词到获取模型响应的最短路径，无需切换窗口即可享受智能辅助。\n\n这款工具主要解决了开发过程中频繁上下文切换的痛点，让用户能在熟悉的终端界面内直接完成代码理解、生成、调试以及自动化运维任务。无论是查询大型代码库、根据草图生成应用，还是执行复杂的 Git 操作，gemini-cli 都能通过自然语言指令高效处理。\n\n它特别适合广大软件工程师、DevOps 
人员及技术研究人员使用。其核心亮点包括支持高达 100 万 token 的超长上下文窗口，具备出色的逻辑推理能力；内置 Google 搜索、文件操作及 Shell 命令执行等实用工具；更独特的是，它支持 MCP（模型上下文协议），允许用户灵活扩展自定义集成，连接如图像生成等外部能力。此外，个人谷歌账号即可享受免费的额度支持，且项目基于 Apache 2.0 协议完全开源，是提升终端工作效率的理想助手。",100752,"2026-04-10T01:20:03",[52,13,15,14],"插件",{"id":54,"name":55,"github_repo":56,"description_zh":57,"stars":58,"difficulty_score":32,"last_commit_at":59,"category_tags":60,"status":17},4721,"markitdown","microsoft\u002Fmarkitdown","MarkItDown 是一款由微软 AutoGen 团队打造的轻量级 Python 工具，专为将各类文件高效转换为 Markdown 格式而设计。它支持 PDF、Word、Excel、PPT、图片（含 OCR）、音频（含语音转录）、HTML 乃至 YouTube 链接等多种格式的解析，能够精准提取文档中的标题、列表、表格和链接等关键结构信息。\n\n在人工智能应用日益普及的今天，大语言模型（LLM）虽擅长处理文本，却难以直接读取复杂的二进制办公文档。MarkItDown 恰好解决了这一痛点，它将非结构化或半结构化的文件转化为模型“原生理解”且 Token 效率极高的 Markdown 格式，成为连接本地文件与 AI 分析 pipeline 的理想桥梁。此外，它还提供了 MCP（模型上下文协议）服务器，可无缝集成到 Claude Desktop 等 LLM 应用中。\n\n这款工具特别适合开发者、数据科学家及 AI 研究人员使用，尤其是那些需要构建文档检索增强生成（RAG）系统、进行批量文本分析或希望让 AI 助手直接“阅读”本地文件的用户。虽然生成的内容也具备一定可读性，但其核心优势在于为机器",93400,"2026-04-06T19:52:38",[52,14],{"id":62,"github_repo":63,"name":64,"description_en":65,"description_zh":66,"ai_summary_zh":67,"readme_en":68,"readme_zh":69,"quickstart_zh":70,"use_case_zh":71,"hero_image_url":72,"owner_login":73,"owner_name":74,"owner_avatar_url":75,"owner_bio":76,"owner_company":77,"owner_location":78,"owner_email":79,"owner_twitter":73,"owner_website":80,"owner_url":81,"languages":82,"stars":91,"forks":92,"last_commit_at":93,"license":94,"difficulty_score":10,"env_os":95,"env_gpu":96,"env_ram":97,"env_deps":98,"category_tags":110,"github_topics":111,"view_count":32,"oss_zip_url":79,"oss_zip_packed_at":79,"status":17,"created_at":116,"updated_at":117,"faqs":118,"releases":119},4944,"xxxnell\u002Fhow-do-vits-work","how-do-vits-work","(ICLR 2022 Spotlight) Official PyTorch implementation of \"How Do Vision Transformers Work?\"","how-do-vits-work 是 ICLR 2022 亮点论文《How Do Vision Transformers Work?》的官方 PyTorch 实现，旨在深入解析视觉 Transformer（ViT）中多头自注意力机制（MSA）的真实工作原理。该项目挑战了传统认知，指出 MSA 的成功并非源于其弱归纳偏置或捕捉长距离依赖的能力，而是作为一种“广义空间平滑”操作，与卷积神经网络（CNN）形成互补。\n\n通过代码复现与实验，该工具回答了三个核心问题：首先，MSA 通过平坦化损失景观来优化训练，关键在于其数据特异性而非长程依赖；其次，MSA 表现为低通滤波器（偏向形状），而卷积是高通滤波器（偏向纹理），两者特性截然相反；最后，基于上述发现，项目提出了 AlterNet 架构，通过在神经网络的每个阶段末尾用 MSA 替换卷积，显著提升了模型在大小数据集上的表现。\n\n这一工具非常适合 AI 研究人员和深度学习开发者使用，尤其是那些希望理解 Transformer 底层机制、探索 CNN 与 ViT 融合架构设计的人员。它不仅提供了理论验证的代码基础，更为设计更高效、鲁棒的混合视觉模型","how-do-vits-work 是 ICLR 2022 亮点论文《How Do Vision Transformers Work?》的官方 PyTorch 实现，旨在深入解析视觉 Transformer（ViT）中多头自注意力机制（MSA）的真实工作原理。该项目挑战了传统认知，指出 MSA 的成功并非源于其弱归纳偏置或捕捉长距离依赖的能力，而是作为一种“广义空间平滑”操作，与卷积神经网络（CNN）形成互补。\n\n通过代码复现与实验，该工具回答了三个核心问题：首先，MSA 通过平坦化损失景观来优化训练，关键在于其数据特异性而非长程依赖；其次，MSA 表现为低通滤波器（偏向形状），而卷积是高通滤波器（偏向纹理），两者特性截然相反；最后，基于上述发现，项目提出了 AlterNet 架构，通过在神经网络的每个阶段末尾用 MSA 替换卷积，显著提升了模型在大小数据集上的表现。\n\n这一工具非常适合 AI 研究人员和深度学习开发者使用，尤其是那些希望理解 Transformer 底层机制、探索 CNN 与 ViT 融合架构设计的人员。它不仅提供了理论验证的代码基础，更为设计更高效、鲁棒的混合视觉模型提供了明确的设计准则与实践参考。","\n\n# How Do Vision Transformers Work?\n\n[[arxiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.06709), [poster](https:\u002F\u002Fgithub.com\u002Fxxxnell\u002Fhow-do-vits-work-storage\u002Fblob\u002Fmaster\u002Fresources\u002Fhow_do_vits_work_poster_iclr2022.pdf), [slides](https:\u002F\u002Fgithub.com\u002Fxxxnell\u002Fhow-do-vits-work-storage\u002Fblob\u002Fmaster\u002Fresources\u002Fhow_do_vits_work_talk.pdf)]\n\nThis repository provides a PyTorch implementation of [\"How Do Vision Transformers Work? 
(ICLR 2022 Spotlight)\"](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.06709) In the paper, we show that the success of multi-head self-attentions (MSAs) for computer vision ***does NOT lie in their weak inductive bias and the capturing of long-range dependencies***. MSAs are not merely generalized Convs, but rather generalized spatial smoothings that *complement* Convs.\nIn particular, we address the following three key questions of MSAs and Vision Transformers (ViTs): \n\n***Q1. What properties of MSAs do we need to better optimize NNs?***  \n\nA1. MSAs have their pros and cons. MSAs improve NNs by flattening the loss landscapes. A key feature is their data specificity (data dependency), not long-range dependency. On the other hand, ViTs suffers from non-convex losses.\n\n\n***Q2. Do MSAs act like Convs?***  \n\nA2. MSAs and Convs exhibit opposite behaviors—e.g., MSAs are low-pass filters, but Convs are high-pass filters. It suggests that MSAs are shape-biased, whereas Convs are texture-biased. Therefore, MSAs and Convs are complementary.\n\n\n***Q3. How can we harmonize MSAs with Convs?***  \n\nA3. MSAs at the end of a stage (not a model) significantly improve the accuracy. Based on this, we introduce *AlterNet* by replacing Convs at the end of a stage with MSAs. AlterNet outperforms CNNs not only in large data regimes but also in small data regimes.\n\n\n👇 Let's find the detailed answers below!\n\n\n### I. What Properties of MSAs Do We Need to Improve Optimization?\n\n\u003Cp align=\"center\">\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fxxxnell_how-do-vits-work_readme_33eba8fa0c11.png\" style=\"width:83%;\">\n\u003C\u002Fp>\n\nMSAs improve not only accuracy but also generalization by flattening the loss landscapes (reducing the magnitude of Hessian eigenvalues). ***Such improvement is primarily attributable to their data specificity, NOT long-range dependency*** 😱 On the other hand, ViTs suffers from non-convex losses (negative Hessian eigenvalues). Their weak inductive bias and long-range dependency produce negative Hessian eigenvalues in small data regimes, and these non-convex points disrupt NN training. Large datasets and loss landscape smoothing methods alleviate this problem.\n\n\n### II. Do MSAs Act Like Convs?\n\n\u003Cp align=\"center\">\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fxxxnell_how-do-vits-work_readme_e7bab8727b41.png\" style=\"width:83%;\">\n\u003C\u002Fp>\n\nMSAs and Convs exhibit opposite behaviors. Therefore, MSAs and Convs are complementary. For example, MSAs are low-pass filters, but Convs are high-pass filters. Likewise, Convs are vulnerable to high-frequency noise but that MSAs are vulnerable to low-frequency noise: it suggests that MSAs are shape-biased, whereas Convs are texture-biased. In addition, Convs transform feature maps and MSAs aggregate transformed feature map predictions. Thus, it is effective to place MSAs after Convs.\n\n\n### III. How Can We Harmonize MSAs With Convs?\n\n\u003Cp align=\"center\">\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fxxxnell_how-do-vits-work_readme_7e582cadf4b6.png\" style=\"width:83%;\">\n\u003C\u002Fp>\n\nMulti-stage neural networks behave like a series connection of small individual models. In addition, MSAs at the end of a stage (not the end of a model) play a key role in prediction. Considering these insights, we propose design rules to harmonize MSAs with Convs. 
NN stages using this design pattern consist of a number of CNN blocks and one (or a few) MSA block. The design pattern naturally derives the structure of the canonical Transformer, which has one MLP block for one MSA block.\n\nBased on these design rules, we introduce AlterNet ([code](https:\u002F\u002Fgithub.com\u002Fxxxnell\u002Fhow-do-vits-work\u002Fblob\u002Ftransformer\u002Fmodels\u002Falternet.py)) by replacing Conv blocks at the end of a stage with MSA blocks. ***Surprisingly, AlterNet outperforms CNNs not only in large data regimes but also in small data regimes***, e.g., CIFAR. This contrasts with canonical ViTs, which perform poorly on small amounts of data. For more details, see below ([\"How to Apply MSA to Your Own Model\"](#how-to-apply-msa-to-your-own-model) section).\n\n\u003Cp align=\"center\">\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fxxxnell_how-do-vits-work_readme_9d5f8303e331.png\" style=\"width:70%;\">\n\u003C\u002Fp>\n\nBut why do Vision Transformers work that way? Our recent paper, [\"Blurs Behave Like Ensembles: Spatial Smoothings to Improve Accuracy, Uncertainty, and Robustness (ICML 2022)\"](https:\u002F\u002Farxiv.org\u002Fabs\u002F2105.12639) ([code and summary](https:\u002F\u002Fgithub.com\u002Fxxxnell\u002Fspatial-smoothing) :octocat:, [poster](https:\u002F\u002Fgithub.com\u002Fxxxnell\u002Fspatial-smoothing-storage\u002Fblob\u002Fmaster\u002Fresources\u002Fblurs_behave_like_ensembles_poster_icml2022.pdf)), shows that even a simple (non-trainable) 2 ✕ 2 box blur filter has the same properties. Spatial smoothings improve accuracy, uncertainty, and robustness simultaneously by *ensembling* spatially nearby feature maps of CNNs and flattening loss landscapes, and self-attentions can be deemed trainable importance-weighted ensembles of feature maps. In conclusion, MSA is not simply a generalized Conv, but rather a generalized (trainable) blur filter that complements Conv. Please check it out!\n\n\n\n\n## Getting Started \n\nThe following packages are required:\n\n* pytorch\n* matplotlib\n* notebook\n* ipywidgets\n* timm\n* einops\n* tensorboard\n* seaborn (optional)\n\nWe mainly use the docker image `pytorch\u002Fpytorch:1.9.0-cuda11.1-cudnn8-runtime` for the code. \n\nSee [```classification.ipynb```](classification.ipynb) ([Colab notebook](https:\u002F\u002Fcolab.research.google.com\u002Fgithub\u002Fxxxnell\u002Fhow-do-vits-work\u002Fblob\u002Ftransformer\u002Fclassification.ipynb)) for image classification. Run all cells to train and test models on CIFAR-10, CIFAR-100, and ImageNet. \n\n**Metrics.** We provide several metrics for measuring accuracy and uncertainty: Accuracy (Acc, ↑) and Acc for 90% certain results (Acc-90, ↑), negative log-likelihood (NLL, ↓), Expected Calibration Error (ECE, ↓), Intersection-over-Union (IoU, ↑) and IoU for certain results (IoU-90, ↑), Unconfidence (Unc-90, ↑), and Frequency for certain results (Freq-90, ↑). We also define a method to plot a reliability diagram for visualization.\n\n**Models.** We provide AlexNet, VGG, pre-activation VGG, ResNet, pre-activation ResNet, ResNeXt, WideResNet, ViT, PiT, Swin, MLP-Mixer, and Alter-ResNet by default. 
timm implementations also can be used.\n\n\n\n\n\n\n\u003Cdetails>\n\u003Csummary>\n  Pretrained models for CIFAR-100 are also provided: \u003Ca href=\"https:\u002F\u002Fgithub.com\u002Fxxxnell\u002Fhow-do-vits-work-storage\u002Freleases\u002Fdownload\u002Fv0.1\u002Fresnet_50_cifar100_691cc9a9e4.pth.tar\">ResNet-50\u003C\u002Fa>, \u003Ca href=\"https:\u002F\u002Fgithub.com\u002Fxxxnell\u002Fhow-do-vits-work-storage\u002Freleases\u002Fdownload\u002Fv0.1\u002Fvit_ti_cifar100_9857b21357.pth.tar\">ViT-Ti\u003C\u002Fa>, \u003Ca href=\"https:\u002F\u002Fgithub.com\u002Fxxxnell\u002Fhow-do-vits-work-storage\u002Freleases\u002Fdownload\u002Fv0.1\u002Fpit_ti_cifar100_0645889efb.pth.tar\">PiT-Ti\u003C\u002Fa>, and \u003Ca href=\"https:\u002F\u002Fgithub.com\u002Fxxxnell\u002Fhow-do-vits-work-storage\u002Freleases\u002Fdownload\u002Fv0.1\u002Fswin_ti_cifar100_ec2894492b.pth.tar\">Swin-Ti\u003C\u002Fa>. We recommend using \u003Ca href=\"https:\u002F\u002Fgithub.com\u002Frwightman\u002Fpytorch-image-models\">timm\u003C\u002Fa> for ImageNet-1K for the sake of simplicity (e.g., please refer to \u003Ccode>\u003Ca href=\"https:\u002F\u002Fgithub.com\u002Fxxxnell\u002Fhow-do-vits-work\u002Fblob\u002Ftransformer\u002Ffourier_analysis.ipynb\">fourier_analysis.ipynb\u003C\u002Fa>\u003C\u002Fcode>).\n  \u003C\u002Fsummary>\n\u003Cbr\u002F>\nThe codes below are snippets for (a) loading pretrained models and (b) converting them into block sequences.\n  \u003Cbr\u002F>\n\n```python\n# ResNet-50\nimport models\n  \n# a. download and load a pretrained model for CIFAR-100\nurl = \"https:\u002F\u002Fgithub.com\u002Fxxxnell\u002Fhow-do-vits-work-storage\u002Freleases\u002Fdownload\u002Fv0.1\u002Fresnet_50_cifar100_691cc9a9e4.pth.tar\"\npath = \"checkpoints\u002Fresnet_50_cifar100_691cc9a9e4.pth.tar\"\nmodels.download(url=url, path=path)\n\nname = \"resnet_50\"\nmodel = models.get_model(name, num_classes=100,  # timm does not provide a ResNet for CIFAR\n                         stem=model_args.get(\"stem\", False))\nmap_location = \"cuda\" if torch.cuda.is_available() else \"cpu\"\ncheckpoint = torch.load(path, map_location=map_location)\nmodel.load_state_dict(checkpoint[\"state_dict\"])\n\n# b. model → blocks. `blocks` is a sequence of blocks\nblocks = [\n    model.layer0,\n    *model.layer1,\n    *model.layer2,\n    *model.layer3,\n    *model.layer4,\n    model.classifier,\n]\n```\n\n```python\n# ViT-Ti\nimport copy\nimport timm\nimport torch\nimport torch.nn as nn\nimport models\n\n# a. download and load a pretrained model for CIFAR-100\nurl = \"https:\u002F\u002Fgithub.com\u002Fxxxnell\u002Fhow-do-vits-work-storage\u002Freleases\u002Fdownload\u002Fv0.1\u002Fvit_ti_cifar100_9857b21357.pth.tar\"\npath = \"checkpoints\u002Fvit_ti_cifar100_9857b21357.pth.tar\"\nmodels.download(url=url, path=path)\n\nmodel = timm.models.vision_transformer.VisionTransformer(\n    num_classes=100, img_size=32, patch_size=2,  # for CIFAR\n    embed_dim=192, depth=12, num_heads=3, qkv_bias=False,  # for ViT-Ti \n)\nmodel.name = \"vit_ti\"\nmodels.stats(model)\nmap_location = \"cuda\" if torch.cuda.is_available() else \"cpu\"\ncheckpoint = torch.load(path, map_location=map_location)\nmodel.load_state_dict(checkpoint[\"state_dict\"])\n\n\n# b. model → blocks. 
`blocks` is a sequence of blocks\n\nclass PatchEmbed(nn.Module):\n    def __init__(self, model):\n        super().__init__()\n        self.model = copy.deepcopy(model)\n        \n    def forward(self, x, **kwargs):\n        x = self.model.patch_embed(x)\n        cls_token = self.model.cls_token.expand(x.shape[0], -1, -1)\n        x = torch.cat((cls_token, x), dim=1)\n        x = self.model.pos_drop(x + self.model.pos_embed)\n        return x\n\n\nclass Residual(nn.Module):\n    def __init__(self, *fn):\n        super().__init__()\n        self.fn = nn.Sequential(*fn)\n        \n    def forward(self, x, **kwargs):\n        return self.fn(x, **kwargs) + x\n    \n    \nclass Lambda(nn.Module):\n    def __init__(self, fn):\n        super().__init__()\n        self.fn = fn\n        \n    def forward(self, x):\n        return self.fn(x)\n\n\ndef flatten(xs_list):\n    return [x for xs in xs_list for x in xs]\n\n\n# model → blocks. `blocks` is a sequence of blocks\nblocks = [\n    PatchEmbed(model),\n    *flatten([[Residual(b.norm1, b.attn), Residual(b.norm2, b.mlp)] \n              for b in model.blocks]),\n    nn.Sequential(model.norm, Lambda(lambda x: x[:, 0]), model.head),\n]\n```\n\n  \n```python\n# PiT-Ti\nimport copy\nimport math\nimport timm\n\nimport torch\nimport torch.nn as nn\n\n# a. download and load a pretrained model for CIFAR-100\nurl = \"https:\u002F\u002Fgithub.com\u002Fxxxnell\u002Fhow-do-vits-work-storage\u002Freleases\u002Fdownload\u002Fv0.1\u002Fpit_ti_cifar100_0645889efb.pth.tar\"\npath = \"checkpoints\u002Fpit_ti_cifar100_0645889efb.pth.tar\"\nmodels.download(url=url, path=path)\n\nmodel = timm.models.pit.PoolingVisionTransformer(\n    num_classes=100, img_size=32, patch_size=2, stride=1,  # for CIFAR-100\n    base_dims=[32, 32, 32], depth=[2, 6, 4], heads=[2, 4, 8], mlp_ratio=4,  # for PiT-Ti\n)\nmodel.name = \"pit_ti\"\nmodels.stats(model)\nmap_location = \"cuda\" if torch.cuda.is_available() else \"cpu\"\ncheckpoint = torch.load(path, map_location=map_location)\nmodel.load_state_dict(checkpoint[\"state_dict\"])\n\n\n# b. model → blocks. 
`blocks` is a sequence of blocks\n\nclass PatchEmbed(nn.Module):\n    def __init__(self, model):\n        super().__init__()\n        self.model = copy.deepcopy(model)\n        \n    def forward(self, x, **kwargs):\n        x = self.model.patch_embed(x)\n        x = self.model.pos_drop(x + self.model.pos_embed)\n        cls_tokens = self.model.cls_token.expand(x.shape[0], -1, -1)\n\n        return (x, cls_tokens)\n\n    \nclass Concat(nn.Module):\n    def __init__(self, model):\n        super().__init__()\n        self.model = copy.deepcopy(model)\n        \n    def forward(self, x, **kwargs):\n        x, cls_tokens = x\n        B, C, H, W = x.shape\n        token_length = cls_tokens.shape[1]\n\n        x = x.flatten(2).transpose(1, 2)\n        x = torch.cat((cls_tokens, x), dim=1)\n\n        return x\n    \n    \nclass Pool(nn.Module):\n    def __init__(self, block, token_length):\n        super().__init__()\n        self.block = copy.deepcopy(block)\n        self.token_length = token_length\n        \n    def forward(self, x, **kwargs):\n        cls_tokens = x[:, :self.token_length]\n        x = x[:, self.token_length:]\n        B, N, C = x.shape\n        H, W = int(math.sqrt(N)), int(math.sqrt(N))\n        x = x.transpose(1, 2).reshape(B, C, H, W)\n\n        x, cls_tokens = self.block(x, cls_tokens)\n        \n        return x, cls_tokens\n    \n    \nclass Classifier(nn.Module):\n    def __init__(self, norm, head):\n        super().__init__()\n        self.head = copy.deepcopy(head)\n        self.norm = copy.deepcopy(norm)\n        \n    def forward(self, x, **kwargs):\n        x = x[:,0]\n        x = self.norm(x)\n        x = self.head(x)\n        return x\n\n    \nclass Residual(nn.Module):\n    def __init__(self, *fn):\n        super().__init__()\n        self.fn = nn.Sequential(*fn)\n        \n    def forward(self, x, **kwargs):\n        return self.fn(x, **kwargs) + x\n\n    \ndef flatten(xs_list):\n    return [x for xs in xs_list for x in xs]\n\n\nblocks = [\n    nn.Sequential(PatchEmbed(model), Concat(model),),\n    *flatten([[Residual(b.norm1, b.attn), Residual(b.norm2, b.mlp)] \n              for b in model.transformers[0].blocks]),\n    nn.Sequential(Pool(model.transformers[0].pool, 1), Concat(model),),\n    *flatten([[Residual(b.norm1, b.attn), Residual(b.norm2, b.mlp)] \n              for b in model.transformers[1].blocks]),\n    nn.Sequential(Pool(model.transformers[1].pool, 1), Concat(model),),\n    *flatten([[Residual(b.norm1, b.attn), Residual(b.norm2, b.mlp)] \n              for b in model.transformers[2].blocks]),\n    Classifier(model.norm, model.head),\n]\n```\n\n\n```python\n# Swin-Ti\nimport copy\nimport timm\nimport models\n\nimport torch\nimport torch.nn as nn\n\n# a. download and load a pretrained model for CIFAR-100\nurl = \"https:\u002F\u002Fgithub.com\u002Fxxxnell\u002Fhow-do-vits-work-storage\u002Freleases\u002Fdownload\u002Fv0.1\u002Fswin_ti_cifar100_ec2894492b.pth.tar\"\npath = \"checkpoints\u002Fswin_ti_cifar100_ec2894492b.pth.tar\"\nmodels.download(url=url, path=path)\n\nmodel = timm.models.swin_transformer.SwinTransformer(\n    num_classes=100, img_size=32, patch_size=1, window_size=4,  # for CIFAR-100\n    embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), qkv_bias=False,  # for Swin-Ti\n)\nmodel.name = \"swin_ti\"\nmodels.stats(model)\nmap_location = \"cuda\" if torch.cuda.is_available() else \"cpu\"\ncheckpoint = torch.load(path, map_location=map_location)\nmodel.load_state_dict(checkpoint[\"state_dict\"])\n\n\n# b. model → blocks. 
`blocks` is a sequence of blocks\n\nclass Attn(nn.Module):\n    def __init__(self, block):\n        super().__init__()\n        self.block = copy.deepcopy(block)\n        self.block.mlp = nn.Identity()\n        self.block.norm2 = nn.Identity()\n        \n    def forward(self, x, **kwargs):\n        x = self.block(x)\n        x = x \u002F 2\n        \n        return x\n\nclass MLP(nn.Module):\n    def __init__(self, block):\n        super().__init__()\n        block = copy.deepcopy(block)\n        self.mlp = block.mlp\n        self.norm2 = block.norm2\n        \n    def forward(self, x, **kwargs):\n        x = x + self.mlp(self.norm2(x))\n\n        return x\n\n    \nclass Classifier(nn.Module):\n    def __init__(self, norm, head):\n        super().__init__()\n        self.norm = copy.deepcopy(norm)\n        self.head = copy.deepcopy(head)\n        \n    def forward(self, x, **kwargs):\n        x = self.norm(x)\n        x = x.mean(dim=1)\n        x = self.head(x)\n\n        return x\n\n    \ndef flatten(xs_list):\n    return [x for xs in xs_list for x in xs]\n\n\nblocks = [\n    model.patch_embed,\n    *flatten([[Attn(block), MLP(block)] for block in model.layers[0].blocks]),\n    model.layers[0].downsample,\n    *flatten([[Attn(block), MLP(block)] for block in model.layers[1].blocks]),\n    model.layers[1].downsample,\n    *flatten([[Attn(block), MLP(block)] for block in model.layers[2].blocks]),\n    model.layers[2].downsample,\n    *flatten([[Attn(block), MLP(block)] for block in model.layers[3].blocks]),\n    Classifier(model.norm, model.head)\n]\n```\n\u003C\u002Fdetails>\n\n\n\n## Fourier Analysis of Representations \n\nRefer to [```fourier_analysis.ipynb```](fourier_analysis.ipynb) ([Colab notebook](https:\u002F\u002Fcolab.research.google.com\u002Fgithub\u002Fxxxnell\u002Fhow-do-vits-work\u002Fblob\u002Ftransformer\u002Ffourier_analysis.ipynb)) to analyze feature maps through the lens of Fourier transform. Run all cells to visualize Fourier transformed feature maps. Fourier analysis shows that MSAs reduce high-frequency signals, while Convs amplified high-frequency components.\n\n\n## Measuring Feature Map Variances\n\nRefer to [```featuremap_variance.ipynb```](featuremap_variance.ipynb) ([Colab notebook](https:\u002F\u002Fcolab.research.google.com\u002Fgithub\u002Fxxxnell\u002Fhow-do-vits-work\u002Fblob\u002Ftransformer\u002Ffeaturemap_variance.ipynb)) to measure feature map variance. Run all cells to visualize feature map variances. Feature map variance shows that MSAs aggregate feature maps, but Convs and MLPs diversify them.\n\n\n## Visualizing the Loss Landscapes\n\nRefer to [```losslandscape.ipynb```](losslandscape.ipynb) ([Colab notebook](https:\u002F\u002Fcolab.research.google.com\u002Fgithub\u002Fxxxnell\u002Fhow-do-vits-work\u002Fblob\u002Ftransformer\u002Flosslandscape.ipynb)) or [the original repo](https:\u002F\u002Fgithub.com\u002Ftomgoldstein\u002Floss-landscape) for exploring the loss landscapes. Run all cells to get predictive performance of the model for weight space grid. Loss landscape visualization shows that ViT has a flatter loss than ResNet.\n\n\n## Evaluating Robustness on Corrupted Datasets\n\nRefer to [```robustness.ipynb```](robustness.ipynb) ([Colab notebook](https:\u002F\u002Fcolab.research.google.com\u002Fgithub\u002Fxxxnell\u002Fhow-do-vits-work\u002Fblob\u002Ftransformer\u002Frobustness.ipynb)) for evaluation corruption robustness on [corrupted datasets](https:\u002F\u002Fgithub.com\u002Fhendrycks\u002Frobustness) such as CIFAR-10-C and CIFAR-100-C. 
Run all cells to get predictive performance of the model on datasets which consist of data corrupted by 15 different types with 5 levels of intensity each. \n\n\n## How to Apply MSA to Your Own Model\n\n\u003Cp align=\"center\">\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fxxxnell_how-do-vits-work_readme_fed8e3ab2184.gif\" style=\"width:90%;\">\n\u003C\u002Fp>\n\nWe find that MSA complements Conv (not replaces Conv), and *MSA closer to the end of a stage* improves predictive performance significantly. Based on these insights, we propose the following build-up rules:\n\n1. Alternately replace Conv blocks with MSA blocks from the end of a baseline CNN model. \n2. If the added MSA block does not improve predictive performance, replace a Conv block located at the end of an earlier stage with an MSA. \n3. Use more heads and higher hidden dimensions for MSA blocks in late stages.\n\nIn the animation above, we replace Convs of ResNet with MSAs one by one according to the build-up rules. Note that several MSAs in `c3` harm the accuracy, but the MSA at the end of `c2` improves it. As a result, surprisingly, the model with MSAs following the appropriate build-up rule outperforms CNNs even in the small data regimes, e.g., CIFAR-100!\n\n\n\n\n## Investigate Loss Landscapes and Hessians With L2 Regularization on Augmented Datasets\n\nTwo common mistakes are investigating loss landscapes and Hessians (1) *'without considering L2 regularization'* on (2) *'clean datasets'*. However, note that NNs are optimized with L2 regularization on augmented datasets. Therefore, it is appropriate to visualize *'NLL + L2'* on *'augmented datasets'*. Measuring criteria without L2 on clean datasets would give incorrect results.\n\n\n\n## Citation\n\nIf you find this useful, please consider citing 📑 the paper and starring 🌟 this repository. Please do not hesitate to contact Namuk Park (email: namuk.park at gmail dot com, twitter: [xxxnell](https:\u002F\u002Ftwitter.com\u002Fxxxnell)) with any comments or feedback.\n\n```\n@inproceedings{park2022how,\n  title={How Do Vision Transformers Work?},\n  author={Namuk Park and Songkuk Kim},\n  booktitle={International Conference on Learning Representations},\n  year={2022}\n}\n```\n\n\n## License\n\nAll code is available to you under Apache License 2.0. CNN models build off the torchvision models which are BSD licensed. ViTs build off the [PyTorch Image Models](https:\u002F\u002Fgithub.com\u002Frwightman\u002Fpytorch-image-models) and [Vision Transformer - Pytorch](https:\u002F\u002Fgithub.com\u002Flucidrains\u002Fvit-pytorch) which are Apache 2.0 and MIT licensed.\n\nCopyright the maintainers.\n\n\n\n","# 视觉Transformer是如何工作的？\n\n[[arxiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.06709), [海报](https:\u002F\u002Fgithub.com\u002Fxxxnell\u002Fhow-do-vits-work-storage\u002Fblob\u002Fmaster\u002Fresources\u002Fhow_do_vits_work_poster_iclr2022.pdf), [幻灯片](https:\u002F\u002Fgithub.com\u002Fxxxnell\u002Fhow-do-vits-work-storage\u002Fblob\u002Fmaster\u002Fresources\u002Fhow_do_vits_work_talk.pdf)]\n\n本仓库提供了论文《视觉Transformer是如何工作的？（ICLR 2022 Spotlight）》的PyTorch实现。在该论文中，我们指出多头自注意力机制（MSA）在计算机视觉领域的成功***并不在于其弱的归纳偏置和对长距离依赖的捕捉***。MSA不仅仅是卷积的泛化形式，更是一种与卷积*互补*的空间平滑操作。\n\n具体而言，我们探讨了关于MSA和视觉Transformer（ViT）的以下三个关键问题：\n\n***Q1. 我们需要MSA的哪些特性来更好地优化神经网络？***\n\nA1. MSA有利有弊。MSA通过使损失曲面更加平坦来提升神经网络性能。其关键特性是数据特异性（数据依赖性），而非长距离依赖。另一方面，ViT模型则面临非凸损失的问题。\n\n\n***Q2. MSA是否像卷积一样工作？***\n\nA2. MSA和卷积表现出截然相反的行为——例如，MSA是低通滤波器，而卷积则是高通滤波器。这表明MSA偏向于形状信息，而卷积则偏向于纹理信息。因此，MSA和卷积具有互补性。\n\n\n***Q3. 
如何将MSA与卷积有机结合？***\n\nA3. 在每个阶段的末尾使用MSA（而非在整个模型的末尾）能够显著提升模型精度。基于此，我们提出了AlterNet架构，用MSA替换每个阶段末尾的卷积模块。AlterNet不仅在大数据场景下优于传统CNN，在小数据场景下也同样表现出色。\n\n\n👇 下面让我们详细了解一下这些问题的答案！\n\n\n### I. MSA有哪些特性有助于优化？\n\n\u003Cp align=\"center\">\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fxxxnell_how-do-vits-work_readme_33eba8fa0c11.png\" style=\"width:83%;\">\n\u003C\u002Fp>\n\nMSA不仅能提升模型精度，还能通过使损失曲面更加平坦（降低Hessian矩阵特征值的幅度）来增强泛化能力。***这种改进主要归功于MSA的数据特异性，而非长距离依赖*** 😱 另一方面，ViT模型却存在非凸损失问题（Hessian矩阵出现负特征值）。在小数据集情况下，其弱的归纳偏置和长距离依赖会导致出现非凸点，从而干扰训练过程。而大规模数据集以及损失曲面平滑方法可以缓解这一问题。\n\n\n### II. MSA是否像卷积一样工作？\n\n\u003Cp align=\"center\">\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fxxxnell_how-do-vits-work_readme_e7bab8727b41.png\" style=\"width:83%;\">\n\u003C\u002Fp>\n\nMSA和卷积的表现截然相反，因此两者具有互补性。例如，MSA是低通滤波器，而卷积则是高通滤波器。同样地，卷积容易受到高频噪声的影响，而MSA则容易受到低频噪声的影响：这表明MSA偏向于形状信息，而卷积则偏向于纹理信息。此外，卷积负责变换特征图，而MSA则负责聚合这些变换后的特征图预测结果。因此，将MSA置于卷积之后是十分有效的。\n\n\n### III. 如何将MSA与卷积有机结合？\n\n\u003Cp align=\"center\">\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fxxxnell_how-do-vits-work_readme_7e582cadf4b6.png\" style=\"width:83%;\">\n\u003C\u002Fp>\n\n多阶段神经网络可以看作是由多个小型子模型串联而成。此外，每个阶段末尾的MSA（而非整个模型末尾的MSA）在最终预测中起着关键作用。基于这些发现，我们提出了一种将MSA与卷积结合的设计规则：每个网络阶段由若干个卷积块和一个（或几个）MSA块组成。这种设计模式自然引出了经典Transformer的结构——即每个MSA块对应一个MLP块。\n\n根据上述设计规则，我们提出了AlterNet架构（[代码链接](https:\u002F\u002Fgithub.com\u002Fxxxnell\u002Fhow-do-vits-work\u002Fblob\u002Ftransformer\u002Fmodels\u002Falternet.py)），用MSA块替换了每个阶段末尾的卷积块。***令人惊讶的是，AlterNet不仅在大数据场景下表现优异，在小数据场景下也同样超越了传统CNN***，例如在CIFAR数据集上。这与经典的ViT模型形成鲜明对比——后者在小数据量下表现较差。更多细节请参阅下方的“如何将MSA应用到自己的模型中”部分。\n\n\u003Cp align=\"center\">\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fxxxnell_how-do-vits-work_readme_9d5f8303e331.png\" style=\"width:70%;\">\n\u003C\u002Fp>\n\n那么，为什么视觉Transformer会以这种方式工作呢？我们最近发表的一篇论文《模糊操作如同集成学习：空间平滑技术提升准确率、不确定性与鲁棒性（ICML 2022）》（[arXiv链接](https:\u002F\u002Farxiv.org\u002Fabs\u002F2105.12639)，[代码及摘要](https:\u002F\u002Fgithub.com\u002Fxxxnell\u002Fspatial-smoothing) :octocat:，[海报](https:\u002F\u002Fgithub.com\u002Fxxxnell\u002Fspatial-smoothing-storage\u002Fblob\u002Fmaster\u002Fresources\u002Fblurs_behave_like_ensembles_poster_icml2022.pdf)）表明，即使是一个简单的（不可训练的）2×2方框模糊滤波器，也具备类似的特性。空间平滑操作可以通过对CNN中空间邻近的特征图进行集成，并使损失曲面更加平坦，从而同时提升准确率、不确定性和鲁棒性；而自注意力机制则可以被视为一种可训练的、加权集成的特征图集合。综上所述，MSA并非单纯的卷积泛化，而是一种与卷积互补的可训练模糊滤波器。欢迎进一步了解！\n\n## 入门指南\n\n以下软件包是必需的：\n\n* PyTorch\n* Matplotlib\n* Jupyter Notebook\n* ipywidgets\n* timm\n* einops\n* TensorBoard\n* seaborn（可选）\n\n我们主要使用 Docker 镜像 `pytorch\u002Fpytorch:1.9.0-cuda11.1-cudnn8-runtime` 来运行代码。\n\n请参阅 [```classification.ipynb```](classification.ipynb)（[Colab 笔记本](https:\u002F\u002Fcolab.research.google.com\u002Fgithub\u002Fxxxnell\u002Fhow-do-vits-work\u002Fblob\u002Ftransformer\u002Fclassification.ipynb)）进行图像分类实验。运行所有单元格以在 CIFAR-10、CIFAR-100 和 ImageNet 数据集上训练和测试模型。\n\n**指标。** 我们提供了多种用于衡量准确性和不确定性的指标：准确率（Acc，↑）以及置信度达到 90% 的结果的准确率（Acc-90，↑）、负对数似然（NLL，↓）、期望校准误差（ECE，↓）、交并比（IoU，↑）及置信度达到 90% 的结果的交并比（IoU-90，↑）、不确信度（Unc-90，↑）以及置信度达到 90% 的结果的频率（Freq-90，↑）。我们还定义了一种绘制可靠性图的方法以便于可视化。\n\n**模型。** 默认情况下，我们提供了 AlexNet、VGG、预激活 VGG、ResNet、预激活 ResNet、ResNeXt、WideResNet、ViT、PiT、Swin、MLP-Mixer 和 Alter-ResNet。也可以使用 timm 提供的实现。\n\n\n\n\n\n\u003Cdetails>\n\u003Csummary>\n  同时也提供了 CIFAR-100 的预训练模型： \u003Ca href=\"https:\u002F\u002Fgithub.com\u002Fxxxnell\u002Fhow-do-vits-work-storage\u002Freleases\u002Fdownload\u002Fv0.1\u002Fresnet_50_cifar100_691cc9a9e4.pth.tar\">ResNet-50\u003C\u002Fa>、\u003Ca 
href=\"https:\u002F\u002Fgithub.com\u002Fxxxnell\u002Fhow-do-vits-work-storage\u002Freleases\u002Fdownload\u002Fv0.1\u002Fvit_ti_cifar100_9857b21357.pth.tar\">ViT-Ti\u003C\u002Fa>、\u003Ca href=\"https:\u002F\u002Fgithub.com\u002Fxxxnell\u002Fhow-do-vits-work-storage\u002Freleases\u002Fdownload\u002Fv0.1\u002Fpit_ti_cifar100_0645889efb.pth.tar\">PiT-Ti\u003C\u002Fa> 和 \u003Ca href=\"https:\u002F\u002Fgithub.com\u002Fxxxnell\u002Fhow-do-vits-work-storage\u002Freleases\u002Fdownload\u002Fv0.1\u002Fswin_ti_cifar100_ec2894492b.pth.tar\">Swin-Ti\u003C\u002Fa>。为了简化操作，我们建议使用 \u003Ca href=\"https:\u002F\u002Fgithub.com\u002Frwightman\u002Fpytorch-image-models\">timm\u003C\u002Fa> 来处理 ImageNet-1K 数据集（例如，请参考 \u003Ccode>\u003Ca href=\"https:\u002F\u002Fgithub.com\u002Fxxxnell\u002Fhow-do-vits-work\u002Fblob\u002Ftransformer\u002Ffourier_analysis.ipynb\">fourier_analysis.ipynb\u003C\u002Fa>\u003C\u002Fcode>）。\n  \u003C\u002Fsummary>\n\u003Cbr\u002F>\n以下代码片段展示了如何 (a) 加载预训练模型以及 (b) 将其转换为模块序列。\n  \u003Cbr\u002F>\n\n```python\n# ResNet-50\nimport models\n  \n# a. 下载并加载 CIFAR-100 的预训练模型\nurl = \"https:\u002F\u002Fgithub.com\u002Fxxxnell\u002Fhow-do-vits-work-storage\u002Freleases\u002Fdownload\u002Fv0.1\u002Fresnet_50_cifar100_691cc9a9e4.pth.tar\"\npath = \"checkpoints\u002Fresnet_50_cifar100_691cc9a9e4.pth.tar\"\nmodels.download(url=url, path=path)\n\nname = \"resnet_50\"\nmodel = models.get_model(name, num_classes=100,  # timm 不提供 CIFAR 的 ResNet\n                         stem=model_args.get(\"stem\", False))\nmap_location = \"cuda\" if torch.cuda.is_available() else \"cpu\"\ncheckpoint = torch.load(path, map_location=map_location)\nmodel.load_state_dict(checkpoint[\"state_dict\"])\n\n# b. 模型 → 模块。`blocks` 是一个模块序列\nblocks = [\n    model.layer0,\n    *model.layer1,\n    *model.layer2,\n    *model.layer3,\n    *model.layer4,\n    model.classifier,\n]\n```\n\n```python\n# ViT-Ti\nimport copy\nimport timm\nimport torch\nimport torch.nn as nn\nimport models\n\n# a. 下载并加载 CIFAR-100 的预训练模型\nurl = \"https:\u002F\u002Fgithub.com\u002Fxxxnell\u002Fhow-do-vits-work-storage\u002Freleases\u002Fdownload\u002Fv0.1\u002Fvit_ti_cifar100_9857b21357.pth.tar\"\npath = \"checkpoints\u002Fvit_ti_cifar100_9857b21357.pth.tar\"\nmodels.download(url=url, path=path)\n\nmodel = timm.models.vision_transformer.VisionTransformer(\n    num_classes=100, img_size=32, patch_size=2,  # 适用于 CIFAR\n    embed_dim=192, depth=12, num_heads=3, qkv_bias=False,  # 对应 ViT-Ti \n)\nmodel.name = \"vit_ti\"\nmodels.stats(model)\nmap_location = \"cuda\" if torch.cuda.is_available() else \"cpu\"\ncheckpoint = torch.load(path, map_location=map_location)\nmodel.load_state_dict(checkpoint[\"state_dict\"])\n\n\n# b. 
模型 → 模块。`blocks` 是一个模块序列\n\nclass PatchEmbed(nn.Module):\n    def __init__(self, model):\n        super().__init__()\n        self.model = copy.deepcopy(model)\n        \n    def forward(self, x, **kwargs):\n        x = self.model.patch_embed(x)\n        cls_token = self.model.cls_token.expand(x.shape[0], -1, -1)\n        x = torch.cat((cls_token, x), dim=1)\n        x = self.model.pos_drop(x + self.model.pos_embed)\n        return x\n\n\nclass Residual(nn.Module):\n    def __init__(self, *fn):\n        super().__init__()\n        self.fn = nn.Sequential(*fn)\n        \n    def forward(self, x, **kwargs):\n        return self.fn(x, **kwargs) + x\n    \n    \nclass Lambda(nn.Module):\n    def __init__(self, fn):\n        super().__init__()\n        self.fn = fn\n        \n    def forward(self, x):\n        return self.fn(x)\n\n\ndef flatten(xs_list):\n    return [x for xs in xs_list for x in xs]\n\n\n# 模型 → 模块。`blocks` 是一个模块序列\nblocks = [\n    PatchEmbed(model),\n    *flatten([[Residual(b.norm1, b.attn), Residual(b.norm2, b.mlp)] \n              for b in model.blocks]),\n    nn.Sequential(model.norm, Lambda(lambda x: x[:, 0]), model.head),\n]\n```\n\n  \n```python\n# PiT-Ti\nimport copy\nimport math\nimport timm\n\nimport torch\nimport torch.nn as nn\n\n# a. 下载并加载 CIFAR-100 的预训练模型\nurl = \"https:\u002F\u002Fgithub.com\u002Fxxxnell\u002Fhow-do-vits-work-storage\u002Freleases\u002Fdownload\u002Fv0.1\u002Fpit_ti_cifar100_0645889efb.pth.tar\"\npath = \"checkpoints\u002Fpit_ti_cifar100_0645889efb.pth.tar\"\nmodels.download(url=url, path=path)\n\nmodel = timm.models.pit.PoolingVisionTransformer(\n    num_classes=100, img_size=32, patch_size=2, stride=1,  # 适用于 CIFAR-100\n    base_dims=[32, 32, 32], depth=[2, 6, 4], heads=[2, 4, 8], mlp_ratio=4,  # 对应 PiT-Ti\n)\nmodel.name = \"pit_ti\"\nmodels.stats(model)\nmap_location = \"cuda\" if torch.cuda.is_available() else \"cpu\"\ncheckpoint = torch.load(path, map_location=map_location)\nmodel.load_state_dict(checkpoint[\"state_dict\"])\n\n# b. 
模型 → 块。`blocks` 是一系列块\n\nclass PatchEmbed(nn.Module):\n    def __init__(self, model):\n        super().__init__()\n        self.model = copy.deepcopy(model)\n        \n    def forward(self, x, **kwargs):\n        x = self.model.patch_embed(x)\n        x = self.model.pos_drop(x + self.model.pos_embed)\n        cls_tokens = self.model.cls_token.expand(x.shape[0], -1, -1)\n\n        return (x, cls_tokens)\n\n    \nclass Concat(nn.Module):\n    def __init__(self, model):\n        super().__init__()\n        self.model = copy.deepcopy(model)\n        \n    def forward(self, x, **kwargs):\n        x, cls_tokens = x\n        B, C, H, W = x.shape\n        token_length = cls_tokens.shape[1]\n\n        x = x.flatten(2).transpose(1, 2)\n        x = torch.cat((cls_tokens, x), dim=1)\n\n        return x\n    \n    \nclass Pool(nn.Module):\n    def __init__(self, block, token_length):\n        super().__init__()\n        self.block = copy.deepcopy(block)\n        self.token_length = token_length\n        \n    def forward(self, x, **kwargs):\n        cls_tokens = x[:, :self.token_length]\n        x = x[:, self.token_length:]\n        B, N, C = x.shape\n        H, W = int(math.sqrt(N)), int(math.sqrt(N))\n        x = x.transpose(1, 2).reshape(B, C, H, W)\n\n        x, cls_tokens = self.block(x, cls_tokens)\n        \n        return x, cls_tokens\n    \n    \nclass Classifier(nn.Module):\n    def __init__(self, norm, head):\n        super().__init__()\n        self.head = copy.deepcopy(head)\n        self.norm = copy.deepcopy(norm)\n        \n    def forward(self, x, **kwargs):\n        x = x[:,0]\n        x = self.norm(x)\n        x = self.head(x)\n        return x\n\n    \nclass Residual(nn.Module):\n    def __init__(self, *fn):\n        super().__init__()\n        self.fn = nn.Sequential(*fn)\n        \n    def forward(self, x, **kwargs):\n        return self.fn(x, **kwargs) + x\n\n    \ndef flatten(xs_list):\n    return [x for xs in xs_list for x in xs]\n\n\nblocks = [\n    nn.Sequential(PatchEmbed(model), Concat(model),),\n    *flatten([[Residual(b.norm1, b.attn), Residual(b.norm2, b.mlp)] \n              for b in model.transformers[0].blocks]),\n    nn.Sequential(Pool(model.transformers[0].pool, 1), Concat(model),),\n    *flatten([[Residual(b.norm1, b.attn), Residual(b.norm2, b.mlp)] \n              for b in model.transformers[1].blocks]),\n    nn.Sequential(Pool(model.transformers[1].pool, 1), Concat(model),),\n    *flatten([[Residual(b.norm1, b.attn), Residual(b.norm2, b.mlp)] \n              for b in model.transformers[2].blocks]),\n    Classifier(model.norm, model.head),\n]\n```\n\n\n```python\n# Swin-Ti\nimport copy\nimport timm\nimport models\n\nimport torch\nimport torch.nn as nn\n\n# a. 下载并加载用于 CIFAR-100 的预训练模型\nurl = \"https:\u002F\u002Fgithub.com\u002Fxxxnell\u002Fhow-do-vits-work-storage\u002Freleases\u002Fdownload\u002Fv0.1\u002Fswin_ti_cifar100_ec2894492b.pth.tar\"\npath = \"checkpoints\u002Fswin_ti_cifar100_ec2894492b.pth.tar\"\nmodels.download(url=url, path=path)\n\nmodel = timm.models.swin_transformer.SwinTransformer(\n    num_classes=100, img_size=32, patch_size=1, window_size=4,  # 适用于 CIFAR-100\n    embed_dim=96, depths=(2, 2, 6, 2), num_heads=(3, 6, 12, 24), qkv_bias=False,  # 对应 Swin-Ti\n)\nmodel.name = \"swin_ti\"\nmodels.stats(model)\nmap_location = \"cuda\" if torch.cuda.is_available() else \"cpu\"\ncheckpoint = torch.load(path, map_location=map_location)\nmodel.load_state_dict(checkpoint[\"state_dict\"])\n\n\n# b. 
模型 → 块。`blocks` 是一系列块\n\nclass Attn(nn.Module):\n    def __init__(self, block):\n        super().__init__()\n        self.block = copy.deepcopy(block)\n        self.block.mlp = nn.Identity()\n        self.block.norm2 = nn.Identity()\n        \n    def forward(self, x, **kwargs):\n        x = self.block(x)\n        x = x \u002F 2\n        \n        return x\n\nclass MLP(nn.Module):\n    def __init__(self, block):\n        super().__init__()\n        block = copy.deepcopy(block)\n        self.mlp = block.mlp\n        self.norm2 = block.norm2\n        \n    def forward(self, x, **kwargs):\n        x = x + self.mlp(self.norm2(x))\n\n        return x\n\n    \nclass Classifier(nn.Module):\n    def __init__(self, norm, head):\n        super().__init__()\n        self.norm = copy.deepcopy(norm)\n        self.head = copy.deepcopy(head)\n        \n    def forward(self, x, **kwargs):\n        x = self.norm(x)\n        x = x.mean(dim=1)\n        x = self.head(x)\n\n        return x\n\n    \ndef flatten(xs_list):\n    return [x for xs in xs_list for x in xs]\n\n\nblocks = [\n    model.patch_embed,\n    *flatten([[Attn(block), MLP(block)] for block in model.layers[0].blocks]),\n    model.layers[0].downsample,\n    *flatten([[Attn(block), MLP(block)] for block in model.layers[1].blocks]),\n    model.layers[1].downsample,\n    *flatten([[Attn(block), MLP(block)] for block in model.layers[2].blocks]),\n    model.layers[2].downsample,\n    *flatten([[Attn(block), MLP(block)] for block in model.layers[3].blocks]),\n    Classifier(model.norm, model.head)\n]\n```\n\u003C\u002Fdetails>\n\n\n\n## 表征的傅里叶分析\n\n请参阅 [```fourier_analysis.ipynb```](fourier_analysis.ipynb)（[Colab 笔记本](https:\u002F\u002Fcolab.research.google.com\u002Fgithub\u002Fxxxnell\u002Fhow-do-vits-work\u002Fblob\u002Ftransformer\u002Ffourier_analysis.ipynb)），通过傅里叶变换的视角分析特征图。运行所有单元格以可视化傅里叶变换后的特征图。傅里叶分析表明，MSA 会降低高频信号，而卷积则会增强高频成分。\n\n\n## 测量特征图方差\n\n请参阅 [```featuremap_variance.ipynb```](featuremap_variance.ipynb)（[Colab 笔记本](https:\u002F\u002Fcolab.research.google.com\u002Fgithub\u002Fxxxnell\u002Fhow-do-vits-work\u002Fblob\u002Ftransformer\u002Ffeaturemap_variance.ipynb)），以测量特征图的方差。运行所有单元格以可视化特征图的方差。特征图方差显示，MSA 会聚合特征图，而卷积和 MLP 则会使其多样化。\n\n\n## 可视化损失景观\n\n请参阅 [```losslandscape.ipynb```](losslandscape.ipynb)（[Colab 笔记本](https:\u002F\u002Fcolab.research.google.com\u002Fgithub\u002Fxxxnell\u002Fhow-do-vits-work\u002Fblob\u002Ftransformer\u002Flosslandscape.ipynb)）或 [原始仓库](https:\u002F\u002Fgithub.com\u002Ftomgoldstein\u002Floss-landscape)，以探索损失景观。运行所有单元格以获得模型在权重空间网格上的预测性能。损失景观可视化显示，ViT 的损失比 ResNet 更平坦。\n\n\n## 在损坏数据集上评估鲁棒性\n\n请参阅 [```robustness.ipynb```](robustness.ipynb)（[Colab 笔记本](https:\u002F\u002Fcolab.research.google.com\u002Fgithub\u002Fxxxnell\u002Fhow-do-vits-work\u002Fblob\u002Ftransformer\u002Frobustness.ipynb)），以评估对 [损坏数据集](https:\u002F\u002Fgithub.com\u002Fhendrycks\u002Frobustness) 的鲁棒性，例如 CIFAR-10-C 和 CIFAR-100-C。运行所有单元格以获得模型在包含 15 种不同损坏类型、每种类型有 5 个强度等级的数据集上的预测性能。\n\n## 如何将MSA应用到您自己的模型中\n\n\u003Cp align=\"center\">\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fxxxnell_how-do-vits-work_readme_fed8e3ab2184.gif\" style=\"width:90%;\">\n\u003C\u002Fp>\n\n我们发现，MSA是对卷积层的补充（而非替代），并且*在每个阶段的末端使用MSA*能够显著提升预测性能。基于这些发现，我们提出了以下构建规则：\n\n1. 从基准CNN模型的末尾开始，交替用MSA块替换卷积块。\n2. 如果新增的MSA块未能提升预测性能，则将较早阶段末尾的卷积块替换为MSA。\n3. 
在较晚的阶段中，为MSA块使用更多的注意力头和更高的隐藏维度。\n\n在上方的动画中，我们按照上述构建规则，逐一将ResNet中的卷积层替换为MSA。请注意，`c3`中的几个MSA反而降低了准确率，而位于`c2`末尾的MSA却提升了准确率。因此，令人惊讶的是，遵循适当构建规则的MSA模型即使在小数据集场景下（例如CIFAR-100）也能超越传统的CNN模型！\n\n\n\n## 使用L2正则化在增强数据集上研究损失景观与Hessian矩阵\n\n常见的两个错误是：在（1）*未考虑L2正则化的情况下*，以及在（2）*干净数据集上*研究损失景观和Hessian矩阵。然而，需要注意的是，神经网络通常是在增强数据集上结合L2正则化进行优化的。因此，更合适的做法是在*增强数据集上可视化“NLL + L2”*。如果在未使用L2正则化的干净数据集上测量相关指标，将会得到不准确的结果。\n\n\n\n## 引用\n\n如果您觉得本项目有用，请考虑引用论文并给本仓库标星🌟。如有任何意见或反馈，欢迎随时联系Namuk Park（邮箱：namuk.park@gmail.com，Twitter：[xxxnell](https:\u002F\u002Ftwitter.com\u002Fxxxnell)）。\n\n```\n@inproceedings{park2022how,\n  title={How Do Vision Transformers Work?},\n  author={Namuk Park and Songkuk Kim},\n  booktitle={International Conference on Learning Representations},\n  year={2022}\n}\n```\n\n\n## 许可协议\n\n所有代码均以Apache License 2.0许可提供给您。其中，CNN模型基于torchvision提供的模型，而后者采用BSD许可证；ViT模型则基于[PyTorch Image Models](https:\u002F\u002Fgithub.com\u002Frwightman\u002Fpytorch-image-models)和[Vision Transformer - Pytorch](https:\u002F\u002Fgithub.com\u002Flucidrains\u002Fvit-pytorch)，分别采用Apache 2.0和MIT许可证。\n\n版权所有，由维护者保留。","# How Do Vision Transformers Work 快速上手指南\n\n本指南基于 `how-do-vits-work` 开源项目，帮助开发者快速复现论文《How Do Vision Transformers Work?》中的实验，理解多头自注意力（MSA）与卷积（Conv）的互补关系，并运行 AlterNet 等模型。\n\n## 环境准备\n\n### 系统要求\n- **操作系统**: Linux (推荐) 或 macOS\n- **GPU**: 支持 CUDA 的 NVIDIA 显卡（可选，用于加速训练）\n- **Python**: 3.7+\n\n### 前置依赖\n项目主要依赖以下 Python 包：\n- `pytorch` (核心框架)\n- `matplotlib`, `seaborn` (可视化)\n- `notebook`, `ipywidgets` (Jupyter 支持)\n- `timm` (预训练模型库)\n- `einops` (张量操作)\n- `tensorboard` (训练监控)\n\n> **提示**：作者推荐使用 Docker 镜像 `pytorch\u002Fpytorch:1.9.0-cuda11.1-cudnn8-runtime` 以获得最一致的运行环境。\n\n## 安装步骤\n\n### 方案 A：使用 Docker（推荐）\n直接拉取官方推荐的镜像，无需手动配置环境：\n```bash\ndocker pull pytorch\u002Fpytorch:1.9.0-cuda11.1-cudnn8-runtime\ndocker run --gpus all -it --rm -v $(pwd):\u002Fworkspace pytorch\u002Fpytorch:1.9.0-cuda11.1-cudnn8-runtime bash\n```\n进入容器后，安装剩余依赖：\n```bash\npip install matplotlib notebook ipywidgets timm einops tensorboard seaborn\n```\n\n### 方案 B：本地 Conda\u002FPip 安装\n如果你偏好本地环境，建议先创建虚拟环境，并使用国内镜像源加速安装：\n\n```bash\n# 创建虚拟环境\nconda create -n vit-work python=3.8 -y\nconda activate vit-work\n\n# 安装 PyTorch (根据是否使用 GPU 选择命令，此处以 CUDA 11.1 为例，使用清华源)\npip install torch==1.9.0+cu111 torchvision==0.10.0+cu111 -f https:\u002F\u002Fpypi.tuna.tsinghua.edu.cn\u002Fsimple\n\n# 安装其他依赖 (使用清华源加速)\npip install matplotlib notebook ipywidgets timm einops tensorboard seaborn -i https:\u002F\u002Fpypi.tuna.tsinghua.edu.cn\u002Fsimple\n```\n\n### 获取代码\n克隆仓库并进入目录：\n```bash\ngit clone https:\u002F\u002Fgithub.com\u002Fxxxnell\u002Fhow-do-vits-work.git\ncd how-do-vits-work\n```\n\n## 基本使用\n\n本项目主要通过 Jupyter Notebook 进行图像分类实验、损失景观分析及频域分析。\n\n### 1. 运行图像分类示例\n最直接的上手方式是运行 `classification.ipynb`。该脚本涵盖了在 CIFAR-10, CIFAR-100 和 ImageNet 数据集上训练和测试模型的全流程。\n\n**启动 Jupyter Notebook:**\n```bash\njupyter notebook classification.ipynb\n```\n或者在 Colab 中直接运行：\n[打开 Colab Notebook](https:\u002F\u002Fcolab.research.google.com\u002Fgithub\u002Fxxxnell\u002Fhow-do-vits-work\u002Fblob\u002Ftransformer\u002Fclassification.ipynb)\n\n**操作步骤:**\n1. 打开 Notebook 后，依次点击 \"Cell\" -> \"Run All\" 执行所有单元格。\n2. 代码将自动下载数据、初始化模型（如 ResNet, ViT, AlterNet 等）、训练并评估。\n3. 输出指标包括：准确率 (Acc)、负对数似然 (NLL)、期望校准误差 (ECE) 等。\n\n### 2. 加载预训练模型\n项目提供了针对 CIFAR-100 的预训练权重（ResNet-50, ViT-Ti, PiT-Ti, Swin-Ti）。以下是在 Python 脚本中加载 **ResNet-50** 并转换为块序列（blocks）的示例代码：\n\n```python\nimport models\nimport torch\n\n# a. 
下载并加载 CIFAR-100 预训练模型\nurl = \"https:\u002F\u002Fgithub.com\u002Fxxxnell\u002Fhow-do-vits-work-storage\u002Freleases\u002Fdownload\u002Fv0.1\u002Fresnet_50_cifar100_691cc9a9e4.pth.tar\"\npath = \"checkpoints\u002Fresnet_50_cifar100_691cc9a9e4.pth.tar\"\nmodels.download(url=url, path=path)\n\nname = \"resnet_50\"\nmodel = models.get_model(name, num_classes=100,  # timm does not provide a ResNet for CIFAR\n                         stem=model_args.get(\"stem\", False))\nmap_location = \"cuda\" if torch.cuda.is_available() else \"cpu\"\ncheckpoint = torch.load(path, map_location=map_location)\nmodel.load_state_dict(checkpoint[\"state_dict\"])\n\n# b. 将模型转换为块序列 (blocks)\nblocks = [\n    model.layer0,\n    *model.layer1,\n    *model.layer2,\n    *model.layer3,\n    *model.layer4,\n    model.classifier,\n]\n```\n\n若需加载 **ViT-Ti** 模型，可使用以下代码片段：\n\n```python\nimport copy\nimport timm\nimport torch\nimport torch.nn as nn\nimport models\n\n# a. 下载并加载 ViT-Ti 预训练模型\nurl = \"https:\u002F\u002Fgithub.com\u002Fxxxnell\u002Fhow-do-vits-work-storage\u002Freleases\u002Fdownload\u002Fv0.1\u002Fvit_ti_cifar100_9857b21357.pth.tar\"\npath = \"checkpoints\u002Fvit_ti_cifar100_9857b21357.pth.tar\"\nmodels.download(url=url, path=path)\n\nmodel = timm.models.vision_transformer.VisionTransformer(\n    num_classes=100, img_size=32, patch_size=2,  # for CIFAR\n    embed_dim=192, depth=12, num_heads=3, qkv_bias=False,  # for ViT-Ti \n)\nmodel.name = \"vit_ti\"\nmodels.stats(model)\nmap_location = \"cuda\" if torch.cuda.is_available() else \"cpu\"\ncheckpoint = torch.load(path, map_location=map_location)\nmodel.load_state_dict(checkpoint[\"state_dict\"])\n\n# ... (后续 blocks 转换逻辑参考原仓库 classification.ipynb 或 README 中的完整代码)\n```\n\n### 3. 频域分析 (可选)\n若要深入分析 MSA 与 Conv 的频域特性（低通\u002F高通滤波行为），可运行：\n```bash\njupyter notebook fourier_analysis.ipynb\n```\n*注：对于 ImageNet-1K 的大规模实验，建议直接使用 `timm` 库加载模型以简化流程。*","某计算机视觉团队在开发医疗影像诊断模型时，面临小样本数据下卷积神经网络（CNN）泛化能力不足且难以融合 Transformer 优势的困境。\n\n### 没有 how-do-vits-work 时\n- 盲目堆叠自注意力机制（MSA），误以为其核心优势是捕捉长距离依赖，导致在小数据集上训练损失曲面非凸，模型难以收敛。\n- 将 MSA 简单视为广义卷积进行替换，忽略了两者频域特性的互补性（MSA 为低通、Conv 为高通），造成形状与纹理特征提取失衡。\n- 缺乏明确的架构设计准则，随意在网络各处插入 Transformer 模块，不仅未提升精度，反而增加了计算冗余和调试难度。\n- 面对小样本医疗数据，传统 CNN 容易过拟合纹理噪声，而直接套用标准 ViT 又因归纳偏置太弱导致性能甚至不如纯 CNN。\n\n### 使用 how-do-vits-work 后\n- 依据论文结论调整优化策略，利用 MSA 平坦化损失曲面的特性而非追求长程依赖，显著改善了小样本下的训练稳定性。\n- 遵循“互补原则”重构网络，利用 MSA 的形状偏置弥补 CNN 的纹理偏置，有效过滤高频噪声并增强对病灶形态的识别。\n- 采纳 AlterNet 架构设计规范，仅在每个计算阶段的末尾用 MSA 替换卷积块，以最小改动实现了超越纯 CNN 和标准 ViT 的诊断准确率。\n- 在小规模医疗影像数据集中，成功 harmonize（协调）了两种机制，既保留了 CNN 的特征变换能力，又发挥了 MSA 的预测聚合优势。\n\nhow-do-vits-work 通过揭示自注意力机制的本质属性，指导开发者从“盲目混搭”转向“科学互补”，以极低成本实现了小样本场景下模型性能的突破。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fxxxnell_how-do-vits-work_e7bab872.png","xxxnell","Namuk Park","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002Fxxxnell_1959a73d.jpg","Deep Learning Research Scientist","@evolutionaryscale","NYC",null,"namukpark.com","https:\u002F\u002Fgithub.com\u002Fxxxnell",[83,87],{"name":84,"color":85,"percentage":86},"Python","#3572A5",80.9,{"name":88,"color":89,"percentage":90},"Jupyter Notebook","#DA5B0B",19.1,821,77,"2026-03-05T07:28:55","Apache-2.0","Linux, macOS, Windows","非必需（支持 CPU 运行），若使用 GPU 推荐 NVIDIA 显卡，README 中提供的 Docker 镜像基于 CUDA 11.1","未说明（建议根据数据集大小配置，ImageNet 训练需较大内存）",{"notes":99,"python":100,"dependencies":101},"项目主要提供基于 Docker 镜像 'pytorch\u002Fpytorch:1.9.0-cuda11.1-cudnn8-runtime' 的运行环境。代码包含用于图像分类的 Jupyter Notebook（classification.ipynb），支持在 CIFAR-10\u002F100 和 ImageNet 上训练和测试。预训练模型需手动下载或通过网络加载。Seaborn 为可选依赖。","未说明（兼容 PyTorch 1.9.0 
的版本，通常对应 Python 3.6-3.9）",[102,103,104,105,106,107,108,109],"pytorch","matplotlib","notebook","ipywidgets","timm","einops","tensorboard","seaborn",[35,14],[112,113,114,115,102],"vision-transformer","transformer","self-attention","loss-landscape","2026-03-27T02:49:30.150509","2026-04-11T16:55:56.178034",[],[]]