[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-TheBrainLab--Awesome-Spiking-Neural-Networks":3,"tool-TheBrainLab--Awesome-Spiking-Neural-Networks":64},[4,17,27,35,43,56],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":16},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,3,"2026-04-05T11:01:52",[13,14,15],"开发框架","图像","Agent","ready",{"id":18,"name":19,"github_repo":20,"description_zh":21,"stars":22,"difficulty_score":23,"last_commit_at":24,"category_tags":25,"status":16},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",140436,2,"2026-04-05T23:32:43",[13,15,26],"语言模型",{"id":28,"name":29,"github_repo":30,"description_zh":31,"stars":32,"difficulty_score":23,"last_commit_at":33,"category_tags":34,"status":16},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 
图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",107662,"2026-04-03T11:11:01",[13,14,15],{"id":36,"name":37,"github_repo":38,"description_zh":39,"stars":40,"difficulty_score":23,"last_commit_at":41,"category_tags":42,"status":16},3704,"NextChat","ChatGPTNextWeb\u002FNextChat","NextChat 是一款轻量且极速的 AI 助手，旨在为用户提供流畅、跨平台的大模型交互体验。它完美解决了用户在多设备间切换时难以保持对话连续性，以及面对众多 AI 模型不知如何统一管理的痛点。无论是日常办公、学习辅助还是创意激发，NextChat 都能让用户随时随地通过网页、iOS、Android、Windows、MacOS 或 Linux 端无缝接入智能服务。\n\n这款工具非常适合普通用户、学生、职场人士以及需要私有化部署的企业团队使用。对于开发者而言，它也提供了便捷的自托管方案，支持一键部署到 Vercel 或 Zeabur 等平台。\n\nNextChat 的核心亮点在于其广泛的模型兼容性，原生支持 Claude、DeepSeek、GPT-4 及 Gemini Pro 等主流大模型，让用户在一个界面即可自由切换不同 AI 能力。此外，它还率先支持 MCP（Model Context Protocol）协议，增强了上下文处理能力。针对企业用户，NextChat 提供专业版解决方案，具备品牌定制、细粒度权限控制、内部知识库整合及安全审计等功能，满足公司对数据隐私和个性化管理的高标准要求。",87618,"2026-04-05T07:20:52",[13,26],{"id":44,"name":45,"github_repo":46,"description_zh":47,"stars":48,"difficulty_score":23,"last_commit_at":49,"category_tags":50,"status":16},2268,"ML-For-Beginners","microsoft\u002FML-For-Beginners","ML-For-Beginners 是由微软推出的一套系统化机器学习入门课程，旨在帮助零基础用户轻松掌握经典机器学习知识。这套课程将学习路径规划为 12 周，包含 26 节精炼课程和 52 道配套测验，内容涵盖从基础概念到实际应用的完整流程，有效解决了初学者面对庞大知识体系时无从下手、缺乏结构化指导的痛点。\n\n无论是希望转型的开发者、需要补充算法背景的研究人员，还是对人工智能充满好奇的普通爱好者，都能从中受益。课程不仅提供了清晰的理论讲解，还强调动手实践，让用户在循序渐进中建立扎实的技能基础。其独特的亮点在于强大的多语言支持，通过自动化机制提供了包括简体中文在内的 50 多种语言版本，极大地降低了全球不同背景用户的学习门槛。此外，项目采用开源协作模式，社区活跃且内容持续更新，确保学习者能获取前沿且准确的技术资讯。如果你正寻找一条清晰、友好且专业的机器学习入门之路，ML-For-Beginners 
将是理想的起点。",84991,"2026-04-05T10:45:23",[14,51,52,53,15,54,26,13,55],"数据工具","视频","插件","其他","音频",{"id":57,"name":58,"github_repo":59,"description_zh":60,"stars":61,"difficulty_score":10,"last_commit_at":62,"category_tags":63,"status":16},3128,"ragflow","infiniflow\u002Fragflow","RAGFlow 是一款领先的开源检索增强生成（RAG）引擎，旨在为大语言模型构建更精准、可靠的上下文层。它巧妙地将前沿的 RAG 技术与智能体（Agent）能力相结合，不仅支持从各类文档中高效提取知识，还能让模型基于这些知识进行逻辑推理和任务执行。\n\n在大模型应用中，幻觉问题和知识滞后是常见痛点。RAGFlow 通过深度解析复杂文档结构（如表格、图表及混合排版），显著提升了信息检索的准确度，从而有效减少模型“胡编乱造”的现象，确保回答既有据可依又具备时效性。其内置的智能体机制更进一步，使系统不仅能回答问题，还能自主规划步骤解决复杂问题。\n\n这款工具特别适合开发者、企业技术团队以及 AI 研究人员使用。无论是希望快速搭建私有知识库问答系统，还是致力于探索大模型在垂直领域落地的创新者，都能从中受益。RAGFlow 提供了可视化的工作流编排界面和灵活的 API 接口，既降低了非算法背景用户的上手门槛，也满足了专业开发者对系统深度定制的需求。作为基于 Apache 2.0 协议开源的项目，它正成为连接通用大模型与行业专有知识之间的重要桥梁。",77062,"2026-04-04T04:44:48",[15,14,13,26,54],{"id":65,"github_repo":66,"name":67,"description_en":68,"description_zh":69,"ai_summary_zh":69,"readme_en":70,"readme_zh":71,"quickstart_zh":72,"use_case_zh":73,"hero_image_url":74,"owner_login":75,"owner_name":76,"owner_avatar_url":77,"owner_bio":78,"owner_company":79,"owner_location":79,"owner_email":79,"owner_twitter":79,"owner_website":79,"owner_url":80,"languages":79,"stars":81,"forks":82,"last_commit_at":83,"license":79,"difficulty_score":84,"env_os":85,"env_gpu":86,"env_ram":86,"env_deps":87,"category_tags":90,"github_topics":91,"view_count":23,"oss_zip_url":79,"oss_zip_packed_at":79,"status":16,"created_at":110,"updated_at":111,"faqs":112,"releases":113},2569,"TheBrainLab\u002FAwesome-Spiking-Neural-Networks","Awesome-Spiking-Neural-Networks","A paper list of spiking neural networks, including papers, codes, and related websites.  
本仓库收集脉冲神经网络相关的顶会顶刊以及CNS论文和代码，正在持续更新中。","Awesome-Spiking-Neural-Networks 是一个专注于脉冲神经网络（SNN）领域的开源资源合集，旨在为研究者提供一站式的论文、代码及相关网站索引。随着类脑计算和神经形态工程的兴起，SNN 相关研究呈现爆发式增长，但高质量文献分散于各大顶会顶刊，检索难度较大。该仓库通过持续追踪并整理来自 Nature、Science、NeurIPS、ICLR、CVPR 等顶级会议与期刊的最新成果，有效解决了科研人员难以全面掌握前沿动态的痛点。\n\n无论是正在探索低功耗人工智能算法的研究人员，还是希望复现经典模型的开发者，都能从中快速定位所需资源。其独特亮点在于极高的更新频率与广泛的覆盖范围，不仅收录了传统的 CNS 领域论文，还紧跟趋势纳入了 2026 年 AAAI、ICLR 等会议中关于 SNN 鲁棒性训练、混合架构（如 Spiking Mamba-Transformer）及事件驱动攻击等前沿课题。作为一个由社区共同维护的知识库，它鼓励用户通过 Pull Request 贡献被遗漏的优质工作，非常适合希望深入理解脉冲神经网络机制、寻找创新灵感或构建基准测试的专业人士使用。","# Awesome Spiking Neural Networks[![Awesome](https:\u002F\u002Fcdn.rawgit.com\u002Fsindresorhus\u002Fawesome\u002Fd7305f38d29fed78fa85652e3a63e154dd8e8829\u002Fmedia\u002Fbadge.svg)](https:\u002F\u002Fgithub.com\u002Fsindresorhus\u002Fawesome)\n\nCollect some spiking neural network papers & codes.  (**Actively keep updating**)\n\nIf you own or find some overlooked SNN papers, you can add them to this document by pull request. \n\n## News\n\n\u003Cdetails>\n\u003Csummary> News 2026 \u003C\u002Fsummary>\n\n[2026.03.24] Update SNN-related papers in AAAI 2026 (33 papers), ICLR 2026 (30 papers).\n\n\u003C\u002Fdetails>\n\n\n\n\u003Cdetails>\n\u003Csummary> News 2025 \u003C\u002Fsummary>\n\n[2025.11.01] Update SNN-related papers in Nature, Science 2025 (11 papers).\n\n[2025.10.31] Update SNN-related papers in Neurips 2025 (30 papers).\n\n[2025.05.26] Update SNN-related papers in ICML 2025 (18 papers), IJCAI (11 papers), ICCV (7 papers), and ACM MM (9 papers).\n\n[2025.04.11] Update SNN-related papers in ICLR 2025 (11 papers), CVPR 2025 (14 papers).\n\n[2025.02.06] Update SNN-related papers in AAAI 2025 (18 papers).\n\n\u003C\u002Fdetails>\n\n\u003Cdetails>\n\u003Csummary> News 2024 \u003C\u002Fsummary>\n\n[2024.11.13] Update SNN-related papers in NeurIPS 2024 (18 papers).\n\n[2024.10.31] Update SNN-related papers in ACM MM 2024 (5 papers).\n\n[2024.10.15] Update SNN-related papers in ECCV 2024 (8 papers).\n\n[2024.05.29] Update SNN-related 
papers in ICML 2024 (13 papers), IJCAI 2024 (5).\n\n[2024.04.29] Update SNN-related papers in ICLR 2024 (17 papers), AAAI 2024 (8), CVPR 2024 (3).\n\n\u003C\u002Fdetails>\n\n\n\u003Cdetails>\n\n\u003Csummary> News 2023 \u003C\u002Fsummary>\n\n[2023.12.31] Update SNN-related papers in TPAMI 2023, Frontiers in Neuroscience 2023.\n\n[2023.10.31] Update SNN-related papers in CVPR 2023 (2 papers), ICML 2023 (2), IJCAI 2023 (3), and ICCV 2023 (10), NeurIPS 2023 (12).\n\n[2023.06.25] Update SNN-related papers in ICLR 2023 (6 papers), AAAI 2023 (6 papers).\n\n\u003C\u002Fdetails>\n\n\n\n## Papers\n\n### 2026\n\n**Nature, Science, Cell**\n\n**AAAI, ICLR**\n- Robustify Spiking Neural Networks via Dominant Singular Deflation under Heterogeneous Training Vulnerability (**ICLR 2026**). [[paper](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10010693)]\n- A Brain-Inspired Gating Mechanism Unlocks Robust Computation in Spiking Neural Networks (**ICLR 2026**). [[paper](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10011430)]\n- Training Deep Normalization-Free Spiking Neural Networks with Lateral Inhibition. (**ICLR 2026**). [[paper](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10009258)]\n- 3DSMT: A Hybrid Spiking Mamba-Transformer for Point Cloud Analysis. (**ICLR 2026**). [[paper](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10010096)]\n- Neural Dynamics Self-Attention for Spiking Transformers. (**ICLR 2026**). [[paper](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10007889)]\n- Time Is All It Takes: Spike-Retiming Attacks on Event-Driven Spiking Neural Networks. (**ICLR 2026**). [[paper](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10008650)]\n- Cannistraci-Hebb Training on Ultra-Sparse Spiking Neural Networks. (**ICLR 2026**). 
[[paper](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10007264)]\n- CaRe-BN: Precise Moving Statistics for Stabilizing Spiking Neural Networks in Reinforcement Learning. (**ICLR 2026**). [[paper](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10011021)]\n- Otters: An Energy-Efficient Spiking Transformer via Optical Time-to-First-Spike Encoding. (**ICLR 2026**). [[paper](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10007423)]\n- Online Pseudo-Zeroth-Order Training of Neuromorphic Spiking Neural Networks. (**ICLR 2026**). [[paper](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10011366)]\n- SAFA-SNN: Sparsity-Aware On-Device Few-Shot Class-Incremental Learning with Fast-Adaptive Structure of Spiking Neural Network. (**ICLR 2026**). [[paper](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10011088)]\n- Robust Spiking Neural Networks Against Adversarial Attacks. (**ICLR 2026**). [[paper](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10007241)]\n- TP-Spikformer: Token Pruned Spiking Transformer. (**ICLR 2026**). [[paper](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10010064)]\n- Breaking Gradient Temporal Collinearity for Robust Spiking Neural Networks. (**ICLR 2026**). [[paper](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10006881)]\n- Random Spiking Neural Networks are Stable and Spectrally Simple. (**ICLR 2026**). [[paper](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10009762)]\n- Many Eyes, One Mind: Temporal Multi-Perspective and Progressive Distillation for Spiking Neural Networks. (**ICLR 2026**). [[paper](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10009850)]\n- Robust Selective Activation with Randomized Temporal K-Winner-Take-All in Spiking Neural Networks for Continual Learning. (**ICLR 2026**). 
[[paper](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10006915)]\n- Towards Lossless Memory-efficient Training of Spiking Neural Networks via Gradient Checkpointing and Spike Compression. (**ICLR 2026**). [[paper](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10007453)]\n- Beyond Linear Processing: Dendritic Bilinear Integration in Spiking Neural Networks. (**ICLR 2026**). [[paper](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10011474)]\n- Spiking Discrepancy Transformer for Point Cloud Analysis. (**ICLR 2026**). [[paper](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10011321)]\n- PredNext: Explicit Cross-View Temporal Prediction for Unsupervised Learning in Spiking Neural Networks. (**ICLR 2026**). [[paper](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10010014)]\n- Difference Predictive Coding for Training Spiking Neural Networks. (**ICLR 2026**). [[paper](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10007923)]\n- Advancing Spatiotemporal Representations in Spiking Neural Networks via Parametric Invertible Transformation. (**ICLR 2026**). [[paper](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10011650)]\n- Fractional-Order Spiking Neural Network. (**ICLR 2026**). [[paper](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10009869)]\n- SpikeGen: Decoupled “Rods and Cones” Visual Representation Processing with Latent Generative Framework. (**ICLR 2026**). [[paper](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10009062)]\n- Biologically Plausible Learning via Bidirectional Spike-Based Distillation. (**ICLR 2026**). [[paper](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10009918)]\n- Distribution-Aware Multi-Granularity Phase Coding: Towards Lower Conversion Error for Spike-Driven Large Language Models. (**ICLR 2026**). 
[[paper](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10007555)]\n- SpikeStereoNet: A Brain-Inspired Framework for Stereo Depth Estimation from Spike Streams. (**ICLR 2026**). [[paper](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10007680)]\n- SpikePingpong: Spike Vision-based Fast-Slow Pingpong Robot System. (**ICLR 2026**). [[paper](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10008450)]\n- Spike-based Digital Brain: a novel fundamental model for brain activity analysis. (**ICLR 2026**). [[paper](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10009923)]\n- Activation-wise Propagation: A One-Timestep Strategy for Spiking Neural Networks (**AAAI 2026**). [[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F37187)]\n- SpikingIR: A Novel Converted Spiking Neural Network for Efficient Image Restoration (**AAAI 2026**). [[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F37769)] \n- A Closer Look at Knowledge Distillation in Spiking Neural Network Training (**AAAI 2026**). [[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F37769)] \n- Optimization Method for Surrogate Function in Spiking Neural Networks Based on Membrane Potential Distribution (**AAAI 2026**). [[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F39769)] \n- Parallel Training Time-to-First-Spike Spiking Neural Networks (**AAAI 2026**). [[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F37149)] \n- HardF-SNN: Hardware-Friendly Quantization for Spiking Neural Networks with Efficient Integer-Arithmetic-Only Inference (**AAAI 2026**). 
[[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F37174)] \n- SpikCommander: A High-performance Spiking Transformer with Multi-view Learning for Efficient Speech Command Recognition (**AAAI 2026**). [[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F37194)] \n- Timestep-Compressed Attack on Spiking Neural Networks Through Timestep-Level Backpropagation (**AAAI 2026**). [[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F37479)] \n- Firing Bits Where It Matters: Spiking-Guided Just Recognizable Distortion Modeling for Machine-Centric Video Coding (**AAAI 2026**). [[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F38935)] \n- MPD-SGR: Robust Spiking Neural Networks with Membrane Potential Distribution-Driven Surrogate Gradient Regularization (**AAAI 2026**). [[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F37166)] \n- Training-Free ANN-to-SNN Conversion for High-Performance Spiking Transformers (**AAAI 2026**). [[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F37195)] \n- Spikingformer: A Key Foundation Model for Spiking Neural Networks (**AAAI 2026**). [[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F37207)] \n- I2E: Real-Time Image-to-Event Conversion for High-Performance Spiking Neural Networks (**AAAI 2026**). [[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F37179)] \n- S³: Spiking Neurons as an Isolating Segmenter for Brain Signal Decoding (**AAAI 2026**). [[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F38869)] \n- TDSNNs: Competitive Topographic Deep Spiking Neural Networks for Visual Cortex Modeling (**AAAI 2026**). 
[[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F37208)] \n- Temporal Dynamics Enhancer for Directly Trained Spiking Object Detectors (**AAAI 2026**). [[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F37178)] \n- Spiking Heterogeneous Graph Attention Networks (**AAAI 2026**). [[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F39068)] \n- Oligodendrocyte-Driven Spiking Neural Model (**AAAI 2026**). [[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F39306)] \n- HLML-SNN：Fast Continual Learning in Spiking Neural Networks Achieved via Hebbian Learning-Driven Meta-Learning (**AAAI 2026**). [[paper]()] \n- DS-ATGO: Dual-Stage Synergistic Learning via Forward Adaptive Threshold and Backward Gradient Optimization for Spiking Neural Networks (**AAAI 2026**). [[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F37165)] \n- Pseudo-Spiking Neurons: A Noise-Based Training Framework for Heterogeneous-Latency Spiking Neural Networks (**AAAI 2026**). [[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F40086)] \n- Spatial-Frequency Spiking Neural Network for Underwater Object Detection (**AAAI 2026**). [[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F39109)] \n- SFedHIFI: Fire Rate-Based Heterogeneous Information Fusion for Spiking Federated Learning (**AAAI 2026**).[[paper]()] \n- Dynamic Weight Adaptation in Spiking Neural Networks Inspired by Biological Homeostasis (**AAAI 2026**).[[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F40146)] \n- GT-SNT: A Linear-Time Transformer for Large-Scale Graphs via Spiking Node Tokenization (**AAAI 2026**). 
[[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F38667)] \n- Exploring the Potentials of Spiking Neural Networks for Image Deraining (**AAAI 2026**). [[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F37295)] \n- Spiking-Aided Neural Architecture for Efficient and Robust WiFi Sensing (**AAAI 2026**). [[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F39589)] \n- Stabilizing Spiking Neurons Through Biologically Inspired Polarization (**AAAI 2026**). [[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F39435)] \n- Spike Stream Memory Transfer for Dynamic Scene Reconstruction (**AAAI 2026**). \n- LAS: Loss-less ANN-SNN Conversion for Fully Spike-Driven Large Language Models (**AAAI 2026**). [[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F37151)] \n- Spike Imaging Velocimetry: Dense Motion Estimation of Fluids Using Spike Streams (**AAAI 2026**).\n- BulletTime4D: Towards High Spatio-Temporal Resolution Dynamic Scene Rendering via Spike-Guided Stereo Vision (**AAAI 2026**).\n- Robust Noise Modeling for Spike Camera via Time-Interval Quantification and Spike-DSLR Multimodal Dataset in Low-Light Imaging (**AAAI 2026**). \n- HLML-SNN：Fast Continual Learning in Spiking Neural Networks Achieved via Hebbian Learning-Driven Meta-Learning(**AAAI 2026**). [[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F39942)] \n- Generalized Threshold Optimization with Harmony Multi-Threshold Neurons for Accurate ANN-to-SNN Conversion (**AAAI 2026**). [[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F37206)] \n- HypoxSpike: Ternary Spiking Neural Network for Opioid Overdose Detection (**AAAI 2026**). 
[[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F41237)] \n- Bi-Spectrum Distillation: Addressing Spectral Mismatch in ANN-SNN Knowledge Transfer (**AAAI 2026**). [[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F40085)] \n- Towards Training-Free and Accurate ANN-to-SNN Conversion via Activation-Aware Redistribution (**AAAI 2026**). [[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F37148)] \n\n\n### 2025\n\n**Nature, Science, Cell**\n- Spiking neural networks with fatigue spike-timing-dependent plasticity learning using hybrid memristor arrays (**Nature Electronics, 2026**). [[paper](https:\u002F\u002Fwww.nature.com\u002Farticles\u002Fs41928-025-01554-4)]\n- Neuromorphic computing paradigms enhance robustness through spiking neural networks (**Nature Communications, 2025**). [[paper](https:\u002F\u002Fwww.nature.com\u002Farticles\u002Fs41467-025-65197-x)][[code](https:\u002F\u002Fgithub.com\u002FDingJianhao\u002FSNNEnhancingRobustness)]\n- A spiking artificial neuron based on one diffusive memristor, one transistor and one resistor (**Nature Electronics, 2025**). [[paper](https:\u002F\u002Fwww.nature.com\u002Farticles\u002Fs41928-025-01488-x)][[code](https:\u002F\u002Fgithub.com\u002FGnohzZ\u002FBrain-Dynamics-Modeling-Acceleration)]\n- A biologically inspired artificial neuron with intrinsic plasticity based on monolayer molybdenum disulfide (**Nature Electronics, 2025**). [[paper](https:\u002F\u002Fwww.nature.com\u002Farticles\u002Fs41928-025-01433-y)]\n- Modeling macroscopic brain dynamics with brain-inspired computing architecture (**Nature Communications, 2025**). [[paper](https:\u002F\u002Fwww.nature.com\u002Farticles\u002Fs41467-025-64470-3)]\n- A frugal Spiking Neural Network for unsupervised multivariate temporal pattern classification and multichannel spike sorting (**Nature Communications, 2025**). 
[[paper](https:\u002F\u002Fwww.nature.com\u002Farticles\u002Fs41467-025-64231-2)][[code](https:\u002F\u002Fcodeocean.com\u002Fcapsule\u002F9829487\u002Ftree)]\n- Efficient and robust temporal processing with neural oscillations modulated spiking neural networks (**Nature Communications, 2025**). [[paper](https:\u002F\u002Fwww.nature.com\u002Farticles\u002Fs41467-025-63771-x)][[code](https:\u002F\u002Fgithub.com\u002FYinsongYan\u002FRhythm-SNN)]\n- Covariant spatio-temporal receptive fields for spiking neural networks (**Nature Communications, 2025**). [[paper](https:\u002F\u002Fwww.nature.com\u002Farticles\u002Fs41467-025-63493-0)][[code](https:\u002F\u002Fgithub.com\u002Fjegp\u002Fnrf)]\n- A multisynaptic spiking neuron for simultaneously encoding spatial and temporal dynamics in spiking neural networks (**Nature Communications, 2025**). [[paper](https:\u002F\u002Fdoi.org\u002F10.1038\u002Fs41467-025-62251-6)][[code](https:\u002F\u002Fgithub.com\u002Ffanliangwei\u002FMultisynaptic-spiking-neurons)]\n- Advancing spatio-temporal processing through adaptation in spiking neural networks (**Nature Communications, 2025**). [[paper](https:\u002F\u002Fwww.nature.com\u002Farticles\u002Fs41467-025-60878-z)][[code](https:\u002F\u002Fgithub.com\u002FIGITUGraz\u002FSE-adlif)]\n- Topology optimization of random memristors for input-aware dynamic SNN (**Science Advances, 2025**). [[paper](https:\u002F\u002Fwww.science.org\u002Fdoi\u002F10.1126\u002Fsciadv.ads5340)][[code](https:\u002F\u002Fgithub.com\u002Fbo-wang-up\u002FPRIME)]\n- Fully memristive spiking neural network for energy-efficient graph learning (**Science Advances, 2025**). [[paper](https:\u002F\u002Fwww.science.org\u002Fdoi\u002F10.1126\u002Fsciadv.adv2312)]\n  \n**AAAI, ICLR, CVPR, ICML, IJCAI, ICCV, ACM MM, Neurips**\n- Adaptive Surrogate Gradients for Sequential Reinforcement Learning in Spiking Neural Networks (**Neurips 2025**). 
[[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=oGmROC4e4W)] [[code](https:\u002F\u002Fgithub.com\u002Fkorneelf1\u002FSpikingCrazyflie)]\n- Toward Relative Positional Encoding in Spiking Transformers (**Neurips 2025**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=MDWJlTWZHH)] [[code](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002FSeqSNN)]\n- High Dynamic Range Imaging with Time-Encoding Spike Camera (**Neurips 2025**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=flIdch9eTf)] [[code](https:\u002F\u002Fgithub.com\u002Fzkzhu123\u002FTESC)]\n- Bipolar Self-attention for Spiking Transformers (**Neurips 2025**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=nG45z7lJ7D)] [[code](https:\u002F\u002Fopenreview.net\u002Fattachment?id=nG45z7lJ7D&name=supplementary_material)]\n- Spike-timing-dependent Hebbian learning as noisy gradient descent (**Neurips 2025**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=YTbLri0siT)]\n- Spike-RetinexFormer: Rethinking Low-light Image Enhancement with Spiking Neural Networks (**Neurips 2025**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=8W8SRZIpJP)]\n- SPACE: SPike-Aware Consistency Enhancement for Test-Time Adaptation in Spiking Neural Networks (**Neurips 2025**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=Di0RasgbQ6)] [[code](https:\u002F\u002Fgithub.com\u002Fethanxyluo\u002FSPACE)]\n- MI-TRQR: Mutual Information-Based Temporal Redundancy Quantification and Reduction for Energy-Efficient Spiking Neural Networks (**Neurips  2025**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=NRqGpUAjV9)] [[code](https:\u002F\u002Fgithub.com\u002Fdfxue\u002FMI-TRQR)]\n- Spik-NeRF: Spiking Neural Networks for Neural Radiance Fields (**Neurips 2025**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=047VzZEpnu)] \n- Dendritic Resonate-and-Fire Neuron for Effective and Efficient Long Sequence Modeling (**Neurips 2025**). 
[[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=ywzGKDStrm)] [[code](https:\u002F\u002Fopenreview.net\u002Fattachment?id=ywzGKDStrm&name=supplementary_material)]\n- Spiking Neural Networks Need High-Frequency Information (**Neurips 2025**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=owNPAl7LNK)] [[code](https:\u002F\u002Fgithub.com\u002Fbic-L\u002FMaxFormer)]\n- Activity Pruning for Efficient Spiking Neural Networks (**Neurips 2025**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=zjOXZEXQKZ)] [[code](https:\u002F\u002Fgithub.com\u002Fputshua\u002FActivity-Pruning-SNN)]\n- Multiplication-Free Parallelizable Spiking Neurons with Efficient Spatio-Temporal Dynamics (**Neurips 2025**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=4q5ZYP0ynu)] [[code](https:\u002F\u002Fgithub.com\u002FPengXue0812\u002FMultiplication-Free-Parallelizable-Spiking-Neurons-with-Efficient-Spatio-Temporal-Dynamics)]\n- SpikingVTG: A Spiking Detection Transformer for Video Temporal Grounding (**Neurips 2025**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=SkhF3cuyev)] [[code](https:\u002F\u002Fopenreview.net\u002Fattachment?id=SkhF3cuyev&name=supplementary_material)]\n- S$^2$M-Former: Spiking Symmetric Mixing Branchformer for Brain Auditory Attention Detection (**Neurips 2025**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=WtMuGdHvh6)] [[code](https:\u002F\u002Fgithub.com\u002FJackieWang9811\u002FS2M-Former)]\n- Local-Global Coupling Spiking Graph Transformer for Brain Disorders Diagnosis from Two Perspectives (**Neurips 2025**).  [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=kkhRTTmXFV)]\n- A Scalable, Causal, and Energy Efficient Framework for Neural Decoding with Spiking Neural Networks (**Neurips 2025**). 
[[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=oAbaGU9N1X)] [[code](https:\u002F\u002Fspikachu-bci.github.io\u002F)]\n- Spiking Meets Attention: Efficient Remote Sensing Image Super-Resolution with Attention Spiking Neural Networks (**Neurips 2025**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=VaE33hkqmg)] [[code](https:\u002F\u002Fgithub.com\u002FXY-boy\u002FSpikeSR)]\n- Adaptive Fission: Post-training Encoding for Low-latency Spike Neural Networks (**Neurips 2025**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=2zZzdAMyYi)] [[code](https:\u002F\u002Fgithub.com\u002FJiangYizhou16\u002FAdaptive-Fission)]\n- S$^2$NN: Sub-bit Spiking Neural Networks (**Neurips 2025**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=hFsCuVc1cB)] [[code](https:\u002F\u002Fopenreview.net\u002Fattachment?id=hFsCuVc1cB&name=supplementary_material)]\n- Seemingly Redundant Modules Enhance Robust Odor Learning in Fruit Flies (**Neurips 2025**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=d6WUTRJqP3)] [[code](https:\u002F\u002Fgithub.com\u002FL-0cean\u002FFly-SNN)]\n- Fully Spiking Neural Networks for Unified Frame-Event Object Tracking (**Neurips 2025**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=FooiwsnEH9)] [[code](https:\u002F\u002Fgithub.com\u002FNoctis-A\u002FSpikeFET)]\n- Enhanced Self-Distillation Framework for Efficient Spiking Neural Network Training (**Neurips 2025**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=dpmMg6aK1D)] [[code](openreview.net\u002Fpdf?id=dpmMg6aK1D)]\n- Learning the Plasticity: Plasticity-Driven Learning Framework in Spiking Neural Networks (**Neurips 2025**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=fllsm01JWS)] \n- HetSyn: Versatile Timescale Integration in Spiking Neural Networks via Heterogeneous Synapses (**Neurips 2025**). 
[[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=YYz4fumVed)] [[code](https:\u002F\u002Fgithub.com\u002Fdzcgood\u002FHetSyn)]\n- Unveiling the Spatial-temporal Effective Receptive Fields of Spiking Neural Networks (**Neurips 2025**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=tYnJC5ba6j)] [[code](https:\u002F\u002Fgithub.com\u002FEricZhang1412\u002FSpatial-temporal-ERF)]\n- Brain-like Variational Inference (**Neurips 2025**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=573IcLusXq)] [[code](https:\u002F\u002Fgithub.com\u002Fhadivafaii\u002FIterativeVAE)]\n- Proxy Target: Bridging the Gap Between Discrete Spiking Neural Networks and Continuous Control (**Neurips 2025**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=RRBve5GwjS)] [[code](openreview.net\u002Fpdf?id=RRBve5GwjS)]\n- Synergy Between the Strong and the Weak: Spiking Neural Networks are Inherently Self-Distillers (**Neurips 2025**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=BrmR69AhUg)]\n- Spike4DGS: Towards High-Speed Dynamic Scene Recontruction with 4D Gaussian Splatting via a Spike Camera Array (**Neurips 2025**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=V5efEA8nIr)] [[code](https:\u002F\u002Fgithub.com\u002FQinghongye\u002FSpike4DGS)]\n- Toward End-to-End Bearing Fault Diagnosis for Industrial Scenarios with Spiking Neural Networks (**KDD 2025**). [[paper](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2408.11067)][[code](https:\u002F\u002Fgithub.com\u002Fyqding326\u002FMRA-SNN)]\n- DSF-Net: Dynamic Sparse Fusion of Event-RGB via Spike-Triggered Attention for High-Speed Detection (**ACM MM 2025**). \n- ESOD: Event-Based Small Object Detection (**ACM MM 2025**). \n- E-4DGS: High-Fidelity Dynamic Reconstruction from the Multi-view Event Cameras (**ACM MM 2025**). \n- Incorporating the Refractory Period into Spiking Neural Networks through Spike-Triggered Threshold Dynamics (**ACM MM 2025**). 
\n- Signal-SGN: A Spiking Graph Convolutional Network for Skeleton Action Recognition via Learning Temporal-Frequency Dynamics (**ACM MM 2025**). \n- SGM-Transformer: Rethinking Gradient Information Loss and Compensation in Spiking Neural Networks (**ACM MM 2025**). \n- Advanced SpikingYOLOX: Extending Spiking Neural Network on Object Detection with Spike-based Partial Self-Attention and 2D-Spiking Transformer (**ACM MM 2025**). \n- Spiking Neural Networks with Temporal Attention-Guided Adaptive Fusion for imbalanced Multi-modal Learning (**ACM MM 2025**). \n- Temporal-coded Spiking Transformer (**ACM MM 2025**). \n- ClearSight: Human Vision-Inspired Solutions for Event-Based Motion Deblurring (**ICCV 2025**). [[paper](https:\u002F\u002Ficcv.thecvf.com\u002Fvirtual\u002F2025\u002Fposter\u002F1849)]  \n- Robust Unfolding Network for HDR Imaging with Modulo Cameras (**ICCV 2025**). [[paper](https:\u002F\u002Ficcv.thecvf.com\u002Fvirtual\u002F2025\u002Fposter\u002F1986)]    \n- SpikeDiff: Zero-shot High-Quality Video Reconstruction from Chromatic Spike Camera and Sub-millisecond Spike Streams (**ICCV 2025**). [[paper](https:\u002F\u002Ficcv.thecvf.com\u002Fvirtual\u002F2025\u002Fposter\u002F1181)]    \n- Noise-Modeled Diffusion Models for Low-Light Spike Image Restoration (**ICCV 2025**). [[paper](https:\u002F\u002Ficcv.thecvf.com\u002Fvirtual\u002F2025\u002Fposter\u002F1467)]    \n- Efficient Spiking Point Mamba for Point Cloud Analysis (**ICCV 2025**). [[paper](https:\u002F\u002Ficcv.thecvf.com\u002Fvirtual\u002F2025\u002Fposter\u002F2271)]    \n- SpikePack: Enhanced Information Flow in Spiking Neural Networks with High Hardware Compatibility (**ICCV 2025**). [[paper](https:\u002F\u002Ficcv.thecvf.com\u002Fvirtual\u002F2025\u002Fposter\u002F345)]   \n- SpiLiFormer: Enhancing Spiking Transformers with Lateral Inhibition (**ICCV 2025**). 
[[paper](https:\u002F\u002Ficcv.thecvf.com\u002Fvirtual\u002F2025\u002Fposter\u002F1753)]   \n- Adaptive Gradient Learning for Spiking Neural Networks by Exploiting Membrane Potential Dynamics (**IJCAI 2025**). [[paper](https:\u002F\u002Fijcai-preprints.s3.us-west-1.amazonaws.com\u002F2025\u002F7783.pdf)]  \n- ILIF: Temporal Inhibitory Leaky Integrate-and-Fire Neuron for Overactivation in Spiking Neural Networks (**IJCAI 2025**). [[paper](https:\u002F\u002Fijcai-preprints.s3.us-west-1.amazonaws.com\u002F2025\u002F4566.pdf)]  \n- Neuromorphic Sequential Arena: A Benchmark for Neuromorphic Temporal Processing (**IJCAI 2025**). [[paper](https:\u002F\u002Fijcai-preprints.s3.us-west-1.amazonaws.com\u002F2025\u002F7408.pdf)]  \n- MSVIT: Improving Spiking Vision Transformer Using Multi-scale Attention Fusion (**IJCAI 2025**). [[paper](https:\u002F\u002Fijcai-preprints.s3.us-west-1.amazonaws.com\u002F2025\u002F7378.pdf)] \n- A Fast and Accurate ANN-SNN Conversion Algorithm with Negative Spikes (**IJCAI 2025**). [[paper](https:\u002F\u002Fijcai-preprints.s3.us-west-1.amazonaws.com\u002F2025\u002F6149.pdf)] \n- ECC-SNN: Cost-Effective Edge-Cloud Collaboration for Spiking Neural Networks (**IJCAI 2025**). [[paper](https:\u002F\u002Fijcai-preprints.s3.us-west-1.amazonaws.com\u002F2025\u002F3167.pdf)] \n- Cost-Effective On-Device Sequential Recommendation with Spiking Neural Networks (**IJCAI 2025**). [[paper](https:\u002F\u002Fijcai-preprints.s3.us-west-1.amazonaws.com\u002F2025\u002F3150.pdf)]\n- SCNNs: Spike-based Coupling Neural Networks for Understanding Structural-Functional Relationships in the Human Brain (**IJCAI 2025**). [[paper](https:\u002F\u002Fijcai-preprints.s3.us-west-1.amazonaws.com\u002F2025\u002F1337.pdf)]\n- Exploiting Label Skewness for Spiking Neural Networks in Federated Learning (**IJCAI 2025**). [[paper](https:\u002F\u002Fijcai-preprints.s3.us-west-1.amazonaws.com\u002F2025\u002F1298.pdf)]\n- Binary Event-Driven Spiking Transformer (**IJCAI 2025**). 
[[paper](https:\u002F\u002Fijcai-preprints.s3.us-west-1.amazonaws.com\u002F2025\u002F1206.pdf)]\n- Tackling Long-Tailed Data Challenges in Spiking Neural Networks via Heterogeneous Knowledge Distillation (**IJCAI 2025**). [[paper](https:\u002F\u002Fijcai-preprints.s3.us-west-1.amazonaws.com\u002F2025\u002F787.pdf)]\n- SpikeVideoFormer: An Efficient Spike-Driven Video Transformer with Hamming Attention and $\\mathcal{O}(T)$ Complexity (**ICML 2025**). [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.10352)]\n- Efficient ANN-SNN Conversion with Error Compensation Learning (**ICML 2025**). [[paper](https:\u002F\u002Fwww.arxiv.org\u002Fabs\u002F2506.01968)]\n- Differential Coding for Training-Free ANN-to-SNN Conversion (**ICML 2025**). [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.00301)] \n- Efficient Logit-based Knowledge Distillation of Deep Spiking Neural Networks for Full-Range Timestep Deployment (**ICML 2025**). [[paper](https:\u002F\u002Ficml.cc\u002Fvirtual\u002F2025\u002Fposter\u002F44825)] \n- ReverB-SNN: Reversing Bit of the Weight and Activation for Spiking Neural Networks (**ICML 2025**). [[paper](https:\u002F\u002Ficml.cc\u002Fvirtual\u002F2025\u002Fposter\u002F43640)] \n- TTFSFormer: A TTFS-based Lossless Conversion of Spiking Transformer (**ICML 2025**). [[paper](https:\u002F\u002Ficml.cc\u002Fvirtual\u002F2025\u002Fposter\u002F44159)] \n- BSO: Binary Spiking Online Optimization (**ICML 2025**). [[paper](https:\u002F\u002Ficml.cc\u002Fvirtual\u002F2025\u002Fposter\u002F45087)] \n- Delay-DSGN: A Dynamic Spiking Graph Neural Network with Delay Mechanisms for Evolving Graph (**ICML 2025**). [[paper](https:\u002F\u002Ficml.cc\u002Fvirtual\u002F2025\u002Fposter\u002F43816)] \n- TS-SNN: Temporal Shift Module for Spiking Neural Networks (**ICML 2025**). [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.04165)] \n- SpikF: Spiking Fourier Network for Efficient Long-term Prediction (**ICML 2025**). 
[[paper](https:\u002F\u002Ficml.cc\u002Fvirtual\u002F2025\u002Fposter\u002F46411)] \n- Self-cross Feature based Spiking Neural Networks for Efficient Few-shot Learning (**ICML 2025**). [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.07921)] \n- Faster and Stronger: When ANN-SNN Conversion Meets Parallel Spiking Calculation (**ICML 2025**). [[paper](https:\u002F\u002Ficml.cc\u002Fvirtual\u002F2025\u002Fposter\u002F44986)] \n- Efficient Parallel Training Methods for Spiking Neural Networks with Constant Time Complexity (**ICML 2025**). [[paper](https:\u002F\u002Ficml.cc\u002Fvirtual\u002F2025\u002Fposter\u002F45776)] \n- Training High Performance Spiking Neural Networks by Temporal Model Calibration (**ICML 2025**). [[paper](https:\u002F\u002Ficml.cc\u002Fvirtual\u002F2025\u002Fposter\u002F44216)] \n- Temporal Misalignment in ANN-SNN Conversion and Its Mitigation via Probabilistic Spiking Neurons (**ICML 2025**). [[paper](https:\u002F\u002Ficml.cc\u002Fvirtual\u002F2025\u002Fposter\u002F45627)] \n- Time to Spike? Understanding the Representational Power of Spiking Neural Networks in Discrete Time (**ICML 2025**). [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.18023)] \n- Hybrid Spiking Vision Transformer for Object Detection with Event Cameras (**ICML 2025**). [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.07715)] \n- Sorbet: A Neuromorphic Hardware-Compatible Transformer-Based Spiking Language Model (**ICML 2025**). [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.15298)] \n- EventGPT: Event Stream Understanding with Multimodal Large Language Models (**CVPR 2025**). [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.00832)]  [[code](https:\u002F\u002Fgithub.com\u002FXduSyL\u002FEventGPT)]\n- Spk2SRImgNet: Super-Resolve Dynamic Scene from Spike Stream via Motion Aligned Collaborative Filtering (**CVPR 2025**). 
[[paper](https:\u002F\u002Fcvpr.thecvf.com\u002Fvirtual\u002F2025\u002Fposter\u002F33079)]\n- Decision SpikeFormer: Spike-Driven Transformer for Decision Making (**CVPR 2025**). [[paper](https:\u002F\u002Fcvpr.thecvf.com\u002Fvirtual\u002F2025\u002Fposter\u002F32864)]\n- Self-Supervised Learning for Color Spike Camera Reconstruction (**CVPR 2025**). [[paper](https:\u002F\u002Fcvpr.thecvf.com\u002Fvirtual\u002F2025\u002Fposter\u002F34093)]\n- USP-Gaussian: Unifying Spike-based Image Reconstruction, Pose Correction and Gaussian Splatting (**CVPR 2025**). [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.10504)]\n- VISTREAM: Improving Computation Efficiency of Visual Perception Streaming via Law-of-Charge-Conservation Inspired Spiking Neural Network (**CVPR 2025**). [[paper](https:\u002F\u002Fcvpr.thecvf.com\u002Fvirtual\u002F2025\u002Fposter\u002F34908)]\n- Efficient ANN-Guided Distillation: Aligning Rate-based Features of Spiking Neural Networks through Hybrid Block-wise Replacement (**CVPR 2025**). [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.16572)]\n- Spiking Transformer: Introducing Accurate Addition-Only Spiking Self-Attention for Transformer (**CVPR 2025**). [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.00226)]\n- Brain-Inspired Spiking Neural Networks for Energy-Efficient Object Detection (**CVPR 2025**). [[paper](https:\u002F\u002Fcvpr.thecvf.com\u002Fvirtual\u002F2025\u002Fposter\u002F33275)]\n- Temporal Separation with Entropy Regularization for Knowledge Distillation in Spiking Neural Networks (**CVPR 2025**). [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.03144)]\n- STAA-SNN: Spatial-Temporal Attention Aggregator for Spiking Neural Networks (**CVPR 2025**). [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.02689)]\n- Towards Effective and Sparse Adversarial Attack on Spiking Neural Networks via Breaking Invisible Surrogate Gradients (**CVPR 2025**). 
[[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.03272)]\n- Rethinking Spiking Self-Attention Mechanism: Implementing α-XNOR Similarity Calculation in Spiking Transformers (**CVPR 2025**). [[paper](https:\u002F\u002Fcvpr.thecvf.com\u002Fvirtual\u002F2025\u002Fposter\u002F33850)]\n- Spiking Transformer with Spatial-Temporal Attention (**CVPR 2025**). [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.19764)]\n- Quantized Spike-driven Transformer (**ICLR 2025**). [[paper](https:\u002F\u002Fopenreview.net\u002Fforum?id=5J9B7Sb8rO)]\n- Rethinking Spiking Neural Networks from an Ensemble Learning Perspective (**ICLR 2025**). [[paper](https:\u002F\u002Fopenreview.net\u002Fforum?id=ZyknpOQwkT)]\n- DeepTAGE: Deep Temporal-Aligned Gradient Enhancement for Optimizing Spiking Neural Networks (**ICLR 2025**). [[paper](https:\u002F\u002Fopenreview.net\u002Fforum?id=drPDukdY3t)]\n- QP-SNN: Quantized and Pruned Spiking Neural Networks (**ICLR 2025**). [[paper](https:\u002F\u002Fopenreview.net\u002Fforum?id=MiPyle6Jef)]\n- Temporal Flexibility in Spiking Neural Networks: Towards Generalization Across Time Steps and Deployment Friendliness (**ICLR 2025**). [[paper](https:\u002F\u002Fopenreview.net\u002Fforum?id=9HsfTgflT7)]\n- P-SpikeSSM: Harnessing Probabilistic Spiking State Space Models for Long-Range Dependency Tasks (**ICLR 2025**). [[paper](https:\u002F\u002Fopenreview.net\u002Fforum?id=Sf4ep9Udjf)]\n- TS-LIF: A Temporal Segment Spiking Neuron Network for Time Series Forecasting (**ICLR 2025**). [[paper](https:\u002F\u002Fopenreview.net\u002Fforum?id=rDe9yQQYKt)]\n- Improving the Sparse Structure Learning of Spiking Neural Networks from the View of Compression Efficiency (**ICLR 2025**). [[paper](https:\u002F\u002Fopenreview.net\u002Fforum?id=gcouwCx7dG)]\n- SpikeLLM: Scaling up Spiking Neural Network to Large Language Models via Saliency-based Spiking (**ICLR 2025**). 
[[paper](https:\u002F\u002Fopenreview.net\u002Fforum?id=ZadnlOHsHv)]\n- Spiking Vision Transformer with Saccadic Attention (**ICLR 2025**). [[paper](https:\u002F\u002Fopenreview.net\u002Fforum?id=qzZsz6MuEq)]\n- SpikeGS: Reconstruct 3D scene captured by a fast moving bio-inspired camera (**AAAI 2025**). [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.03771v2)]\n- Rethinking High-speed Image Reconstruction Framework with Spike Camera (**AAAI 2025**). [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2501.04477)] [[code](https:\u002F\u002Fgithub.com\u002Fchenkang455\u002FSpikeCLIP)]\n- Spiking Point Transformer for Point Cloud Classification (**AAAI 2025**).\n- Efficient 3D Recognition with Event-driven Spike Sparse Convolution (**AAAI 2025**).[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.07360)] [[code](https:\u002F\u002Fgithub.com\u002Fbollossom\u002Fe-3dsnn)]\n- GRSN: Gated Recurrent Spiking Neurons for POMDPs and MARL (**AAAI 2025**).[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2404.15597)]\n- EventZoom: A Progressive Approach to Event-Based Data Augmentation for Enhanced Neuromorphic Vision (**AAAI 2025**).[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.18880)]\n- Leveraging Asynchronous Spiking Neural Networks for Ultra Efficient Event-Based Visual Processing (**AAAI 2025**).\n- CREST: An Efficient Conjointly-trained Spike-driven Framework for Event-based Object Detection Exploiting Spatiotemporal Dynamics  (**AAAI 2025**).[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.12525)] [[code](https:\u002F\u002Fgithub.com\u002Fshen-aoyu\u002FCREST\u002F)]\n- UCF-Crime-DVS: A Novel Event-Based Dataset for Video Anomaly Detection with Spiking Neural Networks (**AAAI 2025**).\n- SpikingSSMs: Learning Long Sequences with Sparse and Parallel Spiking State Space Models (**AAAI 2025**). 
[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.14909)][[code](https:\u002F\u002Fgithub.com\u002Fshenshuaijie\u002FSDN)]\n- Advancing Spiking Neural Networks towards Multiscale Spatiotemporal Interaction Learning (**AAAI 2025**). [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.13672)]\n- SpikingYOLOX: Improved YOLOX Object Detection with Fast Fourier Convolution and Spiking Neural Networks (**AAAI 2025**).\n- ALADE-SNN: Adaptive Logit Alignment in Dynamically Expandable Spiking Neural Networks for Class Incremental Learning (**AAAI 2025**).[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.12696)]\n- Efficient Spike-driven Transformer For High-performance Image Segmentation (**AAAI 2025**).[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.14587)] [[code](https:\u002F\u002Fgithub.com\u002FBICLab\u002FSpike2Former)]\n- Towards Accurate Binary Spiking Neural Networks: Learning with Adaptive Gradient Modulation Mechanism (**AAAI 2025**).\n- Adaptive Calibration: A Unified Conversion Framework of Spiking Neural Networks (**AAAI 2025**).\n- Towards More Discriminative Feature Learning in SNNs with Temporal-Self-Erasing Supervision (**AAAI 2025**).\n- FSTA-SNN: Frequency-based Spatial-Temporal Attention Module for Spiking Neural Networks (**AAAI 2025**).[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2501.14744)] [[code](https:\u002F\u002Fgithub.com\u002Fyukairong\u002FFSTA-SNN)]\n\n### 2024\n\n**Review**\n- Direct Training High-Performance Deep Spiking Neural Networks: A Review of Theories and Methods (**Frontiers in Neuroscience 2024**). 
[[paper](https:\u002F\u002Fwww.frontiersin.org\u002Fjournals\u002Fneuroscience\u002Farticles\u002F10.3389\u002Ffnins.2024.1383844\u002Ffull)] [[arxiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.04289v2)] \n\n**NeurIPS, ACM MM, ECCV, AAAI, ICLR, Frontiers in Neuroscience, CVPR, ICML, IJCAI**\n- SpikedAttention: Training-Free and Fully Spike-Driven Transformer-to-SNN Conversion with Winner-Oriented Spike Shift for Softmax Operation (**NeurIPS 2024**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=fs28jccJj5)]\n- Spiking Graph Neural Network on Riemannian Manifolds (**NeurIPS 2024**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=VKt0K3iOmO)]\n- Rethinking the Dynamics of Spiking Neural Networks (**NeurIPS 2024**). [[paper](https:\u002F\u002Fneurips.cc\u002Fvirtual\u002F2024\u002Fposter\u002F96543)]\n- Long-Range Feedback Spiking Network Captures Dynamic and Static Representations of the Visual Cortex under Movie Stimuli (**NeurIPS 2024**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=bxDok3uaK6)] [[code](https:\u002F\u002Fgithub.com\u002FGrasshlw\u002FSNN-Neural-Similarity-Movie)]\n- Take A Shortcut Back: Mitigating the Gradient Vanishing for Training Spiking Neural Networks (**NeurIPS 2024**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=xjyU6zmZD7)]\n- Advancing Training Efficiency of Deep Spiking Neural Networks through Rate-based Backpropagation (**NeurIPS 2024**). [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.11488)] [[code](https:\u002F\u002Fgithub.com\u002FTab-ct\u002Frate-based-backpropagation)]\n- Latent Diffusion for Neural Spiking Data (**NeurIPS 2024**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=ZX6CEo1Wtv)]\n- Autonomous Driving with Spiking Neural Networks (**NeurIPS 2024**). 
[[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=95VyH4VxN9)] [[code](https:\u002F\u002Fgithub.com\u002Fridgerchu\u002FSAD)]\n- Exact Gradients for Stochastic Spiking Neural Networks Driven by Rough Signals  (**NeurIPS 2024**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=mCWZj7pa0M)]\n- Spatio-Temporal Interactive Learning for Efficient Image Reconstruction of Spiking Cameras  (**NeurIPS 2024**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=S4ZqnMywcM)]\n- Slack-Free Spiking Neural Network Formulation for Hypergraph Minimum Vertex Cover (**NeurIPS 2024**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=4A5IQEjG8c)]\n- EnOF: Training Accurate Spiking Neural Networks via Enhancing the Output Feature Representation (**NeurIPS 2024**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf\u002F5a4dfaf8dc6861efa8e8356b3bd86743ab98838d.pdf)]\n- Spiking Token Mixer: A event-driven friendly Former structure for spiking neural networks (**NeurIPS 2024**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=iYcY7KAkSy)] [[code](https:\u002F\u002Fgithub.com\u002Fbrain-intelligence-lab\u002FSTMixer_demo)]\n- SpGesture: Source-Free Domain-adaptive sEMG-based Gesture Recognition with Jaccard Attentive Spiking Neural Network (**NeurIPS 2024**). [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.14398)] [[code](https:\u002F\u002Fgithub.com\u002Fguoweiyu\u002FSpGesture\u002F)]\n- Spiking Transformer with Experts Mixture (**NeurIPS 2024**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf\u002F35a5bc54de368426f66605d8e3f447638863888a.pdf)] \n- FEEL-SNN: Robust Spiking Neural Networks with Frequency Encoding and Evolutionary Leak Factor (**NeurIPS 2024**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=TuCQdBo4NC)] [[code](https:\u002F\u002Fgithub.com\u002Fzju-bmi-lab\u002FFEEL_SNN)]\n- Spiking Neural Network as Adaptive Event Stream Slicer (**NeurIPS 2024**). 
[[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.02249)] \n- Advancing Spiking Neural Networks for Sequential Modeling with Central Pattern Generators (**NeurIPS 2024**). [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.14362)] [[code](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002FSeqSNN)]\n- QKFormer: Hierarchical Spiking Transformer using Q-K Attention (**NeurIPS 2024**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=AVd7DpiooC)] [[code](https:\u002F\u002Fgithub.com\u002Fzhouchenlin2096\u002FQKFormer)]\n- Q-SNNs: Quantized Spiking Neural Networks (**ACM MM 2024**). [[paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3664647.3681186)]\n- RSC-SNN: Exploring the Trade-off Between Adversarial Robustness and Accuracy in Spiking Neural Networks via Randomized Smoothing Coding (**ACM MM 2024**). [[paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3664647.3680639)] [[code](https:\u002F\u002Fgithub.com\u002FKemingWu\u002FRSC-SNN)]\n- Reversing Structural Pattern Learning with Biologically Inspired Knowledge Distillation for Spiking Neural Networks (**ACM MM 2024**). [[paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3664647.3680655)]\n- Towards High-performance Spiking Transformers from ANN to SNN Conversion (**ACM MM 2024**). [[paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3664647.3680620)]  [[code](https:\u002F\u002Fgithub.com\u002Fh-z-h-cell\u002FTransformer-to-SNN-ECMT)]\n- Towards Low-latency Event-based Visual Recognition with Hybrid Step-wise Distillation Spiking Neural Networks (**ACM MM 2024**). [[paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3664647.3680832)]  [[code](https:\u002F\u002Fgithub.com\u002Fhsw0929\u002FHSD)]\n- Integer-Valued Training and Spike-Driven Inference Spiking Neural Network for High-performance and Energy-efficient Object Detection (**ECCV 2024**). 
[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.20708)] [[code](https:\u002F\u002Fgithub.com\u002FBICLab\u002FSpikeYOLO)]\n- Spiking Wavelet Transformer (**ECCV 2024**). [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.11138)] [[code](https:\u002F\u002Fgithub.com\u002Fbic-L\u002FSpiking-Wavelet-Transformer)]\n- Efficient Training of Spiking Neural Networks with Multi-Parallel Implicit Stream Architecture (**ECCV 2024**). [[paper](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2024\u002Fpapers_ECCV\u002Fpapers\u002F05068.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fkiritozc\u002FMPIS-SNNs)]\n- Asynchronous Bioplausible Neuron for Spiking Neural Networks for Event-Based Vision (**ECCV 2024**). [[paper](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2024\u002Fpapers_ECCV\u002Fpapers\u002F08133.pdf)] \n- BKDSNN: Enhancing the Performance of Learning-based Spiking Neural Networks Training with Blurred Knowledge Distillation (**ECCV 2024**). [[paper](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2024\u002Fpapers_ECCV\u002Fpapers\u002F06649.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FIntelligent-Computing-Research-Group\u002FBKDSNN)]\n- Exploring Vulnerabilities in Spiking Neural Networks: Direct Adversarial Attacks on Raw Event Data (**ECCV 2024**). [[paper](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2024\u002Fpapers_ECCV\u002Fpapers\u002F09164.pdf)]\n- EAS-SNN: End-to-End Adaptive Sampling and Representation for Event-based Detection with Recurrent Spiking Neural Networks (**ECCV 2024**). [[paper](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2024\u002Fpapers_ECCV\u002Fpapers\u002F07766.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FWindere\u002FEAS-SNN)]\n- Spike-Temporal Latent Representation for Energy-Efficient Event-to-Video Reconstruction (**ECCV 2024**). 
[[paper](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2024\u002Fpapers_ECCV\u002Fpapers\u002F05843.pdf)]\n- EC-SNN: Splitting Deep Spiking Neural Networks on Edge Devices (**IJCAI 2024**). [[code](https:\u002F\u002Fgithub.com\u002FAmazingDD\u002FEC-SNN)] \n- One-step Spiking Transformer with a Linear Complexity (**IJCAI 2024**).\n- TIM: An Efficient Temporal Interaction Module for Spiking Transformer (**IJCAI 2024**). [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2401.11687)] [[code](https:\u002F\u002Fgithub.com\u002FBrainCog-X\u002FBrain-Cog\u002Ftree\u002Fmain\u002Fexamples\u002FTIM)] \n- Learning a Spiking Neural Network for Efficient Image Deraining (**IJCAI 2024**). [[code](https:\u002F\u002Fgithub.com\u002FMingTian99\u002FESDNet)] \n- LitE-SNN: Designing Lightweight and Efficient Spiking Neural Network through Spatial-Temporal Compressive Network Search and Joint Optimization (**IJCAI 2024**). [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2401.14652)] \n- Temporal Spiking Neural Networks with Synaptic Delay for Graph Reasoning (**ICML 2024**). [[paper](https:\u002F\u002Ficml.cc\u002Fvirtual\u002F2024\u002Fposter\u002F35073)] \n- Towards efficient deep spiking neural networks construction with spiking activity based pruning (**ICML 2024**). [[paper](https:\u002F\u002Ficml.cc\u002Fvirtual\u002F2024\u002Fposter\u002F33505)] \n- Efficient and Effective Time-Series Forecasting with Spiking Neural Networks (**ICML 2024**). [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.01533)] \n- Autaptic Synaptic Circuit Enhances Spatio-temporal Predictive Learning of Spiking Neural Networks (**ICML 2024**). [[paper](https:\u002F\u002Ficml.cc\u002Fvirtual\u002F2024\u002Fposter\u002F33269)] \n- Robust Stable Spiking Neural Networks (**ICML 2024**). [[paper](https:\u002F\u002Ficml.cc\u002Fvirtual\u002F2024\u002Fposter\u002F33217)]\n- CLIF: Complementary Leaky Integrate-and-Fire Neuron for Spiking Neural Networks (**ICML 2024**). 
[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.04663)]\n- NDOT: Neuronal Dynamics-based Online Training for Spiking Neural Networks  (**ICML 2024**). [[paper](https:\u002F\u002Ficml.cc\u002Fvirtual\u002F2024\u002Fposter\u002F33481)]\n- High-Performance Temporal Reversible Spiking Neural Networks with $O(L)$ Training Memory and $O(1)$ Inference Cost (**ICML 2024**). [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.16466)]\n- Towards Efficient Spiking Transformer: a Token Sparsification Framework for Training and Inference Acceleration (**ICML 2024**). [[paper](https:\u002F\u002Ficml.cc\u002Fvirtual\u002F2024\u002Fposter\u002F32674)]\n- SpikeLM: Towards General Spike-Driven Language Modeling via Elastic Bi-Spiking Mechanisms (**ICML 2024**). [[paper](https:\u002F\u002Ficml.cc\u002Fvirtual\u002F2024\u002Fposter\u002F35024)]\n- Sign Gradient Descent-based Neuronal Dynamics: ANN-to-SNN Conversion Beyond ReLU Network (**ICML 2024**). [[paper](https:\u002F\u002Ficml.cc\u002Fvirtual\u002F2024\u002Fposter\u002F33242)]\n- Enhancing Adversarial Robustness in SNNs with Sparse Gradients (**ICML 2024**). [[paper](https:\u002F\u002Ficml.cc\u002Fvirtual\u002F2024\u002Fposter\u002F34066)]\n- SpikeZIP-TF: Conversion is All You Need for Transformer-based SNN (**ICML 2024**). [[paper](https:\u002F\u002Ficml.cc\u002Fvirtual\u002F2024\u002Fposter\u002F34194)]\n- Are Conventional SNNs Really Efficient? A Perspective from Network Quantization  (**CVPR 2024**). [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.10802)]\n- SFOD: Spiking Fusion Object Detector (**CVPR 2024**). [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.15192)] [[code](https:\u002F\u002Fgithub.com\u002Fyimeng-fan\u002FSFOD)]\n- SpikingResformer: Bridging ResNet and Vision Transformer in Spiking Neural Networks (**CVPR 2024**). 
[[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.14302)] [[code](https:\u002F\u002Fgithub.com\u002Fxyshi2000\u002FSpikingResformer)]\n- SGLFormer: Spiking Global-Local-Fusion Transformer with high performance (**Frontiers in Neuroscience 2024**).[[paper](https:\u002F\u002Fwww.frontiersin.org\u002Fjournals\u002Fneuroscience\u002Farticles\u002F10.3389\u002Ffnins.2024.1371290\u002Ffull)] [[code](https:\u002F\u002Fgithub.com\u002FZhangHanN1\u002FSGLFormer)]\n- Towards Energy Efficient Spiking Neural Networks: An Unstructured Pruning Framework  (**ICLR 2024**). [[paper](https:\u002F\u002Fopenreview.net\u002Fforum?id=eoSeaK4QJo&referrer=%5Bthe%20profile%20of%20Zecheng%20Hao%5D(%2Fprofile%3Fid%3D~Zecheng_Hao1))]\n- Online Stabilization of Spiking Neural Networks  (**ICLR 2024**). [[paper](https:\u002F\u002Fopenreview.net\u002Fforum?id=CIj1CVbkpr)]\n- SpikePoint: An Efficient Point-based Spiking Neural Network for Event Cameras Action Recognition (**ICLR 2024**). [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.07189.pdf)]\n- Spatio-Temporal Approximation: A Training-Free SNN Conversion for Transformers  (**ICLR 2024**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=XrunSYwoLr)]\n- Sparse Spiking Neural Network: Exploiting Heterogeneity in Timescales for Pruning Recurrent SNN (**ICLR 2024**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=0jsfesDZDq)]\n- Learning Delays in Spiking Neural Networks using Dilated Convolutions with Learnable Spacings (**ICLR 2024**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=4r2ybzJnmN)] [[code](https:\u002F\u002Fgithub.com\u002FThvnvtos\u002FSNN-delays)]\n- Threaten Spiking Neural Networks through Combining Rate and Temporal Information (**ICLR 2024**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=xv8iGxENyI)] [[code](https:\u002F\u002Fgithub.com\u002Fhzc1208\u002FHART_Attack)]\n- TAB: Temporal Accumulated Batch Normalization in Spiking Neural Networks (**ICLR 2024**). 
[[paper](https:\u002F\u002Fopenreview.net\u002Fforum?id=k1wlmtPGLq&noteId=p5M9gOLAOf)] \n- Certified Adversarial Robustness for Rate Encoded Spiking Neural Networks (**ICLR 2024**). [[paper](https:\u002F\u002Fopenreview.net\u002Fforum?id=5bNYf0CqxY)] \n- Bayesian Bi-clustering of Neural Spiking Activity with Latent Structures (**ICLR 2024**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=ZYm1Ql6udy)] \n- Adaptive deep spiking neural network with global-local learning via balanced excitatory and inhibitory mechanism (**ICLR 2024**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=wpnlc2ONu0)] \n- Hebbian Learning based Orthogonal Projection for Continual Learning of Spiking Neural Networks (**ICLR 2024**). [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.11984.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fpkuxmq\u002FHLOP-SNN)]\n- A Progressive Training Framework for Spiking Neural Networks with Learnable Multi-hierarchical Model (**ICLR 2024**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=g52tgL8jy6)] [[code](https:\u002F\u002Fgithub.com\u002Fhzc1208\u002FSTBP_LMH)]\n- LMUFormer: Low Complexity Yet Powerful Spiking Model With Legendre Memory Units (**ICLR 2024**). [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.04882.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fzeyuliu1037\u002FLMUFormer)]\n- Spike-driven Transformer V2: Meta Spiking Neural Network Architecture Inspiring the Design of Next-generation Neuromorphic Chips (**ICLR 2024**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=1SIBN5Xyw7)] [[code](https:\u002F\u002Fgithub.com\u002FBICLab\u002FSpike-Driven-Transformer-V2)]\n- Can we get the best of both Binary Neural Networks and Spiking Neural Networks for Efficient Computer Vision? (**ICLR 2024**). 
[[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=lGUyAuuTYZ)] [[code](https:\u002F\u002Fgithub.com\u002Fgodatta\u002FUltra-Low-Latency-SNN)]\n- A Graph is Worth 1-bit Spikes: When Graph Contrastive Learning Meets Spiking Neural Networks (**ICLR 2024**).  [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=LnLySuf1vp)] [[code](https:\u002F\u002Fgithub.com\u002FEdisonLeeeee\u002FSpikeGCL)]\n- Ternary Spike: Learning Ternary Spikes for Spiking Neural Networks (**AAAI 2024**).  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.06372.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fyfguo91\u002FTernary-Spike)]\n- Memory-Efficient Reversible Spiking Neural Networks (**AAAI 2024**).  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.07922.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fmi804\u002FRevSNN)]\n- Gated Attention Coding for Training High-performance and Efficient Spiking Neural Networks (**AAAI 2024**).  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2308.06582.pdf)]\n- SpikingBERT: Distilling BERT to Train Spiking Language Models Using Implicit Differentiation (**AAAI 2024**).  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2308.10873.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FNeuroCompLab-psu\u002FSpikingBERT)]\n- TC-LIF: A Two-Compartment Spiking Neuron Model for Long-Term Sequential Modelling (**AAAI 2024**).  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2308.13250.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FZhangShimin1\u002FTC-LIF)]\n- Shrinking Your TimeStep: Towards Low-Latency Neuromorphic Object Recognition with Spiking Neural Networks (**AAAI 2024**).  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2401.01912.pdf)]\n- Dynamic Spiking Graph Neural Networks (**AAAI 2024**).  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2401.05373.pdf)]\n- An Efficient Knowledge Transfer Strategy for Spiking Neural Networks from Static to Event Domain (**AAAI 2024**).  
[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2303.13077.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FBrain-Cog-Lab\u002FTransfer-for-DVS)]\n\n\n\n**Arxiv**\n- Brain-Inspired Spiking Neural Networks for Industrial Fault Diagnosis: A Survey, Challenges, and Opportunities. [[paper](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2401.02429)]  \n- Q-SNNs: Quantized Spiking Neural Networks. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.13672)]\n- Scalable MatMul-free Language Modeling. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.02528)] [[code](https:\u002F\u002Fgithub.com\u002Fridgerchu\u002Fmatmulfreellm)]\n- QKFormer: Hierarchical Spiking Transformer using Q-K Attention. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.16552.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fzhouchenlin2096\u002FQKFormer)]\n- Spikformer V2: Join the High Accuracy Club on ImageNet with an SNN Ticket. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2401.02020.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FZK-Zhou\u002Fspikformer)]\n- SpikeNAS: A Fast Memory-Aware Neural Architecture Search Framework for Spiking Neural Network Systems. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.11322.pdf)]\n- Astrocyte-Enabled Advancements in Spiking Neural Networks for Large Language Modeling. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.07625v2.pdf)]\n\n\n### 2023\n\n**Review**\n- Direct Learning-Based Deep Spiking Neural Networks: A Review (**Frontiers in Neuroscience 2023**). [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2305.19725.pdf)]\n\n**AAAI, ICLR, CVPR, ICML, IJCAI, ICCV, NeurIPS, TPAMI, Science Advances**\n- Time series prediction and anomaly detection with recurrent spiking neural networks (**IJCNN 2023**). 
[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10191614)]\n- SpikingJelly: An open-source machine learning infrastructure platform for spike-based intelligence (**Science Advances 2023**). [[paper](https:\u002F\u002Fwww.science.org\u002Fdoi\u002F10.1126\u002Fsciadv.adi1480)] [[code](https:\u002F\u002Fgithub.com\u002Ffangwei123456\u002Fspikingjelly)]\n- Spike-driven Transformer [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2307.01694.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FBICLab\u002FSpike-Driven-Transformer)]\n- Parallel Spiking Neurons with High Efficiency and Long-term Dependencies Learning Ability (**NeurIPS 2023**). [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.12760)] [[code](https:\u002F\u002Fgithub.com\u002Ffangwei123456\u002FParallel-Spiking-Neuron)]\n- Temporal Conditioning Spiking Latent Variable Models of the Neural Response to Natural Visual Scenes (**NeurIPS 2023**). [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.12045.pdf)]\n- SEENN: Towards Temporal Spiking Early Exit Neural Networks (**NeurIPS 2023**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=mbaN0Y0QTw)]\n- EICIL: Joint Excitatory Inhibitory Cycle Iteration Learning for Deep Spiking Neural Networks (**NeurIPS 2023**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=OMDgOjdqoZ)]\n- Addressing the speed-accuracy simulation trade-off for adaptive spiking neurons (**NeurIPS 2023**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=Ht79ZTVMsn)]\n- Enhancing Adaptive History Reserving by Spiking Convolutional Block Attention Module in Recurrent Neural Networks (**NeurIPS 2023**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=aGZp61S9Lj)]\n- Trial matching: capturing variability with data-constrained spiking neural networks (**NeurIPS 2023**). [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.03603)]\n- Evolving Connectivity for Recurrent Spiking Neural Networks (**NeurIPS 2023**). 
[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2305.17650.pdf)]\n- SparseProp: Efficient Event-Based Simulation and Training of Sparse Recurrent Spiking Neural Networks (**NeurIPS 2023**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=yzZbwQPkmP)] \n- Spiking PointNet: Spiking Neural Networks for Point Clouds (**NeurIPS 2023**). [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.06232v1.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fdayongren\u002Fspiking-pointnet)]\n- Exploring Loss Functions for Time-based Training Strategy in Spiking Neural Networks (**NeurIPS 2023**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=8IvW2k5VeA)]\n- Membrane Potential Batch Normalization for Spiking Neural Networks (**ICCV 2023**). [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FGuo_Membrane_Potential_Batch_Normalization_for_Spiking_Neural_Networks_ICCV_2023_paper.pdf)]\n- Unleashing the Potential of Spiking Neural Networks with Dynamic Confidence (**ICCV 2023**). [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FLi_Unleashing_the_Potential_of_Spiking_Neural_Networks_with_Dynamic_Confidence_ICCV_2023_paper.pdf)]\n- RMP-Loss: Regularizing Membrane Potential Distribution for Spiking Neural Networks\t(**ICCV 2023**). [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.06787)]\n- Inherent Redundancy in Spiking Neural Networks\t(**ICCV 2023**). [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.08227)]\n- Temporal-Coded Spiking Neural Networks with Dynamic Firing Threshold: Learning with Event-Driven Backpropagation (**ICCV 2023**). [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FWei_Temporal-Coded_Spiking_Neural_Networks_with_Dynamic_Firing_Threshold_Learning_with_ICCV_2023_paper.pdf)]\n- Efficient Converted Spiking Neural Network for 3D and 2D Classification\t(**ICCV 2023**). 
[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FLan_Efficient_Converted_Spiking_Neural_Network_for_3D_and_2D_Classification_ICCV_2023_paper.pdf)]\n- Deep Directly-Trained Spiking Neural Networks for Object Detection (**ICCV 2023**). [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.11411)]\n- Towards Memory- and Time-Efficient Backpropagation for Training Spiking Neural Networks (**ICCV 2023**). [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.14311)]\n- SSF: Accelerating Training of Spiking Neural Networks with Stabilized Spiking Flow (**ICCV 2023**). [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FWang_SSF_Accelerating_Training_of_Spiking_Neural_Networks_with_Stabilized_Spiking_ICCV_2023_paper.pdf)]\n- Masked Spiking Transformer (**ICCV 2023**). [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FWang_Masked_Spiking_Transformer_ICCV_2023_paper.pdf)]\n- Spatial-Temporal Self-Attention for Asynchronous Spiking Neural Networks (**IJCAI 2023**). [[paper](https:\u002F\u002Fwww.ijcai.org\u002Fproceedings\u002F2023\u002F0344.pdf)]\n- Learnable Surrogate Gradient for Direct Training Spiking Neural Networks (**IJCAI 2023**). [[paper](https:\u002F\u002Fwww.ijcai.org\u002Fproceedings\u002F2023\u002F0335.pdf)]\n- Enhancing Efficient Continual Learning with Dynamic Structure Development of Spiking Neural Networks (**IJCAI 2023**). [[paper](https:\u002F\u002Fwww.ijcai.org\u002Fproceedings\u002F2023\u002F0334.pdf)]\n- Adaptive Smoothing Gradient Learning for Spiking Neural Networks (**ICML 2023**). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=GdkwSGTpbC)]\n- Surrogate Module Learning: Reduce the Gradient Error Accumulation in Training Spiking Neural Networks (**ICML 2023**). 
[[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=zRkz4duLKp)] [[code](https:\u002F\u002Fgithub.com\u002Fbrain-intelligence-lab\u002Fsurrogate_module_learning)]\n- Rate Gradient Approximation Attack Threats Deep Spiking Neural Networks (**CVPR 2023**). [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fpapers\u002FBu_Rate_Gradient_Approximation_Attack_Threats_Deep_Spiking_Neural_Networks_CVPR_2023_paper.pdf)]\n- Constructing Deep Spiking Neural Networks from Artificial Neural Networks with Knowledge Distillation (**CVPR 2023**). [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fpapers\u002FXu_Constructing_Deep_Spiking_Neural_Networks_From_Artificial_Neural_Networks_With_CVPR_2023_paper.pdf)]\n- Attention Spiking Neural Networks  (**TPAMI 2023**) .[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10032591)] [[code](https:\u002F\u002Fgithub.com\u002Ffangwei123456\u002Fspikingjelly\u002Fpull\u002F329)]\n- Heterogeneous neuronal and synaptic dynamics for spike-efficient unsupervised learning: Theory and design principles (**ICLR 2023**).[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2302.11618.pdf)]\n- Spiking Convolutional Neural Networks for Text Classification (**ICLR 2023**) .[[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=pgU3k7QXuz0)]\n- Bridging the Gap between ANNs and SNNs by Calibrating Offset Spikes (**ICLR 2023**).[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2302.10685.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fhzc1208\u002FANN2SNN_COS)]\n- Spikformer: When Spiking Neural Network Meets Transformer (**ICLR 2023**) .[[paper](https:\u002F\u002Fopenreview.net\u002Fforum?id=frE4fUwz_h)] [[code](https:\u002F\u002Fgithub.com\u002FZK-Zhou\u002Fspikformer)]\n- A Unified Framework of Soft Threshold Pruning (**ICLR 2023**). 
[[paper](https:\u002F\u002Fopenreview.net\u002Fforum?id=cCFqcrq0d8)] [[code](https:\u002F\u002Fgithub.com\u002FYanqi-Chen\u002FLATS)]\n- Bridging the Gap between ANNs and SNNs by Calibrating Offset Spikes (**ICLR 2023**). [[paper](https:\u002F\u002Fopenreview.net\u002Fforum?id=PFbzoWZyZRX)] [[code](https:\u002F\u002Fgithub.com\u002Fhzc1208\u002FANN2SNN_COS)]\n- Reducing ANN-SNN Conversion Error through Residual Membrane Potential (**AAAI 2023**). [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.02091)] [[code](https:\u002F\u002Fgithub.com\u002Fhzc1208\u002FANN2SNN_SRP)]\n- Deep Spiking Neural Networks with High Representation Similarity Model Visual Pathways of Macaque and Mouse (**AAAI 2023**). [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.06060)]\n- ESL-SNNs: An Evolutionary Structure Learning Strategy for Spiking Neural Networks (**AAAI 2023**). [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.03693.pdf)]\n- Exploring Temporal Information Dynamics in Spiking Neural Networks (**AAAI 2023**). [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2211.14406.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FIntelligent-Computing-Lab-Yale\u002FExploring-Temporal-Information-Dynamics-in-Spiking-Neural-Networks)]\n- Scaling Up Dynamic Graph Representation Learning via Spiking Neural Networks(**AAAI 2023**). [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2208.10364.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FEdisonLeeeee\u002FSpikeNet)]\n- Complex Dynamic Neurons Improved Spiking Transformer Network for Efficient Automatic Speech Recognition(**AAAI 2023**). 
[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2302.01194.pdf)] \n\n**Arxiv**\n- Spikingformer: Spike-driven Residual Learning for Transformer-based Spiking Neural Network [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.11954)] [[code](https:\u002F\u002Fgithub.com\u002Fzhouchenlin2096\u002FSpikingformer)]\n- Enhancing the Performance of Transformer-based Spiking Neural Networks by Improved Downsampling with Precise Gradient Backpropagation [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.05954)] [[code](https:\u002F\u002Fgithub.com\u002Fzhouchenlin2096\u002FSpikingformer-CML)]\n- Training Full Spike Neural Networks via Auxiliary Accumulation Pathway [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2301.11929.pdf)]\n- MSS-DepthNet: Depth Prediction with Multi-Step Spiking Neural Network [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.12156)]\n- SpikeGPT: Generative Pre-trained Language Model with Spiking Neural Networks [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.13939)] [[code](https:\u002F\u002Fgithub.com\u002Fridgerchu\u002FSpikeGPT)]\n- Auto-Spikformer: Spikformer Architecture Search [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.00807.pdf)]\n- Advancing Spiking Neural Networks Towards Deep Residual Learning [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2112.08954.pdf)]\n\n\n### 2022\n\n**NeurIPS, CVPR, ICLR, AAAI, ICML, Nature Communications**\n\n- Event-based Video Reconstruction via Potential-assisted Spiking Neural Network [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2201.10943)] [[code](https:\u002F\u002Fgithub.com\u002FLinZhu111\u002FEVSNN)]\n- Optimal ANN-SNN Conversion for High-accuracy and Ultra-low-latency Spiking Neural Networks [[paper](https:\u002F\u002Fopenreview.net\u002Fforum?id=7B3IJMM1k_M)] [[code](https:\u002F\u002Fgithub.com\u002Fputshua\u002FSNN-conversion-QCFS)]\n- Optimized Potential Initialization for Low-latency Spiking Neural Networks (**AAAI 2022**).  
[[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.01440)]\n- AutoSNN: Towards Energy-Efficient Spiking Neural Networks [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2201.12738)]\n- Neural Architecture Search for Spiking Neural Networks [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2201.10355)] [[code](https:\u002F\u002Fgithub.com\u002FIntelligent-Computing-Lab-Yale\u002FNeural-Architecture-Search-for-Spiking-Neural-Networks)]\n- Neuromorphic Data Augmentation for Training Spiking Neural Networks [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.06145)] [[code](https:\u002F\u002Fgithub.com\u002FIntelligent-Computing-Lab-Yale\u002FNDA_SNN)]\n- State Transition of Dendritic Spines Improves Learning of Sparse Spiking Neural Networks [[paper](https:\u002F\u002Fproceedings.mlr.press\u002Fv162\u002Fchen22ac.html)] [[code](https:\u002F\u002Fgithub.com\u002FYanqi-Chen\u002FSTDS)]\n- Training High-Performance Low-Latency Spiking Neural Networks by Differentiation on Spike Representation [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.00459)] [[code](https:\u002F\u002Fgithub.com\u002Fqymeng94\u002FDSR)]\n- Exploring Lottery Ticket Hypothesis in Spiking Neural Networks [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.01382)] [[code](https:\u002F\u002Fgithub.com\u002FIntelligent-Computing-Lab-Yale\u002FExploring-Lottery-Ticket-Hypothesis-in-SNNs)]\n- Spiking Graph Convolutional Networks [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.02767)] [[code](https:\u002F\u002Fgithub.com\u002FZulunZhu\u002FSpikingGCN)]\n- A calibratable sensory neuron based on epitaxial VO2 for spike-based neuromorphic multisensory system [[paper](https:\u002F\u002Fwww.nature.com\u002Farticles\u002Fs41467-022-31747-w)] [[code](https:\u002F\u002Fgithub.com\u002Fbillyuanpku96\u002FSNN-for-sensory-neuron)]\n- Online Training Through Time for Spiking Neural Networks (**NeurIPS 2022**).  
[[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.04195)] [[code](https:\u002F\u002Fgithub.com\u002Fpkuxmq\u002FOTTT-SNN)]\n- Training Spiking Neural Networks with Event-driven Backpropagation [[paper](https:\u002F\u002Fopenreview.net\u002Fforum?id=d4JmP1T45WE)] [[code](https:\u002F\u002Fgithub.com\u002Fzhuyaoyu\u002FSNN-event-driven-learning)]\n- GLIF: A Unified Gated Leaky Integrate-and-Fire Neuron for Spiking Neural Networks [[paper](https:\u002F\u002Fopenreview.net\u002Fforum?id=UmFSx2c4ubT)] [[code](https:\u002F\u002Fgithub.com\u002FIkarosy\u002FGated-LIF)]\n- Temporal Effective Batch Normalization in Spiking Neural Networks [[paper](https:\u002F\u002Fopenreview.net\u002Fforum?id=fLIgyyQiJqz)]\n- Training Spiking Neural Networks with Local Tandem Learning (**NeurIPS 2022**). [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2210.04532.pdf)]\n- IM-Loss: Information Maximization Loss for Spiking Neural Networks (**NeurIPS 2022**). [[paper](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper_files\u002Fpaper\u002F2022\u002Ffile\u002F010c5ba0cafc743fece8be02e7adb8dd-Paper-Conference.pdf)]\n- Temporal Effective Batch Normalization in Spiking Neural Networks (**NeurIPS 2022**). [[paper](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper_files\u002Fpaper\u002F2022\u002Ffile\u002Fde2ad3ed44ee4e675b3be42aa0b615d0-Paper-Conference.pdf)]\n- Biologically Inspired Dynamic Thresholds for Spiking Neural Networks (**NeurIPS 2022**). [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2206.04426.pdf)]\n- Optimal Conversion of Conventional Artificial Neural Networks to Spiking Neural Networks (**ICLR 2022**).  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2103.00476.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FJackn0\u002Fsnn_optimal_conversion_pipeline)]\n- Multi-Level Firing with Spiking DS-ResNet: Enabling Better and Deeper Directly-Trained Spiking Neural Networks (**IJCAI 2022**). 
[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2210.06386.pdf)]\n\n### 2021\n\n**NeurIPS, ICCV, IJCAI, ICML, AAAI**\n\n- Deep Residual Learning in Spiking Neural Networks (**NeurIPS 2021**). [[paper](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2021\u002Ffile\u002Fafe434653a898da20044041262b3ac74-Paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Ffangwei123456\u002FSpike-Element-Wise-ResNet)]\n- Spiking Deep Residual Network[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1805.01352.pdf)]\n- Incorporating Learnable Membrane Time Constant to Enhance Learning of Spiking Neural Networks (**ECCV 2021**).  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2007.05785)]  [[code](https:\u002F\u002Fgithub.com\u002Ffangwei123456\u002FParametric-Leaky-Integrate-and-Fire-Spiking-Neuron)]\n- Pruning of Deep Spiking Neural Networks through Gradient Rewiring [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2105.04916)] [[code](https:\u002F\u002Fgithub.com\u002FYanqi-Chen\u002FGradient-Rewiring)]\n- A Free Lunch From ANN: Towards Efficient, Accurate Spiking Neural Networks Calibration  (**ICML 2021**).  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2106.06984)] [[code](https:\u002F\u002Fgithub.com\u002Fyhhhli\u002FSNN_Calibration)]\n- Optimal ANN-SNN Conversion for Fast and Accurate Inference in Deep Spiking Neural Networks [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2105.11654)] [[code](https:\u002F\u002Fgithub.com\u002FDingJianhao\u002FOptSNNConvertion-RNL-RIL)]\n- Sparse Spiking Gradient Descent (**NeurIPS 2021**). [[paper](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2021\u002Ffile\u002F61f2585b0ebcf1f532c4d1ec9a7d51aa-Paper.pdf)]\n- Training Spiking Neural Networks with Accumulated Spiking Flow (**AAAI 2021**). [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2011.05280.pdf)]\n- Temporal-wise Attention Spiking Neural Networks for Event Streams Classification. (**ECCV 2021**). 
[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FYao_Temporal-Wise_Attention_Spiking_Neural_Networks_for_Event_Streams_Classification_ICCV_2021_paper.pdf)]\n\n\n### Reference\nIf you find this repo useful, please consider citing:\n```\n@article{zhou2024direct,\n  title={Direct training high-performance deep spiking neural networks: a review of theories and methods},\n  author={Zhou, Chenlin and Zhang, Han and Yu, Liutao and Ye, Yumin and Zhou, Zhaokun and Huang, Liwei and Ma, Zhengyu and Fan, Xiaopeng and Zhou, Huihui and Tian, Yonghong},\n  journal={Frontiers in Neuroscience},\n  volume={18},\n  pages={1383844},\n  year={2024},\n  publisher={Frontiers Media SA}\n}\n```\n","# 令人惊叹的脉冲神经网络[![Awesome](https:\u002F\u002Fcdn.rawgit.com\u002Fsindresorhus\u002Fawesome\u002Fd7305f38d29fed78fa85652e3a63e154dd8e8829\u002Fmedia\u002Fbadge.svg)](https:\u002F\u002Fgithub.com\u002Fsindresorhus\u002Fawesome)\n\n收集一些脉冲神经网络相关的论文和代码。（**持续更新中**）\n\n如果你拥有或发现了一些被忽视的SNN论文，可以通过提交Pull Request将其添加到本文档中。\n\n## 新闻\n\n\u003Cdetails>\n\u003Csummary> 新闻 2026 \u003C\u002Fsummary>\n\n[2026.03.24] 更新AAAI 2026（33篇）、ICLR 2026（30篇）中的SNN相关论文。\n\n\u003C\u002Fdetails>\n\n\n\n\u003Cdetails>\n\u003Csummary> 新闻 2025 \u003C\u002Fsummary>\n\n[2025.11.01] 更新Nature、Science 2025中的SNN相关论文（11篇）。\n\n[2025.10.31] 更新NeurIPS 2025中的SNN相关论文（30篇）。\n\n[2025.05.26] 更新ICML 2025（18篇）、IJCAI（11篇）、ICCV（7篇）以及ACM MM（9篇）中的SNN相关论文。\n\n[2025.04.11] 更新ICLR 2025（11篇）、CVPR 2025（14篇）中的SNN相关论文。\n\n[2025.02.06] 更新AAAI 2025中的SNN相关论文（18篇）。\n\n\u003C\u002Fdetails>\n\n\u003Cdetails>\n\u003Csummary> 新闻 2024 \u003C\u002Fsummary>\n\n[2024.11.13] 更新NeurIPS 2024中的SNN相关论文（18篇）。\n\n[2024.10.31] 更新ACM MM 2024中的SNN相关论文（5篇）。\n\n[2024.10.15] 更新ECCV 2024中的SNN相关论文（8篇）。\n\n[2024.05.29] 更新ICML 2024（13篇）、IJCAI 2024（5篇）中的SNN相关论文。\n\n[2024.04.29] 更新ICLR 2024（17篇）、AAAI 2024（8篇）、CVPR 2024（3篇）中的SNN相关论文。\n\n\u003C\u002Fdetails>\n\n\n\u003Cdetails>\n\n\u003Csummary> 新闻 2023 \u003C\u002Fsummary>\n\n[2023.12.31] 更新TPAMI 
2023、Frontiers in Neuroscience 2023中的SNN相关论文。\n\n[2023.10.31] 更新CVPR 2023（2篇）、ICML 2023（2篇）、IJCAI 2023（3篇）、ICCV 2023（10篇）以及NeurIPS 2023（12篇）中的SNN相关论文。\n\n[2023.06.25] 更新ICLR 2023（6篇）、AAAI 2023（6篇）中的SNN相关论文。\n\n\u003C\u002Fdetails>\n\n\n\n## 论文\n\n### 2026\n\n**Nature、Science、Cell**\n\n**AAAI、ICLR**\n- 基于异质训练脆弱性下的主导奇异值消去法增强脉冲神经网络的鲁棒性（**ICLR 2026**）。[[论文](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10010693)]\n- 受大脑启发的门控机制解锁脉冲神经网络中的稳健计算（**ICLR 2026**）。[[论文](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10011430)]\n- 利用侧向抑制训练深度无归一化脉冲神经网络。（**ICLR 2026**）。[[论文](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10009258)]\n- 3DSMT：用于点云分析的混合脉冲Mamba-Transformer。（**ICLR 2026**）。[[论文](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10010096)]\n- 脉冲Transformer中的神经动力学自注意力。（**ICLR 2026**）。[[论文](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10007889)]\n- 时间就是一切：针对事件驱动型脉冲神经网络的脉冲重定时攻击。（**ICLR 2026**）。[[论文](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10008650)]\n- 在超稀疏脉冲神经网络上进行Cannistraci-Hebb训练。（**ICLR 2026**）。[[论文](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10007264)]\n- CaRe-BN：用于强化学习中稳定脉冲神经网络的精确移动统计量。（**ICLR 2026**）。[[论文](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10011021)]\n- Otters：一种基于光学首次脉冲时间编码的节能脉冲Transformer。（**ICLR 2026**）。[[论文](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10007423)]\n- 神经形态脉冲神经网络的在线伪零阶训练。（**ICLR 2026**）。[[论文](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10011366)]\n- SAFA-SNN：一种面向设备端的小样本增量式学习方法，具有快速自适应结构的稀疏感知脉冲神经网络。（**ICLR 2026**）。[[论文](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10011088)]\n- 面对抗攻击的鲁棒脉冲神经网络。（**ICLR 2026**）。[[论文](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10007241)]\n- TP-Spikformer：令牌剪枝脉冲Transformer。（**ICLR 
2026**）。[[论文](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10010064)]\n- 打破梯度时间共线性以提升脉冲神经网络的鲁棒性。（**ICLR 2026**）。[[论文](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10006881)]\n- 随机脉冲神经网络是稳定的且谱结构简单。（**ICLR 2026**）。[[论文](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10009762)]\n- 多重视角，同一心智：脉冲神经网络中的时间多视角与渐进式蒸馏。（**ICLR 2026**）。[[论文](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10009850)]\n- 在持续学习中利用随机时间K-胜者全取机制实现鲁棒的选择性激活。（**ICLR 2026**）。[[论文](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10006915)]\n- 通过梯度检查点和脉冲压缩实现脉冲神经网络的无损高效训练。（**ICLR 2026**）。[[论文](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10007453)]\n- 超越线性处理：脉冲神经网络中的树突双线性整合。（**ICLR 2026**）。[[论文](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10011474)]\n- 用于点云分析的脉冲差异Transformer。（**ICLR 2026**）。[[论文](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10011321)]\n- PredNext：脉冲神经网络中用于无监督学习的显式跨视图时间预测。（**ICLR 2026**）。[[论文](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10010014)]\n- 差分预测编码用于训练脉冲神经网络。（**ICLR 2026**）。[[论文](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10007923)]\n- 通过参数可逆变换推进脉冲神经网络中的时空表征。（**ICLR 2026**）。[[论文](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10011650)]\n- 分数阶脉冲神经网络。（**ICLR 2026**）。[[论文](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10009869)]\n- SpikeGen：基于潜在生成框架的解耦“视杆细胞与视锥细胞”视觉表征处理。（**ICLR 2026**）。[[论文](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10009062)]\n- 基于双向脉冲蒸馏的生物合理学习。（**ICLR 2026**）。[[论文](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10009918)]\n- 分布感知的多粒度相位编码：迈向更低转换误差的脉冲驱动大型语言模型。（**ICLR 2026**）。[[论文](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10007555)]\n- SpikeStereoNet：一种受大脑启发的框架，用于从脉冲流中估计立体深度。（**ICLR 
2026**）。[[论文](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10007680)]\n- SpikePingpong：基于脉冲视觉的快慢乒乓机器人系统。（**ICLR 2026**）。[[论文](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10008450)]\n- 基于脉冲的数字大脑：一种用于脑活动分析的新基础模型。（**ICLR 2026**）。[[论文](https:\u002F\u002Ficlr.cc\u002Fvirtual\u002F2026\u002Fposter\u002F10009923)]\n- 按激活传播：脉冲神经网络的一步策略（**AAAI 2026**）。[[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F37187)]\n- SpikingIR：一种新型转换后的脉冲神经网络，用于高效的图像恢复（**AAAI 2026**）。[[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F37769)] \n- 脉冲神经网络训练中知识蒸馏的深入探讨（**AAAI 2026**）。[[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F37769)] \n- 基于膜电位分布的脉冲神经网络代理函数优化方法（**AAAI 2026**）。[[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F39769)] \n- 并行首次脉冲时间训练脉冲神经网络（**AAAI 2026**）。[[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F37149)] \n- HardF-SNN：面向硬件友好的量化方法，支持仅使用整数运算的高效推理（**AAAI 2026**）。[[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F37174)] \n- SpikCommander：一种高性能的多视图学习脉冲Transformer，用于高效的语音命令识别（**AAAI 2026**）。[[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F37194)] \n- 通过时间步级反向传播对脉冲神经网络的时间步压缩攻击（**AAAI 2026**）。[[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F37479)] \n- 在关键区域发射比特：基于脉冲指导的仅可识别失真建模，用于机器中心的视频编码（**AAAI 2026**）。[[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F38935)] \n- MPD-SGR：基于膜电位分布驱动的代理梯度正则化，用于构建鲁棒脉冲神经网络（**AAAI 2026**）。[[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F37166)] \n- 无需训练即可将人工神经网络转换为脉冲神经网络，适用于高性能脉冲Transformer（**AAAI 2026**）。[[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F37195)] \n- 
Spikingformer：脉冲神经网络的关键基础模型（**AAAI 2026**）。[[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F37207)] \n- I2E：实时图像到事件的转换，用于高性能脉冲神经网络（**AAAI 2026**）。[[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F37179)] \n- S³：将脉冲神经元作为隔离分割器用于脑信号解码（**AAAI 2026**）。[[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F38869)] \n- TDSNNs：具有竞争力的地形深度脉冲神经网络，用于视觉皮层建模（**AAAI 2026**）。[[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F37208)] \n- 直接训练的脉冲目标检测器的时间动态增强器（**AAAI 2026**）。[[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F37178)] \n- 脉冲异构图注意力网络（**AAAI 2026**）。[[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F39068)] \n- 少突胶质细胞驱动的脉冲神经模型（**AAAI 2026**）。[[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F39306)] \n- HLML-SNN：通过赫布学习驱动的元学习实现脉冲神经网络中的快速持续学习（**AAAI 2026**）。[[论文]()] \n- DS-ATGO：通过前向自适应阈值和后向梯度优化实现的双阶段协同学习，用于脉冲神经网络（**AAAI 2026**）。[[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F37165)] \n- 伪脉冲神经元：一种基于噪声的训练框架，适用于异构延迟脉冲神经网络（**AAAI 2026**）。[[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F40086)] \n- 用于水下目标检测的时空频率脉冲神经网络（**AAAI 2026**）。[[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F39109)] \n- SFedHIFI：基于发放率的异构信息融合，用于联邦学习中的脉冲网络（**AAAI 2026**）。[[论文]()] \n- 受生物体内稳态启发的脉冲神经网络动态权重调整（**AAAI 2026**）。[[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F40146)] \n- GT-SNT：一种线性时间Transformer，通过脉冲节点标记实现大规模图处理（**AAAI 2026**）。[[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F38667)] \n- 探索脉冲神经网络在图像去雨方面的潜力（**AAAI 2026**）。[[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F37295)] \n- 
基于脉冲辅助的神经架构，用于高效且鲁棒的WiFi传感（**AAAI 2026**）。[[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F39589)] \n- 通过生物启发的极化作用稳定脉冲神经元（**AAAI 2026**）。[[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F39435)] \n- 基于脉冲流的记忆转移，用于动态场景重建（**AAAI 2026**）。\n- LAS：用于完全脉冲驱动大型语言模型的无损ANN-SNN转换（**AAAI 2026**）。[[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F37151)] \n- 脉冲成像测速法：利用脉冲流进行流体密集运动估计（**AAAI 2026**）。\n- BulletTime4D：通过脉冲引导的立体视觉，迈向高时空分辨率的动态场景渲染（**AAAI 2026**）。\n- 通过时间间隔量化和低光成像中的脉冲DSLR多模态数据集，为脉冲相机建立鲁棒的噪声模型（**AAAI 2026**）。\n- HLML-SNN：通过赫布学习驱动的元学习实现脉冲神经网络中的快速持续学习（**AAAI 2026**）。[[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F39942)] \n- 通用阈值优化结合和谐多阈值神经元，实现精准的ANN-to-SNN转换（**AAAI 2026**）。[[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F37206)] \n- HypoxSpike：用于阿片类药物过量检测的三值脉冲神经网络（**AAAI 2026**）。[[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F41237)] \n- 双频谱蒸馏：解决ANN-SNN知识迁移中的光谱不匹配问题（**AAAI 2026**）。[[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F40085)] \n- 通过激活感知的重新分配，实现无需训练且准确的ANN-to-SNN转换（**AAAI 2026**）。[[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F37148)]\n\n### 2025年\n\n**Nature、Science、Cell**\n- 基于混合忆阻器阵列的疲劳型时序依赖可塑性学习的脉冲神经网络（《Nature Electronics》，2026年）。[[论文](https:\u002F\u002Fwww.nature.com\u002Farticles\u002Fs41928-025-01554-4)]\n- 神经形态计算范式通过脉冲神经网络提升鲁棒性（《Nature Communications》，2025年）。[[论文](https:\u002F\u002Fwww.nature.com\u002Farticles\u002Fs41467-025-65197-x)][[代码](https:\u002F\u002Fgithub.com\u002FDingJianhao\u002FSNNEnhancingRobustness)]\n- 基于一个扩散型忆阻器、一个晶体管和一个电阻的脉冲人工神经元（《Nature 
Electronics》，2025年）。[[论文](https:\u002F\u002Fwww.nature.com\u002Farticles\u002Fs41928-025-01488-x)][[代码](https:\u002F\u002Fgithub.com\u002FGnohzZ\u002FBrain-Dynamics-Modeling-Acceleration)]\n- 基于单层二硫化钼的具有内在可塑性的生物启发式人工神经元（《Nature Electronics》，2025年）。[[论文](https:\u002F\u002Fwww.nature.com\u002Farticles\u002Fs41928-025-01433-y)]\n- 采用脑启发计算架构建模宏观脑动力学（《Nature Communications》，2025年）。[[论文](https:\u002F\u002Fwww.nature.com\u002Farticles\u002Fs41467-025-64470-3)]\n- 用于无监督多变量时间序列模式分类及多通道尖峰排序的经济型脉冲神经网络（《Nature Communications》，2025年）。[[论文](https:\u002F\u002Fwww.nature.com\u002Farticles\u002Fs41467-025-64231-2)][[代码](https:\u002F\u002Fcodeocean.com\u002Fcapsule\u002F9829487\u002Ftree)]\n- 利用神经振荡调制的脉冲神经网络实现高效稳健的时间处理（《Nature Communications》，2025年）。[[论文](https:\u002F\u002Fwww.nature.com\u002Farticles\u002Fs41467-025-63771-x)][[代码](https:\u002F\u002Fgithub.com\u002FYinsongYan\u002FRhythm-SNN)]\n- 脉冲神经网络中的协变时空感受野（《Nature Communications》，2025年）。[[论文](https:\u002F\u002Fwww.nature.com\u002Farticles\u002Fs41467-025-63493-0)][[代码](https:\u002F\u002Fgithub.com\u002Fjegp\u002Fnrf)]\n- 一种用于在脉冲神经网络中同时编码空间与时间动态的多突触脉冲神经元（《Nature Communications》，2025年）。[[论文](https:\u002F\u002Fdoi.org\u002F10.1038\u002Fs41467-025-62251-6)][[代码](https:\u002F\u002Fgithub.com\u002Ffanliangwei\u002FMultisynaptic-spiking-neurons)]\n- 通过脉冲神经网络中的适应机制推进时空处理（《Nature Communications》，2025年）。[[论文](https:\u002F\u002Fwww.nature.com\u002Farticles\u002Fs41467-025-60878-z)][[代码](https:\u002F\u002Fgithub.com\u002FIGITUGraz\u002FSE-adlif)]\n- 面向输入感知动态SNN的随机忆阻器拓扑优化（《Science Advances》，2025年）。[[论文](https:\u002F\u002Fwww.science.org\u002Fdoi\u002F10.1126\u002Fsciadv.ads5340)][[代码](https:\u002F\u002Fgithub.com\u002Fbo-wang-up\u002FPRIME)]\n- 全忆阻型脉冲神经网络用于节能图学习（《Science Advances》，2025年）。[[论文](https:\u002F\u002Fwww.science.org\u002Fdoi\u002F10.1126\u002Fsciadv.adv2312)]\n\n**AAAI、ICLR、CVPR、ICML、IJCAI、ICCV、ACM MM、NeurIPS**\n- 脉冲神经网络中用于序列强化学习的自适应代理梯度（NeurIPS 2025）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=oGmROC4e4W)] 
[[代码](https:\u002F\u002Fgithub.com\u002Fkorneelf1\u002FSpikingCrazyflie)]\n- 向脉冲Transformer中的相对位置编码迈进（NeurIPS 2025）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=MDWJlTWZHH)] [[代码](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002FSeqSNN)]\n- 基于时间编码尖峰相机的高动态范围成像（NeurIPS 2025）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=flIdch9eTf)] [[代码](https:\u002F\u002Fgithub.com\u002Fzkzhu123\u002FTESC)]\n- 脉冲Transformer中的双极自注意力机制（NeurIPS 2025）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=nG45z7lJ7D)] [[代码](https:\u002F\u002Fopenreview.net\u002Fattachment?id=nG45z7lJ7D&name=supplementary_material)]\n- 时序依赖的赫布学习作为噪声梯度下降（NeurIPS 2025）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=YTbLri0siT)]\n- Spike-RetinexFormer：利用脉冲神经网络重新思考低光照图像增强（NeurIPS 2025）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=8W8SRZIpJP)]\n- SPACE：面向测试时适应的脉冲神经网络中的尖峰感知一致性增强（NeurIPS 2025）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=Di0RasgbQ6)] [[代码](https:\u002F\u002Fgithub.com\u002Fethanxyluo\u002FSPACE)]\n- MI-TRQR：基于互信息的时间冗余量化与削减，用于节能脉冲神经网络（NeurIPS 2025）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=NRqGpUAjV9)] [[代码](https:\u002F\u002Fgithub.com\u002Fdfxue\u002FMI-TRQR)]\n- Spik-NeRF：用于神经辐射场的脉冲神经网络（NeurIPS 2025）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=047VzZEpnu)]\n- 树突共振放电神经元用于高效长序列建模（NeurIPS 2025）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=ywzGKDStrm)] [[代码](https:\u002F\u002Fopenreview.net\u002Fattachment?id=ywzGKDStrm&name=supplementary_material)]\n- 脉冲神经网络需要高频信息（NeurIPS 2025）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=owNPAl7LNK)] [[代码](https:\u002F\u002Fgithub.com\u002Fbic-L\u002FMaxFormer)]\n- 活动剪枝用于高效脉冲神经网络（NeurIPS 2025）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=zjOXZEXQKZ)] [[代码](https:\u002F\u002Fgithub.com\u002Fputshua\u002FActivity-Pruning-SNN)]\n- 无乘法且可并行化的脉冲神经元，具备高效时空动态特性（NeurIPS 2025）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=4q5ZYP0ynu)] 
[[代码](https:\u002F\u002Fgithub.com\u002FPengXue0812\u002FMultiplication-Free-Parallelizable-Spiking-Neurons-with-Efficient-Spatio-Temporal-Dynamics)]\n- SpikingVTG：用于视频时间定位的尖峰检测Transformer（NeurIPS 2025）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=SkhF3cuyev)] [[代码](https:\u002F\u002Fopenreview.net\u002Fattachment?id=SkhF3cuyev&name=supplementary_material)]\n- S$^2$M-Former：用于大脑听觉注意力检测的对称混合分支脉冲Transformer（NeurIPS 2025）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=WtMuGdHvh6)] [[代码](https:\u002F\u002Fgithub.com\u002FJackieWang9811\u002FS2M-Former)]\n- 基于局部-全局耦合的脉冲图Transformer，从两个视角诊断脑部疾病（NeurIPS 2025）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=kkhRTTmXFV)]\n- 一种可扩展、因果且节能的框架，用于利用脉冲神经网络进行神经解码（NeurIPS 2025）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=oAbaGU9N1X)] [[代码](https:\u002F\u002Fspikachu-bci.github.io\u002F)]\n- 脉冲遇见注意力：利用注意力脉冲神经网络实现高效的遥感图像超分辨率（NeurIPS 2025）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=VaE33hkqmg)] [[代码](https:\u002F\u002Fgithub.com\u002FXY-boy\u002FSpikeSR)]\n- 自适应分裂：用于低延迟脉冲神经网络的训练后编码（NeurIPS 2025）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=2zZzdAMyYi)] [[代码](https:\u002F\u002Fgithub.com\u002FJiangYizhou16\u002FAdaptive-Fission)]\n- S$^2$NN：亚比特脉冲神经网络（NeurIPS 2025）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=hFsCuVc1cB)] [[代码](https:\u002F\u002Fopenreview.net\u002Fattachment?id=hFsCuVc1cB&name=supplementary_material)]\n- 表面上冗余的模块增强了果蝇的嗅觉学习鲁棒性（NeurIPS 2025）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=d6WUTRJqP3)] [[代码](https:\u002F\u002Fgithub.com\u002FL-0cean\u002FFly-SNN)]\n- 全脉冲神经网络用于统一的帧-事件目标跟踪（NeurIPS 2025）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=FooiwsnEH9)] [[代码](https:\u002F\u002Fgithub.com\u002FNoctis-A\u002FSpikeFET)]\n- 增强的自我蒸馏框架用于高效脉冲神经网络训练（NeurIPS 2025）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=dpmMg6aK1D)] [[代码](https:\u002F\u002Fopenreview.net\u002Fpdf?id=dpmMg6aK1D)]\n- 学习可塑性：脉冲神经网络中的可塑性驱动学习框架（NeurIPS 2025）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=fllsm01JWS)]\n- 
HetSyn：通过异质突触实现在脉冲神经网络中灵活的时间尺度整合（NeurIPS 2025）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=YYz4fumVed)] [[代码](https:\u002F\u002Fgithub.com\u002Fdzcgood\u002FHetSyn)]\n- 揭示脉冲神经网络的空间-时间有效感受野（NeurIPS 2025）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=tYnJC5ba6j)] [[代码](https:\u002F\u002Fgithub.com\u002FEricZhang1412\u002FSpatial-temporal-ERF)]\n- 类似大脑的变分推断（NeurIPS 2025）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=573IcLusXq)] [[代码](https:\u002F\u002Fgithub.com\u002Fhadivafaii\u002FIterativeVAE)]\n- 代理目标：弥合离散脉冲神经网络与连续控制之间的差距（NeurIPS 2025）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=RRBve5GwjS)] [[代码](https:\u002F\u002Fopenreview.net\u002Fpdf?id=RRBve5GwjS)]\n- 强者与弱者的协同作用：脉冲神经网络本质上是自我蒸馏器（NeurIPS 2025）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=BrmR69AhUg)]\n- Spike4DGS：通过尖峰相机阵列，利用4D高斯泼溅技术实现高速动态场景重建（NeurIPS 2025）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=V5efEA8nIr)] [[代码](https:\u002F\u002Fgithub.com\u002FQinghongye\u002FSpike4DGS)]\n- 以脉冲神经网络实现工业场景下的端到端轴承故障诊断（KDD 2025）。[[论文](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2408.11067)][[代码](https:\u002F\u002Fgithub.com\u002Fyqding326\u002FMRA-SNN)]\n- DSF-Net：基于事件-RGB的动态稀疏融合，通过尖峰触发注意力实现高速检测（ACM MM 2025）。\n- ESOD：基于事件的小目标检测（ACM MM 2025）。\n- E-4DGS：从多视角事件相机中进行高保真动态重建（ACM MM 2025）。\n- 通过尖峰触发的阈值动态将不应期纳入脉冲神经网络中（ACM MM 2025）。\n- Signal-SGN：一种基于学习时频动态的骨骼动作识别脉冲图卷积网络（ACM MM 2025）。\n- SGM-Transformer：重新思考脉冲神经网络中的梯度信息损失与补偿（ACM MM 2025）。\n- 高级脉冲YOLOX：通过基于尖峰的部分自注意力和2D脉冲Transformer扩展脉冲神经网络的目标检测能力（ACM MM 2025）。\n- 具有时间注意力引导的自适应融合的脉冲神经网络，用于不平衡的多模态学习（ACM MM 2025）。\n- 时间编码的脉冲Transformer（ACM MM 2025）。\n- ClearSight：受人类视觉启发的事件驱动运动模糊去除解决方案（ICCV 2025）。[[论文](https:\u002F\u002Ficcv.thecvf.com\u002Fvirtual\u002F2025\u002Fposter\u002F1849)]\n- 用于模运算相机HDR成像的鲁棒展开网络（ICCV 2025）。[[论文](https:\u002F\u002Ficcv.thecvf.com\u002Fvirtual\u002F2025\u002Fposter\u002F1986)]\n- SpikeDiff：零样本高质量视频重建，基于彩色尖峰相机和亚毫秒级尖峰流（ICCV 2025）。[[论文](https:\u002F\u002Ficcv.thecvf.com\u002Fvirtual\u002F2025\u002Fposter\u002F1181)]\n- 
用于低光下尖峰图像恢复的噪声建模扩散模型（ICCV 2025）。[[论文](https:\u002F\u002Ficcv.thecvf.com\u002Fvirtual\u002F2025\u002Fposter\u002F1467)]\n- 高效的脉冲点云Mamba用于点云分析（ICCV 2025）。[[论文](https:\u002F\u002Ficcv.thecvf.com\u002Fvirtual\u002F2025\u002Fposter\u002F2271)]\n- SpikePack：通过高硬件兼容性增强脉冲神经网络的信息流动（ICCV 2025）。[[论文](https:\u002F\u002Ficcv.thecvf.com\u002Fvirtual\u002F2025\u002Fposter\u002F345)]\n- SpiLiFormer：通过侧抑制增强脉冲Transformer（ICCV 2025）。[[论文](https:\u002F\u002Ficcv.thecvf.com\u002Fvirtual\u002F2025\u002Fposter\u002F1753)]\n- 利用膜电位动态为脉冲神经网络学习自适应梯度（IJCAI 2025）。[[论文](https:\u002F\u002Fijcai-preprints.s3.us-west-1.amazonaws.com\u002F2025\u002F7783.pdf)]\n- ILIF：用于防止脉冲神经网络过度激活的时序抑制漏积分火神经元（IJCAI 2025）。[[论文](https:\u002F\u002Fijcai-preprints.s3.us-west-1.amazonaws.com\u002F2025\u002F4566.pdf)]\n- 神经形态序列竞技场：神经形态时间处理的基准测试（IJCAI 2025）。[[论文](https:\u002F\u002Fijcai-preprints.s3.us-west-1.amazonaws.com\u002F2025\u002F7408.pdf)]\n- MSVIT：利用多尺度注意力融合改进脉冲视觉Transformer（IJCAI 2025）。[[论文](https:\u002F\u002Fijcai-preprints.s3.us-west-1.amazonaws.com\u002F2025\u002F7378.pdf)]\n- 一种快速准确的带有负尖峰的ANN-SNN转换算法（IJCAI 2025）。[[论文](https:\u002F\u002Fijcai-preprints.s3.us-west-1.amazonaws.com\u002F2025\u002F6149.pdf)]\n- ECC-SNN：面向脉冲神经网络的成本效益边缘-云协作（IJCAI 2025）。[[论文](https:\u002F\u002Fijcai-preprints.s3.us-west-1.amazonaws.com\u002F2025\u002F3167.pdf)]\n- 成本效益高的设备端序列推荐，使用脉冲神经网络（IJCAI 2025）。[[论文](https:\u002F\u002Fijcai-preprints.s3.us-west-1.amazonaws.com\u002F2025\u002F3150.pdf)]\n- SCNNs：基于尖峰耦合的神经网络，用于理解人脑的结构-功能关系（IJCAI 2025）。[[论文](https:\u002F\u002Fijcai-preprints.s3.us-west-1.amazonaws.com\u002F2025\u002F1337.pdf)]\n- 在联邦学习中利用标签偏斜为脉冲神经网络服务（IJCAI 2025）。[[论文](https:\u002F\u002Fijcai-preprints.s3.us-west-1.amazonaws.com\u002F2025\u002F1298.pdf)]\n- 二进制事件驱动脉冲Transformer（IJCAI 2025）。[[论文](https:\u002F\u002Fijcai-preprints.s3.us-west-1.amazonaws.com\u002F2025\u002F1206.pdf)]\n- 通过异构知识蒸馏应对脉冲神经网络中的长尾数据挑战（IJCAI 2025）。[[论文](https:\u002F\u002Fijcai-preprints.s3.us-west-1.amazonaws.com\u002F2025\u002F787.pdf)]\n- 
SpikeVideoFormer：一种高效的尖峰驱动视频Transformer，具有汉明注意力和$\\mathcal{O}(T)$复杂度（ICML 2025）。[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.10352)]\n- 带有误差补偿学习的高效ANN-SNN转换（ICML 2025）。[[论文](https:\u002F\u002Fwww.arxiv.org\u002Fabs\u002F2506.01968)]\n- 用于免训练ANN-to-SNN转换的差分编码（ICML 2025）。[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.00301)]\n- 面向全范围时间步部署的深度脉冲神经网络的高效基于logit的知识蒸馏（ICML 2025）。[[论文](https:\u002F\u002Ficml.cc\u002Fvirtual\u002F2025\u002Fposter\u002F44825)]\n- ReverB-SNN：反转权重和激活的位，用于脉冲神经网络（ICML 2025）。[[论文](https:\u002F\u002Ficml.cc\u002Fvirtual\u002F2025\u002Fposter\u002F43640)]\n- TTFSFormer：一种基于TTFS的无损脉冲Transformer转换（ICML 2025）。[[论文](https:\u002F\u002Ficml.cc\u002Fvirtual\u002F2025\u002Fposter\u002F44159)]\n- BSO：二进制尖峰在线优化（ICML 2025）。[[论文](https:\u002F\u002Ficml.cc\u002Fvirtual\u002F2025\u002Fposter\u002F45087)]\n- Delay-DSGN：一种具有延迟机制的动态脉冲图神经网络，适用于演化图（ICML 2025）。[[论文](https:\u002F\u002Ficml.cc\u002Fvirtual\u002F2025\u002Fposter\u002F43816)]\n- TS-SNN：用于脉冲神经网络的时间移位模块（ICML 2025）。[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.04165)]\n- SpikF：用于高效长期预测的脉冲傅里叶网络（ICML 2025）。[[论文](https:\u002F\u002Ficml.cc\u002Fvirtual\u002F2025\u002Fposter\u002F46411)]\n- 基于自交叉特征的脉冲神经网络，用于高效少样本学习（ICML 2025）。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.07921)]\n- 更快更强：当ANN-SNN转换遇到并行脉冲计算时（ICML 2025）。[[论文](https:\u002F\u002Ficml.cc\u002Fvirtual\u002F2025\u002Fposter\u002F44986)]\n- 具有恒定时间复杂度的脉冲神经网络高效并行训练方法（ICML 2025）。[[论文](https:\u002F\u002Ficml.cc\u002Fvirtual\u002F2025\u002Fposter\u002F45776)]\n- 通过时间模型校准训练高性能脉冲神经网络（ICML 2025）。[[论文](https:\u002F\u002Ficml.cc\u002Fvirtual\u002F2025\u002Fposter\u002F44216)]\n- ANN-SNN转换中的时间错配及其通过概率性脉冲神经元的缓解（ICML 2025）。[[论文](https:\u002F\u002Ficml.cc\u002Fvirtual\u002F2025\u002Fposter\u002F45627)]\n- 是时候尖峰了吗？理解离散时间下脉冲神经网络的表征能力（ICML 2025）。[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.18023)]\n- 混合脉冲视觉Transformer，用于结合事件相机进行目标检测（ICML 2025）。[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.07715)]\n- 
Sorbet：一款与神经形态硬件兼容的基于Transformer的脉冲语言模型（ICML 2025）。[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.15298)]\n- EventGPT：利用多模态大型语言模型理解事件流（CVPR 2025）。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.00832)] [[代码](https:\u002F\u002Fgithub.com\u002FXduSyL\u002FEventGPT)]\n- Spk2SRImgNet：通过运动对齐的协同过滤，从尖峰流中超分辨率动态场景（CVPR 2025）。[[论文](https:\u002F\u002Fcvpr.thecvf.com\u002Fvirtual\u002F2025\u002Fposter\u002F33079)]\n- Decision SpikeFormer：用于决策制定的尖峰驱动Transformer（CVPR 2025）。[[论文](https:\u002F\u002Fcvpr.thecvf.com\u002Fvirtual\u002F2025\u002Fposter\u002F32864)]\n- 自监督学习用于彩色尖峰相机的重建（CVPR 2025）。[[论文](https:\u002F\u002Fcvpr.thecvf.com\u002Fvirtual\u002F2025\u002Fposter\u002F34093)]\n- USP-Gaussian：统一尖峰图像重建、姿态矫正和高斯泼溅（CVPR 2025）。[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.10504)]\n- VISTREAM：通过受电荷守恒定律启发的脉冲神经网络提高视觉感知流的计算效率（CVPR 2025）。[[论文](https:\u002F\u002Fcvpr.thecvf.com\u002Fvirtual\u002F2025\u002Fposter\u002F34908)]\n- 高效的ANN引导蒸馏：通过混合块状替换对齐脉冲神经网络的速率相关特征（CVPR 2025）。[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.16572)]\n- 脉冲Transformer：在Transformer中引入精确的仅加法脉冲自注意力（CVPR 2025）。[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.00226)]\n- 受大脑启发的脉冲神经网络，用于节能目标检测（CVPR 2025）。[[论文](https:\u002F\u002Fcvpr.thecvf.com\u002Fvirtual\u002F2025\u002Fposter\u002F33275)]\n- 在脉冲神经网络的知识蒸馏中使用熵正则化进行时间分离（CVPR 2025）。[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.03144)]\n- STAA-SNN：用于脉冲神经网络的空间-时间注意力聚合器（CVPR 2025）。[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.02689)]\n- 通过破坏隐形代理梯度，实现对脉冲神经网络的有效且稀疏的对抗攻击（CVPR 2025）。[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.03272)]\n- 重新思考脉冲自注意力机制：在脉冲Transformer中实现α-XNOR相似度计算（CVPR 2025）。[[论文](https:\u002F\u002Fcvpr.thecvf.com\u002Fvirtual\u002F2025\u002Fposter\u002F33850)]\n- 具有空间-时间注意力的脉冲Transformer（CVPR 2025）。[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.19764)]\n- 量化尖峰驱动的Transformer（ICLR 2025）。[[论文](https:\u002F\u002Fopenreview.net\u002Fforum?id=5J9B7Sb8rO)]\n- 从集成学习的角度重新思考脉冲神经网络（ICLR 
2025）。[[论文](https:\u002F\u002Fopenreview.net\u002Fforum?id=ZyknpOQwkT)]\n- DeepTAGE：深层时间对齐梯度增强，用于优化脉冲神经网络（ICLR 2025）。[[论文](https:\u002F\u002Fopenreview.net\u002Fforum?id=drPDukdY3t)]\n- QP-SNN：量化并修剪后的脉冲神经网络（ICLR 2025）。[[论文](https:\u002F\u002Fopenreview.net\u002Fforum?id=MiPyle6Jef)]\n- 脉冲神经网络中的时间灵活性：迈向跨时间步的泛化和部署友好性（ICLR 2025）。[[论文](https:\u002F\u002Fopenreview.net\u002Fforum?id=9HsfTgflT7)]\n- P-SpikeSSM：利用概率性脉冲状态空间模型处理长程依赖任务（ICLR 2025）。[[论文](https:\u002F\u002Fopenreview.net\u002Fforum?id=Sf4ep9Udjf)]\n- TS-LIF：用于时间序列预测的时间段脉冲神经元网络（ICLR 2025）。[[论文](https:\u002F\u002Fopenreview.net\u002Fforum?id=rDe9yQQYKt)]\n- 从压缩效率的角度改善脉冲神经网络的稀疏结构学习（ICLR 2025）。[[论文](https:\u002F\u002Fopenreview.net\u002Fforum?id=gcouwCx7dG)]\n- SpikeLLM：通过显著性驱动的尖峰方式将脉冲神经网络扩展到大型语言模型（ICLR 2025）。[[论文](https:\u002F\u002Fopenreview.net\u002Fforum?id=ZadnlOHsHv)]\n- 具有扫视注意力的脉冲视觉Transformer（ICLR 2025）。[[论文](https:\u002F\u002Fopenreview.net\u002Fforum?id=qzZsz6MuEq)]\n- SpikeGS：重建由快速移动的仿生相机捕捉的3D场景（AAAI 2025）。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.03771v2)]\n- 重新思考基于尖峰相机的高速图像重建框架（AAAI 2025）。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2501.04477)] [[代码](https:\u002F\u002Fgithub.com\u002Fchenkang455\u002FSpikeCLIP)]\n- 用于点云分类的脉冲点Transformer（AAAI 2025）。\n- 基于事件驱动的尖峰稀疏卷积实现高效3D识别（AAAI 2025）。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.07360)] [[代码](https:\u002F\u002Fgithub.com\u002Fbollossom\u002Fe-3dsnn)]\n- GRSN：用于POMDPs和MARL的门控递归脉冲神经元（AAAI 2025）。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2404.15597)]\n- EventZoom：一种渐进式的事件数据增强方法，用于提升神经形态视觉（AAAI 2025）。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.18880)]\n- 利用异步脉冲神经网络实现超高效的事件驱动视觉处理（AAAI 2025）。\n- CREST：一种高效的联合训练的尖峰驱动框架，利用时空动态进行事件驱动的目标检测（AAAI 2025）。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.12525)] [[代码](https:\u002F\u002Fgithub.com\u002Fshen-aoyu\u002FCREST\u002F)]\n- UCF-Crime-DVS：一个新的事件驱动数据集，用于利用脉冲神经网络进行视频异常检测（AAAI 2025）。\n- SpikingSSMs：利用稀疏且并行的状态空间模型学习长序列（AAAI 
2025）。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.14909)] [[代码](https:\u002F\u002Fgithub.com\u002Fshenshuaijie\u002FSDN)]\n- 推进脉冲神经网络向多尺度时空交互学习发展（AAAI 2025）。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.13672)]\n- SpikingYOLOX：结合快速傅里叶卷积和脉冲神经网络改进的YOLOX目标检测（AAAI 2025）。\n- ALADE-SNN：在可动态扩展的脉冲神经网络中实现自适应logit对齐，用于类增量学习（AAAI 2025）。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.12696)]\n- 高性能图像分割的高效尖峰驱动Transformer（AAAI 2025）。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.14587)] [[代码](https:\u002F\u002Fgithub.com\u002FBICLab\u002FSpike2Former)]\n- 向准确的二进制脉冲神经网络迈进：通过自适应梯度调节机制学习（AAAI 2025）。\n- 自适应校准：一种统一的脉冲神经网络转换框架（AAAI 2025）。\n- 通过时间自擦除监督提升SNN中更具辨别力的特征学习（AAAI 2025）。\n- FSTA-SNN：基于频率的空间-时间注意力模块，用于脉冲神经网络（AAAI 2025）。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2501.14744)] [[代码](https:\u002F\u002Fgithub.com\u002Fyukairong\u002FFSTA-SNN)]\n\n### 2024\n\n**综述**\n- 直接训练高性能深度脉冲神经网络：理论与方法综述（《神经科学前沿》2024年）。[[论文](https:\u002F\u002Fwww.frontiersin.org\u002Fjournals\u002Fneuroscience\u002Farticles\u002F10.3389\u002Ffnins.2024.1383844\u002Ffull)] [[arXiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.04289v2)]\n\n**NeurIPS、ACM MM、ECCV、AAAI、ICLR、Frontiers in Neuroscience、CVPR、ICML、IJCAI**\n- SpikedAttention：无需训练且完全由脉冲驱动的Transformer到SNN转换，采用胜者优先的脉冲偏移实现Softmax运算（**NeurIPS 2024**）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=fs28jccJj5)]\n- 流形上的脉冲图神经网络（**NeurIPS 2024**）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=VKt0K3iOmO)]\n- 重新思考脉冲神经网络的动力学行为（**NeurIPS 2024**）。[[论文](https:\u002F\u002Fneurips.cc\u002Fvirtual\u002F2024\u002Fposter\u002F96543)]\n- 长程反馈脉冲网络在电影刺激下捕捉视觉皮层的动态与静态表征（**NeurIPS 2024**）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=bxDok3uaK6)] [[代码](https:\u002F\u002Fgithub.com\u002FGrasshlw\u002FSNN-Neural-Similarity-Movie)]\n- 另辟蹊径：缓解脉冲神经网络训练中的梯度消失问题（**NeurIPS 2024**）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=xjyU6zmZD7)]\n- 基于速率的反向传播提升深度脉冲神经网络的训练效率（**NeurIPS 
2024**）。[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.11488)] [[代码](https:\u002F\u002Fgithub.com\u002FTab-ct\u002Frate-based-backpropagation)]\n- 针对神经脉冲数据的潜在扩散模型（**NeurIPS 2024**）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=ZX6CEo1Wtv)]\n- 脉冲神经网络在自动驾驶中的应用（**NeurIPS 2024**）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=95VyH4VxN9)] [[代码](https:\u002F\u002Fgithub.com\u002Fridgerchu\u002FSAD)]\n- 粗糙信号驱动的随机脉冲神经网络的精确梯度计算（**NeurIPS 2024**）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=mCWZj7pa0M)]\n- 面向脉冲相机高效图像重建的时空交互学习（**NeurIPS 2024**）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=S4ZqnMywcM)]\n- 用于超图最小顶点覆盖问题的无松弛脉冲神经网络公式（**NeurIPS 2024**）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=4A5IQEjG8c)]\n- EnOF：通过增强输出特征表示训练高精度脉冲神经网络（**NeurIPS 2024**）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf\u002F5a4dfaf8dc6861efa8e8356b3bd86743ab98838d.pdf)]\n- 脉冲令牌混合器：一种事件驱动友好的脉冲神经网络前馈结构（**NeurIPS 2024**）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=iYcY7KAkSy)] [[代码](https:\u002F\u002Fgithub.com\u002Fbrain-intelligence-lab\u002FSTMixer_demo)]\n- SpGesture：基于Jaccard注意力机制的脉冲神经网络实现无源域自适应sEMG手势识别（**NeurIPS 2024**）。[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.14398)] [[代码](https:\u002F\u002Fgithub.com\u002Fguoweiyu\u002FSpGesture\u002F)]\n- 具有专家混合的脉冲Transformer（**NeurIPS 2024**）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf\u002F35a5bc54de368426f66605d8e3f447638863888a.pdf)]\n- FEEL-SNN：具有频率编码和进化泄漏因子的鲁棒脉冲神经网络（**NeurIPS 2024**）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=TuCQdBo4NC)] [[代码](https:\u002F\u002Fgithub.com\u002Fzju-bmi-lab\u002FFEEL_SNN)]\n- 脉冲神经网络作为自适应事件流切片器（**NeurIPS 2024**）。[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.02249)]\n- 利用中央模式发生器推进脉冲神经网络在序列建模中的应用（**NeurIPS 2024**）。[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.14362)] [[代码](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002FSeqSNN)]\n- QKFormer：基于Q-K注意力的分层脉冲Transformer（**NeurIPS 
2024**）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=AVd7DpiooC)] [[代码](https:\u002F\u002Fgithub.com\u002Fzhouchenlin2096\u002FQKFormer)]\n- Q-SNNs：量化脉冲神经网络（**ACM MM 2024**）。[[论文](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3664647.3681186)]\n- RSC-SNN：通过随机平滑编码探索脉冲神经网络中对抗鲁棒性与准确性的权衡（**ACM MM 2024**）。[[论文](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3664647.3680639)] [[代码](https:\u002F\u002Fgithub.com\u002FKemingWu\u002FRSC-SNN)]\n- 基于生物启发的知识蒸馏反向结构模式学习应用于脉冲神经网络（**ACM MM 2024**）。[[论文](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3664647.3680655)]\n- 从ANN到SNN转换以构建高性能脉冲Transformer（**ACM MM 2024**）。[[论文](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3664647.3680620)] [[代码](https:\u002F\u002Fgithub.com\u002Fh-z-h-cell\u002FTransformer-to-SNN-ECMT)]\n- 基于混合分步蒸馏的脉冲神经网络实现低延迟事件驱动视觉识别（**ACM MM 2024**）。[[论文](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3664647.3680832)] [[代码](https:\u002F\u002Fgithub.com\u002Fhsw0929\u002FHSD)]\n- 面向高性能与节能目标的整数量化训练及脉冲驱动推理脉冲神经网络用于目标检测（**ECCV 2024**）。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.20708)] [[代码](https:\u002F\u002Fgithub.com\u002FBICLab\u002FSpikeYOLO)]\n- 脉冲小波Transformer（**ECCV 2024**）。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.11138)] [[代码](https:\u002F\u002Fgithub.com\u002Fbic-L\u002FSpiking-Wavelet-Transformer)]\n- 多并行隐式流架构提升脉冲神经网络训练效率（**ECCV 2024**）。[[论文](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2024\u002Fpapers_ECCV\u002Fpapers\u002F05068.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fkiritozc\u002FMPIS-SNNs)]\n- 用于事件驱动视觉的异步生物可信神经元（**ECCV 2024**）。[[论文](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2024\u002Fpapers_ECCV\u002Fpapers\u002F08133.pdf)]\n- BKDSNN：利用模糊知识蒸馏提升基于学习的脉冲神经网络训练性能（**ECCV 2024**）。[[论文](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2024\u002Fpapers_ECCV\u002Fpapers\u002F06649.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FIntelligent-Computing-Research-Group\u002FBKDSNN)]\n- 
探索脉冲神经网络中的漏洞：针对原始事件数据的直接对抗攻击（**ECCV 2024**）。[[论文](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2024\u002Fpapers_ECCV\u002Fpapers\u002F09164.pdf)]\n- EAS-SNN：基于循环脉冲神经网络的端到端自适应采样与表示用于事件驱动检测（**ECCV 2024**）。[[论文](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2024\u002Fpapers_ECCV\u002Fpapers\u002F07766.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FWindere\u002FEAS-SNN)]\n- 用于节能型事件转视频重建的脉冲-时间潜在表征（**ECCV 2024**）。[[论文](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2024\u002Fpapers_ECCV\u002Fpapers\u002F05843.pdf)]\n- EC-SNN：在边缘设备上分割深度脉冲神经网络（**IJCAI 2024**）。[[代码](https:\u002F\u002Fgithub.com\u002FAmazingDD\u002FEC-SNN)]\n- 一步到位且线性复杂度的脉冲Transformer（**IJCAI 2024**）。\n- TIM：一种高效的脉冲Transformer时间交互模块（**IJCAI 2024**）。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2401.11687)] [[代码](https:\u002F\u002Fgithub.com\u002FBrainCog-X\u002FBrain-Cog\u002Ftree\u002Fmain\u002Fexamples\u002FTIM)]\n- 学习用于高效图像去雨的脉冲神经网络（**IJCAI 2024**）。[[代码](https:\u002F\u002Fgithub.com\u002FMingTian99\u002FESDNet)]\n- LitE-SNN：通过时空压缩网络搜索与联合优化设计轻量高效脉冲神经网络（**IJCAI 2024**）。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2401.14652)]\n- 具有突触延时的时序脉冲神经网络用于图推理（**ICML 2024**）。[[论文](https:\u002F\u002Ficml.cc\u002Fvirtual\u002F2024\u002Fposter\u002F35073)]\n- 基于脉冲活动剪枝构建高效深度脉冲神经网络（**ICML 2024**）。[[论文](https:\u002F\u002Ficml.cc\u002Fvirtual\u002F2024\u002Fposter\u002F33505)]\n- 利用脉冲神经网络进行高效且有效的时间序列预测（**ICML 2024**）。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.01533)]\n- 自突触回路增强脉冲神经网络的时空预测学习能力（**ICML 2024**）。[[论文](https:\u002F\u002Ficml.cc\u002Fvirtual\u002F2024\u002Fposter\u002F33269)]\n- 鲁棒稳定的脉冲神经网络（**ICML 2024**）。[[论文](https:\u002F\u002Ficml.cc\u002Fvirtual\u002F2024\u002Fposter\u002F33217)]\n- CLIF：用于脉冲神经网络的互补漏积分发神经元（**ICML 2024**）。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.04663)]\n- NDOT：基于神经动力学的脉冲神经网络在线训练（**ICML 2024**）。[[论文](https:\u002F\u002Ficml.cc\u002Fvirtual\u002F2024\u002Fposter\u002F33481)]\n- 高性能时间可逆脉冲神经网络，训练内存为$O(L)$，推理成本为$O(1)$（**ICML 
2024**）。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.16466)]\n- 向高效脉冲Transformer迈进：用于加速训练和推理的标记稀疏化框架（**ICML 2024**）。[[论文](https:\u002F\u002Ficml.cc\u002Fvirtual\u002F2024\u002Fposter\u002F32674)]\n- SpikeLM：通过弹性双脉冲机制迈向通用脉冲驱动的语言建模（**ICML 2024**）。[[论文](https:\u002F\u002Ficml.cc\u002Fvirtual\u002F2024\u002Fposter\u002F35024)]\n- 基于符号梯度下降的神经动力学：超越ReLU网络的ANN到SNN转换（**ICML 2024**）。[[论文](https:\u002F\u002Ficml.cc\u002Fvirtual\u002F2024\u002Fposter\u002F33242)]\n- 利用稀疏梯度提升SNN的对抗鲁棒性（**ICML 2024**）。[[论文](https:\u002F\u002Ficml.cc\u002Fvirtual\u002F2024\u002Fposter\u002F34066)]\n- SpikeZIP-TF：转换即一切——基于Transformer的SNN转换（**ICML 2024**）。[[论文](https:\u002F\u002Ficml.cc\u002Fvirtual\u002F2024\u002Fposter\u002F34194)]\n- 传统SNN真的高效吗？来自网络量化视角的思考（**CVPR 2024**）。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.10802)]\n- SFOD：脉冲融合目标检测器（**CVPR 2024**）。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.15192)] [[代码](https:\u002F\u002Fgithub.com\u002Fyimeng-fan\u002FSFOD)]\n- SpikingResformer：在脉冲神经网络中桥接ResNet与Vision Transformer（**CVPR 2024**）。[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.14302)] [[代码](https:\u002F\u002Fgithub.com\u002Fxyshi2000\u002FSpikingResformer)]\n- SGLFormer：高性能的脉冲全局-局部融合Transformer（**Frontiers in Neuroscience 2024**）。[[论文](https:\u002F\u002Fwww.frontiersin.org\u002Fjournals\u002Fneuroscience\u002Farticles\u002F10.3389\u002Ffnins.2024.1371290\u002Ffull)] [[代码](https:\u002F\u002Fgithub.com\u002FZhangHanN1\u002FSGLFormer)]\n- 向节能型脉冲神经网络迈进：一种非结构化剪枝框架（**ICLR 2024**）。[[论文](https:\u002F\u002Fopenreview.net\u002Fforum?id=eoSeaK4QJo&referrer=%5Bthe%20profile%20of%20Zecheng%20Hao%5D(%2Fprofile%3Fid%3D~Zecheng_Hao1))]\n- 脉冲神经网络的在线稳定性提升（**ICLR 2024**）。[[论文](https:\u002F\u002Fopenreview.net\u002Fforum?id=CIj1CVbkpr)]\n- SpikePoint：一种高效的基于点的脉冲神经网络，用于事件相机动作识别（**ICLR 2024**）。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.07189.pdf)]\n- 空间-时间近似：一种无需训练的Transformer到SNN转换方法（**ICLR 2024**）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=XrunSYwoLr)]\n- 
稀疏脉冲神经网络：利用时间尺度异质性剪枝递归SNN（**ICLR 2024**）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=0jsfesDZDq)]\n- 使用带有可学习间距的扩张卷积学习脉冲神经网络中的延迟（**ICLR 2024**）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=4r2ybzJnmN)] [[代码](https:\u002F\u002Fgithub.com\u002FThvnvtos\u002FSNN-delays)]\n- 结合速率与时间信息威胁脉冲神经网络（**ICLR 2024**）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=xv8iGxENyI)] [[代码](https:\u002F\u002Fgithub.com\u002Fhzc1208\u002FHART_Attack)]\n- TAB：脉冲神经网络中的时间累积批归一化（**ICLR 2024**）。[[论文](https:\u002F\u002Fopenreview.net\u002Fforum?id=k1wlmtPGLq&noteId=p5M9gOLAOf)]\n- 针对速率编码脉冲神经网络的认证对抗鲁棒性（**ICLR 2024**）。[[论文](https:\u002F\u002Fopenreview.net\u002Fforum?id=5bNYf0CqxY)]\n- 基于潜在结构的神经脉冲活动贝叶斯双聚类分析（**ICLR 2024**）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=ZYm1Ql6udy)]\n- 具有平衡兴奋抑制机制的自适应深度脉冲神经网络，实现全局-局部学习（**ICLR 2024**）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=wpnlc2ONu0)]\n- 基于赫布学习的正交投影用于脉冲神经网络的持续学习（**ICLR 2024**）。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.11984.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fpkuxmq\u002FHLOP-SNN)]\n- 用于脉冲神经网络的渐进式训练框架，支持可学习的多层级模型（**ICLR 2024**）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=g52tgL8jy6)] [[代码](https:\u002F\u002Fgithub.com\u002Fhzc1208\u002FSTBP_LMH)]\n- LMUFormer：低复杂度却功能强大的脉冲模型，采用勒让德记忆单元（**ICLR 2024**）。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.04882.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fzeyuliu1037\u002FLMUFormer)]\n- 脉冲驱动Transformer V2：启发下一代神经形态芯片设计的元脉冲神经网络架构（**ICLR 2024**）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=1SIBN5Xyw7)] [[代码](https:\u002F\u002Fgithub.com\u002FBICLab\u002FSpike-Driven-Transformer-V2)]\n- 我们能否兼得二值神经网络与脉冲神经网络的优势，实现高效的计算机视觉任务？（**ICLR 2024**）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=lGUyAuuTYZ)] [[代码](https:\u002F\u002Fgithub.com\u002Fgodatta\u002FUltra-Low-Latency-SNN)]\n- 一张图胜过1比特脉冲：当图对比学习遇到脉冲神经网络时（**ICLR 2024**）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=LnLySuf1vp)] 
[[代码](https:\u002F\u002Fgithub.com\u002FEdisonLeeeee\u002FSpikeGCL)]\n- 三态脉冲：为脉冲神经网络学习三态脉冲（**AAAI 2024**）。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.06372.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fyfguo91\u002FTernary-Spike)]\n- 内存高效的可逆脉冲神经网络（**AAAI 2024**）。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.07922.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fmi804\u002FRevSNN)]\n- 门控注意力编码用于训练高性能且高效的脉冲神经网络（**AAAI 2024**）。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2308.06582.pdf)]\n- SpikingBERT：利用隐式微分将BERT蒸馏为脉冲语言模型（**AAAI 2024**）。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2308.10873.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FNeuroCompLab-psu\u002FSpikingBERT)]\n- TC-LIF：一种两室结构的脉冲神经元模型，适用于长期序列建模（**AAAI 2024**）。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2308.13250.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FZhangShimin1\u002FTC-LIF)]\n- 缩小你的时间步长：迈向低延迟神经形态目标识别的脉冲神经网络（**AAAI 2024**）。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2401.01912.pdf)]\n- 动态脉冲图神经网络（**AAAI 2024**）。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2401.05373.pdf)]\n- 一种高效的脉冲神经网络知识迁移策略，用于从静态领域到事件领域的转换（**AAAI 2024**）。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2303.13077.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FBrain-Cog-Lab\u002FTransfer-for-DVS)]\n\n**Arxiv**\n- 受大脑启发的脉冲神经网络在工业故障诊断中的应用：综述、挑战与机遇。[论文](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2401.02429)  \n- Q-SNNs：量化脉冲神经网络。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.13672)]\n- 无矩阵乘法的可扩展语言建模。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.02528)] [[代码](https:\u002F\u002Fgithub.com\u002Fridgerchu\u002Fmatmulfreellm)]\n- QKFormer：基于Q-K注意力机制的层次化脉冲Transformer。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.16552.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fzhouchenlin2096\u002FQKFormer)]\n- Spikformer V2：以脉冲神经网络为“入场券”，加入ImageNet高精度俱乐部。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2401.02020.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FZK-Zhou\u002Fspikformer)]\n- 
SpikeNAS：面向脉冲神经网络系统的快速内存感知型神经架构搜索框架。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.11322.pdf)]\n- 星形胶质细胞助力脉冲神经网络在大规模语言建模中的发展。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.07625v2.pdf)]\n\n\n\n\n### 2023\n\n**综述**\n- 基于直接学习的深度脉冲神经网络：综述（《神经科学前沿》2023年）。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2305.19725.pdf)]\n\n**AAAI、ICLR、CVPR、ICML、IJCAI、ICCV、NeurIPS、TPAMI、Science Advances**\n- 基于循环脉冲神经网络的时间序列预测与异常检测（**IJCNN 2023**）。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10191614)]\n- SpikingJelly：面向脉冲智能的开源机器学习基础设施平台（**Science Advances 2023**）。[[论文](https:\u002F\u002Fwww.science.org\u002Fdoi\u002F10.1126\u002Fsciadv.adi1480)] [[代码](https:\u002F\u002Fgithub.com\u002Ffangwei123456\u002Fspikingjelly)]\n- 脉冲驱动的Transformer [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2307.01694.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FBICLab\u002FSpike-Driven-Transformer)]\n- 具有高效率和长时依赖学习能力的并行脉冲神经元（**NeurIPS 2023**）。[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.12760)] [[代码](https:\u002F\u002Fgithub.com\u002Ffangwei123456\u002FParallel-Spiking-Neuron)]\n- 针对自然视觉场景下神经响应的时序条件化脉冲潜变量模型（**NeurIPS 2023**）。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.12045.pdf)]\n- SEENN：面向时序脉冲早期退出神经网络（**NeurIPS 2023**）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=mbaN0Y0QTw)]\n- EICIL：用于深度脉冲神经网络的兴奋抑制联合循环迭代学习（**NeurIPS 2023**）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=OMDgOjdqoZ)]\n- 解决自适应脉冲神经元的速度-精度仿真权衡问题（**NeurIPS 2023**）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=Ht79ZTVMsn)]\n- 在循环神经网络中利用脉冲卷积块注意力模块增强自适应历史保留能力（**NeurIPS 2023**）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=aGZp61S9Lj)]\n- 试验匹配：用数据约束型脉冲神经网络捕捉变异性（**NeurIPS 2023**）。[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.03603)]\n- 循环脉冲神经网络的进化连接性（**NeurIPS 2023**）。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2305.17650.pdf)]\n- SparseProp：稀疏循环脉冲神经网络的高效事件驱动仿真与训练（**NeurIPS 2023**）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=yzZbwQPkmP)]\n- Spiking 
PointNet：用于点云的脉冲神经网络（**NeurIPS 2023**）。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.06232v1.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fdayongren\u002Fspiking-pointnet)]\n- 探索脉冲神经网络基于时间的训练策略中的损失函数（**NeurIPS 2023**）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=8IvW2k5VeA)]\n- 脉冲神经网络的膜电位批归一化（**ICCV 2023**）。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FGuo_Membrane_Potential_Batch_Normalization_for_Spiking_Neural_Networks_ICCV_2023_paper.pdf)]\n- 利用动态置信度释放脉冲神经网络的潜力（**ICCV 2023**）。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FLi_Unleashing_the_Potential_of_Spiking_Neural_Networks_with_Dynamic_Confidence_ICCV_2023_paper.pdf)]\n- RMP-Loss：用于脉冲神经网络的膜电位分布正则化（**ICCV 2023**）。[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.06787)]\n- 脉冲神经网络中的固有冗余性（**ICCV 2023**）。[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.08227)]\n- 具有动态发放阈值的时序编码脉冲神经网络：基于事件驱动反向传播的学习（**ICCV 2023**）。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FWei_Temporal-Coded_Spiking_Neural_Networks_with_Dynamic_Firing_Threshold_Learning_with_ICCV_2023_paper.pdf)]\n- 用于3D和2D分类的高效转换型脉冲神经网络（**ICCV 2023**）。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FLan_Efficient_Converted_Spiking_Neural_Network_for_3D_and_2D_Classification_ICCV_2023_paper.pdf)]\n- 用于目标检测的深度直接训练脉冲神经网络（**ICCV 2023**）。[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.11411)]\n- 向内存和时间高效的脉冲神经网络训练反向传播迈进（**ICCV 2023**）。[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.14311)]\n- SSF：利用稳定化脉冲流加速脉冲神经网络训练（**ICCV 2023**）。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FWang_SSF_Accelerating_Training_of_Spiking_Neural_Networks_with_Stabilized_Spiking_ICCV_2023_paper.pdf)]\n- 掩码脉冲Transformer（**ICCV 
2023**）。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FWang_Masked_Spiking_Transformer_ICCV_2023_paper.pdf)]\n- 异步脉冲神经网络的空间-时间自注意力机制（**IJCAI 2023**）。[[论文](https:\u002F\u002Fwww.ijcai.org\u002Fproceedings\u002F2023\u002F0344.pdf)]\n- 用于直接训练脉冲神经网络的可学习替代梯度（**IJCAI 2023**）。[[论文](https:\u002F\u002Fwww.ijcai.org\u002Fproceedings\u002F2023\u002F0335.pdf)]\n- 利用脉冲神经网络的动态结构发展提升高效持续学习能力（**IJCAI 2023**）。[[论文](https:\u002F\u002Fwww.ijcai.org\u002Fproceedings\u002F2023\u002F0334.pdf)]\n- 脉冲神经网络的自适应平滑梯度学习（**ICML 2023**）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=GdkwSGTpbC)]\n- 替代模块学习：减少脉冲神经网络训练中的梯度误差累积（**ICML 2023**）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=zRkz4duLKp)] [[代码](https:\u002F\u002Fgithub.com\u002Fbrain-intelligence-lab\u002Fsurrogate_module_learning)]\n- 率梯度近似攻击威胁深度脉冲神经网络（**CVPR 2023**）。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fpapers\u002FBu_Rate_Gradient_Approximation_Attack_Threats_Deep_Spiking_Neural_Networks_CVPR_2023_paper.pdf)]\n- 利用知识蒸馏从人工神经网络构建深度脉冲神经网络（**CVPR 2023**）。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fpapers\u002FXu_Constructing_Deep_Spiking_Neural_Networks_From_Artificial_Neural_Networks_With_CVPR_2023_paper.pdf)]\n- 注意力脉冲神经网络（**TPAMI 2023**）。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10032591)] [[代码](https:\u002F\u002Fgithub.com\u002Ffangwei123456\u002Fspikingjelly\u002Fpull\u002F329)]\n- 非均匀的神经元和突触动力学用于脉冲高效的无监督学习：理论与设计原则（**ICLR 2023**）。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2302.11618.pdf)]\n- 用于文本分类的脉冲卷积神经网络（**ICLR 2023**）。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=pgU3k7QXuz0)]\n- 通过校准偏移脉冲弥合ANN与SNN之间的差距（**ICLR 2023**）。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2302.10685.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fhzc1208\u002FANN2SNN_COS)]\n- Spikformer：当脉冲神经网络遇见Transformer时（**ICLR 2023**）。[[论文](https:\u002F\u002Fopenreview.net\u002Fforum?id=frE4fUwz_h)] 
[[代码](https:\u002F\u002Fgithub.com\u002FZK-Zhou\u002Fspikformer)]\n- 统一的软阈值剪枝框架（**ICLR 2023**）。[[论文](https:\u002F\u002Fopenreview.net\u002Fforum?id=cCFqcrq0d8)] [[代码](https:\u002F\u002Fgithub.com\u002FYanqi-Chen\u002FLATS)]\n- 通过校准偏移脉冲弥合ANN与SNN之间的差距（**ICLR 2023**）。[[论文](https:\u002F\u002Fopenreview.net\u002Fforum?id=PFbzoWZyZRX)] [[代码](https:\u002F\u002Fgithub.com\u002Fhzc1208\u002FANN2SNN_COS)]\n- 通过残差膜电位降低ANN到SNN的转换误差（**AAAI 2023**）。[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.02091)] [[代码](https:\u002F\u002Fgithub.com\u002Fhzc1208\u002FANN2SNN_SRP)]\n- 拥有高度表征相似性的深度脉冲神经网络模拟了猕猴和小鼠的视觉通路（**AAAI 2023**）。[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.06060)]\n- ESL-SNNs：一种用于脉冲神经网络的进化式结构学习策略（**AAAI 2023**）。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.03693.pdf)]\n- 探索脉冲神经网络中的时序信息动态（**AAAI 2023**）。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2211.14406.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FIntelligent-Computing-Lab-Yale\u002FExploring-Temporal-Information-Dynamics-in-Spiking-Neural-Networks)]\n- 通过脉冲神经网络扩展动态图表示学习（**AAAI 2023**）。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2208.10364.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FEdisonLeeeee\u002FSpikeNet)]\n- 复杂动态神经元改进的脉冲Transformer网络，实现高效的自动语音识别（**AAAI 2023**）。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2302.01194.pdf)]\n\n**Arxiv**\n- Spikingformer: Spike-driven Residual Learning for Transformer-based Spiking Neural Network [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.11954)] [[code](https:\u002F\u002Fgithub.com\u002Fzhouchenlin2096\u002FSpikingformer)]\n- Enhancing the Performance of Transformer-based Spiking Neural Networks by Improved Downsampling with Precise Gradient Backpropagation [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.05954)] [[code](https:\u002F\u002Fgithub.com\u002Fzhouchenlin2096\u002FSpikingformer-CML)]\n- Training Full Spike Neural Networks via Auxiliary Accumulation Pathway 
[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2301.11929.pdf)]\n- MSS-DepthNet: Depth Prediction with Multi-Step Spiking Neural Network [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.12156)]\n- SpikeGPT: Generative Pre-trained Language Model with Spiking Neural Networks [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.13939)] [[code](https:\u002F\u002Fgithub.com\u002Fridgerchu\u002FSpikeGPT)]\n- Auto-Spikformer: Spikformer Architecture Search [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.00807.pdf)]\n- Advancing Spiking Neural Networks Towards Deep Residual Learning [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2112.08954.pdf)]\n\n\n\n\n### 2022\n\n**NeurIPS, CVPR, ICLR, AAAI, ICML, Nature Communications**\n\n- Event-based Video Reconstruction via Potential-assisted Spiking Neural Network [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2201.10943)] [[code](https:\u002F\u002Fgithub.com\u002FLinZhu111\u002FEVSNN)]\n- Optimal ANN-SNN Conversion for High-accuracy and Ultra-low-latency Spiking Neural Networks [[paper](https:\u002F\u002Fopenreview.net\u002Fforum?id=7B3IJMM1k_M)] [[code](https:\u002F\u002Fgithub.com\u002Fputshua\u002FSNN-conversion-QCFS)]\n- Optimized Potential Initialization for Low-latency Spiking Neural Networks (**AAAI 2022**).  
[[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.01440)]\n- AutoSNN: Towards Energy-Efficient Spiking Neural Networks [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2201.12738)]\n- Neural Architecture Search for Spiking Neural Networks [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2201.10355)] [[code](https:\u002F\u002Fgithub.com\u002FIntelligent-Computing-Lab-Yale\u002FNeural-Architecture-Search-for-Spiking-Neural-Networks)]\n- Neuromorphic Data Augmentation for Training Spiking Neural Networks [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.06145)] [[code](https:\u002F\u002Fgithub.com\u002FIntelligent-Computing-Lab-Yale\u002FNDA_SNN)]\n- State Transition of Dendritic Spines Improves Learning of Sparse Spiking Neural Networks [[paper](https:\u002F\u002Fproceedings.mlr.press\u002Fv162\u002Fchen22ac.html)] [[code](https:\u002F\u002Fgithub.com\u002FYanqi-Chen\u002FSTDS)]\n- Training High-Performance Low-Latency Spiking Neural Networks by Differentiation on Spike Representation [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.00459)] [[code](https:\u002F\u002Fgithub.com\u002Fqymeng94\u002FDSR)]\n- Exploring Lottery Ticket Hypothesis in Spiking Neural Networks [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.01382)] [[code](https:\u002F\u002Fgithub.com\u002FIntelligent-Computing-Lab-Yale\u002FExploring-Lottery-Ticket-Hypothesis-in-SNNs)]\n- Spiking Graph Convolutional Networks [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.02767)] [[code](https:\u002F\u002Fgithub.com\u002FZulunZhu\u002FSpikingGCN)]\n- A calibratable sensory neuron based on epitaxial VO2 for spike-based neuromorphic multisensory system [[paper](https:\u002F\u002Fwww.nature.com\u002Farticles\u002Fs41467-022-31747-w)] [[code](https:\u002F\u002Fgithub.com\u002Fbillyuanpku96\u002FSNN-for-sensory-neuron)]\n- Online Training Through Time for Spiking Neural Networks (**NeurIPS 2022**).  
[[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.04195)] [[code](https:\u002F\u002Fgithub.com\u002Fpkuxmq\u002FOTTT-SNN)]\n- Training Spiking Neural Networks with Event-driven Backpropagation [[paper](https:\u002F\u002Fopenreview.net\u002Fforum?id=d4JmP1T45WE)] [[code](https:\u002F\u002Fgithub.com\u002Fzhuyaoyu\u002FSNN-event-driven-learning)]\n- GLIF: A Unified Gated Leaky Integrate-and-Fire Neuron for Spiking Neural Networks [[paper](https:\u002F\u002Fopenreview.net\u002Fforum?id=UmFSx2c4ubT)] [[code](https:\u002F\u002Fgithub.com\u002FIkarosy\u002FGated-LIF)]\n- Temporal Effective Batch Normalization in Spiking Neural Networks [[paper](https:\u002F\u002Fopenreview.net\u002Fforum?id=fLIgyyQiJqz)]\n- Training Spiking Neural Networks with Local Tandem Learning (**NeurIPS 2022**). [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2210.04532.pdf)]\n- IM-Loss: Information Maximization Loss for Spiking Neural Networks (**NeurIPS 2022**). [[paper](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper_files\u002Fpaper\u002F2022\u002Ffile\u002F010c5ba0cafc743fece8be02e7adb8dd-Paper-Conference.pdf)]\n- Temporal Effective Batch Normalization in Spiking Neural Networks (**NeurIPS 2022**). [[paper](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper_files\u002Fpaper\u002F2022\u002Ffile\u002Fde2ad3ed44ee4e675b3be42aa0b615d0-Paper-Conference.pdf)]\n- Biologically Inspired Dynamic Thresholds for Spiking Neural Networks (**NeurIPS 2022**). [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2206.04426.pdf)]\n- Optimal Conversion of Conventional Artificial Neural Networks to Spiking Neural Networks (**ICLR 2022**).  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2103.00476.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FJackn0\u002Fsnn_optimal_conversion_pipeline)]\n- Multi-Level Firing with Spiking DS-ResNet: Enabling Better and Deeper Directly-Trained Spiking Neural Networks (**IJCAI 2022**). 
[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2210.06386.pdf)]\n\n### 2021\n\n**NeurIPS, ICCV, IJCAI, ICML, AAAI**\n\n- Deep Residual Learning in Spiking Neural Networks (**NeurIPS 2021**). [[paper](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2021\u002Ffile\u002Fafe434653a898da20044041262b3ac74-Paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Ffangwei123456\u002FSpike-Element-Wise-ResNet)]\n- Spiking Deep Residual Network[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1805.01352.pdf)]\n- Incorporating Learnable Membrane Time Constant to Enhance Learning of Spiking Neural Networks (**ECCV 2021**).  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2007.05785)]  [[code](https:\u002F\u002Fgithub.com\u002Ffangwei123456\u002FParametric-Leaky-Integrate-and-Fire-Spiking-Neuron)]\n- Pruning of Deep Spiking Neural Networks through Gradient Rewiring [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2105.04916)] [[code](https:\u002F\u002Fgithub.com\u002FYanqi-Chen\u002FGradient-Rewiring)]\n- A Free Lunch From ANN: Towards Efficient, Accurate Spiking Neural Networks Calibration  (**ICML 2021**).  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2106.06984)] [[code](https:\u002F\u002Fgithub.com\u002Fyhhhli\u002FSNN_Calibration)]\n- Optimal ANN-SNN Conversion for Fast and Accurate Inference in Deep Spiking Neural Networks [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2105.11654)] [[code](https:\u002F\u002Fgithub.com\u002FDingJianhao\u002FOptSNNConvertion-RNL-RIL)]\n- Sparse Spiking Gradient Descent (**NeurIPS 2021**). [[paper](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2021\u002Ffile\u002F61f2585b0ebcf1f532c4d1ec9a7d51aa-Paper.pdf)]\n- Training Spiking Neural Networks with Accumulated Spiking Flow (**AAAI 2021**). [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2011.05280.pdf)]\n- Temporal-wise Attention Spiking Neural Networks for Event Streams Classification. (**ECCV 2021**). 
[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FYao_Temporal-Wise_Attention_Spiking_Neural_Networks_for_Event_Streams_Classification_ICCV_2021_paper.pdf)]\n\n### 参考文献\n如果您觉得本仓库对您有帮助，请考虑引用以下文献：\n```\n@article{zhou2024direct,\n  title={直接训练高性能深度脉冲神经网络：理论与方法综述},\n  author={周晨林、张瀚、于柳涛、叶宇民、周兆坤、黄立伟、马正宇、范晓鹏、周慧慧、田永红},\n  journal={Frontiers in Neuroscience},\n  volume={18},\n  pages={1383844},\n  year={2024},\n  publisher={Frontiers Media SA}\n}\n```","# Awesome-Spiking-Neural-Networks 快速上手指南\n\n`Awesome-Spiking-Neural-Networks` 并非一个单一的 Python 库或可执行工具，而是一个**持续更新的脉冲神经网络（SNN）论文与代码资源合集**。本指南将帮助开发者如何利用该资源库快速定位前沿研究、获取代码实现并复现最新成果。\n\n## 环境准备\n\n由于该仓库收录了来自 AAAI、ICLR、NeurIPS 等顶会的众多独立项目，不同论文对应的代码库依赖环境各异。建议准备以下通用开发环境：\n\n*   **操作系统**: Linux (推荐 Ubuntu 20.04\u002F22.04) 或 macOS\n*   **Python 版本**: Python 3.8 - 3.10 (多数深度学习项目兼容范围)\n*   **核心框架**: PyTorch (主流 SNN 代码多基于此)\n*   **硬件要求**: NVIDIA GPU (支持 CUDA)，用于加速大规模 SNN 训练与推理\n*   **前置依赖**:\n    *   Git (用于克隆仓库)\n    *   Conda 或 venv (用于管理不同论文的独立虚拟环境)\n\n## 安装步骤\n\n本项目本身是一个文档索引库，无需通过 `pip` 安装。请按以下步骤获取资源：\n\n1.  **克隆仓库**\n    使用 Git 将资源库下载到本地：\n    ```bash\n    git clone https:\u002F\u002Fgithub.com\u002FYOUR-TARGET-REPO\u002FAwesome-Spiking-Neural-Networks.git\n    # 注意：请替换为实际的 GitHub 仓库地址，若原链接不可用，可在 GitHub 搜索 \"Awesome Spiking Neural Networks\" 获取最新镜像\n    ```\n\n    *国内加速方案*: 如果直接克隆速度较慢，可使用 Gitee 镜像（如有）或通过代理加速：\n    ```bash\n    git clone https:\u002F\u002Fgitee.com\u002Fmirror\u002FAwesome-Spiking-Neural-Networks.git\n    ```\n\n2.  **浏览与筛选论文**\n    进入目录，查看 `README.md` 文件。该文件按年份（2026, 2025, 2024...）和会议（AAAI, ICLR, CVPR 等）分类整理了论文列表。\n    ```bash\n    cd Awesome-Spiking-Neural-Networks\n    cat README.md\n    ```\n\n3.  
**获取具体代码**\n    在 `README.md` 中找到感兴趣的论文条目（例如 *Spikingformer: A Key Foundation Model for Spiking Neural Networks*），点击其附带的 `[paper]` 或代码链接。\n    *   大多数条目会指向具体的 GitHub 代码仓库。\n    *   进入对应子项目的仓库后，按照该项目独立的 `README` 进行环境配置（通常涉及 `pip install -r requirements.txt`）。\n\n## 基本使用\n\n使用该资源库的核心工作流是\"**检索 -> 定位 -> 复现**\"。以下是基于 2026 年最新成果的简单使用示例：\n\n### 示例：复现一篇 ICLR 2026 的 SNN 论文\n\n假设你想研究 **\"Spikingformer: A Key Foundation Model for Spiking Neural Networks\"** (AAAI 2026\u002FICLR 2026 相关方向)：\n\n1.  **查找链接**: 在本仓库的 `README.md` \"2026\" -> \"AAAI, ICLR\" 章节下，找到该论文标题及对应的代码链接。\n2.  **创建独立环境**: 为避免依赖冲突，为该项目创建新的 Conda 环境：\n    ```bash\n    conda create -n snn_spikingformer python=3.9\n    conda activate snn_spikingformer\n    ```\n3.  **克隆项目代码**: 跳转到该论文具体的 GitHub 仓库地址并克隆：\n    ```bash\n    git clone https:\u002F\u002Fgithub.com\u002Fauthor\u002FSpikingformer.git\n    cd Spikingformer\n    ```\n4.  **安装依赖并运行**: 根据该项目具体的说明安装依赖并运行演示脚本：\n    ```bash\n    pip install torch torchvision torchaudio --index-url https:\u002F\u002Fdownload.pytorch.org\u002Fwhl\u002Fcu118\n    pip install -r requirements.txt\n    \n    # 运行训练或测试示例 (命令依具体项目而定)\n    python main.py --config configs\u002Fspikingformer_cifar10.yml\n    ```\n\n### 资源利用技巧\n*   **追踪前沿**: 定期查看 `News` 部分，了解 2026 年 AAAI、ICLR 等会议新增的 30+ 篇 SNN 论文。\n*   **领域细分**: 利用目录结构快速定位特定任务，如“点云分析”（Point Cloud Analysis）、“图像恢复”（Image Restoration）或“大语言模型”（LLMs）相关的 SNN 研究。\n*   **代码对比**: 该仓库汇集了多种训练策略（如替代梯度、ANN-to-SNN 转换、无监督学习），可下载不同实现的代码进行性能对比。","某神经形态计算实验室的研究团队正致力于开发一款低功耗的无人机视觉避障系统，急需寻找适合边缘设备部署的最新脉冲神经网络（SNN）模型。\n\n### 没有 Awesome-Spiking-Neural-Networks 时\n- **文献检索如大海捞针**：研究人员需分别在 arXiv、IEEE Xplore 及各大顶会官网手动搜索，极易遗漏如 ICLR 2026 中关于\"3DSMT 混合架构”或\"Otters 光编码”等关键论文。\n- **代码复现成本高昂**：找到论文后，往往难以定位官方开源代码，常因缺少实现细节导致复现失败，浪费数周时间调试基础环境。\n- **技术选型缺乏依据**：无法快速对比不同方案在“鲁棒性”与“能耗”上的表现，难以判断哪些新机制（如侧向抑制训练）真正适用于无人机场景。\n- **前沿动态更新滞后**：依赖人工订阅邮件或定期刷新的方式，导致团队对 Nature、Science 或 NeurIPS 最新发布的 SNN 成果反应迟缓，错失技术迭代窗口。\n\n### 使用 Awesome-Spiking-Neural-Networks 后\n- **一站式获取顶会精华**：直接查阅按年份和会议（如 
AAAI、ICLR、CVPR）分类的清单，瞬间锁定 2026 年最新的 30 篇 ICLR 论文及 11 篇 Nature\u002FScience 成果。\n- **论文代码无缝衔接**：每个条目均附带论文链接与对应代码库地址，团队可立即克隆\"TP-Spikformer\"等项目进行验证，将环境搭建时间从数周缩短至数天。\n- **精准匹配应用场景**：通过浏览标题与摘要，快速筛选出专攻“抗对抗攻击”或“少样本增量学习”的模型，迅速确定采用 SAFA-SNN 作为核心算法。\n- **实时同步全球进展**：依托仓库的持续更新机制，团队能第一时间掌握 2025-2026 年的最新突破，确保技术路线始终处于行业最前沿。\n\nAwesome-Spiking-Neural-Networks 将原本分散且高门槛的科研资源整合为高效的知识引擎，极大加速了从理论探索到工程落地的全过程。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FTheBrainLab_Awesome-Spiking-Neural-Networks_51a44941.png","TheBrainLab","The BRAIN Lab","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002FTheBrainLab_914a9a25.png","The Bidirectional Research in AI and Neuroscience (BRAIN) Lab",null,"https:\u002F\u002Fgithub.com\u002FTheBrainLab",735,75,"2026-04-03T02:52:50",5,"","未说明",{"notes":88,"python":86,"dependencies":89},"该仓库是一个脉冲神经网络（SNN）相关论文和代码的精选列表（Awesome List），而非单一的独立软件工具。README 内容主要包含历年（2023-2026）顶级会议（如 ICLR, AAAI, NeurIPS 等）的论文标题及链接。由于它本身不提供统一的安装脚本或运行环境，具体的操作系统、GPU、内存、Python 版本及依赖库需求需参考列表中各个具体论文所附带的独立代码仓库。",[],[13],[92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109],"codes","papers","snn","spiking","spiking-neural-network","paperlist","deep-learning","brain-inspired","event-driven","neuromorphic-computing","spikingjelly","3rd-generation-of-artificial-neural-networks","biological-plausibility","energy-efficiency","binary","spike","awesome-list","awesome","2026-03-27T02:49:30.150509","2026-04-06T08:42:12.702566",[],[]]