[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-flyingdoog--awesome-graph-explainability-papers":3,"tool-flyingdoog--awesome-graph-explainability-papers":61},[4,18,26,36,44,53],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":17},4358,"openclaw","openclaw\u002Fopenclaw","OpenClaw 是一款专为个人打造的本地化 AI 助手，旨在让你在自己的设备上拥有完全可控的智能伙伴。它打破了传统 AI 助手局限于特定网页或应用的束缚，能够直接接入你日常使用的各类通讯渠道，包括微信、WhatsApp、Telegram、Discord、iMessage 等数十种平台。无论你在哪个聊天软件中发送消息，OpenClaw 都能即时响应，甚至支持在 macOS、iOS 和 Android 设备上进行语音交互，并提供实时的画布渲染功能供你操控。\n\n这款工具主要解决了用户对数据隐私、响应速度以及“始终在线”体验的需求。通过将 AI 部署在本地，用户无需依赖云端服务即可享受快速、私密的智能辅助，真正实现了“你的数据，你做主”。其独特的技术亮点在于强大的网关架构，将控制平面与核心助手分离，确保跨平台通信的流畅性与扩展性。\n\nOpenClaw 非常适合希望构建个性化工作流的技术爱好者、开发者，以及注重隐私保护且不愿被单一生态绑定的普通用户。只要具备基础的终端操作能力（支持 macOS、Linux 及 Windows WSL2），即可通过简单的命令行引导完成部署。如果你渴望拥有一个懂你",349277,3,"2026-04-06T06:32:30",[13,14,15,16],"Agent","开发框架","图像","数据工具","ready",{"id":19,"name":20,"github_repo":21,"description_zh":22,"stars":23,"difficulty_score":10,"last_commit_at":24,"category_tags":25,"status":17},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,"2026-04-05T11:01:52",[14,15,13],{"id":27,"name":28,"github_repo":29,"description_zh":30,"stars":31,"difficulty_score":32,"last_commit_at":33,"category_tags":34,"status":17},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 
代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",150037,2,"2026-04-10T23:33:47",[14,13,35],"语言模型",{"id":37,"name":38,"github_repo":39,"description_zh":40,"stars":41,"difficulty_score":32,"last_commit_at":42,"category_tags":43,"status":17},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",108322,"2026-04-10T11:39:34",[14,15,13],{"id":45,"name":46,"github_repo":47,"description_zh":48,"stars":49,"difficulty_score":32,"last_commit_at":50,"category_tags":51,"status":17},6121,"gemini-cli","google-gemini\u002Fgemini-cli","gemini-cli 是一款由谷歌推出的开源 AI 命令行工具，它将强大的 Gemini 大模型能力直接集成到用户的终端环境中。对于习惯在命令行工作的开发者而言，它提供了一条从输入提示词到获取模型响应的最短路径，无需切换窗口即可享受智能辅助。\n\n这款工具主要解决了开发过程中频繁上下文切换的痛点，让用户能在熟悉的终端界面内直接完成代码理解、生成、调试以及自动化运维任务。无论是查询大型代码库、根据草图生成应用，还是执行复杂的 Git 操作，gemini-cli 都能通过自然语言指令高效处理。\n\n它特别适合广大软件工程师、DevOps 人员及技术研究人员使用。其核心亮点包括支持高达 100 万 token 的超长上下文窗口，具备出色的逻辑推理能力；内置 Google 搜索、文件操作及 Shell 命令执行等实用工具；更独特的是，它支持 MCP（模型上下文协议），允许用户灵活扩展自定义集成，连接如图像生成等外部能力。此外，个人谷歌账号即可享受免费的额度支持，且项目基于 Apache 2.0 
协议完全开源，是提升终端工作效率的理想助手。",100752,"2026-04-10T01:20:03",[52,13,15,14],"插件",{"id":54,"name":55,"github_repo":56,"description_zh":57,"stars":58,"difficulty_score":32,"last_commit_at":59,"category_tags":60,"status":17},4721,"markitdown","microsoft\u002Fmarkitdown","MarkItDown 是一款由微软 AutoGen 团队打造的轻量级 Python 工具，专为将各类文件高效转换为 Markdown 格式而设计。它支持 PDF、Word、Excel、PPT、图片（含 OCR）、音频（含语音转录）、HTML 乃至 YouTube 链接等多种格式的解析，能够精准提取文档中的标题、列表、表格和链接等关键结构信息。\n\n在人工智能应用日益普及的今天，大语言模型（LLM）虽擅长处理文本，却难以直接读取复杂的二进制办公文档。MarkItDown 恰好解决了这一痛点，它将非结构化或半结构化的文件转化为模型“原生理解”且 Token 效率极高的 Markdown 格式，成为连接本地文件与 AI 分析 pipeline 的理想桥梁。此外，它还提供了 MCP（模型上下文协议）服务器，可无缝集成到 Claude Desktop 等 LLM 应用中。\n\n这款工具特别适合开发者、数据科学家及 AI 研究人员使用，尤其是那些需要构建文档检索增强生成（RAG）系统、进行批量文本分析或希望让 AI 助手直接“阅读”本地文件的用户。虽然生成的内容也具备一定可读性，但其核心优势在于为机器",93400,"2026-04-06T19:52:38",[52,14],{"id":62,"github_repo":63,"name":64,"description_en":65,"description_zh":66,"ai_summary_zh":67,"readme_en":68,"readme_zh":69,"quickstart_zh":70,"use_case_zh":71,"hero_image_url":72,"owner_login":73,"owner_name":74,"owner_avatar_url":75,"owner_bio":76,"owner_company":77,"owner_location":77,"owner_email":77,"owner_twitter":77,"owner_website":78,"owner_url":79,"languages":77,"stars":80,"forks":81,"last_commit_at":82,"license":77,"difficulty_score":83,"env_os":84,"env_gpu":85,"env_ram":85,"env_deps":86,"category_tags":96,"github_topics":97,"view_count":105,"oss_zip_url":77,"oss_zip_packed_at":77,"status":17,"created_at":106,"updated_at":107,"faqs":108,"releases":109},2747,"flyingdoog\u002Fawesome-graph-explainability-papers","awesome-graph-explainability-papers","Papers about explainability of GNNs","awesome-graph-explainability-papers 是一个专注于图神经网络（GNN）可解释性研究的开源资源库。它系统性地整理了该领域的高质量学术论文、综述文章以及实用的评估平台代码，旨在帮助从业者深入理解 GNN 的决策逻辑。\n\n针对图神经网络常被视为“黑盒”、难以追溯预测依据这一痛点，该资源库提供了从理论定义、分类体系到评估指标的全方位指引。它不仅收录了关于可信图学习、反事实解释等前沿主题的权威综述，还汇集了 PyTorch Geometric、GraphXAI、BAGEL 等主流工具库与基准测试平台，甚至包含了由 CogDL 精选的高影响力经典论文，如 GNNExplainer 和 PGExplainer 
等。\n\n这份清单特别适合人工智能研究人员、算法工程师以及对图机器学习感兴趣的高校师生使用。无论是希望快速把握领域发展脉络的初学者，还是正在寻找实验基准或对比方法的资深开发者，都能从中高效获取所需信息。通过整合理论与实战资源，awesome-graph-explainability-papers 极大地降低了探索 GNN 可解释性的门槛，是推动构建更透明、可信图","awesome-graph-explainability-papers 是一个专注于图神经网络（GNN）可解释性研究的开源资源库。它系统性地整理了该领域的高质量学术论文、综述文章以及实用的评估平台代码，旨在帮助从业者深入理解 GNN 的决策逻辑。\n\n针对图神经网络常被视为“黑盒”、难以追溯预测依据这一痛点，该资源库提供了从理论定义、分类体系到评估指标的全方位指引。它不仅收录了关于可信图学习、反事实解释等前沿主题的权威综述，还汇集了 PyTorch Geometric、GraphXAI、BAGEL 等主流工具库与基准测试平台，甚至包含了由 CogDL 精选的高影响力经典论文，如 GNNExplainer 和 PGExplainer 等。\n\n这份清单特别适合人工智能研究人员、算法工程师以及对图机器学习感兴趣的高校师生使用。无论是希望快速把握领域发展脉络的初学者，还是正在寻找实验基准或对比方法的资深开发者，都能从中高效获取所需信息。通过整合理论与实战资源，awesome-graph-explainability-papers 极大地降低了探索 GNN 可解释性的门槛，是推动构建更透明、可信图智能系统的重要参考指南。","# awesome-graph-explainability-papers\nPapers about the explainability of GNNs\n\n### Surveys\n1. [ACM computing survey 25] **Explaining the Explainers in Graph Neural Networks: a Comparative Study** [paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3696444)\n2. [Proceedings of the IEEE 24] **Trustworthy Graph Neural Networks: Aspects, Methods and Trends** [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.07424)\n3. [Preprint 24] **Graph-Based Explainable AI: A Comprehensive Survey** [paper](https:\u002F\u002Fhal.science\u002Fhal-04660442\u002F)\n4. [Arixv 23] **A Survey on Explainability of Graph Neural Networks** [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.01958)\n5. [ACM computing survey] **A Survey on Graph Counterfactual Explanations: Definitions, Methods, Evaluation, and Research Challenges** [paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3618105)\n6. [TPAMI 22]**Explainability in graph neural networks: A taxonomic survey**. *Yuan Hao, Yu Haiyang, Gui Shurui, Ji Shuiwang*. [paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2012.15445.pdf)\n7. [Arxiv 22]**A Survey of Explainable Graph Neural Networks: Taxonomy and Evaluation Metrics** [paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2207.12599.pdf)\n8. 
[Arxiv 22] **A Survey of Trustworthy Graph Learning: Reliability, Explainability, and Privacy Protection** [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.10014)\n9. [Big Data 2022]**A Survey of Explainable Graph Neural Networks for Cyber Malware Analysis** [paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10020943)\n10. [Machine Intelligence Research 24] **A Comprehensive Survey on Trustworthy Graph Neural Networks: Privacy, Robustness, Fairness, and Explainability**[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.08570)\n11. [Book 23] **Generative Explanation for Graph Neural Network: Methods and Evaluation** [paper](http:\u002F\u002Fsites.computer.org\u002Fdebull\u002FA23june\u002Fp64.pdf)\n\n### Platforms\n1. **PyTorch Geometric** [[Document]](https:\u002F\u002Fpytorch-geometric.readthedocs.io\u002Fen\u002Flatest\u002Ftutorial\u002Fexplain.html) [[Blog]](https:\u002F\u002Fmedium.com\u002F@pytorch_geometric\u002Fgraph-machine-learning-explainability-with-pyg-ff13cffc23c2)\n2. **DIG: A Turnkey Library for Diving into Graph Deep Learning Research** [paper](https:\u002F\u002Fwww.jmlr.org\u002Fpapers\u002Fv22\u002F21-0343.html) [Code](https:\u002F\u002Fgithub.com\u002Fdivelab\u002FDIG)\n2. **GraphXAI: Evaluating Explainability for Graph Neural Networks** [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.09339v2) [Code](https:\u002F\u002Fgithub.com\u002Fmims-harvard\u002Fgraphxai)\n3. **GraphFramEx: Towards Systematic Evaluation of Explainability Methods for Graph Neural Networks** [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.09677) [Code](https:\u002F\u002Fgithub.com\u002Fgraphframex\u002Fgraphframex)\n4. **GNNExplainer and PGExplainer** [paper](https:\u002F\u002Fopenreview.net\u002Fforum?id=8JHrucviUf) [Code](https:\u002F\u002Fgithub.com\u002FLarsHoldijk\u002FRE-ParameterizedExplainerForGraphNeuralNetworks)\n5. 
**BAGEL: A Benchmark for Assessing Graph Neural Network Explanations** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.13983)[Code](https:\u002F\u002Fgithub.com\u002Fmandeep-rathee\u002Fbagel-benchmark)\n\n\n### Most Influential Papers selected by [Cogdl](https:\u002F\u002Fgithub.com\u002FTHUDM\u002Fcogdl\u002Fblob\u002Fmaster\u002Fgnn_papers.md#explainability\n1. **Explainability in graph neural networks: A taxonomic survey**. *Yuan Hao, Yu Haiyang, Gui Shurui, Ji Shuiwang*. ARXIV 2020. [paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2012.15445.pdf)\n2. **Gnnexplainer: Generating explanations for graph neural networks**. *Ying Rex, Bourgeois Dylan, You Jiaxuan, Zitnik Marinka, Leskovec Jure*. NeurIPS 2019. [paper](https:\u002F\u002Fwww.ncbi.nlm.nih.gov\u002Fpmc\u002Farticles\u002FPMC7138248\u002F) [code](https:\u002F\u002Fgithub.com\u002FRexYing\u002Fgnn-model-explainer)\n3. **Explainability methods for graph convolutional neural networks**. *Pope Phillip E, Kolouri Soheil, Rostami Mohammad, Martin Charles E, Hoffmann Heiko*. CVPR 2019.[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FPope_Explainability_Methods_for_Graph_Convolutional_Neural_Networks_CVPR_2019_paper.pdf)\n4. **Parameterized Explainer for Graph Neural Network**. *Luo Dongsheng, Cheng Wei, Xu Dongkuan, Yu Wenchao, Zong Bo, Chen Haifeng, Zhang Xiang*. NeurIPS 2020. [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2011.04573) [code](https:\u002F\u002Fgithub.com\u002Fflyingdoog\u002FPGExplainer)\n5. **Xgnn: Towards model-level explanations of graph neural networks**. *Yuan Hao, Tang Jiliang, Hu Xia, Ji Shuiwang*. KDD 2020. [paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3394486.3403085). \n6. **Evaluating Attribution for Graph Neural Networks**. *Sanchez-Lengeling Benjamin, Wei Jennifer, Lee Brian, Reif Emily, Wang Peter, Qian Wesley, McCloskey Kevin, Colwell  Lucy, Wiltschko Alexander*. 
NeurIPS  2020.[paper](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2020\u002Ffile\u002F417fbbf2e9d5a28a855a11894b2e795a-Paper.pdf)\n7. **PGM-Explainer: Probabilistic Graphical Model Explanations for Graph Neural Networks**. *Vu Minh, Thai My T.*. NeurIPS  2020.[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2010.05788.pdf)\n8. **Explanation-based Weakly-supervised Learning of Visual Relations with Graph Networks**. *Federico Baldassarre and Kevin Smith and Josephine Sullivan and Hossein Azizpour*. ECCV 2020.[paper](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2020\u002Fpapers_ECCV\u002Fpapers\u002F123730613.pdf)\n9. **GCAN: Graph-aware Co-Attention Networks for Explainable Fake News Detection on Social Media**. *Lu, Yi-Ju and Li, Cheng-Te*. ACL 2020.[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2004.11648.pdf)\n10. **On Explainability of Graph Neural Networks via Subgraph Explorations**. *Yuan Hao, Yu Haiyang, Wang Jie, Li Kang, Ji Shuiwang*. ICML 2021.[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2102.05152.pdf)\n\n\n### Year 2026\n- [ICLR 26] **GNN Explanations that do not Explain and How to find Them**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2601.20815)\n\n\n### Year 2025\n-  [NIPS 25] **Robust Explanations of Graph Neural Networks via Graph Curvatures**[[paper]](https:\u002F\u002Fopenreview.net\u002Fpdf?id=48L3BEtH8w)\n-  [NIPS 25] **GnnXemplar: Exemplars to Explanations - Natural Language Rules for Global GNN Interpretability**[[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=eafIjoZAHm)\n-  [NIPS 25] **On Logic-based Self-Explainable Graph Neural Networks** [[paper]](https:\u002F\u002Fopenreview.net\u002Fpdf?id=OtAiYPP6GA)\n1. [ICLR 25] **Exact Computation of Any-Order Shapley Interactions for Graph Neural Networks**[[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=9tKC0YM8sX) \n2. 
[ICLR 25] **From GNNs to Trees: Multi-Granular Interpretability for Graph Neural Networks**[[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=KEUPk0wXXe)\n3. [ICLR 25] **Provably Robust Explainable Graph Neural Networks against Graph Perturbation Attacks** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=iFK0xoceR0)\n4. [ICLR 25] **Towards Explaining the Power of Constant-depth Graph Neural Networks for Linear Programming** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=INow59Vurm)\n5. [ICLR 25] **Explanations of GNN on Evolving Graphs via Axiomatic Layer edges** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=pXN8T5RwNN)\n6. [ICLR 25] **MAGE: Model-Level Graph Neural Networks Explanations via Motif-based Graph Generation** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=vue9P1Ypk6)\n7. [AAAI 25] **Higher Order Structures For Graph Explanations** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.03253)\n8. [AAAI 25] **Self-Explainable Graph Transformer for Link Sign Prediction**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.08754)\n9. [AAAI 25] **Faithful and Accurate Self-Attention Attribution for Message Passing Neural Networks via the Computation Tree Viewpoint**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.04612)\n10. [AAAI 25] **Graph Segmentation and Contrastive Enhanced Explainer for Graph Neural Networks** [[paper]](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F35440)\n11. [TKDD 25] **DyExplainer: Explainable Dynamic Graph Neural Networks** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.16375)\n12. [Arxiv 25.05] **Dual Explanations via Subgraph Matching for Malware Detection** [[paper]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.20904)\n13. [Arxiv 25.04] **On the Consistency of GNN Explanations for Malware Detection** [[paper]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.16316)\n14. 
[Arxiv 25.01] **Watermarking Graph Neural Networks via Explanations for Ownership Protection** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.05614)\n15. [Arxiv 25.01] **Mixture-of-Experts Graph Transformers for Interpretable Particle Collision Detection** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.03432)\n16. [ACM Computing Surveys] **Can Graph Neural Networks be Adequately Explained? A Survey** [[paper]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3711122)\n17. [IEEE TNSRE] **Finding Neural Biomarkers for Motor Learning and Rehabilitation using an Explainable Graph Neural Network** [[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10843258)\n18. [Springer FCS] **Learning from shortcut: a shortcut-guided approach for explainable graph learning** [[paper]](https:\u002F\u002Flink.springer.com\u002Farticle\u002F10.1007\u002Fs11704-024-40452-4)\n19. [NN] **Local interpretable spammer detection model with multi-head graph channel attention network** [[paper]](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0893608024009985)\n20. [ Applied Intelligence ] **KnowGNN: a knowledge-aware and structure-sensitive model-level explainer for graph neural networks** [[paper]](https:\u002F\u002Flink.springer.com\u002Farticle\u002F10.1007\u002Fs10489-024-06034-4)\n21. [ICML 25] **TopInG: Topologically Interpretable Graph Learning via Persistent Rationale Filtration** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2510.05102) [[project]](https:\u002F\u002Fjackal092927.github.io\u002Fpublication\u002FTopInG_ICML2025)    \n\n### Year 2024\n1. [NeurIPS 24] **RegExplainer: Generating Explanations for Graph Neural Networks in Regression Task** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.07840)\n2. 
[NeurIPS 24] **GraphTrail: Translating GNN Predictions into Human-Interpretable Logical Rules**[[paper]](https:\u002F\u002Fnips.cc\u002Fvirtual\u002F2024\u002Fposter\u002F94172)\n3. [ICML 24] **Generating In-Distribution Proxy Graphs for Explaining Graph Neural Networks**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.02036)\n4. [ICML 24] **Predicting and Interpreting Energy Barriers of Metallic Glasses with Graph Neural Networks** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.08627)\n5. [ICML 24] **Graph Neural Network Explanations are Fragile** [[paper]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.03193)\n6. [ICML 24] **How Interpretable Are Interpretable Graph Neural Networks?** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.07955)\n7. [ICML 24] **Feature Attribution with Necessity and Sufficiency via Dual-stage Perturbation Test for Causal Explanation**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.08845)\n8. [ICML 24] **Explaining Graph Neural Networks via Structure-aware Interaction Index** [[paper]](https:\u002F\u002Ficml.cc\u002Fvirtual\u002F2024\u002Fposter\u002F34550)\n9. [ICML 24] **EiG-Search: Generating Edge-Induced Subgraphs for GNN Explanation in Linear Time** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.01762)\n10. [ICLR 24] **GraphChef: Decision-Tree Recipes to Explain Graph Neural Networks** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=IjMUGuUmBI)\n11. [ICLR 24] **GOAt: Explaining Graph Neural Networks via Graph Output Attribution** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=2Q8TZWAHv4)\n12. [ICLR 24] **Towards Robust Fidelity for Evaluating Explainability of Graph Neural Networks** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=up6hr4hIQH)\n13. [ICLR 24] **GNNX-BENCH: Unravelling the Utility of Perturbation-based GNN Explainers through In-depth Benchmarking** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.01794)\n14. 
[ICLR 24] **UNR-Explainer: Counterfactual Explanations for Unsupervised Node Representation Learning Models** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=0j9ZDzMPqr)\n15. [TPAMI 24] **Towards Inductive and Efficient Explanations for Graph Neural Networks**[[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10423141)\n20. [Openreview 24] **Robust Graph Attention for Graph Adversarial Attacks: An Information Bottleneck Inspired Approach**[[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=lTL4t68BNc)\n21. [Openreview 24] **AIMing for Explainability in GNNs**[[paper]]([https:\u002F\u002Fopenreview.net\u002Fforum?id=lTL4t68BNc](https:\u002F\u002Fopenreview.net\u002Fforum?id=KZII3faAs2))\n23. [Openreview 24] **Graph Distributional Analytics: Enhancing GNN Explainability through Scalable Embedding and Distribution Analysis**[[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=Fzz8acgC6X)\n25. [Openreview 24] **Watermarking Graph Neural Networks Via Explanations For Ownership Protection**[[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=EgP6IEyfYJ)\n26. [Openreview 24] **Explainable Graph Representation Learning via Graph Pattern Analysis** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=hXJrQWIoR3)\n28. [Openreview 24] **Robust Heterogeneous Graph Neural Network Explainer with Graph Information Bottleneck** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=IMWYNVBHob)\n29. [Openreview 24] **A Hierarchical Language Model Design For Interpretable Graph Reasoning** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=DRSSLefryd)\n30. [Openreview 24] **The GECo algorithm for Graph Neural Networks Explanation** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=sTQC4TeYo1)\n31. [Openreview 24] **On Explaining Equivariant Graph Networks via Improved Relevance Propagation** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=YkMg8sB8AH)\n32. 
[Openreview 24] **SIG: Self-Interpretable Graph Neural Network for Continuous-time Dynamic Graphs** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=j0KjevdhkH)\n33. [Openreview 24] **Interpretable and Adaptive Graph Contrastive Learning with Information Sharing for Biomedical Link Prediction** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=GlgD9o9bl4)\n35. [Openreview 24] **TAGExplainer: Narrating Graph Explanations for Text-Attributed Graph Learning Models** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=VWBYDo5NaM)\n37. [Openreview 24] **TreeX: Generating Global Graphical GNN Explanations via Critical Subtree Extraction** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=zSUXo1nkqR)\n38. [TMLR 24] **InduCE: Inductive Counterfactual Explanations for Graph Neural Networks** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=RZPN8cgqST)\n39. [PLDI 24] **PL4XGL: A Programming Language Approach to Explainable Graph Learning**[[paper]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3656464)\n40. [Usenix Security 24] **INSIGHT: Attacking Industry-Adopted Learning Resilient Logic Locking Techniques Using Explainable Graph Neural Network**[[paper]](https:\u002F\u002Fwww.usenix.org\u002Fconference\u002Fusenixsecurity24\u002Fpresentation\u002Fmankali)\n41. [SIGMOD 24]**View-based Explanations for Graph Neural Networks** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.02086)\n42. [ACM SIGMOD Record] **The Road to Explainable Graph Neural Networks** [[paper]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3703922.3703930)\n43. [Thesis UCLA] **Explainable Artificial Intelligence for Graph Data**[[paper]](https:\u002F\u002Fescholarship.org\u002Fuc\u002Fitem\u002F6bf1g6dc)\n44. [Thesis UVA] **Algorithmic Fairness in Graph Machine Learning: Explanation, Optimization, and Certification**[[paper]](https:\u002F\u002Fwww.proquest.com\u002Fdocview\u002F3083271574)\n45. 
[KDD 24] **SEFraud: Graph-based Self-Explainable Fraud Detection via Interpretative Mask Learning**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.11389)\n46. [KDD 24] **Self-Explainable Temporal Graph Networks based on Graph Information Bottleneck**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.13214)\n47. [KDD 24] **Unveiling Global Interactive Patterns across Graphs: Towards Interpretable Graph Neural Networks**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.01979)\n48. [ICDE 24] **Generating Robust Counterfactual Witnesses for Graph Neural Networks** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.19519)\n49. [ICDE 24] **SES: Bridging the Gap Between Explainability and Prediction of Graph Neural Networks**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.11358)\n50. [ICSE 24] **Coca: Improving and Explaining Graph Neural Network-Based Vulnerability Detection Systems**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.14886)\n51. [AAAI 24] **Generating Diagnostic and Actionable Explanations for Fair Graph Neural Networks** [[paper]](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F30168)\n52. [AAAI 24] **Stratifed GNN Explanations through Sufficient Expansion**[[paper]](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F29180)\n53. [AAAI 24] **Factorized Explainer for Graph Neural Networks**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.05596)\n54. [AAAI 24] **Self-Interpretable Graph Learning with Sufficient and Necessary Explanations**\n55. [AAAI 24] **Explainable Origin-Destination Crowd Flow Interpolation via Variational Multi-Modal Recurrent Graph Auto-Encoder** [[paper]](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F28796)\n13. 
[AISTATS 24] **Two Birds with One Stone: Enhancing Uncertainty Quantification and Interpretability with Graph Functional Neural Process** [[paper]](https:\u002F\u002Fproceedings.mlr.press\u002Fv238\u002Fkong24a.html)\n14. [WWW 24] **Game-theoretic Counterfactual Explanation for Graph Neural Networks** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.06030)\n15. [WWW 24] **EXGC: Bridging Efficiency and Explainability in Graph Condensation**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.05962)\n16. [WWW 24] **Adversarial Mask Explainer for Graph Neural Networks** [[paper]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3589334.3645608)\n17. [WWW 24] **Globally Interpretable Graph Learning via Distribution Matching**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.10447)\n18. [WWW 24] **GNNShap: Scalable and Accurate GNN Explanation using Shapley Values** [[paper]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3589334.3645599)\n19. [LOG 24] **xAI-Drop: Don't Use What You Cannot Explain**[[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=adlpuqQD8Q)\n20. [LOG 24] **MOSE-GNN: A Motif-Based Self-Explaining Graph Neural Network for Molecular Property Prediction** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=nD1a6hSLhO)\n22. [TNNLS 24] **BrainIB: Interpretable Brain Network-based Psychiatric Diagnosis with Graph Information Bottleneck** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.03612)\n23. [TKDE 24] **On Regularization for Explaining Graph Neural Networks: An Information Theory Perspective** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=5rX7M4wa2R_)\n24. [TKDD 24] **Towards Prototype-Based Self-Explainable Graph Neural Network** [[paper]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3689647)\n25. [TKDD 24] **Efficient GNN Explanation via Learning Removal-based Attribution** [[paper]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3685678)\n26. 
[TAI 24] **Learning Counterfactual Explanation of Graph Neural Networks via Generative Flow Network**[[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F10496445)\n27. [TAI 24] **Traffexplainer: A Framework towards GNN-based Interpretable Traffic Prediction** [[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10680338)\n28. [TMC 24] **HGExplainer: Heterogeneous Graph Explainer for IoT Device Identification**[[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10736553)\n29. [IEEE TMI 24] **Multi-Modal Diagnosis of Alzheimer’s Disease using Interpretable Graph Convolutional Networks**[[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10606492)\n30. [IEEE IoT 24] **EXVul: Toward Effective and Explainable Vulnerability Detection for IoT Devices**[[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F10479158)\n31. [IEEE Transactions on Fuzzy Systems] **Towards Embedding Ambiguity-Sensitive Graph Neural Network Explainability** [[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10696966)\n32. [IEEE JBHI] **Interpretable Dynamic Directed Graph Convolutional Network for Multi-Relational Prediction of Missense Mutation and Drug Response**[[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10721576)\n33. [IDEAL 2024] **Causal Explanation of Graph Neural Networks**[[paper]](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-031-77731-8_26)\n34. [BIBM 24] **Seizure Onset Zone Localization Method based on GNN Explanation** [[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10821860)\n35. [BIBM 24] **DDTExplainer: Mining Drug-Disease Therapeutic Mechanisms based on GNN Explainability** [[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10822060)\n36. [CIKM 24] **EDGE: Evaluation Framework for Logical vs. 
Subgraph Explanations for Node Classifiers on Knowledge Graphs**[[paper]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3627673.3679904)\n37. [ECML\u002FPKDD 24] **Towards Few-shot Self-explaining Graph Neural Networks**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.07340)\n38. [SDM 24] **XGExplainer: Robust Evaluation-based Explanation for Graph Neural Networks**[[paper]](https:\u002F\u002Fepubs.siam.org\u002Fdoi\u002Fabs\u002F10.1137\u002F1.9781611978032.8)\n23. [DASFAA 24] **Multi-objective Graph Neural Network Explanatory Model with Local and Global Information Preservation**[[paper]](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-981-97-5572-1_20)\n28. [ISSTA 2024] **Graph Neural Networks for Vulnerability Detection: A Counterfactual Explanation** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.15687)\n29. [KBS 24] **Shapley-based graph explanation in embedding space**[[paper]](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fabs\u002Fpii\u002FS0950705124008785?via%3Dihub)\n30. [KBS 24] **GEAR: Learning graph neural network explainer via adjusting gradients**[[paper]](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fabs\u002Fpii\u002FS0950705124010025)\n31. [IEEE TNSM 24] **Ensemble Graph Attention Networks for Cellular Network Analytics: From Model Creation to Explainability**[[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10622099)\n32. [IEEE TNSE 24] **GAXG: A Global and Self-adaptive Optimal Graph Topology Generation Framework for Explaining Graph Neural Networks**[[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10614894)\n33. [IEEE TETCI 24] **GF-LRP: A Method for Explaining Predictions Made by Variational Graph Auto-Encoders**[[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10586750)\n34. 
[AAAI workshop] **Semi-Supervised Graph Representation Learning with Human-centric Explanation for Predicting Fatty Liver Disease**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.02786)\n35. [xAI 24] **Global Concept Explanations for Graphs by Contrastive Learning** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.16532)\n36. [Arxiv 24.12] **BetaExplainer: A Probabilistic Method to Explain Graph Neural Networks**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.11964)\n37. [Arxiv 24.12] **GISExplainer: On Explainability of Graph Neural Networks via Game-theoretic Interaction Subgraphs** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.15698)\n38. [Arxiv 24.12] **Interpreting GNN-based IDS Detections Using Provenance Graph Structural Features** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.00934)\n39. [Arxiv 24.12] **eXpath: Explaining Knowledge Graph Link Prediction with Ontological Closed Path Rules**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.04846)\n40. [Arxiv 24.12] **On the Probability of Necessity and Sufficiency of Explaining Graph Neural Networks: A Lower Bound Optimization Approach** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.07056)\n41. [Arxiv 24.11] **Rethinking Node Representation Interpretation through Relation Coherence**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.00653)\n42. [Arxiv 24.11] **MBExplainer: Multilevel bandit-based explanations for downstream models with augmented graph embeddings** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.00287)\n43. [Arxiv 24.11] **Securing GNNs: Explanation-Based Identification of Backdoored Training Graphs**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.18136)\n44. [Preprint 24.11] **Chiseling the Graph: An Edge-Sculpting Method for Explaining Graph Neural Networks** [[paper]](https:\u002F\u002Fwww.researchsquare.com\u002Farticle\u002Frs-5414037\u002Fv1)\n45. 
[Preprint 24.10] **Reliable and Faithful Generative Explainers for Graph Neural Networks**[[paper]](https:\u002F\u002Fwww.preprints.org\u002Fmanuscript\u002F202410.1718)\n46. [Arxiv 24.10] **Explaining Hypergraph Neural Networks: From Local Explanations to Global Concepts**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.07764)\n47. [Arxiv 24.10] **Explainable Graph Neural Networks Under Fire** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.06417)\n48. [Arxiv 24.09] **GINTRIP: Interpretable Temporal Graph Regression using Information bottleneck and Prototype-based method** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.10996)\n49. [Arxiv 24.09] **PAGE: Parametric Generative Explainer for Graph Neural Network** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.14042)\n50. [Preprint 24.08] **CIDER: Counterfactual-Invariant Diffusion-based GNN Explainer for Causal Subgraph Inference**[[paper]](https:\u002F\u002Fwww.researchsquare.com\u002Farticle\u002Frs-4814778\u002Fv1)\n51. [Arxiv 24.07] **LLMExplainer: Large Language Model based Bayesian Inference for Graph Explanation Generation**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.15351)\n52. [Arxiv 24.07] **Explaining Graph Neural Networks for Node Similarity on Graphs**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.07639)\n41. [Arxiv 24.07] **SLInterpreter: An Exploratory and Iterative Human-AI Collaborative System for GNN-based Synthetic Lethal Prediction**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.14770)\n42. [Arxiv 24.07] **Graph Neural Network Causal Explanation via Neural Causal Models**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.09378)\n43. [Arxiv 24.06] **GNNAnatomy: Systematic Generation and Evaluation of Multi-Level Explanations for Graph Neural Networks**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.04548)\n44. 
[Arxiv 24.06] **On GNN explanability with activation rules**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.11594)\n46. [Arxiv 24.05] **SIG: Efficient Self-Interpretable Graph Neural Network for Continuous-time Dynamic Graphs**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.19062)\n47. [Arxiv 24.06] **L2XGNN: Learning to Explain Graph Neural Networks** [[paper]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2209.14402.pdf)\n48. [Arxiv 24.06] **Towards Understanding Sensitive and Decisive Patterns in Explainable AI: A Case Study of Model Interpretation in Geometric Deep Learning**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.00849)\n50. [Arxiv 24.06] **Explainable AI Security: Exploring Robustness of Graph Neural Networks to Adversarial Attacks** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.13920)\n51. [Arxiv 24.06] **Robust Ante-hoc Graph Explainer using Bilevel Optimization** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.15745)\n52. [Arxiv 24.06] **Perks and Pitfalls of Faithfulness in Regular, Self-Explainable and Domain Invariant GNNs** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.15156)\n53. [Arxiv 24.05] **Utilizing Description Logics for Global Explanations of Heterogeneous Graph Neural Networks** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.12654)\n54. [Arxiv 24.05] **Detecting Complex Multi-step Attacks with Explainable Graph Neural Network** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.11335)\n55. [Arxiv 24.05] **SynHING: Synthetic Heterogeneous Information Network Generation for Graph Learning and Explanation**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.04133)\n56. [Arxiv 24.05] **PAGE: Prototype-Based Model-Level Explanations for Graph Neural Networks** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.17159)\n57. 
[Arxiv 24.05] **Evaluating Neighbor Explainability for Graph Neural Networks** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.08118)\n58. [Preprint 24.05] **Explainable Graph Neural Networks: An Application to Open Statistics Knowledge Graphs for Estimating House Prices** [[paper]](https:\u002F\u002Fwww.preprints.org\u002Fmanuscript\u002F202405.0037\u002Fv1)\n59. [Arxiv 24.04] **Superior Polymeric Gas Separation Membrane Designed by Explainable Graph Machine Learning** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.10903)\n60. [Arxiv 24.04] **Improving the interpretability of GNN predictions through conformal-based graph sparsification** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.12356)\n61. [Arxiv 24.03] **GreeDy and CoDy: Counterfactual Explainers for Dynamic Graph**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.16846)\n62. [Arxiv 24.03] **Explainable Graph Neural Networks for Observation Impact Analysis in Atmospheric State Estimation**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.17384)\n64. [Arxiv 24.03] **Iterative Graph Neural Network Enhancement via Frequent Subgraph Mining of Explanations**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.07849)\n65. [Arxiv 24.02] **PAC Learnability under Explanation-Preserving Graph Perturbations**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.05039)\n66. [Arxiv 24.02] **Explainable Global Wildfire Prediction Models using Graph Neural Networks**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.07152)\n67. [Arxiv 24.02] **Incorporating Retrieval-based Causal Learning with Information Bottlenecks for Interpretable Graph Neural Networks**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.04710)\n68. [Arxiv 24.01] **On Discprecncies between Perturbation Evaluations of Graph Neural Network Attributions**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.00633)\n69. 
[ASP-DAC 24] **LIPSTICK: Corruptibility-Aware and Explainable Graph Neural Network-based Oracle-Less Attack on Logic Locking**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.04235)\n70. [Biorxiv 24] **Community-aware explanations in knowledge graphs with XP-GNN**[[paper]](https:\u002F\u002Fwww.biorxiv.org\u002Fcontent\u002F10.1101\u002F2024.01.21.576302v1.abstract)\n71. [ISCV 24] **Adaptive Subgraph Feature Extraction for Explainable Multi-Modal Learning**[[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F10620106\u002F)\n72. [IJCNN 24] **Explanations for Graph Neural Networks using A Game-theoretic Value**[[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F10650495)\n73. [AIxIA 2024] **Relating Explanations with the Inductive Biases of Deep Graph Networks** [[paper]](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-031-80607-0_14)\n74. [Neurocomputing] **GeoExplainer: Interpreting Graph Convolutional Networks with geometric masking**[[paper]](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fabs\u002Fpii\u002FS0925231224011640?via%3Dihub)\n75. [Technologies] **Explainable Graph Neural Networks: An Application to Open Statistics Knowledge Graphs for Estimating House Prices**[[paper]](https:\u002F\u002Fwww.mdpi.com\u002F2227-7080\u002F12\u002F8\u002F128)\n76. [Reliab. Eng. Syst. Saf.] **Causal intervention graph neural network for fault diagnosis of complex industrial processes**[[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F10620106\u002F)\n77. [Frontiers in big data] **Global explanation supervision for Graph Neural Networks**[[paper]](https:\u002F\u002Fwww.semanticscholar.org\u002Freader\u002Fb6d6dda72e1d31e4b05e59909128cfccf4a835fb)\n78. [Information and Software Technology] **Graph-based explainable vulnerability prediction**[[paper]](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS095058492400171X?via%3Dihub)\n79. 
[Information Systems] **Heterogeneous graph neural networks for fraud detection and explanation in supply chain finance**[[paper]](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fabs\u002Fpii\u002FS0306437923001710?via%3Dihub)\n80. [Information Procs. & Mana.] **Towards explaining graph neural networks via preserving prediction ranking and structural dependency**[[paper]](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0306457323003084)\n81. [Applied Energy]  **Explainable Spatio-Temporal Graph Neural Networks for multi-site photovoltaic energy production** [[paper]](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0306261923015155)\n82. [PAKDD 24] **Random Mask Perturbation Based Explainable Method of Graph Neural Networks** [[paper]](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-981-97-2259-4_2)\n83. [Computational Materials Science] **Graph isomorphism network for materials property prediction along with explainability analysis**[[paper]](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0927025623006134)\n84. [NN 24] **Explanatory subgraph attacks against Graph Neural Networks**[[paper]](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0893608024000030)\n85. [NN 24] **GRAM: An interpretable approach for graph anomaly detection using gradient attention maps**[[paper]](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0893608024003873)\n86. [Neural Networks 24] **CI-GNN: A Granger Causality-Inspired Graph Neural Network for Interpretable Brain Network-Based Psychiatric Diagnosis** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.01642)\n87. [NeuroImage 24] **BPI-GNN: Interpretable brain network-based psychiatric diagnosis and subtyping**[[paper]](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS1053811924000892)\n88. 
[PAKDD 24] **Toward Interpretable Graph Classification via Concept-Focused Structural Correspondence** [[paper]](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-981-97-2650-9_2)\n89. [ICPR 24] **Interpretable Deep Graph-Level Clustering: A Prototype-Based Approach** [[paper]](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-031-78128-5_8)\n90. [MedRxiv 24] **An Interpretable Population Graph Network to Identify Rapid Progression of Alzheimer’s Disease Using UK Biobank**[[paper]](https:\u002F\u002Fwww.medrxiv.org\u002Fcontent\u002F10.1101\u002F2024.03.27.24304966v1)\n91. [IEEE TDSC 24] **TrustGuard: GNN-based Robust and Explainable Trust Evaluation with Dynamicity Support** [[paper]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.13339.pdf)\n92. [IEEE Transactions] **IEEE Transactions on Computational Social Systems**[[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fxpl\u002FRecentIssue.jsp?punumber=6570650)\n93. [Journal of Physics] **Explainer on GNN-based segmentation networks**[[paper]](https:\u002F\u002Fiopscience.iop.org\u002Farticle\u002F10.1088\u002F1742-6596\u002F2711\u002F1\u002F012009\u002Fmeta)\n94. [Energy and AI] **Electricity demand forecasting at distribution and household levels using explainable causal graph neural network** [[paper]](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS266654682400034X)\n95. [HI-AI@KDD 24] **Interpretable Graph Model with Prototype-Based Graph Information Bottleneck** [[paper]](https:\u002F\u002Fhuman-interpretable-ai.github.io\u002Fassets\u002Fpdf\u002F4_Interpretable_Graph_Model_wi.pdf)\n96. [Neurosymbolic Artificial Intelligence] **Towards Semantic Understanding of GNN Layers embedding with Functional-Semantic Activation Mapping** [[paper]](https:\u002F\u002Fneurosymbolic-ai-journal.com\u002Fsystem\u002Ffiles\u002Fnai-paper-803.pdf)\n97. 
[NeSy 2024] **Towards Understanding Graph Neural Networks: Functional-Semantic Activation Mapping**[[paper]](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-031-71170-1_11)\n98. [Thesis 24] **Explainable and physics-guided graph deep learning for air pollution modelling** [[paper]](https:\u002F\u002Fcris.vub.be\u002Fws\u002Fportalfiles\u002Fportal\u002F117178225\u002FRodrigoBonet_Esther_thesis.pdf)\n99. [Thesis 24] **Influence of molecular structures on graph neural network explainers’ performance**[[paper]](https:\u002F\u002Frepository.tudelft.nl\u002Ffile\u002FFile_03ae5c75-cc17-42c1-a593-1c82d2593c67?preview=1)\n\n\n\n\n\n\n\n\n### Year 2023\n1. [NeurIPS 23] **Interpretable Graph Networks Formulate Universal Algebra Conjectures**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.11688)\n2. [NeurIPS 23] **SAME: Uncovering GNN Black Box with Structure-aware Shapley-based Multipiece Explanation** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=kBBsj9KRgh)\n3. [NeurIPS 23] **Train Once and Explain Everywhere: Pre-training Interpretable Graph Neural Networks**[[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=enfx8HM4Rp)\n4. [NeurIPS 23] **D4Explainer: In-distribution Explanations of Graph Neural Network via Discrete Denoising Diffusion** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.19321)\n5. [NeurIPS 23] **TempME: Towards the Explainability of Temporal Graph Neural Networks via Motif Discovery** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.19324)\n6. [NeurIPS 23] **V-InFoR: A Robust Graph Neural Networks Explainer for Structurally Corrupted Graphs** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=CtXXOaxDw7)\n7. [NeurIPS 23] **Towards Self-Interpretable Graph-Level Anomaly Detection** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.16520)\n8. 
[NeurIPS 23] **Evaluating Post-hoc Explanations for Graph Neural Networks via Robustness Analysis** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=eD534mPhAg)\n9. [NeurIPS 23] **Interpretable Prototype-based Graph Information Bottleneck** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.19906)\n10. [ICML 23] **Rethinking Explaining Graph Neural Networks via Non-parametric Subgraph Matching** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=MocsSAUKlk)\n11. [ICML 23] **Relevant Walk Search for Explaining Graph Neural Networks** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=BDYIci7bVs)\n12. [ICML 23] **Towards Understanding the Generalization of Graph Neural Networks** [[paper]](https:\u002F\u002Fopenreview.net\u002Fpdf?id=BhMyLk0YNy)\n13. [ICLR 23] **GNNInterpreter: A Probabilistic Generative Model-Level Explanation for Graph Neural Networks** [[paper]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2209.07924.pdf)\n14. [ICLR 23] **Global Explainability of GNNs via Logic Combination of Learned Concepts** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=OTbRTIY4YS)\n15. [ICLR 23] **Explaining Temporal Graph Models through an Explorer-Navigator Framework** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=BR_ZhvcYbGJ)\n16. [ICLR 23] **DAG Matters! GFlowNets Enhanced Explainer for Graph Neural Networks** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=jgmuRzM-sb6)\n17. [ICLR 23] **Interpretable Geometric Deep Learning via Learnable Randomness Injection** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.16966)\n18. [ICLR 23] **A Differential Geometric View and Explainability of GNN on Evolving Graphs** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=lRdhvzMpVYV)\n19. [KDD 23] **MixupExplainer: Generalizing Explanations for Graph Neural Networks with Data Augmentation** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.07832)\n20. 
[KDD 23] **Counterfactual Learning on Heterogeneous Graphs with Greedy Perturbation** [[paper]](https:\u002F\u002Frepository.kaust.edu.sa\u002Fhandle\u002F10754\u002F693484)\n21. [KDD 23] **Empower Post-hoc Graph Explanations with Information Bottleneck: A Pre-training and Fine-tuning Perspective**[[paper]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3580305.3599330)\n22. [KDD 23] **Less is More: SlimG for Accurate, Robust, and Interpretable Graph Mining.**[[paper]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3580305.3599413)\n23. [KDD 23] **Shift-Robust Molecular Relational Learning with Causal Substructure** [[paper]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3580305.3599437)\n24. [AAAI 23] **Global Concept-Based Interpretability for Graph Neural Networks via Neuron Analysis** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.10609)\n25. [AAAI 23] **On the Limit of Explaining Black-box Temporal Graph Neural Networks** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.00952)\n26. [AAAI 23] **Towards Fine-Grained Explainability for Heterogeneous Graph Neural Network** [[paper]](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fdownload\u002F26040\u002F25812)\n27. [AAAI 23] **Interpretable Chirality-Aware Graph Neural Network for Quantitative Structure Activity Relationship Modeling in Drug Discovery**  [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=W2OStztdMhc)\n28. [VLDB 23] **HENCE-X: Toward Heterogeneity-agnostic Multi-level Explainability for Deep Graph Networks** [[paper]](https:\u002F\u002Fwww.vldb.org\u002Fpvldb\u002Fvol16\u002Fp2990-lv.pdf)\n29. [VLDB 23] **On Data-Aware Global Explainability of Graph Neural Networks** [[paper]](https:\u002F\u002Fwww.vldb.org\u002Fpvldb\u002Fvol16\u002Fp3447-lv.pdf)\n30. [AISTATS 23] **Distill n' Explain: explaining graph neural networks using simple surrogates** [[Paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.10139)\n31. 
[AISTATS 23] **Probing Graph Representations** [[paper]](https:\u002F\u002Fproceedings.mlr.press\u002Fv206\u002Fakhondzadeh23a\u002Fakhondzadeh23a.pdf)\n32. [ICDE 23] **INGREX: An Interactive Explanation Framework for Graph Neural Networks**[[paper]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2211.01548.pdf)\n33. [ICDE 23] **Jointly Attacking Graph Neural Network and its Explanations** [[paper]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2108.03388.pdf)\n34. [WWW 23]**PaGE-Link: Path-based Graph Neural Network Explanation for Heterogeneous Link Prediction** [[paper]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2302.12465.pdf)\n35. [ICDM 23] **Limitations of Perturbation-based Explanation Methods for Temporal Graph Neural Networks**\n36. [ICDM 23] **Interpretable Subgraph Feature Extraction for Hyperlink Prediction**[[paper]](https:\u002F\u002Fwww.researchgate.net\u002Fpublication\u002F378000024_Interpretable_Subgraph_Feature_Extraction_for_Hyperlink_Prediction)\n37. [WSDM 23]**Interpretable Research Interest Shift Detection with Temporal Heterogeneous Graphs** [[paper]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3539597.3570453)\n38. [WSDM 23]**Cooperative Explanations of Graph Neural Networks** [[paper]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3539597.3570378)\n39. [WSDM 23]**Towards Faithful and Consistent Explanations for Graph Neural Networks** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.13733)\n40. [WSDM 23] **Global Counterfactual Explainer for Graph Neural Networks** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.11695)\n41. [CIKM 23] **Explainable Spatio-Temporal Graph Neural Networks** [[paper]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3583780.3614871)\n42. 
[CIKM 23] **DuoGAT: Dual Time-oriented Graph Attention Networks for Accurate, Efficient and Explainable Anomaly Detection on Time-series.** [[paper]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3583780.3614857)\n43. [CIKM 23] **Heterogeneous Temporal Graph Neural Network Explainer** [[paper]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3583780.3614909)\n44. [CIKM 23] **ACGAN-GNNExplainer: Auxiliary Conditional Generative Explainer for Graph Neural Networks**[[paper]]()\n45. [CIKM 23] **KG4Ex: An Explainable Knowledge Graph-Based Approach for Exercise Recommendation** [[paper]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3583780.3614943)\n46. [ECML-PKDD 23] **ENGAGE: Explanation Guided Data Augmentation for Graph Representation Learning** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.01053)\n47. [TPAMI 23] **FlowX: Towards Explainable Graph Neural Networks via Message Flows** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.12987)\n48. [TAI] **Prototype-based interpretable graph neural networks.** [[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9953541)\n49. [TKDE 23] **Counterfactual Graph Learning for Anomaly Detection on Attributed Networks** [[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F10056298)\n50. [Scientific Data 23 ] **Evaluating explainability for graph neural networks** [[paper]](https:\u002F\u002Fwww.nature.com\u002Farticles\u002Fs41597-023-01974-x)\n51. [Nature Communications 23] **Chemistry-intuitive explanation of graph neural networks for molecular property prediction with substructure masking** [[paper]](https:\u002F\u002Fwww.nature.com\u002Farticles\u002Fs41467-023-38192-3)\n52. [ACM Computing Surveys 23] **A Survey on Graph Counterfactual Explanations: Definitions, Methods, Evaluation**  [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.12089)\n53. 
[TIST 23] **Faithful and Consistent Graph Neural Network Explanations with Rationale Alignment** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.02791)\n54. [Openreview 23] **STExplainer: Global Explainability of GNNs via Frequent SubTree Mining** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=HgSfV6sGIn)\n55. [GLFrontiers 23] **Everybody Needs a Little HELP: Explaining Graphs via Hierarchical Concepts** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=wrqAn3AJA1)\n56. [Openreview 23] **Iterative Graph Neural Network Enhancement Using Explanations** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=qp0oVaFGm0)\n58. [Openreview 23] **Interpretable and Convergent Graph Neural Network Layers at Scale** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=uYTaVRkKvz)\n60. [NeurIPS 2023 Workshop XAIA] **GInX-Eval: Towards In-Distribution Evaluation of Graph Neural Networks Explanations** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=88MalncLgU)\n61. [NeurIPS 2023 Workshop XAIA] **On the Consistency of GNN Explainability Methods** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=tiLZkab8TP)\n65. [Arxiv 23] **Explainability-Based Adversarial Attack on Graphs Through Edge Perturbation**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.17301)\n66. [AICS 23] **A subgraph interpretation generative model for knowledge graph link prediction based on uni-relation transformation** [[paper]](https:\u002F\u002Fwww.spiedigitallibrary.org\u002Fconference-proceedings-of-spie\u002F12803\u002F1280339\u002FA-subgraph-interpretation-generative-model-for-knowledge-graph-link-prediction\u002F10.1117\u002F12.3009388.short?SSO=1)\n67. [GUT 23] **Screening of normal endoscopic large bowel biopsies with interpretable graph learning: a retrospective study** [[paper]](https:\u002F\u002Fgut.bmj.com\u002Fcontent\u002Fgutjnl\u002Fearly\u002F2023\u002F05\u002F11\u002Fgutjnl-2023-329512.full.pdf)\n68. 
[PR 2023] **Towards self-explainable graph convolutional neural network with frequency adaptive inception** [[paper]](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fabs\u002Fpii\u002FS0031320323006891)\n69. [MLG 2023] **Understanding how explainers work in graph neural networks** [[paper]](https:\u002F\u002Fmlg-europe.github.io\u002Fpapers\u002F241.pdf)\n70. [MLG 2023] **Graph Model Explainer Tool** [[paper]](https:\u002F\u002Fwww.mlgworkshop.org\u002F2023\u002Fpapers\u002FMLG__KDD_2023_paper_5.pdf)\n71. [Information Science 23] **Robust explanations for graph neural network with neuron explanation component** [[paper]](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0020025523013701)\n72. [Recsys 23] **Explainable Graph Neural Network Recommenders; Challenges and Opportunities** [[paper]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3604915.3608875)\n73. [xAI 23] **Counterfactual Explanations for Graph Classification Through the Lenses of Density** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.14849)\n74. [XAI 23] **Evaluating Link Prediction Explanations for Graph Neural Networks** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.01682)\n75. [xAI 23] **XInsight: Revealing Model Insights for GNNs with Flow-based Explanations** [[paper]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.04791.pdf)\n76. [xAI 23] **Quantifying the Intrinsic Usefulness of Attributional Explanations for Graph Neural Networks with Artificial Simulatability Studies** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.15961)\n77. [xAI 23] **MEGAN: Multi Explanation Graph Attention Network** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=H6LVUiHzYDE)\n78. [XKDD 23] **Game Theoretic Explanations for Graph Neural Networks** [[paper]](http:\u002F\u002Fxkdd2023.isti.cnr.it\u002Fpapers\u002F424.pdf)\n79. 
[XKDD 23] **From Black Box to Glass Box: Evaluating Faithfulness of Process Predictions with GCNNs** [[paper]](http:\u002F\u002Fxkdd2023.isti.cnr.it\u002Fpapers\u002F425.pdf)\n80. [IJCNN 23] **MEGA: Explaining Graph Neural Networks with Network Motifs** [[paper]](https:\u002F\u002Fdoi.org\u002F10.1109\u002FIJCNN54540.2023.10191684)\n81. [LOG Poster 23] **On the Robustness of Post-hoc GNN Explainers to Label Noise** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.01706)\n82. [LOG Poster 23] **How Faithful are Self-Explainable GNNs?** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.15096)\n84. [LOG Poster 23] **Explaining Link Predictions in Knowledge Graph Embedding Models with Influential Examples** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.02651)\n85. [Bioriv 23] **Building explainable graph neural network by sparse learning for the drug-protein binding prediction** [[paper]](https:\u002F\u002Fwww.biorxiv.org\u002Fcontent\u002F10.1101\u002F2023.08.28.555203v1.abstract)\n86. [ICAID 2023] **Explanations for Graph Neural Networks via Layer Analysis.** [[paper]](https:\u002F\u002Fwww.atlantis-press.com\u002Fproceedings\u002Ficaid-23\u002F125990065)\n87. [ECAI 23] **XGBD: Explanation-Guided Graph Backdoor Detection** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.04406)\n88. [IEEE Transactions on Consumer Electronics 23] **Human Pose Prediction Using Interpretable Graph Convolutional Network for Smart Home** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.04406)\n89. [KBS 23] **KE-X: Towards subgraph explanations of knowledge graph embedding based on knowledge information gain** [[paper]](http:\u002F\u002Fsites.computer.org\u002Fdebull\u002FA23june\u002FA23JUNE-CD.pdf#page=64)\n90. [ICML workshop 23] **Generating Global Factual and Counterfactual Explainer for Molecule under Domain Constraints** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=qElXYQqxQh)\n91. 
[Thesis 23] **Developing interpretable graph neural networks for high dimensional feature spaces** [[paper]](https:\u002F\u002Fpub.tik.ee.ethz.ch\u002Fstudents\u002F2022-HS\u002FBA-2022-43.pdf)\n92. [Thesis 23] **Evaluation of Explainability Methods on Single-Cell Classification Tasks Using Graph Neural Networks** [[paper]](https:\u002F\u002Fwww.semanticscholar.org\u002Fpaper\u002FEvaluation-of-Explainability-Methods-on-Single-Cell-Singh-Kobayashi\u002F85f4aba430387a337ec3a4b2aa39bfc7361dea1f)\n93. [Arxiv 23] **On the Interplay of Subset Selection and Informed Graph Neural Networks** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.10066)\n94. [ISSTA23] **Interpreters for GNN-Based Vulnerability Detection: Are We There Yet?** [[paper]](https:\u002F\u002Fwww.semanticscholar.org\u002Fpaper\u002FInterpreters-for-GNN-Based-Vulnerability-Detection%3A-Hu-Wang\u002F6bb9c86483f212a631324ba9b47c344d419a428a)\n95. [ICECAI23] **Improved GraphSVX for GNN Explanations Based on Cross Entropy** [[paper]](https:\u002F\u002Fwww.semanticscholar.org\u002Fpaper\u002FImproved-GraphSVX-for-GNN-Explanations-Based-on-Yu-Liang\u002Fb01c4f2c4d54723b590a828d4e1b4cdbfea5dad4)\n96. [ICRA Workshop 23] **Towards Semantic Interpretation and Validation of Graph Attention-based Explanations** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=ymyQeqatQqQ)\n97. [Arxiv 23] **Graph Neural Network based Log Anomaly Detection and Explanation** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.00527)\n99. [Thesis 23] **Interpretability of Graphical Models** [[paper]](https:\u002F\u002Fsearch.proquest.com\u002Fopenview\u002F1e61b389a59936e319974be0e3fd1af5\u002F1?pq-origsite=gscholar&cbl=18750&diss=y)\n101. [Bioengineering 2023] **Personalized Explanations for Early Diagnosis of Alzheimer's Disease Using Explainable Graph Neural Networks with Population Graphs** [[paper]](https:\u002F\u002Fwww.mdpi.com\u002F2306-5354\u002F10\u002F6\u002F701)\n102. 
[BDSC 2023] **MDC: An Interpretable GNNs Method Based on Node Motif Degree and Graph Diffusion Convolution** [[paper]](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-981-99-3925-1_24)\n104. [Information Science 2023] **Explainability techniques applied to road traffic forecasting using Graph Neural Network models** [[paper]](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0020025523009052)\n107. [Arxiv 23.05] **EiX-GNN : Concept-level eigencentrality explainer for graph neural networks** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.03491)\n108. [Arxiv 23.04] **Cognitive Explainers of Graph Neural Networks Based on Medical Concepts** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2201.07798)\n109. [ICLR Tiny 23] **Message-passing selection: Towards interpretable GNNs for graph classification** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=99Go96dla5y)\n110. [ICLR Tiny 23] **Revisiting CounteRGAN for Counterfactual Explainability of Graphs** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=d0m0Rl15q3g)\n111. [MICCAI Workshop 23] **IA-GCN: Interpretable Attention based Graph Convolutional Network for Disease prediction** [[paper]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2103.15587.pdf)\n113. [GRADES & NDA'23] **A Demonstration of Interpretability Methods for Graph Neural Networks** [[paper]](https:\u002F\u002Fhomes.cs.aau.dk\u002F~Arijit\u002FPapers\u002FgInterpreter_GRADES_NDA23.pdf)\n114. [Arxiv 23] **Self-Explainable Graph Neural Networks for Link Prediction**  [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.12578)\n115. [Arxiv 23.02] **MotifExplainer: a Motif-based Graph Neural Network Explainer**  [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.00519)\n116. 
[ChemRxiv 23] **Interpreting Graph Neural Networks with Myerson Values for Cheminformatics Approaches** [[paper]](https:\u002F\u002Fchemrxiv.org\u002Fengage\u002Fchemrxiv\u002Farticle-details\u002F6456c89707c3f0293753101d)\n117. [Neural Networks 23] **Generating Post-hoc Explanations for Skip-gram-based Node Embeddings by Identifying Important Nodes with Bridgeness** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.12036)\n118. [ICASSP 23] **Towards a More Stable and General Subgraph Information Bottleneck** [[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F10094812)\n119. [ESANN 23] **Combining Stochastic Explainers and Subgraph Neural Networks can Increase Expressivity and Interpretability** [[Paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.07152)\n120. [IEEE Access] **Generating Real-Time Explanations for GNNs via Multiple Specialty Learners and Online Knowledge Distillation** [[Paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F10107968)\n121. [IEEE Access] **Providing Post-Hoc Explanation for Node Representation Learning Models Through Inductive Conformal Predictions** [[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10003193&tag=1)\n122. [Journal of Software 23] **A Slice-level vulnerability detection and interpretation method based on graph neural network** [[paper]](http:\u002F\u002Fwww.jos.org.cn\u002Fjosen\u002Farticle\u002Fabstract\u002Fmr008)\n123. [Automation in Construction 23] **Learning from explainable data-driven tunneling graphs: A spatio-temporal graph convolutional network for clogging detection** [[paper]](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0926580523000018)\n124. 
[Briefings in Bioinformatics] **Predicting molecular properties based on the interpretable graph neural network with multistep focus mechanism** [[paper]](https:\u002F\u002Facademic.oup.com\u002Fbib\u002Fadvance-article\u002Fdoi\u002F10.1093\u002Fbib\u002Fbbac534\u002F6918752)\n125. [Briefings in Bioinformatics] **Identification of vital chemical information via visualization of graph neural networks** [[paper]](https:\u002F\u002Facademic.oup.com\u002Fbib\u002Farticle\u002F24\u002F1\u002Fbbac577\u002F6936421)\n126. [Bioinformatics 23] **Explainable Multilayer Graph Neural Network for Cancer Gene Prediction** [[paper]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2301.08831.pdf)\n127. [ICLR Workshop 23] **GCI: A Graph Concept Interpretation Framework** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.04899)\n128. [Arxiv 23] **Structural Explanations for Graph Neural Networks using HSIC** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.02139)\n129. [Internet of Things 23] **XG-BoT: An Explainable Deep Graph Neural Network for Botnet Detection and Forensics** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.09088)\n130. [JOS23] **A Generic Explaining & Locating Method for Malware Detection based on Graph Neural Networks** [[paper]](https:\u002F\u002Fwww.jos.org.cn\u002Fjosen\u002Farticle\u002Fabstract\u002F7123)\n131. [IJCNN 23] **GRAPHSHAP: Explaining Identity-Aware Graph Classifiers Through the Language of Motifs** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.08815)\n141. [Arxiv 23.01] **Explainability in subgraphs-enhanced Graph Neural Networks** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2209.07926)\n\n### Year 2022\n1. [NeurIPS 22] **GStarX:Explaining Graph-level Predictions with Communication Structure-Aware Cooperative Games** [[paper]](https:\u002F\u002Fopenreview.net\u002Fpdf?id=Qry8exovcNA)\n2. 
[NeurIPS 22] **Debiasing Graph Neural Networks via Learning Disentangled Causal Substructure** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2209.14107)\n3. [NeurIPS 22] **Task-Agnostic Graph Neural Explanations** [[paper]](https:\u002F\u002Fopenreview.net\u002Fpdf?id=NQrx8EYMboO)\n4. [NeurIPS 22] **CLEAR: Generative Counterfactual Explanations on Graphs**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.08443)\n5. [ICML 22] **Interpretable and Generalizable Graph Learning via Stochastic Attention Mechanism** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2201.12987v1)\n6. [ICLR 22] **DEGREE: Decomposition Based Explanation for Graph Neural Networks** [[paper]](https:\u002F\u002Fopenreview.net\u002Fpdf?id=Ve0Wth3ptT_)\n7. [ICLR 22] **Explainable GNN-Based Models over Knowledge Graphs** [[paper]](https:\u002F\u002Fopenreview.net\u002Fattachment?id=CrCvGNHAIrz&name=pdf)\n8. [ICLR 22] **Discovering Invariant Rationales for Graph Neural Networks** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2201.12872)\n9. [KDD 22] **On Structural Explanation of Bias in Graph Neural Networks** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.12104)\n10. [KDD 22] **Causal Attention for Interpretable and Generalizable Graph Classification** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.15089)\n10. [CVPR 22] **OrphicX: A Causality-Inspired Latent Variable Model for Interpreting Graph Neural Networks** [[paper]](https:\u002F\u002Fwanyu-lin.github.io\u002Fassets\u002Fpublications\u002Fwanyu-cvpr2022.pdf)\n81. [CVPR 22] **Improving Subgraph Recognition with Variational Graph Information Bottleneck** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.09899)\n12. [AISTATS 22] **Probing GNN Explainers: A Rigorous Theoretical and Empirical Analysis of GNN Explanation Methods** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.09078)\n13. 
[AISTATS 22] **CF-GNNExplainer: Counterfactual Explanations for Graph Neural Networks**  [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2102.03322)\n14. [TPAMI 22] **Differentially Private Graph Neural Networks for Whole-Graph Classification** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.03806)\n15. [TPAMI 22] **Reinforced Causal Explainer for Graph Neural Networks** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.11028)\n17. [VLDB 22] **xFraud: Explainable Fraud Transaction Detection on Heterogeneous Graphs** [[paper]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2011.12193.pdf)\n18. [LOG 22]**GraphFramEx: Towards Systematic Evaluation of Explainability Methods for Graph Neural Networks** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.09677)\n19. [LOG 22] **Towards Training GNNs using Explanation Directed Message Passing** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.16731)\n20. [The Webconf 22] **Learning and Evaluating Graph Neural Network Explanations based on Counterfactual and Factual Reasoning** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.08816)\n21. [AAAI 22] **Prototype-Based Explanations for Graph Neural Networks** [[paper]](https:\u002F\u002Fwww.aaai.org\u002FAAAI22Papers\u002FSA-00396-ShinY.pdf)\n36. [AAAI 22] **KerGNNs: Interpretable Graph Neural Networks with Graph Kernels**[[paper]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2201.00491.pdf)\n37. [AAAI 22] **ProtGNN: Towards Self-Explaining Graph Neural Networks** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.00911)\n23. [IEEE Big Data 22] **Trade less Accuracy for Fairness and Trade-off Explanation for GNN** [[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10020318)\n28. [CIKM 22] **GRETEL: A unified framework for Graph Counterfactual Explanation Evaluation**  [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.02957)\n29. 
[CIKM 22] **GRETEL: Graph Counterfactual Explanation Evaluation Framework**[[paper]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3511808.3557608)\n30. [CIKM 22] **A Model-Centric Explainer for Graph Neural Network based Node Classification** [[paper]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3511808.3557535)\n31. [IJCAI 22] **What Does My GNN Really Capture? On Exploring Internal GNN Representations** [[paper]](https:\u002F\u002Fhal.archives-ouvertes.fr\u002Fhal-03700710\u002F)\n32. [ECML PKDD 22] **Improving the quality of rule-based GNN explanations** [[paper]](https:\u002F\u002Fkdd.isti.cnr.it\u002Fxkdd2022\u002Fpapers\u002FXKDD_2022_paper_2436.pdf)\n33. [MICCAI 22] **Interpretable Graph Neural Networks for Connectome-Based Brain Disorder Analysis** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.00813)\n34. [MICCAI 22] **Sparse Interpretation of Graph Convolutional Networks for Multi-modal Diagnosis of Alzheimer’s Disease** [[paper]](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-031-16452-1_45)\n38. [EuroS&P 22] **Illuminati: Towards Explaining Graph Neural Networks for Cybersecurity Analysis** [[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F9797387?casa_token=1AvRK3S4eJQAAAAA:8PXcOA8iU1ketRMdu6YVMBMcfZKjF7MIVujPpHTpjdc2O9r1cvUg8usfRiOYZ5Fe-MKJi4Y)\n39. [INFOCOM 22] **Interpretability Evaluation of Botnet Detection Model based on Graph Neural Network** [[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9798287)\n40. [GLOBECOM 22] **Shapley Explainer - An Interpretation Method for GNNs Used in SDN** [[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10001460)\n41. [GLOBECOM 22] **An Explainer for Temporal Graph Neural Networks** [[paper]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2209.00807.pdf)\n42. 
[TKDE 22] **Zorro: Valid, Sparse, and Stable Explanations in Graph Neural Networks** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2105.08621)\n43. [TNNLS 22] **Interpretable Graph Reservoir Computing With the Temporal Pattern Attention**  [[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10003110)\n44. [TNNLS22] **A Meta-Learning Approach for Training Explainable Graph Neural Networks** [[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F9772740)\n45. [TNNLS 22] **Explaining Deep Graph Networks via Input Perturbation** [[paper]](https:\u002F\u002Fpubmed.ncbi.nlm.nih.gov\u002F35446771\u002F)\n63. [TNNLS 22] **A Meta-Learning Approach for Training Explainable Graph Neural Network** [[paper]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2109.09426.pdf)\n47. [DMKD 22] **On GNN explanability with activation patterns**  [[paper]](https:\u002F\u002Fhal.archives-ouvertes.fr\u002Fhal-03367714\u002Ffile\u002Fhal.pdf)\n48. [KBS 22] **EGNN: Constructing explainable graph neural networks via knowledge distillation** [[paper]](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0950705122001289?via%3Dihub)\n49. [XKDD 22] **GREASE: Generate Factual and Counterfactual Explanations for GNN-based Recommendations**  [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.04222)\n50. [AI 22] **Are Graph Neural Network Explainers Robust to Graph Noises?** [[paper]](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-031-22695-3_12)\n52. [BRACIS 22] **ConveXplainer for Graph Neural Networks** [[paper]](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-031-21689-3_41)\n53. [GLB 22] **An Explainable AI Library for Benchmarking Graph Explainers** [[paper]](https:\u002F\u002Fgraph-learning-benchmarks.github.io\u002Fassets\u002Fpapers\u002Fglb2022\u002FAn_Explainable_AI_Library_for_Benchmarking_Graph_Explainers.pdf)\n54. 
[DASFAA 22] **On Global Explainability of Graph Neural Networks** [[paper]](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-031-00123-9_52)\n55. [ISBI 22] **Interpretable Graph Convolutional Network Of Multi-Modality Brain Imaging For Alzheimer’s Disease Diagnosis** [[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F9761449?casa_token=w3IlSZNlKwcAAAAA:Xvh04eK29bZtbkRq5Eg3jUZURS3qs1k3AA1bhnnN2kKWmIjBnh7alAiy98zBgsHFtvFQqV0IYA)\n56. [Bioinformatics] **GNN-SubNet: disease subnetwork detection with explainable Graph Neural Networks** [[paper]](https:\u002F\u002Facademic.oup.com\u002Fbioinformatics\u002Farticle\u002F38\u002FSupplement_2\u002Fii120\u002F6702000?login=false)\n57. [Medical Imaging 2022]  **Phenotype guided interpretable graph convolutional network analysis of fMRI data reveals changing brain connectivity during adolescence** [[paper]](https:\u002F\u002Fwww.semanticscholar.org\u002Fpaper\u002FPhenotype-guided-interpretable-graph-convolutional-Orlichenko-Qu\u002Fd05adc7c772780be4b99a169441696017d49c6ed)\n83. [NeuroComputing 22] **Perturb more, trap more: Understanding behaviors of graph neural networks** [[paper]](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0925231222004404?casa_token=6KLu9elyyLMAAAAA:hM0eGpfSnLxF0V8fZJdoDE3hkalzK2yccBJl3X9KN-Btu_xDSZmmbORIfkYdK5rgjTr7MReeFxc)\n84. [DSN 22] **CFGExplainer: Explaining Graph Neural Network-Based Malware Classification from Control Flow Graphs** [[paper]](http:\u002F\u002Fwww.cs.binghamton.edu\u002F~ghyan\u002Fpapers\u002Fdsn22.pdf)\n118. [IEEE Access 22] **Providing Node-level Local Explanation for node2vec through Reinforcement Learning** [[paper]](https:\u002F\u002Fmlog-workshop.github.io\u002Fpapers\u002FMLoG%20Providing%20Node-level%20Local%20Explanation%20for%20node2vec%20through%20Reinforcement%20Learning.pdf)\n119. 
[Patterns 22] **Quantitative Evaluation of Explainable Graph Neural Networks for Molecular Property Prediction** [[paper]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2107.04119.pdf)\n121. [IEEE Access 22] **Providing Post-Hoc Explanation for Node Representation Learning Models Through Inductive Conformal Predictions** [[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10003193)\n122. [IEEE 22] **Explaining Graph Neural Networks With Topology-Aware Node Selection: Application in Air Quality Inference**  [[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9801665)\n123. [BioRxiv 22] **GNN-SubNet: disease subnetwork detection with explainable Graph Neural Networks** [[paper]](https:\u002F\u002Fwww.biorxiv.org\u002Fcontent\u002F10.1101\u002F2022.01.12.475995v1)\n124. [IEEE Robotics and Automation Letters 22] **Efficient and Interpretable Robot Manipulation with Graph Neural Networks** [[paper]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2102.13177.pdf)\n125. [Arxiv 22] **Deconfounding to Explanation Evaluation in Graph Neural Networks** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2201.08802)\n126. [ICCPR 22] **GANExplainer: GAN-based Graph Neural Networks Explainer**  [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.00012)\n129. [Arxiv 22] **Exploring Explainability Methods for Graph Neural Networks** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.01770)\n132. [Arxiv 22] **Toward Multiple Specialty Learners for Explaining GNNs via Online Knowledge Distillation** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.11094)\n134. [Openreview 23] **TGP: Explainable Temporal Graph Neural Networks for Personalized Recommendation** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=EGobBwPc1J-)\n139. [Arxiv 22] **PGX: A Multi-level GNN Explanation Framework Based on Separate Knowledge Distillation Processes** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.03075)\n142. 
[Arxiv 22] **Defending Against Backdoor Attack on Graph Neural Network by Explainability** [[paper]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2209.02902.pdf)\n144. [Arxiv 22] **Explaining Dynamic Graph Neural Networks via Relevance Back-propagation** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.11175)\n147. [Arxiv 22] **Faithful Explanations for Deep Graph Models** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.11850)\n148. [Arxiv 22] **Towards Explanation for Unsupervised Graph-Level Representation Learning** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.09934)\n149. [Arxiv 22] **BAGEL: A Benchmark for Assessing Graph Neural Network Explanations** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.13983)\n152. [Arxiv 22] **Explainability in Graph Neural Networks: An Experimental Survey** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.09258)\n153. [IEEE TSIPN 22] **Explainability and Graph Learning from Social Interactions** [[paper]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2203.07494.pdf)\n\n\n\n### Year 2021\n1. [NeurIPS 21] **SALKG: Learning From Knowledge Graph Explanations for Commonsense Reasoning** [[paper]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2104.08793.pdf)\n2. [NeurIPS 2021] **Reinforcement Learning Enhanced Explainer for Graph Neural Networks** [[paper]](http:\u002F\u002Frecmind.cn\u002Fpapers\u002Fexplainer_nips21.pdf)\n3. [NeurIPS 2021] **Towards Multi-Grained Explainability for Graph Neural Networks** [[paper]](http:\u002F\u002Fstaff.ustc.edu.cn\u002F~hexn\u002Fpapers\u002Fnips21-explain-gnn.pdf)\n21. [NeurIPS 2021] **Robust Counterfactual Explanations on Graph Neural Networks** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2107.04086)\n22. [ICML 2021] **On Explainability of Graph Neural Networks via Subgraph Explorations**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2102.05152)\n32. 
[ICML 2021] **Generative Causal Explanations for Graph Neural Networks**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.06643)\n33. [ICML 2021] **Improving Molecular Graph Neural Network Explainability with Orthonormalization and Induced Sparsity**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2105.04854)\n34. [ICML 2021] **Automated Graph Representation Learning with Hyperparameter Importance Explanation**[[paper]](http:\u002F\u002Fproceedings.mlr.press\u002Fv139\u002Fwang21f\u002Fwang21f.pdf)\n26. [ICLR 21] **Explainable Subgraph Reasoning for Forecasting on Temporal Knowledge Graphs**  [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=pGIHq1m7PU)\n27. [ICLR 2021] **Interpreting Graph Neural Networks for NLP With Differentiable Edge Masking**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2010.00577)\n52. [ICLR 2021] **Graph Information Bottleneck for Subgraph Recognition** [[paper]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2010.05563.pdf)\n53. [KDD 2021] **When Comparing to Ground Truth is Wrong: On Evaluating GNN Explanation Methods**[[paper]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3447548.3467283)\n54. [KDD 2021] **Counterfactual Graphs for Explainable Classification of Brain Networks** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.08640)\n27. [CVPR 2021] **Quantifying Explainers of Graph Neural Networks in Computational Pathology**.[[paper]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2011.12646.pdf)\n40. [NAACL 2021] **Counterfactual Supporting Facts Extraction for Explainable Medical Record Based Diagnosis with Graph Network**. [[paper]](https:\u002F\u002Faclanthology.org\u002F2021.naacl-main.156.pdf)\n28. [AAAI 2021] **Motif-Driven Contrastive Learning of Graph Representations** [[paper]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2012.12533.pdf)\n56. 
[TPAMI 21] **Higher-Order Explanations of Graph Neural Networks via Relevant Walks** [[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9547794)\n57. [WWW 2021] **Interpreting and Unifying Graph Neural Networks with An Optimization Framework** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2101.11859)\n59. [Genome medicine 21] **Explaining decisions of Graph Convolutional Neural Networks: patient-specific molecular subnetworks responsible for metastasis prediction in breast cancer** [[paper]](https:\u002F\u002Fwww.semanticscholar.org\u002Fpaper\u002FExplaining-decisions-of-Graph-Convolutional-Neural-Chereda-Bleckmann\u002F49a4e339182b2b304304c8837b09ce3e0951a616)\n60. [IJCKG 21] **Knowledge Graph Embedding in E-commerce Applications: Attentive Reasoning, Explanations, and Transferable Rules** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.08589)\n61. [RuleML+RR 21] **Combining Sub-Symbolic and Symbolic Methods for Explainability** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.01844)\n62. [PAKDD 21] **SCARLET: Explainable Attention based Graph Neural Network for Fake News spreader prediction** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2102.04627)\n63. [J. Chem. Inf. Model] **Coloring Molecules with Explainable Artificial Intelligence for Preclinical Relevance Assessment** [[paper]](https:\u002F\u002Fpubs.acs.org\u002Fdoi\u002Fabs\u002F10.1021\u002Facs.jcim.0c01344)\n64. [BioRxiv 21] **APRILE: Exploring the Molecular Mechanisms of Drug Side Effects with Explainable Graph Neural Networks** [[paper]](https:\u002F\u002Fwww.biorxiv.org\u002Fcontent\u002F10.1101\u002F2021.07.02.450937v2.abstract)\n65. [ISM 21] **Edge-Level Explanations for Graph Neural Networks by Extending Explainability Methods for Convolutional Neural Networks** [[paper]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2111.00722.pdf)\n67. 
[Arxiv 21] **Towards the Explanation of Graph Neural Networks in Digital Pathology with Information Flows** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.09895)\n68. [Arxiv 21] **SEEN: Sharpening Explanations for Graph Neural Networks using Explanations from Neighborhoods** [[paper]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2106.08532.pdf)\n69. [Arxiv 21] **Preserve, Promote, or Attack? GNN Explanation via Topology Perturbation** [[paper]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2103.13944.pdf)\n70. [Arxiv 21] **Learnt Sparsification for Interpretable Graph Neural Networks** [[paper]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2106.12920.pdf)\n72. [ICML workshop 21] **GCExplainer: Human-in-the-Loop Concept-based Explanations for Graph Neural Networks** [[paper]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2107.11889.pdf)\n74. [ICML workshop 21] **Reliable Graph Neural Network Explanations Through Adversarial Training** [[paper]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2106.13427.pdf)\n75. [ICML workshop 21] **Reimagining GNN Explanations with ideas from Tabular Data** [[paper]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2106.12665.pdf)\n76. [ICML workshop 21] **Towards Automated Evaluation of Explanations in Graph Neural Networks** [[paper]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2106.11864.pdf)\n79. [ICDM 2021] **GNES: Learning to Explain Graph Neural Networks** [[paper]](https:\u002F\u002Fcs.emory.edu\u002F~lzhao41\u002Fmaterials\u002Fpapers\u002FGNES.pdf)\n80. [ICDM 2021] **GCN-SE: Attention as Explainability for Node Classification in Dynamic Graphs** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2110.05598)\n82. [ICDM 2021] **Multi-objective Explanations of GNN Predictions** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.14651)\n83. [CIKM 2021] **Towards Self-Explainable Graph Neural Network** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2108.12055)\n84. 
[ECML PKDD 2021] **GraphSVX: Shapley Value Explanations for Graph Neural Networks** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.10482)\n85. [WiseML 2021] **Explainability-based Backdoor Attacks Against Graph Neural Networks** [[paper]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3468218.3469046)\n86. [IJCNN 21] **MEG: Generating Molecular Counterfactual Explanations for Deep Graph Networks**  [[paper]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2104.08060.pdf)\n87. [ICCSA 2021] **Understanding Drug Abuse Social Network Using Weighted Graph Neural Networks Explainer** [[paper]](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007%2F978-3-030-86970-0_5)\n88. [NeSy 21] **A New Concept for Explaining Graph Neural Networks** [[paper]](http:\u002F\u002Fceur-ws.org\u002FVol-2986\u002Fpaper1.pdf)\n89. [Information Fusion 21] **Towards multi-modal causability with Graph Neural Networks enabling information fusion for explainable AI** [[paper]](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS1566253521000142?via%3Dihub)\n90. [Patterns 21] **hcga: Highly Comparative Graph Analysis for network phenotyping** [[paper]](https:\u002F\u002Fwww.biorxiv.org\u002Fcontent\u002F10.1101\u002F2020.09.25.312926v2)\n\n\n\n\n### Year 2020 and Before\n1. [NeurIPS 2020] **Parameterized Explainer for Graph Neural Network**.[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2011.04573)\n2. [NeurIPS 2020] **PGM-Explainer: Probabilistic Graphical Model Explanations for Graph Neural Networks** [[paper]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2010.05788.pdf)\n3. [KDD 2020] **XGNN: Towards Model-Level Explanations of Graph Neural Networks** [[paper]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3394486.3403085)\n4. [ACL 2020]**GCAN: Graph-aware Co-Attention Networks for Explainable Fake News Detection on Social Media**. [paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2004.11648.pdf)\n5. 
[Arxiv 2020] **Graph Neural Networks Including Sparse Interpretability** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2007.00119)\n6. [NeurIPS Workshop 20] **Towards explainable message passing networks for predicting carbon dioxide adsorption in metal-organic frameworks** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2012.03723)\n7. [ICML workshop 2020] **Contrastive Graph Neural Network Explanation** [[paper]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2010.13663.pdf)\n8. [ICML workshop 2020] **Towards Explainable Graph Representations in Digital Pathology** [[paper]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2007.00311.pdf)\n9. [NeurIPS workshop 2020] **Explaining Deep Graph Networks with Molecular Counterfactuals** [[paper]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2011.05134.pdf)\n10. [DataMod 2020] **Exploring Graph-Based Neural Networks for Automatic Brain Tumor Segmentation** [[paper]](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007%2F978-3-030-70650-0_2)\n12. [OpenReview 20] **A Framework For Differentiable Discovery Of Graph Algorithms** [[paper]](https:\u002F\u002Fopenreview.net\u002Fpdf?id=ueiBFzt7CiK)\n13. [OpenReview 20] **Causal Screening to Interpret Graph Neural Networks** [[paper]](https:\u002F\u002Fopenreview.net\u002Fpdf?id=nzKv5vxZfge)\n14. [Arxiv 20] **Understanding Graph Neural Networks from Graph Signal Denoising Perspectives** [[paper]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2006.04386.pdf)\n15. [Arxiv 20] **Understanding the Message Passing in Graph Neural Networks via Power Iteration** [[paper]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2006.00144.pdf)\n17. [IJCNN 20] **GCN-LRP explanation: exploring latent attention of graph convolutional networks** [[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F9207639)\n18. 
[CD-MAKE 20] **Explain Graph Neural Networks to Understand Weighted Graph Features in Node Classification** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2002.00514) \n19. [ICDM 19] **Scalable Explanation of Inferences on Large Graphs**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F1908.06482) \n\n\n","# 令人惊叹的图可解释性论文\n关于图神经网络可解释性的论文\n\n### 综述类\n1. [ACM计算综述25] **解释图神经网络中的解释器：一项比较研究** [论文](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3696444)\n2. [IEEE会议论文集24] **可信图神经网络：方面、方法与趋势** [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.07424)\n3. [预印本24] **基于图的可解释AI：全面综述** [论文](https:\u002F\u002Fhal.science\u002Fhal-04660442\u002F)\n4. [Arxiv 23] **图神经网络可解释性综述** [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.01958)\n5. [ACM计算综述] **图反事实解释综述：定义、方法、评估与研究挑战** [论文](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3618105)\n6. [TPAMI 22]**图神经网络中的可解释性：分类学综述**。*袁浩、于海洋、桂树睿、季水旺*。[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2012.15445.pdf)\n7. [Arxiv 22]**可解释图神经网络综述：分类与评估指标** [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2207.12599.pdf)\n8. [Arxiv 22] **可信图学习综述：可靠性、可解释性与隐私保护** [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.10014)\n9. [大数据2022]**用于网络恶意软件分析的可解释图神经网络综述** [论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10020943)\n10. [机器智能研究24] **可信图神经网络综合综述：隐私、鲁棒性、公平性与可解释性**[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.08570)\n11. [书籍23] **图神经网络的生成式解释：方法与评估** [论文](http:\u002F\u002Fsites.computer.org\u002Fdebull\u002FA23june\u002Fp64.pdf)\n\n### 平台类\n1. **PyTorch Geometric** [[文档]](https:\u002F\u002Fpytorch-geometric.readthedocs.io\u002Fen\u002Flatest\u002Ftutorial\u002Fexplain.html) [[博客]](https:\u002F\u002Fmedium.com\u002F@pytorch_geometric\u002Fgraph-machine-learning-explainability-with-pyg-ff13cffc23c2)\n2. **DIG：深入图深度学习研究的一站式库** [论文](https:\u002F\u002Fwww.jmlr.org\u002Fpapers\u002Fv22\u002F21-0343.html) [代码](https:\u002F\u002Fgithub.com\u002Fdivelab\u002FDIG)\n2. 
**GraphXAI：评估图神经网络的可解释性** [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.09339v2) [代码](https:\u002F\u002Fgithub.com\u002Fmims-harvard\u002Fgraphxai)\n3. **GraphFramEx：迈向图神经网络可解释性方法的系统化评估** [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.09677) [代码](https:\u002F\u002Fgithub.com\u002Fgraphframex\u002Fgraphframex)\n4. **GNNExplainer和PGExplainer** [论文](https:\u002F\u002Fopenreview.net\u002Fforum?id=8JHrucviUf) [代码](https:\u002F\u002Fgithub.com\u002FLarsHoldijk\u002FRE-ParameterizedExplainerForGraphNeuralNetworks)\n5. **BAGEL：评估图神经网络解释的基准测试** [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.13983)[代码](https:\u002F\u002Fgithub.com\u002Fmandeep-rathee\u002Fbagel-benchmark)\n\n\n### 由[Cogdl](https:\u002F\u002Fgithub.com\u002FTHUDM\u002Fcogdl\u002Fblob\u002Fmaster\u002Fgnn_papers.md#explainability)选出的最具影响力论文\n1. **图神经网络中的可解释性：分类学综述**。*袁浩、于海洋、桂树睿、季水旺*。ARXIV 2020年。[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2012.15445.pdf)\n2. **Gnnexplainer：为图神经网络生成解释**。*Ying Rex、Bourgeois Dylan、You Jiaxuan、Zitnik Marinka、Leskovec Jure*。NeurIPS 2019年。[论文](https:\u002F\u002Fwww.ncbi.nlm.nih.gov\u002Fpmc\u002Farticles\u002FPMC7138248\u002F) [代码](https:\u002F\u002Fgithub.com\u002FRexYing\u002Fgnn-model-explainer)\n3. **图卷积神经网络的可解释性方法**。*Pope Phillip E、Kolouri Soheil、Rostami Mohammad、Martin Charles E、Hoffmann Heiko*。CVPR 2019年。[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FPope_Explainability_Methods_for_Graph_Convolutional_Neural_Networks_CVPR_2019_paper.pdf)\n4. **图神经网络的参数化解释器**。*Luo Dongsheng、Cheng Wei、Xu Dongkuan、Yu Wenchao、Zong Bo、Chen Haifeng、Zhang Xiang*。NeurIPS 2020年。[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2011.04573) [代码](https:\u002F\u002Fgithub.com\u002Fflyingdoog\u002FPGExplainer)\n5. **Xgnn：迈向图神经网络的模型级解释**。*袁浩、Tang Jiliang、Hu Xia、季水旺*。KDD 2020年。[论文](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3394486.3403085)。\n6. 
**评估图神经网络的归因**。*Sanchez-Lengeling Benjamin、Wei Jennifer、Lee Brian、Reif Emily、Wang Peter、Qian Wesley、McCloskey Kevin、Colwell Lucy、Wiltschko Alexander*。NeurIPS 2020年。[论文](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2020\u002Ffile\u002F417fbbf2e9d5a28a855a11894b2e795a-Paper.pdf)\n7. **PGM-Explainer：图神经网络的概率图模型解释**。*Vu Minh、Thai My T.*。NeurIPS 2020年。[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2010.05788.pdf)\n8. **基于解释的弱监督视觉关系学习与图网络**。*Federico Baldassarre、Kevin Smith、Josephine Sullivan、Hossein Azizpour*。ECCV 2020年。[论文](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2020\u002Fpapers_ECCV\u002Fpapers\u002F123730613.pdf)\n9. **GCAN：面向社交媒体上可解释假新闻检测的图感知协同注意力网络**。*Lu、Yi-Ju和Li、Cheng-Te*。ACL 2020年。[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2004.11648.pdf)\n10. **通过子图探索实现图神经网络可解释性**。*袁浩、于海洋、Wang Jie、Li Kang、季水旺*。ICML 2021年。[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2102.05152.pdf)\n\n\n### 2026年\n- [ICLR 26] **无法真正解释的GNN解释及其发现方法**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2601.20815)\n\n### 2025年\n-  [NIPS 25] **基于图曲率的图神经网络鲁棒解释**[[论文]](https:\u002F\u002Fopenreview.net\u002Fpdf?id=48L3BEtH8w)\n-  [NIPS 25] **GnnXemplar：示例即解释——用于全局图神经网络可解释性的自然语言规则**[[论文]](https:\u002F\u002Fopenreview.net\u002Fforum?id=eafIjoZAHm)\n-  [NIPS 25] **基于逻辑的自解释图神经网络研究**[[论文]](https:\u002F\u002Fopenreview.net\u002Fpdf?id=OtAiYPP6GA)\n1. [ICLR 25] **图神经网络任意阶Shapley交互作用的精确计算**[[论文]](https:\u002F\u002Fopenreview.net\u002Fforum?id=9tKC0YM8sX) \n2. [ICLR 25] **从图神经网络到树结构：图神经网络的多粒度可解释性**[[论文]](https:\u002F\u002Fopenreview.net\u002Fforum?id=KEUPk0wXXe)\n3. [ICLR 25] **针对图扰动攻击的可证明鲁棒可解释图神经网络**[[论文]](https:\u002F\u002Fopenreview.net\u002Fforum?id=iFK0xoceR0)\n4. [ICLR 25] **迈向解释常深度图神经网络在线性规划中的能力**[[论文]](https:\u002F\u002Fopenreview.net\u002Fforum?id=INow59Vurm)\n5. [ICLR 25] **基于公理化层边的演化图上GNN解释**[[论文]](https:\u002F\u002Fopenreview.net\u002Fforum?id=pXN8T5RwNN)\n6. [ICLR 25] **MAGE：基于基元图生成的模型级图神经网络解释**[[论文]](https:\u002F\u002Fopenreview.net\u002Fforum?id=vue9P1Ypk6)\n7. 
[AAAI 25] **用于图解释的高阶结构**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.03253)\n8. [AAAI 25] **用于链接符号预测的自解释图Transformer**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.08754)\n9. [AAAI 25] **基于计算树视角的忠实且准确的自注意力归因，应用于消息传递神经网络**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.04612)\n10. [AAAI 25] **图分割与对比增强型图神经网络解释器**[[论文]](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F35440)\n11. [TKDD 25] **DyExplainer：可解释的动态图神经网络**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.16375)\n12. [Arxiv 25.05] **基于子图匹配的双重解释方法用于恶意软件检测**[[论文]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.20904)\n13. [Arxiv 25.04] **关于图神经网络在恶意软件检测中解释的一致性问题**[[论文]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.16316)\n14. [Arxiv 25.01] **通过解释技术对图神经网络进行水印标记以保护所有权**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.05614)\n15. [Arxiv 25.01] **混合专家图Transformer用于可解释的粒子碰撞检测**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.03432)\n16. [ACM Computing Surveys] **图神经网络能否被充分解释？一项综述**[[论文]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3711122)\n17. [IEEE TNSRE] **利用可解释图神经网络寻找运动学习与康复的神经生物标志物**[[论文]](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10843258)\n18. [Springer FCS] **借鉴捷径：一种基于捷径引导的可解释图学习方法**[[论文]](https:\u002F\u002Flink.springer.com\u002Farticle\u002F10.1007\u002Fs11704-024-40452-4)\n19. [NN] **具有多头图通道注意力网络的局部可解释垃圾信息检测模型**[[论文]](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0893608024009985)\n20. [Applied Intelligence] **KnowGNN：一种知识感知、结构敏感的模型级图神经网络解释器**[[论文]](https:\u002F\u002Flink.springer.com\u002Farticle\u002F10.1007\u002Fs10489-024-06034-4)\n21. [ICML 25] **TopInG：基于持久性理由过滤的拓扑可解释图学习**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2510.05102) [[项目]](https:\u002F\u002Fjackal092927.github.io\u002Fpublication\u002FTopInG_ICML2025)\n\n### Year 2024\n1. 
[NeurIPS 24] **RegExplainer: Generating Explanations for Graph Neural Networks in Regression Task** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.07840)\n2. [NeurIPS 24] **GraphTrail: Translating GNN Predictions into Human-Interpretable Logical Rules**[[paper]](https:\u002F\u002Fnips.cc\u002Fvirtual\u002F2024\u002Fposter\u002F94172)\n3. [ICML 24] **Generating In-Distribution Proxy Graphs for Explaining Graph Neural Networks**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.02036)\n4. [ICML 24] **Predicting and Interpreting Energy Barriers of Metallic Glasses with Graph Neural Networks** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.08627)\n5. [ICML 24] **Graph Neural Network Explanations are Fragile** [[paper]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.03193)\n6. [ICML 24] **How Interpretable Are Interpretable Graph Neural Networks?** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.07955)\n7. [ICML 24] **Feature Attribution with Necessity and Sufficiency via Dual-stage Perturbation Test for Causal Explanation**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.08845)\n8. [ICML 24] **Explaining Graph Neural Networks via Structure-aware Interaction Index** [[paper]](https:\u002F\u002Ficml.cc\u002Fvirtual\u002F2024\u002Fposter\u002F34550)\n9. [ICML 24] **EiG-Search: Generating Edge-Induced Subgraphs for GNN Explanation in Linear Time** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.01762)\n10. [ICLR 24] **GraphChef: Decision-Tree Recipes to Explain Graph Neural Networks** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=IjMUGuUmBI)\n11. [ICLR 24] **GOAt: Explaining Graph Neural Networks via Graph Output Attribution** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=2Q8TZWAHv4)\n12. [ICLR 24] **Towards Robust Fidelity for Evaluating Explainability of Graph Neural Networks** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=up6hr4hIQH)\n13. 
[ICLR 24] **GNNX-BENCH: Unravelling the Utility of Perturbation-based GNN Explainers through In-depth Benchmarking** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.01794)\n14. [ICLR 24] **UNR-Explainer: Counterfactual Explanations for Unsupervised Node Representation Learning Models** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=0j9ZDzMPqr)\n15. [TPAMI 24] **Towards Inductive and Efficient Explanations for Graph Neural Networks**[[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10423141)\n20. [Openreview 24] **Robust Graph Attention for Graph Adversarial Attacks: An Information Bottleneck Inspired Approach**[[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=lTL4t68BNc)\n21. [Openreview 24] **AIMing for Explainability in GNNs**[[paper]]([https:\u002F\u002Fopenreview.net\u002Fforum?id=lTL4t68BNc](https:\u002F\u002Fopenreview.net\u002Fforum?id=KZII3faAs2))\n23. [Openreview 24] **Graph Distributional Analytics: Enhancing GNN Explainability through Scalable Embedding and Distribution Analysis**[[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=Fzz8acgC6X)\n25. [Openreview 24] **Watermarking Graph Neural Networks Via Explanations For Ownership Protection**[[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=EgP6IEyfYJ)\n26. [Openreview 24] **Explainable Graph Representation Learning via Graph Pattern Analysis** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=hXJrQWIoR3)\n28. [Openreview 24] **Robust Heterogeneous Graph Neural Network Explainer with Graph Information Bottleneck** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=IMWYNVBHob)\n29. [Openreview 24] **A Hierarchical Language Model Design For Interpretable Graph Reasoning** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=DRSSLefryd)\n30. [Openreview 24] **The GECo algorithm for Graph Neural Networks Explanation** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=sTQC4TeYo1)\n31. 
[Openreview 24] **On Explaining Equivariant Graph Networks via Improved Relevance Propagation** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=YkMg8sB8AH)\n32. [Openreview 24] **SIG: Self-Interpretable Graph Neural Network for Continuous-time Dynamic Graphs** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=j0KjevdhkH)\n33. [Openreview 24] **Interpretable and Adaptive Graph Contrastive Learning with Information Sharing for Biomedical Link Prediction** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=GlgD9o9bl4)\n35. [Openreview 24] **TAGExplainer: Narrating Graph Explanations for Text-Attributed Graph Learning Models** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=VWBYDo5NaM)\n37. [Openreview 24] **TreeX: Generating Global Graphical GNN Explanations via Critical Subtree Extraction** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=zSUXo1nkqR)\n38. [TMLR 24] **InduCE: Inductive Counterfactual Explanations for Graph Neural Networks** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=RZPN8cgqST)\n39. [PLDI 24] **PL4XGL: A Programming Language Approach to Explainable Graph Learning**[[paper]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3656464)\n40. [Usenix Security 24] **INSIGHT: Attacking Industry-Adopted Learning Resilient Logic Locking Techniques Using Explainable Graph Neural Network**[[paper]](https:\u002F\u002Fwww.usenix.org\u002Fconference\u002Fusenixsecurity24\u002Fpresentation\u002Fmankali)\n41. [SIGMOD 24]**View-based Explanations for Graph Neural Networks** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.02086)\n42. [ACM SIGMOD Record] **The Road to Explainable Graph Neural Networks** [[paper]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3703922.3703930)\n43. [Thesis UCLA] **Explainable Artificial Intelligence for Graph Data**[[paper]](https:\u002F\u002Fescholarship.org\u002Fuc\u002Fitem\u002F6bf1g6dc)\n44. 
[Thesis UVA] **Algorithmic Fairness in Graph Machine Learning: Explanation, Optimization, and Certification**[[paper]](https:\u002F\u002Fwww.proquest.com\u002Fdocview\u002F3083271574)\n45. [KDD 24] **SEFraud: Graph-based Self-Explainable Fraud Detection via Interpretative Mask Learning**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.11389)\n46. [KDD 24] **Self-Explainable Temporal Graph Networks based on Graph Information Bottleneck**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.13214)\n47. [KDD 24] **Unveiling Global Interactive Patterns across Graphs: Towards Interpretable Graph Neural Networks**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.01979)\n48. [ICDE 24] **Generating Robust Counterfactual Witnesses for Graph Neural Networks** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.19519)\n49. [ICDE 24] **SES: Bridging the Gap Between Explainability and Prediction of Graph Neural Networks**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.11358)\n50. [ICSE 24] **Coca: Improving and Explaining Graph Neural Network-Based Vulnerability Detection Systems**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.14886)\n51. [AAAI 24] **Generating Diagnostic and Actionable Explanations for Fair Graph Neural Networks** [[paper]](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F30168)\n52. [AAAI 24] **Stratifed GNN Explanations through Sufficient Expansion**[[paper]](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F29180)\n53. [AAAI 24] **Factorized Explainer for Graph Neural Networks**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.05596)\n54. [AAAI 24] **Self-Interpretable Graph Learning with Sufficient and Necessary Explanations**\n55. 
[AAAI 24] **Explainable Origin-Destination Crowd Flow Interpolation via Variational Multi-Modal Recurrent Graph Auto-Encoder** [[paper]](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F28796)\n13. [AISTATS 24] **Two Birds with One Stone: Enhancing Uncertainty Quantification and Interpretability with Graph Functional Neural Process** [[paper]](https:\u002F\u002Fproceedings.mlr.press\u002Fv238\u002Fkong24a.html)\n14. [WWW 24] **Game-theoretic Counterfactual Explanation for Graph Neural Networks** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.06030)\n15. [WWW 24] **EXGC: Bridging Efficiency and Explainability in Graph Condensation**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.05962)\n16. [WWW 24] **Adversarial Mask Explainer for Graph Neural Networks** [[paper]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3589334.3645608)\n17. [WWW 24] **Globally Interpretable Graph Learning via Distribution Matching**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.10447)\n18. [WWW 24] **GNNShap: Scalable and Accurate GNN Explanation using Shapley Values** [[paper]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3589334.3645599)\n19. [LOG 24] **xAI-Drop: Don't Use What You Cannot Explain**[[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=adlpuqQD8Q)\n20. [LOG 24] **MOSE-GNN: A Motif-Based Self-Explaining Graph Neural Network for Molecular Property Prediction** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=nD1a6hSLhO)\n22. [TNNLS 24] **BrainIB: Interpretable Brain Network-based Psychiatric Diagnosis with Graph Information Bottleneck** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.03612)\n23. [TKDE 24] **On Regularization for Explaining Graph Neural Networks: An Information Theory Perspective** [[paper]](https:\u002F\u002Fopenreview.net\u002Fforum?id=5rX7M4wa2R_)\n24. 
[TKDD 24] **Towards Prototype-Based Self-Explainable Graph Neural Network** [[paper]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3689647)\n25. [TKDD 24] **Efficient GNN Explanation via Learning Removal-based Attribution** [[paper]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3685678)\n26. [TAI 24] **Learning Counterfactual Explanation of Graph Neural Networks via Generative Flow Network**[[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F10496445)\n27. [TAI 24] **Traffexplainer: A Framework towards GNN-based Interpretable Traffic Prediction** [[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10680338)\n28. [TMC 24] **HGExplainer: Heterogeneous Graph Explainer for IoT Device Identification**[[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10736553)\n29. [IEEE TMI 24] **Multi-Modal Diagnosis of Alzheimer’s Disease using Interpretable Graph Convolutional Networks**[[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10606492)\n30. [IEEE IoT 24] **EXVul: Toward Effective and Explainable Vulnerability Detection for IoT Devices**[[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F10479158)\n31. [IEEE Transactions on Fuzzy Systems] **Towards Embedding Ambiguity-Sensitive Graph Neural Network Explainability** [[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10696966)\n32. [IEEE JBHI] **Interpretable Dynamic Directed Graph Convolutional Network for Multi-Relational Prediction of Missense Mutation and Drug Response**[[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10721576)\n33. [IDEAL 2024] **Causal Explanation of Graph Neural Networks**[[paper]](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-031-77731-8_26)\n34. 
[BIBM 24] **Seizure Onset Zone Localization Method based on GNN Explanation** [[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10821860)\n35. [BIBM 24] **DDTExplainer: Mining Drug-Disease Therapeutic Mechanisms based on GNN Explainability** [[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10822060)\n36. [CIKM 24] **EDGE: Evaluation Framework for Logical vs. Subgraph Explanations for Node Classifiers on Knowledge Graphs**[[paper]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3627673.3679904)\n37. [ECML\u002FPKDD 24] **Towards Few-shot Self-explaining Graph Neural Networks**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.07340)\n38. [SDM 24] **XGExplainer: Robust Evaluation-based Explanation for Graph Neural Networks**[[paper]](https:\u002F\u002Fepubs.siam.org\u002Fdoi\u002Fabs\u002F10.1137\u002F1.9781611978032.8)\n23. [DASFAA 24] **Multi-objective Graph Neural Network Explanatory Model with Local and Global Information Preservation**[[paper]](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-981-97-5572-1_20)\n28. [ISSTA 2024] **Graph Neural Networks for Vulnerability Detection: A Counterfactual Explanation** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.15687)\n29. [KBS 24] **Shapley-based graph explanation in embedding space**[[paper]](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fabs\u002Fpii\u002FS0950705124008785?via%3Dihub)\n30. [KBS 24] **GEAR: Learning graph neural network explainer via adjusting gradients**[[paper]](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fabs\u002Fpii\u002FS0950705124010025)\n31. [IEEE TNSM 24] **Ensemble Graph Attention Networks for Cellular Network Analytics: From Model Creation to Explainability**[[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10622099)\n32. 
[IEEE TNSE 24] **GAXG: A Global and Self-adaptive Optimal Graph Topology Generation Framework for Explaining Graph Neural Networks**[[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10614894)\n33. [IEEE TETCI 24] **GF-LRP: A Method for Explaining Predictions Made by Variational Graph Auto-Encoders**[[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10586750)\n34. [AAAI workshop] **Semi-Supervised Graph Representation Learning with Human-centric Explanation for Predicting Fatty Liver Disease**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.02786)\n35. [xAI 24] **Global Concept Explanations for Graphs by Contrastive Learning** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.16532)\n36. [Arxiv 24.12] **BetaExplainer: A Probabilistic Method to Explain Graph Neural Networks**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.11964)\n37. [Arxiv 24.12] **GISExplainer: On Explainability of Graph Neural Networks via Game-theoretic Interaction Subgraphs** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.15698)\n38. [Arxiv 24.12] **Interpreting GNN-based IDS Detections Using Provenance Graph Structural Features** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.00934)\n39. [Arxiv 24.12] **eXpath: Explaining Knowledge Graph Link Prediction with Ontological Closed Path Rules**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.04846)\n40. [Arxiv 24.12] **On the Probability of Necessity and Sufficiency of Explaining Graph Neural Networks: A Lower Bound Optimization Approach** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.07056)\n41. [Arxiv 24.11] **Rethinking Node Representation Interpretation through Relation Coherence**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.00653)\n42. 
[Arxiv 24.11] **MBExplainer: Multilevel bandit-based explanations for downstream models with augmented graph embeddings** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.00287)\n43. [Arxiv 24.11] **Securing GNNs: Explanation-Based Identification of Backdoored Training Graphs**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.18136)\n44. [Preprint 24.11] **Chiseling the Graph: An Edge-Sculpting Method for Explaining Graph Neural Networks** [[paper]](https:\u002F\u002Fwww.researchsquare.com\u002Farticle\u002Frs-5414037\u002Fv1)\n45. [Preprint 24.10] **Reliable and Faithful Generative Explainers for Graph Neural Networks**[[paper]](https:\u002F\u002Fwww.preprints.org\u002Fmanuscript\u002F202410.1718)\n46. [Arxiv 24.10] **Explaining Hypergraph Neural Networks: From Local Explanations to Global Concepts**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.07764)\n47. [Arxiv 24.10] **Explainable Graph Neural Networks Under Fire** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.06417)\n48. [Arxiv 24.09] **GINTRIP: Interpretable Temporal Graph Regression using Information bottleneck and Prototype-based method** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.10996)\n49. [Arxiv 24.09] **PAGE: Parametric Generative Explainer for Graph Neural Network** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.14042)\n50. [Preprint 24.08] **CIDER: Counterfactual-Invariant Diffusion-based GNN Explainer for Causal Subgraph Inference**[[paper]](https:\u002F\u002Fwww.researchsquare.com\u002Farticle\u002Frs-4814778\u002Fv1)\n51. [Arxiv 24.07] **LLMExplainer: Large Language Model based Bayesian Inference for Graph Explanation Generation**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.15351)\n52. [Arxiv 24.07] **Explaining Graph Neural Networks for Node Similarity on Graphs**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.07639)\n41. 
[Arxiv 24.07] **SLInterpreter: An Exploratory and Iterative Human-AI Collaborative System for GNN-based Synthetic Lethal Prediction**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.14770)\n42. [Arxiv 24.07] **Graph Neural Network Causal Explanation via Neural Causal Models**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.09378)\n43. [Arxiv 24.06] **GNNAnatomy: Systematic Generation and Evaluation of Multi-Level Explanations for Graph Neural Networks**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.04548)\n44. [Arxiv 24.06] **On GNN explanability with activation rules**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.11594)\n46. [Arxiv 24.05] **SIG: Efficient Self-Interpretable Graph Neural Network for Continuous-time Dynamic Graphs**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.19062)\n47. [Arxiv 24.06] **L2XGNN: Learning to Explain Graph Neural Networks** [[paper]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2209.14402.pdf)\n48. [Arxiv 24.06] **Towards Understanding Sensitive and Decisive Patterns in Explainable AI: A Case Study of Model Interpretation in Geometric Deep Learning**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.00849)\n50. [Arxiv 24.06] **Explainable AI Security: Exploring Robustness of Graph Neural Networks to Adversarial Attacks** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.13920)\n51. [Arxiv 24.06] **Robust Ante-hoc Graph Explainer using Bilevel Optimization** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.15745)\n52. [Arxiv 24.06] **Perks and Pitfalls of Faithfulness in Regular, Self-Explainable and Domain Invariant GNNs** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.15156)\n53. [Arxiv 24.05] **Utilizing Description Logics for Global Explanations of Heterogeneous Graph Neural Networks** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.12654)\n54. 
[Arxiv 24.05] **Detecting Complex Multi-step Attacks with Explainable Graph Neural Network** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.11335)\n55. [Arxiv 24.05] **SynHING: Synthetic Heterogeneous Information Network Generation for Graph Learning and Explanation**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.04133)\n56. [Arxiv 24.05] **PAGE: Prototype-Based Model-Level Explanations for Graph Neural Networks** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.17159)\n57. [Arxiv 24.05] **Evaluating Neighbor Explainability for Graph Neural Networks** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.08118)\n58. [Preprint 24.05] **Explainable Graph Neural Networks: An Application to Open Statistics Knowledge Graphs for Estimating House Prices** [[paper]](https:\u002F\u002Fwww.preprints.org\u002Fmanuscript\u002F202405.0037\u002Fv1)\n59. [Arxiv 24.04] **Superior Polymeric Gas Separation Membrane Designed by Explainable Graph Machine Learning** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.10903)\n60. [Arxiv 24.04] **Improving the interpretability of GNN predictions through conformal-based graph sparsification** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.12356)\n61. [Arxiv 24.03] **GreeDy and CoDy: Counterfactual Explainers for Dynamic Graph**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.16846)\n62. [Arxiv 24.03] **Explainable Graph Neural Networks for Observation Impact Analysis in Atmospheric State Estimation**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.17384)\n64. [Arixv 24.03] **Iterative Graph Neural Network Enhancement via Frequent Subgraph Mining of Explanations**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.07849)\n65. [Arxiv 24.02] **PAC Learnability under Explanation-Preserving Graph Perturbations**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.05039)\n66. 
[Arxiv 24.02] **Explainable Global Wildfire Prediction Models using Graph Neural Networks**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.07152)\n67. [Arxiv 24.02] **Incorporating Retrieval-based Causal Learning with Information Bottlenecks for Interpretable Graph Neural Networks**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.04710)\n68. [Arxiv 24.01] **On Discprecncies between Perturbation Evaluations of Graph Neural Network Attributions**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.00633)\n69. [ASP=DAC 24] **LIPSTICK: Corruptibility-Aware and Explainable Graph Neural Network-based Oracle-Less Attack on Logic Locking**[[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.04235)\n70. [Biorxiv 24] **Community-aware explanations in knowledge graphs with XP-GNN**[[paper]](https:\u002F\u002Fwww.biorxiv.org\u002Fcontent\u002F10.1101\u002F2024.01.21.576302v1.abstract)\n71. [ISCV 24] **Adaptive Subgraph Feature Extraction for Explainable Multi-Modal Learning**[[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F10620106\u002F)\n72. [IJCNN 24] **Explanations for Graph Neural Networks using A Game-theoretic Value**[[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F10650495)\n73. [AIxIA 2024] **Relating Explanations with the Inductive Biases of Deep Graph Networks** [[paper]](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-031-80607-0_14)\n74. [Neurocomputing] **GeoExplainer: Interpreting Graph Convolutional Networks with geometric masking**[[paper]](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fabs\u002Fpii\u002FS0925231224011640?via%3Dihub)\n75. [Technologies] **Explainable Graph Neural Networks: An Application to Open Statistics Knowledge Graphs for Estimating House Prices**[[paper]](https:\u002F\u002Fwww.mdpi.com\u002F2227-7080\u002F12\u002F8\u002F128)\n76. [Reliab. Eng. Syst. Saf.] 
**Causal intervention graph neural network for fault diagnosis of complex industrial processes**[[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F10620106\u002F)\n77. [Frontiers in big data] **Global explanation supervision for Graph Neural Networks**[[paper]](https:\u002F\u002Fwww.semanticscholar.org\u002Freader\u002Fb6d6dda72e1d31e4b05e59909128cfccf4a835fb)\n78. [Information and Software Technology] **Graph-based explainable vulnerability prediction**[[paper]](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS095058492400171X?via%3Dihub)\n79. [Information Systems] **Heterogeneous graph neural networks for fraud detection and explanation in supply chain finance**[[paper]](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fabs\u002Fpii\u002FS0306437923001710?via%3Dihub)\n80. [Information Procs. & Mana.] **Towards explaining graph neural networks via preserving prediction ranking and structural dependency**[[paper]](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0306457323003084)\n81. [Applied Energy]  **Explainable Spatio-Temporal Graph Neural Networks for multi-site photovoltaic energy production** [[paper]](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0306261923015155)\n82. [PAKDD 24] **Random Mask Perturbation Based Explainable Method of Graph Neural Networks** [[paper]](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-981-97-2259-4_2)\n83. [Computational Materials Science] **Graph isomorphism network for materials property prediction along with explainability analysis**[[paper]](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0927025623006134)\n84. [NN 24] **Explanatory subgraph attacks against Graph Neural Networks**[[paper]](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0893608024000030)\n85. 
[NN 24] **GRAM: An interpretable approach for graph anomaly detection using gradient attention maps**[[paper]](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0893608024003873)\n86. [Neural Networks 24] **CI-GNN: A Granger Causality-Inspired Graph Neural Network for Interpretable Brain Network-Based Psychiatric Diagnosis** [[paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.01642)\n87. [NeuroImage 24] **BPI-GNN: Interpretable brain network-based psychiatric diagnosis and subtyping**[[paper]](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS1053811924000892)\n88. [PAKDD 24] **Toward Interpretable Graph Classification via Concept-Focused Structural Correspondence** [[paper]](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-981-97-2650-9_2)\n89. [ICPR 24] **Interpretable Deep Graph-Level Clustering: A Prototype-Based Approach** [[paper]](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-031-78128-5_8)\n90. [MedRxiv 24] **An Interpretable Population Graph Network to Identify Rapid Progression of Alzheimer’s Disease Using UK Biobank**[[paper]](https:\u002F\u002Fwww.medrxiv.org\u002Fcontent\u002F10.1101\u002F2024.03.27.24304966v1)\n91. [IEEE TDSC 24] **TrustGuard: GNN-based Robust and Explainable Trust Evaluation with Dynamicity Support** [[paper]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.13339.pdf)\n92. [IEEE Transactions] **IEEE Transactions on Computational Social Systems**[[paper]](https:\u002F\u002Fieeexplore.ieee.org\u002Fxpl\u002FRecentIssue.jsp?punumber=6570650)\n93. [Journal of Physics] **Explainer on GNN-based segmentation networks**[[paper]](https:\u002F\u002Fiopscience.iop.org\u002Farticle\u002F10.1088\u002F1742-6596\u002F2711\u002F1\u002F012009\u002Fmeta)\n94. 
[Energy and AI] **Electricity demand forecasting at distribution and household levels using explainable causal graph neural network** [[paper]](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS266654682400034X)\n95. [HI-AI@KDD 24] **Interpretable Graph Model with Prototype-Based Graph Information Bottleneck** [[paper]](https:\u002F\u002Fhuman-interpretable-ai.github.io\u002Fassets\u002Fpdf\u002F4_Interpretable_Graph_Model_wi.pdf)\n96. [Neurosymbolic Artificial Intelligence] **Towards Semantic Understanding of GNN Layers embedding with Functional-Semantic Activation Mapping** [[paper]](https:\u002F\u002Fneurosymbolic-ai-journal.com\u002Fsystem\u002Ffiles\u002Fnai-paper-803.pdf)\n97. [NeSy 2024] **Towards Understanding Graph Neural Networks: Functional-Semantic Activation Mapping**[[paper]](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-031-71170-1_11)\n98. [Thesis 24] **Explainable and physics-guided graph deep learning for air pollution modelling** [[paper]](https:\u002F\u002Fcris.vub.be\u002Fws\u002Fportalfiles\u002Fportal\u002F117178225\u002FRodrigoBonet_Esther_thesis.pdf)\n99. [Thesis 24] **Influence of molecular structures on graph neural network explainers’ performance**[[paper]](https:\u002F\u002Frepository.tudelft.nl\u002Ffile\u002FFile_03ae5c75-cc17-42c1-a593-1c82d2593c67?preview=1)\n\n\n\n### 2023年\n1. [NeurIPS 23] **可解释图网络提出通用代数猜想**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.11688)\n2. [NeurIPS 23] **SAME：基于结构感知Shapley值的分段式解释方法揭示GNN黑箱**[[论文]](https:\u002F\u002Fopenreview.net\u002Fforum?id=kBBsj9KRgh)\n3. [NeurIPS 23] **一次训练，处处解释：预训练可解释图神经网络**[[论文]](https:\u002F\u002Fopenreview.net\u002Fforum?id=enfx8HM4Rp)\n4. [NeurIPS 23] **D4Explainer：通过离散去噪扩散模型实现图神经网络的分布内解释**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.19321)\n5. [NeurIPS 23] **TempME：基于模体发现提升时序图神经网络的可解释性**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.19324)\n6. 
[NeurIPS 23] **V-InFoR：针对结构受损图的鲁棒图神经网络解释器**[[论文]](https:\u002F\u002Fopenreview.net\u002Fforum?id=CtXXOaxDw7)\n7. [NeurIPS 23] **迈向自解释的图级别异常检测**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.16520)\n8. [NeurIPS 23] **基于鲁棒性分析评估图神经网络的事后解释方法**[[论文]](https:\u002F\u002Fopenreview.net\u002Fforum?id=eD534mPhAg)\n9. [NeurIPS 23] **基于原型的可解释图信息瓶颈**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.19906)\n10. [ICML 23] **重新思考：通过非参数子图匹配解释图神经网络**[[论文]](https:\u002F\u002Fopenreview.net\u002Fforum?id=MocsSAUKlk)\n11. [ICML 23] **相关游走搜索：用于解释图神经网络的方法**[[论文]](https:\u002F\u002Fopenreview.net\u002Fforum?id=BDYIci7bVs)\n12. [ICML 23] **迈向理解图神经网络的泛化能力**[[论文]](https:\u002F\u002Fopenreview.net\u002Fpdf?id=BhMyLk0YNy)\n13. [ICLR 23] **GNNInterpreter：面向图神经网络的概率生成模型级解释**[[论文]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2209.07924.pdf)\n14. [ICLR 23] **通过学习概念的逻辑组合实现GNN的全局可解释性**[[论文]](https:\u002F\u002Fopenreview.net\u002Fforum?id=OTbRTIY4YS)\n15. [ICLR 23] **基于探索-导航框架解释时序图模型**[[论文]](https:\u002F\u002Fopenreview.net\u002Fforum?id=BR_ZhvcYbGJ)\n16. [ICLR 23] **DAG很重要！增强版GFlowNets用于图神经网络的解释**[[论文]](https:\u002F\u002Fopenreview.net\u002Fforum?id=jgmuRzM-sb6)\n17. [ICLR 23] **通过可学习的随机性注入实现可解释的几何深度学习**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.16966)\n18. [ICLR 23] **从微分几何视角看演化图上的GNN及其可解释性**[[论文]](https:\u002F\u002Fopenreview.net\u002Fforum?id=lRdhvzMpVYV)\n19. [KDD 23] **MixupExplainer：利用数据增强推广图神经网络的解释方法**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.07832)\n20. [KDD 23] **基于贪婪扰动的异质图反事实学习**[[论文]](https:\u002F\u002Frepository.kaust.edu.sa\u002Fhandle\u002F10754\u002F693484)\n21. [KDD 23] **借助信息瓶颈赋能事后图解释：预训练与微调视角**[[论文]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3580305.3599330)\n22. [KDD 23] **少即是多：SlimG用于精准、鲁棒且可解释的图挖掘**[[论文]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3580305.3599413)\n23. [KDD 23] **基于因果子结构的分子关系学习具有迁移鲁棒性**[[论文]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3580305.3599437)\n24. 
[AAAI 23] **基于神经元分析实现图神经网络的全局概念级可解释性**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.10609)\n25. [AAAI 23] **关于解释黑盒时序图神经网络的极限**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.00952)\n26. [AAAI 23] **迈向异质图神经网络的细粒度可解释性**[[论文]](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fdownload\u002F26040\u002F25812)\n27. [AAAI 23] **可解释的手性感知图神经网络用于药物发现中的定量构效关系建模**[[论文]](https:\u002F\u002Fopenreview.net\u002Fforum?id=W2OStztdMhc)\n28. [VLDB 23] **HENCE-X：面向异质性的多层级深度图网络解释框架**[[论文]](https:\u002F\u002Fwww.vldb.org\u002Fpvldb\u002Fvol16\u002Fp2990-lv.pdf)\n29. [VLDB 23] **关于图神经网络的数据感知型全局可解释性**[[论文]](https:\u002F\u002Fwww.vldb.org\u002Fpvldb\u002Fvol16\u002Fp3447-lv.pdf)\n30. [AISTATS 23] **蒸馏并解释：使用简单代理模型解释图神经网络**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.10139)\n31. [AISTATS 23] **探究图表示**[[论文]](https:\u002F\u002Fproceedings.mlr.press\u002Fv206\u002Fakhondzadeh23a\u002Fakhondzadeh23a.pdf)\n32. [ICDE 23] **INGREX：图神经网络的交互式解释框架**[[论文]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2211.01548.pdf)\n33. [ICDE 23] **联合攻击图神经网络及其解释方法**[[论文]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2108.03388.pdf)\n34. [WWW 23] **PaGE-Link：基于路径的异质链接预测图神经网络解释**[[论文]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2302.12465.pdf)\n35. [ICDM 23] **基于扰动的解释方法在时序图神经网络中的局限性**\n36. [ICDM 23] **面向超链接预测的可解释子图特征提取**[[论文]](https:\u002F\u002Fwww.researchgate.net\u002Fpublication\u002F378000024_Interpretable_Subgraph_Feature_Extraction_for_Hyperlink_Prediction)\n37. [WSDM 23] **利用时序异质图检测研究兴趣变化的可解释方法**[[论文]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3539597.3570453)\n38. [WSDM 23] **图神经网络的合作式解释**[[论文]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3539597.3570378)\n39. [WSDM 23] **迈向忠实且一致的图神经网络解释**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.13733)\n40. [WSDM 23] **图神经网络的全局反事实解释器**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.11695)\n41. 
[CIKM 23] **可解释的时空图神经网络**[[论文]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3583780.3614871)\n42. [CIKM 23] **DuoGAT：双时间导向的图注意力网络，用于准确、高效且可解释的时序异常检测**[[论文]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3583780.3614857)\n43. [CIKM 23] **异质时序图神经网络解释器**[[论文]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3583780.3614909)\n44. [CIKM 23] **ACGAN-GNNExplainer：辅助条件生成式图神经网络解释器**[[论文]]()\n45. [CIKM 23] **KG4Ex：基于可解释知识图谱的运动推荐方法**[[论文]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3583780.3614943)\n46. [ECML-PKDD 23] **ENGAGE：基于解释引导的数据增强用于图表示学习**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.01053)\n47. [TPAMI 23] **FlowX：通过消息流实现可解释的图神经网络**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.12987)\n48. [TAI] **基于原型的可解释图神经网络**[[论文]](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9953541)\n49. [TKDE 23] **基于反事实图学习的属性图异常检测**[[论文]](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F10056298)\n50. [Scientific Data 23] **评估图神经网络的可解释性**[[论文]](https:\u002F\u002Fwww.nature.com\u002Farticles\u002Fs41597-023-01974-x)\n51. [Nature Communications 23] **结合子结构掩码的化学直观解释用于分子性质预测的图神经网络**[[论文]](https:\u002F\u002Fwww.nature.com\u002Farticles\u002Fs41467-023-38192-3)\n52. [ACM Computing Surveys 23] **图反事实解释综述：定义、方法与评估**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.12089)\n53. [TIST 23] **基于理由对齐的忠实且一致的图神经网络解释**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.02791)\n54. [Openreview 23] **STExplainer：通过频繁子树挖掘实现GNN的全局可解释性**[[论文]](https:\u002F\u002Fopenreview.net\u002Fforum?id=HgSfV6sGIn)\n55. [GLFrontiers 23] **人人都需要一点帮助：基于层次化概念解释图**[[论文]](https:\u002F\u002Fopenreview.net\u002Fforum?id=wrqAn3AJA1)\n56. [Openreview 23] **利用解释迭代增强图神经网络**[[论文]](https:\u002F\u002Fopenreview.net\u002Fforum?id=qp0oVaFGm0)\n58. [Openreview 23] **大规模可解释且收敛的图神经网络层**[[论文]](https:\u002F\u002Fopenreview.net\u002Fforum?id=uYTaVRkKvz)\n60. 
[NeurIPS 2023 Workshop XAIA] **GInX-Eval：迈向图神经网络解释的分布内评估**[[论文]](https:\u002F\u002Fopenreview.net\u002Fforum?id=88MalncLgU)\n61. [NeurIPS 2023 Workshop XAIA] **关于GNN解释方法的一致性**[[论文]](https:\u002F\u002Fopenreview.net\u002Fforum?id=tiLZkab8TP)\n65. [Arxiv 23] **基于可解释性的图对抗攻击：通过边扰动生成对抗样本**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.17301)\n66. [AICS 23] **基于单关系转换的知识图链接预测的子图解释生成模型**[[论文]](https:\u002F\u002Fwww.spiedigitallibrary.org\u002Fconference-proceedings-of-spie\u002F12803\u002F1280339\u002FA-subgraph-interpretation-generative-model-for-knowledge-graph-link-prediction\u002F10.1117\u002F12.3009388.short?SSO=1)\n67. [GUT 23] **利用可解释图学习筛查正常内镜下结直肠活检：一项回顾性研究**[[论文]](https:\u002F\u002Fgut.bmj.com\u002Fcontent\u002Fgutjnl\u002Fearly\u002F2023\u002F05\u002F11\u002Fgutjnl-2023-329512.full.pdf)\n68. [PR 2023] **迈向频率自适应Inception模块的自解释卷积图神经网络**[[论文]](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fabs\u002Fpii\u002FS0031320323006891)\n69. [MLG 2023] **理解图神经网络中解释器的工作机制**[[论文]](https:\u002F\u002Fmlg-europe.github.io\u002Fpapers\u002F241.pdf)\n70. [MLG 2023] **图模型解释工具**[[论文]](https:\u002F\u002Fwww.mlgworkshop.org\u002F2023\u002Fpapers\u002FMLG__KDD_2023_paper_5.pdf)\n71. [Information Science 23] **带有神经元解释组件的图神经网络鲁棒解释**[[论文]](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0020025523013701)\n72. [Recsys 23] **可解释的图神经网络推荐系统：挑战与机遇**[[论文]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3604915.3608875)\n73. [xAI 23] **通过密度视角解释图分类的反事实解释**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.14849)\n74. [XAI 23] **评估图神经网络的链接预测解释**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.01682)\n75. [XAI 23] **XInsight：基于流式解释揭示GNN的模型洞察**[[论文]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.04791.pdf)\n76. [XAI 23] **通过人工可模拟性研究量化图神经网络归因式解释的内在效用**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.15961)\n77. [XAI 23] **MEGAN：多解释图注意力网络**[[论文]](https:\u002F\u002Fopenreview.net\u002Fforum?id=H6LVUiHzYDE)\n78. 
[XKDD 23] **图神经网络的博弈论解释**[[论文]](http:\u002F\u002Fxkdd2023.isti.cnr.it\u002Fpapers\u002F424.pdf)\n79. [XKDD 23] **从黑箱到玻璃箱：评估GCNN过程预测的忠实性**[[论文]](http:\u002F\u002Fxkdd2023.isti.cnr.it\u002Fpapers\u002F425.pdf)\n80. [IJCNN 23] **MEGA：利用网络模体解释图神经网络**[[论文]](https:\u002F\u002Fdoi.org\u002F10.1109\u002FIJCNN54540.2023.10191684)\n81. [LOG Poster 23] **事后GNN解释器对标签噪声的鲁棒性**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.01706)\n82. [LOG Poster 23] **自解释GNN有多忠实？**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.15096)\n84. [LOG Poster 23] **用有影响力示例解释知识图嵌入模型中的链接预测**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.02651)\n85. [Bioriv 23] **通过稀疏学习构建可解释的图神经网络用于药物-蛋白质结合预测**[[论文]](https:\u002F\u002Fwww.biorxiv.org\u002Fcontent\u002F10.1101\u002F2023.08.28.555203v1.abstract)\n86. [ICAID 2023] **通过层分析解释图神经网络**[[论文]](https:\u002F\u002Fwww.atlantis-press.com\u002Fproceedings\u002Ficaid-23\u002F125990065)\n87. [ECAI 23] **XGBD：基于解释引导的图后门检测**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.04406)\n88. [IEEE Transactions on Consumer Electronics 23] **利用可解释卷积图神经网络进行智能家居人体姿态预测**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.04406)\n89. [KBS 23] **KE-X：基于知识信息增益的知识图嵌入子图解释**[[论文]](http:\u002F\u002Fsites.computer.org\u002Fdebull\u002FA23june\u002FA23JUNE-CD.pdf#page=64)\n90. [ICML workshop 23] **在领域约束下为分子生成全局事实与反事实解释器**[[论文]](https:\u002F\u002Fopenreview.net\u002Fforum?id=qElXYQqxQh)\n91. [毕业论文 23] **开发高维特征空间下的可解释图神经网络**[[论文]](https:\u002F\u002Fpub.tik.ee.ethz.ch\u002Fstudents\u002F2022-HS\u002FBA-2022-43.pdf)\n92. [毕业论文 23] **评估图神经网络在单细胞分类任务中的可解释性方法**[[论文]](https:\u002F\u002Fwww.semanticscholar.org\u002Fpaper\u002FEvaluation-of-Explainability-Methods-on-Single-Cell-Singh-Kobayashi\u002F85f4aba430387a337ec3a4b2aa39bfc7361dea1f)\n93. [Arxiv 23] **关于子集选择与知情图神经网络的相互作用**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.10066)\n94. 
[ISSTA23] **基于GNN的漏洞检测解释器：我们到了吗？**[[论文]](https:\u002F\u002Fwww.semanticscholar.org\u002Fpaper\u002FInterpreters-for-GNN-Based-Vulnerability-Detection%3A-Hu-Wang\u002F6bb9c86483f212a631324ba9b47c344d419a428a)\n95. [ICECAI23] **基于交叉熵改进GNN解释的GraphSVX**[[论文]](https:\u002F\u002Fwww.semanticscholar.org\u002Fpaper\u002FImproved-GraphSVX-for-GNN-Explanations-Based-on-Yu-Liang\u002Fb01c4f2c4d54723b590a828d4e1b4cdbfea5dad4)\n96. [ICRA Workshop 23] **迈向语义解释与验证基于图注意力的解释**[[论文]](https:\u002F\u002Fopenreview.net\u002Fforum?id=ymyQeqatQqQ)\n97. [Arxiv 23] **基于图神经网络的日志异常检测与解释**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.00527)\n99. [毕业论文 23] **图形模型的可解释性**[[论文]](https:\u002F\u002Fsearch.proquest.com\u002Fopenview\u002F1e61b389a59936e319974be0e3fd1af5\u002F1?pq-origsite=gscholar&cbl=18750&diss=y)\n101. [生物工程 2023] **利用可解释图神经网络和人群图进行阿尔茨海默病早期诊断的个性化解释**[[论文]](https:\u002F\u002Fwww.mdpi.com\u002F2306-5354\u002F10\u002F6\u002F701)\n102. [BDSC 2023] **MDC：一种基于节点模体度和图扩散卷积的可解释GNN方法**[[论文]](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-981-99-3925-1_24)\n104. [信息科学 2023] **将可解释技术应用于基于图神经网络模型的道路交通预测**[[论文]](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0020025523009052)\n107. [Arxiv 23.05] **EiX-GNN：图神经网络的概念级特征中心性解释器**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.03491)\n108. [Arxiv 23.04] **基于医学概念的图神经网络认知解释器**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2201.07798)\n109. [ICLR Tiny 23] **消息传递选择：迈向可解释的图分类GNN**[[论文]](https:\u002F\u002Fopenreview.net\u002Fforum?id=99Go96dla5y)\n110. [ICLR Tiny 23] **重温CounteRGAN用于图的反事实可解释性**[[论文]](https:\u002F\u002Fopenreview.net\u002Fforum?id=d0m0Rl15q3g)\n111. [MICCAI Workshop 23] **IA-GCN：用于疾病预测的可解释注意力导向图卷积网络**[[论文]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2103.15587.pdf)\n113. [GRADES & NDA'23] **图神经网络可解释性方法演示**[[论文]](https:\u002F\u002Fhomes.cs.aau.dk\u002F~Arijit\u002FPapers\u002FgInterpreter_GRADES_NDA23.pdf)\n114. 
[Arxiv 23] **用于链接预测的自解释图神经网络**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.12578)\n115. [Arxiv 23.02] **MotifExplainer：基于模体的图神经网络解释器**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.00519)\n116. [ChemRxiv 23] **利用梅尔森值解释化学信息学领域的图神经网络**[[论文]](https:\u002F\u002Fchemrxiv.org\u002Fengage\u002Fchemrxiv\u002Farticle-details\u002F6456c89707c3f0293753101d)\n117. [Neural Networks 23] **通过识别桥接节点的重要节点为Skip-gram基点嵌入生成事后解释**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.12036)\n118. [ICASSP 23] **迈向更稳定和通用的子图信息瓶颈**[[论文]](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F10094812)\n119. [ESANN 23] **将随机解释器与子图神经网络结合可以提高表达能力和可解释性**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.07152)\n120. [IEEE Access] **通过多专业学习者和在线知识蒸馏实时生成GNN解释**[[论文]](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F10107968)\n121. [IEEE Access] **通过归纳一致性预测为节点表示学习模型提供事后解释**[[论文]](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10003193&tag=1)\n122. [Journal of Software 23] **基于图神经网络的切片级漏洞检测与解释方法**[[论文]](http:\u002F\u002Fwww.jos.org.cn\u002Fjosen\u002Farticle\u002Fabstract\u002Fmr008)\n123. [Automation in Construction 23] **从可解释的数据驱动隧道图中学习：用于堵塞检测的时空图卷积网络**[[论文]](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0926580523000018)\n124. [Briefings in Bioinformatics] **基于可解释图神经网络和多步聚焦机制预测分子性质**[[论文]](https:\u002F\u002Facademic.oup.com\u002Fbib\u002Fadvance-article\u002Fdoi\u002F10.1093\u002Fbib\u002Fbbac534\u002F6918752)\n125. [Briefings in Bioinformatics] **通过图神经网络可视化识别关键化学信息**[[论文]](https:\u002F\u002Facademic.oup.com\u002Fbib\u002Farticle\u002F24\u002F1\u002Fbbac577\u002F6936421)\n126. [Bioinformatics 23] **可解释的多层图神经网络用于癌症基因预测**[[论文]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2301.08831.pdf)\n127. [ICLR Workshop 23] **GCI：图概念解释框架**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.04899)\n128. [Arxiv 23] **利用HSIC为图神经网络提供结构化解释**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.02139)\n129. 
[Internet of Things 23] **XG-BoT：用于僵尸网络检测与取证的可解释深度图神经网络**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.09088)\n130. [JOS23] **基于图神经网络的恶意软件检测通用解释与定位方法**[[论文]](https:\u002F\u002Fwww.jos.org.cn\u002Fjosen\u002Farticle\u002Fabstract\u002F7123)\n131. [IJCNN 23] **GRAPHSHAP：通过模体语言解释身份感知图分类器**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.08815)\n141. [Arxiv 23.01] **子图增强型图神经网络中的可解释性**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2209.07926)\n\n### 2022年\n1. [NeurIPS 22] **GStarX：基于通信结构感知的合作博弈的图级预测解释** [[论文]](https:\u002F\u002Fopenreview.net\u002Fpdf?id=Qry8exovcNA)\n2. [NeurIPS 22] **通过学习解耦的因果子结构来去偏置图神经网络** [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2209.14107)\n3. [NeurIPS 22] **任务无关的图神经网络解释** [[论文]](https:\u002F\u002Fopenreview.net\u002Fpdf?id=NQrx8EYMboO)\n4. [NeurIPS 22] **CLEAR：图上的生成式反事实解释**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.08443)\n5. [ICML 22] **基于随机注意力机制的可解释且可泛化的图学习** [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2201.12987v1)\n6. [ICLR 22] **DEGREE：面向图神经网络的分解式解释** [[论文]](https:\u002F\u002Fopenreview.net\u002Fpdf?id=Ve0Wth3ptT_)\n7. [ICLR 22] **知识图谱上可解释的图神经网络模型** [[论文]](https:\u002F\u002Fopenreview.net\u002Fattachment?id=CrCvGNHAIrz&name=pdf)\n8. [ICLR 22] **发现图神经网络的不变性推理依据** [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2201.12872)\n9. [KDD 22] **关于图神经网络中偏差的结构性解释** [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.12104)\n10. [KDD 22] **用于可解释和可泛化图分类的因果注意力机制** [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.15089)\n10. [CVPR 22] **OrphicX：一种受因果启发的潜在变量模型，用于解释图神经网络** [[论文]](https:\u002F\u002Fwanyu-lin.github.io\u002Fassets\u002Fpublications\u002Fwanyu-cvpr2022.pdf)\n81. [CVPR 22] **利用变分图信息瓶颈改进子图识别** [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.09899)\n12. [AISTATS 22] **探查GNN解释器：对GNN解释方法的严格理论与实验分析** [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.09078)\n13. [AISTATS 22] **CF-GNNExplainer：图神经网络的反事实解释** [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2102.03322)\n14. 
[TPAMI 22] **面向全图分类的差分隐私图神经网络** [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.03806)\n15. [TPAMI 22] **面向图神经网络的强化因果解释器** [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.11028)\n17. [VLDB 22] **xFraud：异构图上的可解释欺诈交易检测** [[论文]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2011.12193.pdf)\n18. [LOG 22]**GraphFramEx：迈向图神经网络解释方法的系统性评估** [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.09677)\n19. [LOG 22] **基于解释引导的消息传递训练图神经网络** [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.16731)\n20. [The Webconf 22] **基于反事实与事实推理学习和评估图神经网络解释** [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.08816)\n21. [AAAI 22] **基于原型的图神经网络解释** [[论文]](https:\u002F\u002Fwww.aaai.org\u002FAAAI22Papers\u002FSA-00396-ShinY.pdf)\n36. [AAAI 22] **KerGNNs：基于图核的可解释图神经网络**[[论文]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2201.00491.pdf)\n37. [AAAI 22] **ProtGNN：迈向自解释的图神经网络** [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.00911)\n23. [IEEE Big Data 22] **以公平性和权衡解释为代价降低图神经网络的准确性** [[论文]](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10020318)\n28. [CIKM 22] **GRETEL：统一的图反事实解释评估框架** [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.02957)\n29. [CIKM 22] **GRETEL：图反事实解释评估框架**[[论文]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3511808.3557608)\n30. [CIKM 22] **面向图神经网络节点分类的模型中心型解释器** [[论文]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3511808.3557535)\n31. [IJCAI 22] **我的GNN到底捕捉了什么？关于探索GNN内部表示的研究** [[论文]](https:\u002F\u002Fhal.archives-ouvertes.fr\u002Fhal-03700710\u002F)\n32. [ECML PKDD 22] **提升基于规则的GNN解释质量** [[论文]](https:\u002F\u002Fkdd.isti.cnr.it\u002Fxkdd2022\u002Fpapers\u002FXKDD_2022_paper_2436.pdf)\n33. [MICCAI 22] **用于基于连接组学的大脑疾病分析的可解释图神经网络** [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.00813)\n34. [MICCAI 22] **面向阿尔茨海默病多模态诊断的图卷积网络稀疏解释** [[论文]](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-031-16452-1_45)\n38. 
[EuroS&P 22] **Illuminati：迈向用于网络安全分析的图神经网络解释** [[论文]](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F9797387?casa_token=1AvRK3S4eJQAAAAA:8PXcOA8iU1ketRMdu6YVMBMcfZKjF7MIVujPpHTpjdc2O9r1cvUg8usfRiOYZ5Fe-MKJi4Y)\n39. [INFOCOM 22] **基于图神经网络的僵尸网络检测模型的可解释性评估** [[论文]](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9798287)\n40. [GLOBECOM 22] **Shapley解释器——一种用于SDN中的GNN解释方法** [[论文]](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10001460)\n41. [GLOBECOM 22] **面向时序图神经网络的解释器** [[论文]]([https:\u002F\u002Farxiv.org\u002Fpdf\u002F2209.00807.pdf])\n42. [TKDE 22] **Zorro：图神经网络中有效、稀疏且稳定的解释** [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2105.08621)\n43. [TNNLS 22] **带有时间模式注意力的可解释图储备计算** [[论文]](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10003110)\n44. [TNNLS22] **用于训练可解释图神经网络的元学习方法** [[论文]](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F9772740)\n45. [TNNLS 22] **通过输入扰动解释深度图网络** [[论文]](https:\u002F\u002Fpubmed.ncbi.nlm.nih.gov\u002F35446771\u002F)\n63. [TNNLS 22] **用于训练可解释图神经网络的元学习方法** [[论文]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2109.09426.pdf)\n47. [DMKD 22] **关于基于激活模式的GNN可解释性** [[论文]](https:\u002F\u002Fhal.archives-ouvertes.fr\u002Fhal-03367714\u002Ffile\u002Fhal.pdf)\n48. [KBS 22] **EGNN：通过知识蒸馏构建可解释的图神经网络** [[论文]](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0950705122001289?via%3Dihub)\n49. [XKDD 22] **GREASE：为基于GNN的推荐系统生成事实与反事实解释** [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.04222)\n50. [AI 22] **图神经网络解释器对图噪声是否鲁棒？** [[论文]](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-031-22695-3_12)\n52. [BRACIS 22] **面向图神经网络的ConveXplainer** [[论文]](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-031-21689-3_41)\n53. 
[GLB 22] **用于基准测试图解释器的可解释AI库** [[论文]](https:\u002F\u002Fgraph-learning-benchmarks.github.io\u002Fassets\u002Fpapers\u002Fglb2022\u002FAn_Explainable_AI_Library_for_Benchmarking_Graph_Explainers.pdf)\n54. [DASFAA 22] **关于图神经网络的全局可解释性** [[论文]](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-031-00123-9_52)\n55. [ISBI 22] **用于阿尔茨海默病诊断的多模态脑成像可解释图卷积网络** [[论文]](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F9761449?casa_token=w3IlSZNlKwcAAAAA:Xvh04eK29bZtbkRq5Eg3jUZURS3qs1k3AA1bhnnN2kKWmIjBnh7alAiy98zBgsHFtvFQqV0IYA)\n56. [生物信息学] **GNN-SubNet：利用可解释图神经网络检测疾病亚网络** [[论文]](https:\u002F\u002Facademic.oup.com\u002Fbioinformatics\u002Farticle\u002F38\u002FSupplement_2\u002Fii120\u002F6702000?login=false)\n57. [医学影像2022] **表型指导的可解释图卷积网络分析fMRI数据揭示青少年时期大脑连接的变化** [[论文]](https:\u002F\u002Fwww.semanticscholar.org\u002Fpaper\u002FPhenotype-guided-interpretable-graph-convolutional-Orlichenko-Qu\u002Fd05adc7c772780be4b99a169441696017d49c6ed)\n83. [NeuroComputing 22] **多扰动、多捕获：理解图神经网络的行为** [[论文]](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0925231222004404?casa_token=6KLu9elyyLMAAAAA:hM0eGpfSnLxF0V8fZJdoDE3hkalzK2yccBJl3X9KN-Btu_xDSZmmbORIfkYdK5rgjTr7MReeFxc)\n84. [DSN 22] **CFGExplainer：从控制流图解释基于图神经网络的恶意软件分类** [[论文]](http:\u002F\u002Fwww.cs.binghamton.edu\u002F~ghyan\u002Fpapers\u002Fdsn22.pdf)\n118. [IEEE Access 22] **通过强化学习为node2vec提供节点级局部解释** [[论文]](https:\u002F\u002Fmlog-workshop.github.io\u002Fpapers\u002FMLoG%20Providing%20Node-level%20Local%20Explanation%20for%20node2vec%20through%20Reinforcement%20Learning.pdf)\n119. [Patterns 22] **面向分子性质预测的可解释图神经网络的定量评估** [[论文]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2107.04119.pdf)\n121. [IEEE Access 22] **通过归纳一致性预测为节点表示学习模型提供事后解释** [[论文]](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10003193)\n122. [IEEE 22] **利用拓扑感知的节点选择解释图神经网络：应用于空气质量推断** [[论文]](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9801665)\n123. 
[BioRxiv 22] **GNN-SubNet：利用可解释图神经网络检测疾病亚网络** [[论文]](https:\u002F\u002Fwww.biorxiv.org\u002Fcontent\u002F10.1101\u002F2022.01.12.475995v1)\n124. [IEEE Robotics and Automation Letters 22] **高效且可解释的机器人操作：基于图神经网络** [[论文]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2102.13177.pdf)\n125. [Arxiv 22] **图神经网络中去混杂到解释评估** [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2201.08802)\n126. [ICCPR 22] **GANExplainer：基于GAN的图神经网络解释器** [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.00012)\n129. [Arxiv 22] **探索图神经网络的解释方法** [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.01770)\n132. [Arxiv 22] **通过在线知识蒸馏培养多领域学习者以解释GNN** [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.11094)\n134. [Openreview 23] **TGP：面向个性化推荐的可解释时序图神经网络** [[论文]](https:\u002F\u002Fopenreview.net\u002Fforum?id=EGobBwPc1J-)\n139. [Arxiv 22] **PGX：基于独立知识蒸馏过程的多层级GNN解释框架** [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.03075)\n142. [Arxiv 22] **通过可解释性防御图神经网络后门攻击** [[论文]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2209.02902.pdf)\n144. [Arxiv 22] **通过相关性反向传播解释动态图神经网络** [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.11175)\n147. [Arxiv 22] **面向深度图模型的忠实解释** [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.11850)\n148. [Arxiv 22] **迈向无监督图级别表示学习的解释** [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.09934)\n149. [Arxiv 22] **BAGEL：评估图神经网络解释的基准** [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.13983)\n152. [Arxiv 22] **图神经网络中的可解释性：一项实验性调查** [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.09258)\n153. [IEEE TSIPN 22] **社交互动中的可解释性与图学习** [[论文]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2203.07494.pdf)\n\n### 2021年\n1. [NeurIPS 21] **SALKG：基于知识图谱解释的常识推理学习** [[论文]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2104.08793.pdf)\n2. [NeurIPS 2021] **用于图神经网络的强化学习增强型解释器** [[论文]](http:\u002F\u002Frecmind.cn\u002Fpapers\u002Fexplainer_nips21.pdf)\n3. [NeurIPS 2021] **迈向图神经网络的多粒度可解释性** [[论文]](http:\u002F\u002Fstaff.ustc.edu.cn\u002F~hexn\u002Fpapers\u002Fnips21-explain-gnn.pdf)\n21. 
[NeurIPS 2021] **图神经网络上的鲁棒反事实解释** [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2107.04086)\n22. [ICML 2021] **通过子图探索实现图神经网络可解释性研究**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2102.05152)\n32. [ICML 2021] **图神经网络的生成式因果解释**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.06643)\n33. [ICML 2021] **利用正交化与诱导稀疏性提升分子图神经网络可解释性**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2105.04854)\n34. [ICML 2021] **带有超参数重要性解释的自动化图表示学习**[[论文]](http:\u002F\u002Fproceedings.mlr.press\u002Fv139\u002Fwang21f\u002Fwang21f.pdf)\n26. [ICLR 21] **面向时序知识图谱预测的可解释子图推理** [[论文]](https:\u002F\u002Fopenreview.net\u002Fforum?id=pGIHq1m7PU)\n27. [ICLR 2021] **使用可微边掩码解释自然语言处理中的图神经网络**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2010.00577)\n52. [ICLR 2021] **用于子图识别的图信息瓶颈** [[论文]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2010.05563.pdf)\n53. [KDD 2021] **当与真实标签比较不正确时：关于评估图神经网络解释方法的探讨**[[论文]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3447548.3467283)\n54. [KDD 2021] **用于脑网络可解释分类的反事实图** [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.08640)\n27. [CVPR 2021] **计算病理学中图神经网络解释器的量化评估**.[[论文]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2011.12646.pdf)\n40. [NAACL 2021] **基于图网络的可解释病历诊断中反事实支持事实提取**。[[论文]](https:\u002F\u002Faclanthology.org\u002F2021.naacl-main.156.pdf)\n28. [AAAI 2021] **基于模体驱动的图表示对比学习** [[论文]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2012.12533.pdf)\n56. [TPAMI 21] **通过相关游走实现图神经网络高阶解释** [[论文]](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9547794)\n57. [WWW 2021] **利用优化框架解释并统一图神经网络** [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2101.11859)\n59. [Genome medicine 21] **解释图卷积神经网络的决策：乳腺癌转移预测中患者特异性分子子网络的作用** [[论文]](https:\u002F\u002Fwww.semanticscholar.org\u002Fpaper\u002FExplaining-decisions-of-Graph-Convolutional-Neural-Chereda-Bleckmann\u002F49a4e339182b2b304304c8837b09ce3e0951a616)\n60. [IJCKG 21] **知识图嵌入在电商应用中的作用：注意力机制推理、解释及可迁移规则** [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.08589)\n61. 
[RuleML+RR 21] **结合亚符号与符号方法实现可解释性** [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.01844)\n62. [PAKDD 21] **SCARLET：基于可解释注意力的图神经网络，用于假新闻传播者预测** [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2102.04627)\n63. [J. Chem. Inf. Model] **用可解释人工智能为分子着色，以评估其临床前相关性** [[论文]](https:\u002F\u002Fpubs.acs.org\u002Fdoi\u002Fabs\u002F10.1021\u002Facs.jcim.0c01344)\n64. [BioRxiv 21] **APRILE：利用可解释图神经网络探索药物副作用的分子机制** [[论文]](https:\u002F\u002Fwww.biorxiv.org\u002Fcontent\u002F10.1101\u002F2021.07.02.450937v2.abstract)\n65. [ISM 21] **通过扩展卷积神经网络的可解释性方法，实现图神经网络的边级解释** [[论文]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2111.00722.pdf)\n67. [Arxiv 21] **借助信息流实现数字病理学中图神经网络的解释** [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.09895)\n68. [Arxiv 21] **SEEN：利用邻域解释来细化图神经网络的解释** [[论文]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2106.08532.pdf)\n69. [Arxiv 21] **保留、促进还是攻击？通过拓扑扰动进行图神经网络解释** [[论文]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2103.13944.pdf)\n70. [Arxiv 21] **面向可解释图神经网络的学习稀疏化** [[论文]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2106.12920.pdf)\n72. [ICML workshop 21] **GCExplainer：人机协同的概念驱动型图神经网络解释器** [[论文]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2107.11889.pdf)\n74. [ICML workshop 21] **通过对抗训练实现可靠的图神经网络解释** [[论文]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2106.13427.pdf)\n75. [ICML workshop 21] **借鉴表格数据思想重新构想图神经网络解释** [[论文]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2106.12665.pdf)\n76. [ICML workshop 21] **迈向图神经网络解释的自动化评估** [[论文]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2106.11864.pdf)\n79. [ICDM 2021] **GNES：学习如何解释图神经网络** [[论文]](https:\u002F\u002Fcs.emory.edu\u002F~lzhao41\u002Fmaterials\u002Fpapers\u002FGNES.pdf)\n80. [ICDM 2021] **GCN-SE：将注意力机制作为动态图节点分类的可解释性工具** [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2110.05598)\n82. [ICDM 2021] **图神经网络预测的多目标解释** [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.14651)\n83. [CIKM 2021] **迈向自解释图神经网络** [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2108.12055)\n84. 
[ECML PKDD 2021] **GraphSVX：图神经网络的Shapley值解释** [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.10482)\n85. [WiseML 2021] **基于可解释性的后门攻击对抗图神经网络** [[论文]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3468218.3469046)\n86. [IJCNN 21] **MEG：为深度图网络生成分子反事实解释** [[论文]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2104.08060.pdf)\n87. [ICCSA 2021] **利用加权图神经网络解释器理解药物滥用社交网络** [[论文]](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007%2F978-3-030-86970-0_5)\n88. [NeSy 21] **一种新的图神经网络解释概念** [[论文]](http:\u002F\u002Fceur-ws.org\u002FVol-2986\u002Fpaper1.pdf)\n89. [Information Fusion 21] **借助图神经网络实现多模态因果推断，推动可解释人工智能的信息融合** [[论文]](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS1566253521000142?via%3Dihub)\n90. [Patterns 21] **hcga：用于网络表型分析的高度比较性图分析** [[论文]](https:\u002F\u002Fwww.biorxiv.org\u002Fcontent\u002F10.1101\u002F2020.09.25.312926v2)\n\n### 2020年及之前\n1. [NeurIPS 2020] **面向图神经网络的参数化解释器**。[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2011.04573)\n2. [NeurIPS 2020] **PGM-Explainer：用于图神经网络的概率图模型解释** [[论文]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2010.05788.pdf)\n3. [KDD 2020] **XGNN：迈向图神经网络的模型级解释** [[论文]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3394486.3403085)\n4. [ACL 2020] **GCAN：面向社交媒体上可解释假新闻检测的图感知协同注意力网络**。[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2004.11648.pdf)\n5. [Arxiv 2020] **包含稀疏可解释性的图神经网络** [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2007.00119)\n6. [NeurIPS Workshop 20] **迈向可解释的消息传递网络：用于预测金属有机框架中二氧化碳吸附的模型** [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2012.03723)\n7. [ICML工作坊2020] **对比图神经网络解释** [[论文]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2010.13663.pdf)\n8. [ICML工作坊2020] **迈向数字病理学中的可解释图表示** [[论文]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2007.00311.pdf)\n9. [NeurIPS研讨会2020] **利用分子反事实解释深度图网络** [[论文]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2011.05134.pdf)\n10. 
[DataMod 2020] **探索基于图的神经网络用于自动脑肿瘤分割** [[论文]](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007%2F978-3-030-70650-0_2)\n12. [OpenReview 20] **一种用于可微分发现图算法的框架** [[论文]](https:\u002F\u002Fopenreview.net\u002Fpdf?id=ueiBFzt7CiK)\n13. [OpenReview 20] **因果筛选法用于解释图神经网络** [[论文]](https:\u002F\u002Fopenreview.net\u002Fpdf?id=nzKv5vxZfge)\n14. [Arxiv 20] **从图信号去噪视角理解图神经网络** [[论文]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2006.04386.pdf)\n15. [Arxiv 20] **通过幂迭代理解图神经网络中的消息传递** [[论文]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2006.00144.pdf)\n17. [IJCNN 20] **GCN-LRP解释：探索图卷积网络的潜在注意力机制** [[论文]](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F9207639)\n18. [CD-MAKE 20] **解释图神经网络以理解节点分类中的加权图特征** [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2002.00514) \n19. [ICDM 19] **大规模图推理的可扩展解释**[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F1908.06482)","# awesome-graph-explainability-papers 快速上手指南\n\n`awesome-graph-explainability-papers` 并非一个可直接安装的软件库或框架，而是一个**精选论文与资源列表**，旨在汇总图神经网络（GNN）可解释性领域的综述、平台工具及核心研究论文。\n\n本指南将指导开发者如何利用该列表获取最新研究成果，并快速搭建相关的实验环境（基于列表中推荐的主流平台）。\n\n## 环境准备\n\n由于该仓库主要提供文献索引和外部工具链接，使用前需准备以下基础环境以运行列表中推荐的代码库（如 PyTorch Geometric, DIG, GraphXAI 等）：\n\n*   **操作系统**：Linux (推荐 Ubuntu 20.04+), macOS, 或 Windows (WSL2 推荐)\n*   **Python 版本**：3.8 - 3.11\n*   **核心依赖**：\n    *   PyTorch (深度学习框架)\n    *   Git (克隆代码库)\n*   **硬件要求**：建议配备 NVIDIA GPU 以加速 GNN 训练与解释生成过程（CPU 亦可运行小规模演示）\n\n## 安装步骤\n\n### 1. 获取资源列表\n首先克隆该仓库到本地，以便查阅最新的论文列表和链接：\n\n```bash\ngit clone https:\u002F\u002Fgithub.com\u002Fyour-target-repo\u002Fawesome-graph-explainability-papers.git\ncd awesome-graph-explainability-papers\n```\n*(注：请替换为实际的仓库地址，若仅为浏览，可直接访问 GitHub 网页版)*\n\n### 2. 
搭建实验环境（推荐方案）\n列表中 **Platforms** 章节推荐了多个主流工具库。以下是以最常用的 **PyTorch Geometric (PyG)** 为例的安装步骤，它是实现大多数 GNN 可解释性算法（如 GNNExplainer, PGExplainer）的基础。\n\n**步骤 A: 安装 PyTorch**\n访问 [PyTorch 官网](https:\u002F\u002Fpytorch.org\u002F) 获取适合你环境的命令。国内用户可使用清华源加速：\n\n```bash\npip install torch torchvision torchaudio --index-url https:\u002F\u002Fdownload.pytorch.org\u002Fwhl\u002Fcu118\n```\n*(注：`cu118` 代表 CUDA 11.8，请根据实际显卡驱动版本调整，若无 GPU 可选用 `cpu` 版本)*\n\n**步骤 B: 安装 PyTorch Geometric**\n使用官方源或国内镜像安装核心库及其依赖：\n\n```bash\npip install pyg-lib torch-scatter torch-sparse torch-cluster torch-spline-conv -f https:\u002F\u002Fdata.pyg.org\u002Fwhl\u002Ftorch-2.0.0+cu118.html\npip install torch-geometric\n```\n\n**步骤 C: 安装专用解释性工具库 (可选)**\n若需复现特定论文，可安装列表中提到的专用库，例如 **GraphXAI**：\n\n```bash\ngit clone https:\u002F\u002Fgithub.com\u002Fmims-harvard\u002Fgraphxai.git\ncd graphxai\npip install -e .\n```\n\n## 基本使用\n\n本资源的核心用法是**查阅论文**并**复用代码**。以下展示如何基于 PyTorch Geometric 运行一个最简单的 GNN 可解释性示例（对应列表中 *GNNExplainer* 方法）。\n\n### 1. 加载模型与数据\n假设你已经有一个训练好的图分类模型 `model` 和一个图数据 `data`。\n\n```python\nimport torch\nfrom torch_geometric.nn import GCNConv, global_mean_pool\nfrom torch_geometric.datasets import MUTAG\nfrom torch_geometric.loader import DataLoader\n\n# 加载数据集\ndataset = MUTAG(root='\u002Ftmp\u002FMUTAG')\nloader = DataLoader(dataset, batch_size=64, shuffle=True)\n\n# 定义简单 GCN 模型\nclass GCN(torch.nn.Module):\n    def __init__(self, num_node_features, num_classes):\n        super().__init__()\n        self.conv1 = GCNConv(num_node_features, 64)\n        self.conv2 = GCNConv(64, num_classes)\n\n    def forward(self, x, edge_index, batch):\n        x = self.conv1(x, edge_index).relu()\n        x = self.conv2(x, edge_index)\n        return global_mean_pool(x, batch)\n\nmodel = GCN(dataset.num_node_features, dataset.num_classes)\n# 此处应添加模型训练代码...\n```\n\n### 2. 
应用可解释性方法 (GNNExplainer)\n使用 `torch_geometric.explain` 模块生成解释，找出对预测结果最重要的子图和节点特征。\n\n```python\nfrom torch_geometric.explain import Explainer, GNNExplainer\n\n# 初始化解释器\nexplainer = Explainer(\n    model=model,\n    algorithm=GNNExplainer(epochs=100),\n    explanation_type='model',\n    node_mask_type='attributes',\n    edge_mask_type='object',\n)\n\n# 对第一个图数据进行解释\nexplanation = explainer(data[0].x, data[0].edge_index, target=0)\n\n# 可视化或分析结果\nprint(f\"重要节点特征掩码形状：{explanation.node_mask.shape}\")\nprint(f\"重要边掩码形状：{explanation.edge_mask.shape}\")\n\n# 若需可视化 (需安装 matplotlib)\n# explanation.visualize_graph() \n```\n\n### 3. 查阅前沿论文\n在本地克隆的仓库目录中，打开 `README.md` 文件，根据 **Surveys**（综述）、**Most Influential Papers**（最具影响力论文）或 **Year 2024\u002F2025** 分类，点击对应的 `[paper]` 链接阅读最新算法原理，并前往对应的 `[Code]` 链接获取复现代码。","某金融风控团队正在开发基于图神经网络（GNN）的反欺诈系统，急需向监管机构和业务方解释模型为何将特定交易判定为高风险。\n\n### 没有 awesome-graph-explainability-papers 时\n- **文献检索如大海捞针**：团队成员需手动在 arXiv 和各大会议中筛选论文，耗时数周仍难以厘清 GNNExplainer、PGExplainer 等主流方法的区别与适用场景。\n- **缺乏统一评估标准**：自行设计的解释性验证指标主观性强，无法证明生成的“子图解释”是否真正反映了模型的决策逻辑，导致内部评审陷入僵局。\n- **复现成本极高**：找不到经过验证的代码实现或基准测试平台，算法工程师需从零编写解释器代码，极易引入 Bug 且难以对齐最新学术成果。\n- **合规报告难以撰写**：面对监管询问，只能提供晦涩的数学公式，缺乏权威的综述文章作为理论支撑，难以建立信任。\n\n### 使用 awesome-graph-explainability-papers 后\n- **快速构建知识体系**：直接利用列表中的综述论文（如 TPAMI 22 的分类调查），团队在两天内就掌握了可解释性方法的技术图谱，精准锁定了适合金融场景的算法。\n- **引入权威评估框架**：借助收录的 GraphXAI 和 BAGEL 等基准平台，团队建立了客观的量化评估流程，用数据证明了模型解释的可靠性与稳定性。\n- **高效落地开源方案**：通过链接直接获取 PyTorch Geometric 教程及 GNNExplainer 官方代码，将原本需要一个月的开发周期缩短至三天，迅速完成原型验证。\n- **提升报告说服力**：引用列表中 ACM Computing Survey 等顶级期刊观点，为合规报告提供了坚实的理论背书，顺利通过了外部审计。\n\nawesome-graph-explainability-papers 将分散的学术资源转化为结构化的工程指南，极大地降低了图神经网络在关键领域落地的信任门槛与研发成本。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fflyingdoog_awesome-graph-explainability-papers_e2b29b8f.png","flyingdoog","Dongsheng Luo","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002Fflyingdoog_da5a8c50.png","Assistant Professor at FIU;\r\nPh.D. 
at  PSU; ",null,"https:\u002F\u002Fusers.cs.fiu.edu\u002F~dluo\u002F","https:\u002F\u002Fgithub.com\u002Fflyingdoog",798,75,"2026-04-02T10:39:48",1,"","未说明",{"notes":87,"python":85,"dependencies":88},"该仓库是一个关于图神经网络（GNN）可解释性论文的清单（Awesome List），并非一个可直接运行的单一软件工具。因此，README 中未提供具体的操作系统、GPU、内存或 Python 版本等运行环境需求。列表中提到的平台（如 PyTorch Geometric, DIG 等）是独立的开源项目，各自拥有不同的依赖和环境要求，需参考其对应的官方文档进行安装和配置。",[89,90,91,92,93,94,95],"PyTorch Geometric","DIG","GraphXAI","GraphFramEx","GNNExplainer","PGExplainer","BAGEL",[14],[98,99,100,101,102,103,104],"machine-learning","deep-learning","graph-mining","graph-neural-networks","xai","explainable-ai","explainability",4,"2026-03-27T02:49:30.150509","2026-04-11T18:32:47.721867",[],[]]