[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-ENSTA-U2IS-AI--awesome-uncertainty-deeplearning":3,"tool-ENSTA-U2IS-AI--awesome-uncertainty-deeplearning":61},[4,18,26,36,44,53],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":17},4358,"openclaw","openclaw\u002Fopenclaw","OpenClaw 是一款专为个人打造的本地化 AI 助手，旨在让你在自己的设备上拥有完全可控的智能伙伴。它打破了传统 AI 助手局限于特定网页或应用的束缚，能够直接接入你日常使用的各类通讯渠道，包括微信、WhatsApp、Telegram、Discord、iMessage 等数十种平台。无论你在哪个聊天软件中发送消息，OpenClaw 都能即时响应，甚至支持在 macOS、iOS 和 Android 设备上进行语音交互，并提供实时的画布渲染功能供你操控。\n\n这款工具主要解决了用户对数据隐私、响应速度以及“始终在线”体验的需求。通过将 AI 部署在本地，用户无需依赖云端服务即可享受快速、私密的智能辅助，真正实现了“你的数据，你做主”。其独特的技术亮点在于强大的网关架构，将控制平面与核心助手分离，确保跨平台通信的流畅性与扩展性。\n\nOpenClaw 非常适合希望构建个性化工作流的技术爱好者、开发者，以及注重隐私保护且不愿被单一生态绑定的普通用户。只要具备基础的终端操作能力（支持 macOS、Linux 及 Windows WSL2），即可通过简单的命令行引导完成部署。如果你渴望拥有一个懂你",349277,3,"2026-04-06T06:32:30",[13,14,15,16],"Agent","开发框架","图像","数据工具","ready",{"id":19,"name":20,"github_repo":21,"description_zh":22,"stars":23,"difficulty_score":10,"last_commit_at":24,"category_tags":25,"status":17},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,"2026-04-05T11:01:52",[14,15,13],{"id":27,"name":28,"github_repo":29,"description_zh":30,"stars":31,"difficulty_score":32,"last_commit_at":33,"category_tags":34,"status":17},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 
代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",160784,2,"2026-04-19T11:32:54",[14,13,35],"语言模型",{"id":37,"name":38,"github_repo":39,"description_zh":40,"stars":41,"difficulty_score":32,"last_commit_at":42,"category_tags":43,"status":17},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",109154,"2026-04-18T11:18:24",[14,15,13],{"id":45,"name":46,"github_repo":47,"description_zh":48,"stars":49,"difficulty_score":32,"last_commit_at":50,"category_tags":51,"status":17},6121,"gemini-cli","google-gemini\u002Fgemini-cli","gemini-cli 是一款由谷歌推出的开源 AI 命令行工具，它将强大的 Gemini 大模型能力直接集成到用户的终端环境中。对于习惯在命令行工作的开发者而言，它提供了一条从输入提示词到获取模型响应的最短路径，无需切换窗口即可享受智能辅助。\n\n这款工具主要解决了开发过程中频繁上下文切换的痛点，让用户能在熟悉的终端界面内直接完成代码理解、生成、调试以及自动化运维任务。无论是查询大型代码库、根据草图生成应用，还是执行复杂的 Git 操作，gemini-cli 都能通过自然语言指令高效处理。\n\n它特别适合广大软件工程师、DevOps 人员及技术研究人员使用。其核心亮点包括支持高达 100 万 token 的超长上下文窗口，具备出色的逻辑推理能力；内置 Google 搜索、文件操作及 Shell 命令执行等实用工具；更独特的是，它支持 MCP（模型上下文协议），允许用户灵活扩展自定义集成，连接如图像生成等外部能力。此外，个人谷歌账号即可享受免费的额度支持，且项目基于 Apache 2.0 
协议完全开源，是提升终端工作效率的理想助手。",100752,"2026-04-10T01:20:03",[52,13,15,14],"插件",{"id":54,"name":55,"github_repo":56,"description_zh":57,"stars":58,"difficulty_score":32,"last_commit_at":59,"category_tags":60,"status":17},4721,"markitdown","microsoft\u002Fmarkitdown","MarkItDown 是一款由微软 AutoGen 团队打造的轻量级 Python 工具，专为将各类文件高效转换为 Markdown 格式而设计。它支持 PDF、Word、Excel、PPT、图片（含 OCR）、音频（含语音转录）、HTML 乃至 YouTube 链接等多种格式的解析，能够精准提取文档中的标题、列表、表格和链接等关键结构信息。\n\n在人工智能应用日益普及的今天，大语言模型（LLM）虽擅长处理文本，却难以直接读取复杂的二进制办公文档。MarkItDown 恰好解决了这一痛点，它将非结构化或半结构化的文件转化为模型“原生理解”且 Token 效率极高的 Markdown 格式，成为连接本地文件与 AI 分析 pipeline 的理想桥梁。此外，它还提供了 MCP（模型上下文协议）服务器，可无缝集成到 Claude Desktop 等 LLM 应用中。\n\n这款工具特别适合开发者、数据科学家及 AI 研究人员使用，尤其是那些需要构建文档检索增强生成（RAG）系统、进行批量文本分析或希望让 AI 助手直接“阅读”本地文件的用户。虽然生成的内容也具备一定可读性，但其核心优势在于为机器",93400,"2026-04-06T19:52:38",[52,14],{"id":62,"github_repo":63,"name":64,"description_en":65,"description_zh":66,"ai_summary_zh":66,"readme_en":67,"readme_zh":68,"quickstart_zh":69,"use_case_zh":70,"hero_image_url":71,"owner_login":72,"owner_name":73,"owner_avatar_url":74,"owner_bio":75,"owner_company":76,"owner_location":76,"owner_email":76,"owner_twitter":76,"owner_website":77,"owner_url":78,"languages":76,"stars":79,"forks":80,"last_commit_at":81,"license":82,"difficulty_score":83,"env_os":84,"env_gpu":84,"env_ram":84,"env_deps":85,"category_tags":92,"github_topics":93,"view_count":32,"oss_zip_url":76,"oss_zip_packed_at":76,"status":17,"created_at":104,"updated_at":105,"faqs":106,"releases":107},9808,"ENSTA-U2IS-AI\u002Fawesome-uncertainty-deeplearning","awesome-uncertainty-deeplearning","This repository contains a collection of surveys, datasets,  papers, and codes, for predictive uncertainty estimation in deep learning models.","awesome-uncertainty-deeplearning 是一个专注于深度学习预测不确定性估计的开源资源合集。在人工智能应用中，模型不仅要知道“答案是什么”，更需要清楚“自己有多确定”。该项目正是为了解决这一关键问题而生，它系统性地整理了关于如何量化和评估深度学习模型不确定性的前沿成果，帮助开发者识别模型的置信度，从而有效应对误分类、异常检测及分布外数据等挑战。\n\n这份资源库非常适合 AI 
研究人员、算法工程师以及对模型可靠性有高要求的技术团队使用。无论是从事自动驾驶、医疗影像分析，还是自然语言处理领域的专家，都能从中找到针对性的理论支持与代码实现。其独特的技术亮点在于分类极其详尽，涵盖了从贝叶斯方法、集成学习、证据深度学习，到最新的多模态与生成式 AI 不确定性量化等多个维度。此外，项目还收录了相关的综述论文、数据集、基准测试、主流框架（如 PyTorch、TensorFlow）的代码库以及教学教程。通过提供一站式的知识导航，awesome-uncertainty-deeplearning 极大地降低了该领域的研究门槛，是构建更安全、更可信人工智能系统的重要参考指南。","# Awesome Uncertainty in Deep learning\n\n\u003Cdiv align=\"center\">\n\n[![MIT License](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Flicense-MIT-green.svg)](https:\u002F\u002Fopensource.org\u002Flicenses\u002FMIT)\n[![Awesome](https:\u002F\u002Fawesome.re\u002Fbadge.svg)](https:\u002F\u002Fawesome.re)\n\n\u003C\u002Fdiv>\n\nThis repo is a collection of *awesome* papers, codes, books, and blogs about Uncertainty and Deep learning. \n\n:star: Feel free to star and fork. :star:\n\nIf you think we missed a paper, please open a pull request or send a message on the corresponding [GitHub discussion](https:\u002F\u002Fgithub.com\u002FENSTA-U2IS-AI\u002Fawesome-uncertainty-deeplearning\u002Fdiscussions). 
Tell us where the article was published and when, and send us GitHub and ArXiv links if they are available.\n\nWe are also open to any ideas for improvements!\n\n\u003Ch2>\nTable of Contents\n\u003C\u002Fh2>\n\n- [Awesome Uncertainty in Deep learning](#awesome-uncertainty-in-deep-learning)\n- [Papers](#papers)\n  - [Surveys](#surveys)\n  - [Theory](#theory)\n  - [Bayesian-Methods](#bayesian-methods)\n  - [Ensemble-Methods](#ensemble-methods)\n  - [Sampling\u002FDropout-based-Methods](#samplingdropout-based-methods)\n  - [Post-hoc-Methods\u002FAuxiliary-Networks](#post-hoc-methodsauxiliary-networks)\n  - [Data-augmentation\u002FGeneration-based-methods](#data-augmentationgeneration-based-methods)\n  - [Output-Space-Modeling\u002FEvidential-deep-learning](#output-space-modelingevidential-deep-learning)\n  - [Deterministic-Uncertainty-Methods](#deterministic-uncertainty-methods)\n  - [Quantile-Regression\u002FPredicted-Intervals](#quantile-regressionpredicted-intervals)\n  - [Conformal Predictions](#conformal-predictions)\n  - [Calibration\u002FEvaluation-Metrics](#calibrationevaluation-metrics)\n  - [Misclassification Detection \\& Selective Classification](#misclassification-detection--selective-classification)\n  - [Anomaly-detection and Out-of-Distribution-Detection](#anomaly-detection-and-out-of-distribution-detection)\n  - [Uncertainty sources & Aleatoric and Epistemic Uncertainty Disentenglement](#uncertainty-sources--aleatoric-and-epistemic-uncertainty-disentenglement)\n  - [Uncertainty Quantification in Multimodal Models \u002F GenAI](#uncertainty-quantification-in-multimodal-models--genai)\n  - [Applications](#applications)\n    - [Classification and Semantic-Segmentation](#classification-and-semantic-segmentation)\n    - [Regression](#regression)\n    - [Object detection](#object-detection)\n    - [Domain adaptation](#domain-adaptation)\n    - [Semi-supervised and Active Learning](#semi-supervised-and-active-learning)\n    - [Natural Language 
Processing](#natural-language-processing)\n    - [Others](#others)\n- [Datasets and Benchmarks](#datasets-and-benchmarks)\n- [Libraries](#libraries)\n  - [Python](#python)\n  - [PyTorch](#pytorch)\n  - [JAX](#jax)\n  - [TensorFlow](#tensorflow)\n- [Lectures and tutorials](#lectures-and-tutorials)\n- [Books](#books)\n- [Other Resources](#other-resources)\n\n# Papers\n\n## Surveys\n\n**Conference**\n\n- Benchmarking Uncertainty Disentanglement: Specialized Uncertainties for Specialized Tasks [[NeurIPS2024](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2402.19460>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fbmucsanyi\u002Funtangle>)\n- A Comparison of Uncertainty Estimation Approaches in Deep Learning Components for Autonomous Vehicle Applications [[AISafety Workshop 2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2006.15172>)\n\n**Journal**\n\n- A survey of uncertainty in deep neural networks [[Artificial Intelligence Review 2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2107.03342>) - [[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002FJakobCode\u002FUncertaintyInNeuralNetworks_Resources>) \n- Prior and Posterior Networks: A Survey on Evidential Deep Learning Methods For Uncertainty Estimation [[TMLR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2110.03051>)\n- A Survey on Uncertainty Estimation in Deep Learning Classification Systems from a Bayesian Perspective [[ACM2021]](\u003Chttps:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3477140?casa_token=6fozCYTovlIAAAAA:t5vcjuXCMem1b8iFwaMG4o_YJHTe0wArLtoy9KCbL8Cow0aGEoxSiJans2Kzpm2FSKOg-4ZCDkBa>)\n- Ensemble deep learning: A review [[Engineering Applications of AI 2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2104.02395>)\n- A review of uncertainty quantification in deep learning: Techniques, applications and challenges [[Information Fusion 2021]](\u003Chttps:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS1566253521001081>)\n- 
Aleatoric and epistemic uncertainty in machine learning: an introduction to concepts and methods [[Machine Learning 2021]](\u003Chttps:\u002F\u002Flink.springer.com\u002Farticle\u002F10.1007\u002Fs10994-021-05946-3>)\n- Predictive inference with the jackknife+ [[The Annals of Statistics 2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1905.02928>)\n- Uncertainty in big data analytics: survey, opportunities, and challenges [[Journal of Big Data 2019]](\u003Chttps:\u002F\u002Fjournalofbigdata.springeropen.com\u002Farticles\u002F10.1186\u002Fs40537-019-0206-3?cv=1>)\n\n**Arxiv**\n\n- A System-Level View on Out-of-Distribution Data in Robotics [[arXiv2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2212.14020>)\n- A Survey on Uncertainty Reasoning and Quantification for Decision Making: Belief Theory Meets Deep Learning [[arXiv2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2206.05675>)\n\n## Theory\n\n**Conference**\n\n- Exploring and Exploiting Model Uncertainty in Bayesian Optimization [[NeurIPS2025]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fforum?id=p58mKXaeWC>)\n- A Rigorous Link between Deep Ensembles and (Variational) Bayesian Methods [[NeurIPS2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2305.15027>)\n- Towards Understanding Ensemble, Knowledge Distillation and Self-Distillation in Deep Learning [[ICLR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2012.09816.pdf>)\n- Unmasking the Lottery Ticket Hypothesis: What's Encoded in a Winning Ticket's Mask? 
[[ICLR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2210.03044.pdf>)\n- Probabilistic Contrastive Learning Recovers the Correct Aleatoric Uncertainty of Ambiguous Inputs [[ICML2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2302.02865.pdf>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fmkirchhof\u002FProbabilistic_Contrastive_Learning>)\n- On Second-Order Scoring Rules for Epistemic Uncertainty Quantification [[ICML2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2301.12736.pdf>)\n- Neural Variational Gradient Descent [[AABI2022]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fforum?id=oG0vTBw58ic>)\n- Top-label  and multiclass-to-binary reductions [[ICLR2022]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fforum?id=WqoBaaPHS->)\n- Bayesian Model Selection, the Marginal Likelihood, and Generalization [[ICML2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2202.11678>)\n- With malice towards none: Assessing uncertainty via equalized coverage [[AIES 2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1908.05428>)\n- Uncertainty in Gradient Boosting via Ensembles [[ICLR2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2006.10562>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fyandex-research\u002FGBDT-uncertainty>)\n- Repulsive Deep Ensembles are Bayesian [[NeurIPS2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2106.11642>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fratschlab\u002Frepulsive_ensembles>)\n- Bayesian Optimization with High-Dimensional Outputs [[NeurIPS2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2106.12997>)\n- Residual Pathway Priors for Soft Equivariance Constraints [[NeurIPS2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2112.01388>)\n- Dangers of Bayesian Model Averaging under Covariate Shift [[NeurIPS2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2106.11905>) - [[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002Fizmailovpavel\u002Fbnn_covariate_shift>)\n- A 
Mathematical Analysis of Learning Loss for Active Learning in Regression [[CVPR Workshop2021]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021W\u002FTCV\u002Fhtml\u002FShukla_A_Mathematical_Analysis_of_Learning_Loss_for_Active_Learning_in_CVPRW_2021_paper.html>)\n- Why Are Bootstrapped Deep Ensembles Not Better? [[NeurIPS Workshop]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fforum?id=dTCir0ceyv0>)\n- Deep Convolutional Networks as shallow Gaussian Processes [[ICLR2019]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1808.05587>)\n- On the accuracy of influence functions for measuring group effects [[NeurIPS2018]](\u003Chttps:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2019\u002Fhash\u002Fa78482ce76496fcf49085f2190e675b4-Abstract.html>)\n- To Trust Or Not To Trust A Classifier [[NeurIPS2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1805.11783>) - [[Python]](\u003Chttps:\u002F\u002Fgithub.com\u002Fgoogle\u002FTrustScore>)\n- Understanding Measures of Uncertainty for Adversarial Example Detection [[UAI2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1803.08533>)\n\n**Journal**\n\n- Martingale posterior distributions [[Royal Statistical Society Series B]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2103.15671>)\n- A Unified Theory of Diversity in Ensemble Learning [[JMLR2023]](\u003Chttps:\u002F\u002Fjmlr.org\u002Fpapers\u002Fvolume24\u002F23-0041\u002F23-0041.pdf>)\n- Multivariate Uncertainty in Deep Learning [[TNNLS2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1910.14215>)\n- A General Framework for Uncertainty Estimation in Deep Learning [[RAL2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1907.06890>)\n- Adaptive nonparametric confidence sets [[Ann. Statist. 
2006]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002Fmath\u002F0605473>)\n\n**Arxiv**\n\n- Ensembles for Uncertainty Estimation: Benefits of Prior Functions and Bootstrapping [[arXiv2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2206.03633.pdf>)\n- Efficient Gaussian Neural Processes for Regression [[arXiv2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2108.09676>)\n- Dense Uncertainty Estimation [[arXiv2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2110.06427>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FJingZhang617\u002FUncertaintyEstimation>)\n- A higher-order swiss army infinitesimal jackknife [[arXiv2019]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1907.12116>)\n\n## Bayesian-Methods\n\n**Conference**\n\n- Quantifying Uncertainty in the Presence of Distribution Shifts [[NeurIPS2025]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fforum?id=04p7u1gIsv&referrer=%5Bthe%20profile%20of%20Yuli%20Slavutsky%5D(%2Fprofile%3Fid%3D~Yuli_Slavutsky1)>)\n- Training Bayesian Neural Networks with Sparse Subspace Variational Inference [[ICLR2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2402.11025>)\n- Variational Bayesian Last Layers [[ICLR2024]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.11599)\n- A Symmetry-Aware Exploration of Bayesian Neural Network Posteriors [[ICLR2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2310.08287>)\n- Beyond Unimodal: Generalising Neural Processes for Multimodal Uncertainty Estimation [[NeurIPS2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2304.01518>)\n- Uncertainty-aware Unsupervised Video Hashing [[AISTATS2023]](\u003Chttps:\u002F\u002Fproceedings.mlr.press\u002Fv206\u002Fwang23i.html>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fwangyucheng1234\u002FBerVAE>)\n- Gradient-based Uncertainty Attribution for Explainable Bayesian Deep Learning [[CVPR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2304.04824>)\n- Robustness to corruption in pre-trained Bayesian neural 
networks [[ICLR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2206.12361.pdf>)\n- Beyond Deep Ensembles: A Large-Scale Evaluation of Bayesian Deep Learning under Distribution Shift [[NeurIPS2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2306.12306>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FFeuermagier\u002FBeyond_Deep_Ensembles>)\n- Transformers Can Do Bayesian Inference [[ICLR2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2112.10510>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fautoml\u002FPFNs?tab=readme-ov-file>)\n- Uncertainty Estimation for Multi-view Data: The Power of Seeing the Whole Picture [[NeurIPS2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2210.02676>)\n- On Batch Normalisation for Approximate Bayesian Inference [[AABI2021]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fpdf?id=SH2tfpm_0LE>)\n- Activation-level uncertainty in deep neural networks [[ICLR2021]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fforum?id=UvBPbpvHRj->)\n- Laplace Redux – Effortless Bayesian Deep Learning [[NeurIPS2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2106.14806>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FAlexImmer\u002FLaplace>)\n- On the Effects of Quantisation on Model Uncertainty in Bayesian Neural Networks [[UAI2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2102.11062>)\n- Learnable uncertainty under Laplace approximations [[UAI2021]](\u003Chttps:\u002F\u002Fproceedings.mlr.press\u002Fv161\u002Fkristiadi21a.html>)\n- Bayesian Neural Networks with Soft Evidence [[ICML Workshop2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2010.09570>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fedwardyu\u002Fsoft-evidence-bnn>)\n- TRADI: Tracking deep neural network weight distributions for uncertainty estimation [[ECCV2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1912.11316>) - 
[[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fgiannifranchi\u002FTRADI_Tracking_DNN_weights>)\n- How Good is the Bayes Posterior in Deep Neural Networks Really? [[ICML2020]](\u003Chttp:\u002F\u002Fproceedings.mlr.press\u002Fv119\u002Fwenzel20a.html>)\n- Efficient and Scalable Bayesian Neural Nets with Rank-1 Factors [[ICML2020]](\u003Chttp:\u002F\u002Fproceedings.mlr.press\u002Fv119\u002Fdusenberry20a\u002Fdusenberry20a.pdf>) - [[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002Fgoogle\u002Fedward2>)\n- Being Bayesian, Even Just a Bit, Fixes Overconfidence in ReLU Networks [[ICML2020]](\u003Chttp:\u002F\u002Fproceedings.mlr.press\u002Fv119\u002Fkristiadi20a\u002Fkristiadi20a.pdf>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FAlexImmer\u002FLaplace>)\n- Bayesian Deep Learning and a Probabilistic Perspective of Generalization [[NeurIPS2020]](\u003Chttps:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2020\u002Ffile\u002F322f62469c5e3c7dc3e58f5a4d1ea399-Paper.pdf>)\n- A Simple Baseline for Bayesian Uncertainty in Deep Learning [[NeurIPS2019]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1902.02476>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fwjmaddox\u002Fswa_gaussian>) - [[TorchUncertainty]](\u003Chttps:\u002F\u002Fgithub.com\u002FENSTA-U2IS-AI\u002Ftorch-uncertainty>)\n- Bayesian Uncertainty Estimation for Batch Normalized Deep Networks [[ICML2018]](\u003Chttp:\u002F\u002Fproceedings.mlr.press\u002Fv80\u002Fteye18a.html>) - [[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002Ficml-mcbn\u002Fmcbn>) - [[TorchUncertainty]](\u003Chttps:\u002F\u002Fgithub.com\u002FENSTA-U2IS-AI\u002Ftorch-uncertainty>)\n- Lightweight Probabilistic Deep Networks [[CVPR2018]](\u003Chttps:\u002F\u002Fgithub.com\u002Fezjong\u002Flightprobnets>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fezjong\u002Flightprobnets>)\n- A Scalable Laplace Approximation for Neural Networks [[ICLR2018]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fpdf?id=Skdvd2xAZ>) 
- [[Theano]](\u003Chttps:\u002F\u002Fgithub.com\u002FBB-UCL\u002FLasagne>)\n- Decomposition of Uncertainty in Bayesian Deep Learning for Efficient and Risk-sensitive Learning [[ICML2018]](\u003Chttp:\u002F\u002Fproceedings.mlr.press\u002Fv80\u002Fdepeweg18a.html>)\n- Weight Uncertainty in Neural Networks [[ICML2015]](\u003Chttps:\u002F\u002Fproceedings.mlr.press\u002Fv37\u002Fblundell15.html>)\n\n**Journal**\n\n- Hashing with Uncertainty Quantification via Sampling-based Hypothesis Testing [[TMLR2024]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fforum?id=cc4v6v310f>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FQianLab\u002FHashUQ>)\n- Analytically Tractable Hidden-States Inference in Bayesian Neural Networks [[JMLR2024]](\u003Chttps:\u002F\u002Fjmlr.org\u002Fpapers\u002Fv23\u002F21-0758.html>)\n- Encoding the latent posterior of Bayesian Neural Networks for uncertainty quantification [[TPAMI2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2012.02818>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fgiannifranchi\u002FLP_BNN>)\n- Bayesian modeling of uncertainty in low-level vision [[IJCV1990]](\u003Chttps:\u002F\u002Flink.springer.com\u002Farticle\u002F10.1007%2FBF00126502>)\n\n**Arxiv**\n\n- Density Uncertainty Layers for Reliable Uncertainty Estimation [[arXiv2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2306.12497>)\n\n## Ensemble-Methods\n\n**Conference**\n\n- Input-gradient space particle inference for neural network ensembles [[ICLR2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2306.02775>)\n- Fast Ensembling with Diffusion Schrödinger Bridge [[ICLR2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2404.15814>)\n- Pathologies of Predictive Diversity in Deep Ensembles [[ICLR2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2302.00704>)\n- Model Ratatouille: Recycling Diverse Models for Out-of-Distribution Generalization [[ICML2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2212.10445.pdf>)\n- Bayesian 
Posterior Approximation With Stochastic Ensembles [[CVPR2023]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fpapers\u002FBalabanov_Bayesian_Posterior_Approximation_With_Stochastic_Ensembles_CVPR_2023_paper.pdf>)\n- Normalizing Flow Ensembles for Rich Aleatoric and Epistemic Uncertainty Modeling [[AAAI2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2302.01312>)\n- Window-Based Early-Exit Cascades for Uncertainty Estimation: When Deep Ensembles are More Efficient than Single Models [[ICCV2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2303.08010>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fguoxoug\u002Fwindow-early-exit>)\n- Weighted Ensemble Self-Supervised Learning [[ICLR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2211.09981.pdf>)\n- Agree to Disagree: Diversity through Disagreement for Better Transferability [[ICLR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2202.04414.pdf>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fmpagli\u002FAgree-to-Disagree>)\n- Packed-Ensembles for Efficient Uncertainty Estimation [[ICLR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2210.09184>) - [[TorchUncertainty]](\u003Chttps:\u002F\u002Fgithub.com\u002FENSTA-U2IS-AI\u002Ftorch-uncertainty>)\n- Sub-Ensembles for Fast Uncertainty Estimation in Neural Networks [[ICCV Workshop2023]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023W\u002FLXCV\u002Fpapers\u002FValdenegro-Toro_Sub-Ensembles_for_Fast_Uncertainty_Estimation_in_Neural_Networks_ICCVW_2023_paper.pdf>)\n- Prune and Tune Ensembles: Low-Cost Ensemble Learning With Sparse Independent Subnetworks [[AAAI2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2202.11782>)\n- Deep Ensembles Work, But Are They Necessary? 
[[NeurIPS2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2202.06985>)\n- FiLM-Ensemble: Probabilistic Deep Learning via Feature-wise Linear Modulation [[NeurIPS2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2206.00050>)\n- Deep Ensembling with No Overhead for either Training or Testing: The All-Round Blessings of Dynamic Sparsity [[ICLR2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2106.14568>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FVITA-Group\u002FFreeTickets>)\n- On the Usefulness of Deep Ensemble Diversity for Out-of-Distribution Detection [[ECCV Workshop2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2207.07517>)\n- Masksembles for Uncertainty Estimation [[CVPR2021]](\u003Chttps:\u002F\u002Fnikitadurasov.github.io\u002Fprojects\u002Fmasksembles\u002F>) - [[PyTorch\u002FTensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002Fnikitadurasov\u002Fmasksembles>)\n- Robustness via Cross-Domain Ensembles [[ICCV2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2103.10919>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FEPFL-VILAB\u002FXDEnsembles>)\n- Uncertainty in Gradient Boosting via Ensembles [[ICLR2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2006.10562>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fyandex-research\u002FGBDT-uncertainty>)\n- Uncertainty Quantification and Deep Ensembles [[NeurIPS2021]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fforum?id=wg_kD_nyAF>)\n- Maximizing Overall Diversity for Improved Uncertainty Estimates in Deep Ensembles [[AAAI2020]](\u003Chttps:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F5849>)\n- Uncertainty in Neural Networks: Approximately Bayesian Ensembling [[AISTATS2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1810.05546>)\n- Pitfalls of In-Domain Uncertainty Estimation and Ensembling in Deep Learning [[ICLR2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2002.06470>) - 
[[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FSamsungLabs\u002Fpytorch-ensembles>)\n- BatchEnsemble: An Alternative Approach to Efficient Ensemble and Lifelong Learning [[ICLR2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2002.06715>) - [[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002Fgoogle\u002Fedward2>) - [[TorchUncertainty]](\u003Chttps:\u002F\u002Fgithub.com\u002FENSTA-U2IS-AI\u002Ftorch-uncertainty>)\n- Hyperparameter Ensembles for Robustness and Uncertainty Quantification [[NeurIPS2020]](\u003Chttps:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2020\u002Fhash\u002F481fbfa59da2581098e841b7afc122f1-Abstract.html>)\n- Bayesian Deep Ensembles via the Neural Tangent Kernel [[NeurIPS2020]](\u003Chttps:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2020\u002Fhash\u002F0b1ec366924b26fc98fa7b71a9c249cf-Abstract.html>)\n- Diversity with Cooperation: Ensemble Methods for Few-Shot Classification [[ICCV2019]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1903.11341>)\n- Accurate Uncertainty Estimation and Decomposition in Ensemble Learning [[NeurIPS2019]](\u003Chttps:\u002F\u002Fpapers.nips.cc\u002Fpaper\u002F2019\u002Fhash\u002F1cc8a8ea51cd0adddf5dab504a285915-Abstract.html>)\n- High-Quality Prediction Intervals for Deep Learning: A Distribution-Free, Ensembled Approach [[ICML2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1802.07167>) - [[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002FTeaPearce\u002FDeep_Learning_Prediction_Intervals>)\n- Snapshot Ensembles: Train 1, get M for free [[ICLR2017]](https:\u002F\u002Farxiv.org\u002Fabs\u002F1704.00109) - [[TorchUncertainty]](\u003Chttps:\u002F\u002Fgithub.com\u002FENSTA-U2IS-AI\u002Ftorch-uncertainty>)\n- Simple and scalable predictive uncertainty estimation using deep ensembles [[NeurIPS2017]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1612.01474>) - [[TorchUncertainty]](\u003Chttps:\u002F\u002Fgithub.com\u002FENSTA-U2IS-AI\u002Ftorch-uncertainty>)\n\n**Journal**\n\n- One 
Versus all for deep Neural Network for uncertainty (OVNNI) quantification [[IEEE Access2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2006.00954>)\n\n**Arxiv**\n\n- Split-Ensemble: Efficient OOD-aware Ensemble via Task and Model Splitting [[arXiv2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2312.09148>)\n- Deep Ensemble as a Gaussian Process Approximate Posterior [[arXiv2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2205.00163>)\n- Sequential Bayesian Neural Subnetwork Ensembles [[arXiv2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2206.00794>)\n- Confident Neural Network Regression with Bootstrapped Deep Ensembles [[arXiv2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2202.10903>) - [[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002FLaurensSluyterman\u002FBootstrapped_Deep_Ensembles>)\n- Dense Uncertainty Estimation via an Ensemble-based Conditional Latent Variable Model [[arXiv2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2111.11055>)\n- Deep Ensembles: A Loss Landscape Perspective [[arXiv2019]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1912.02757>)\n- Checkpoint ensembles: Ensemble methods from a single training process [[arXiv2017]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1710.03282>) - [[TorchUncertainty]](\u003Chttps:\u002F\u002Fgithub.com\u002FENSTA-U2IS-AI\u002Ftorch-uncertainty>)\n\n## Sampling\u002FDropout-based-Methods\n\n**Conference**\n\n- Rate-In: Information-Driven Adaptive Dropout Rates for Improved Inference-Time Uncertainty Estimation [[CVPR2025]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2412.07169>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fcode-supplement-25\u002Frate-in>)\n- Enabling Uncertainty Estimation in Iterative Neural Networks [[ICML2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.16732>) - [[Pytorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fcvlab-epfl\u002Fiter_unc>)\n- Make Me a BNN: A Simple Strategy for Estimating Bayesian Uncertainty 
from Pre-trained Models [[CVPR2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2312.15297>) - [[TorchUncertainty]](\u003Chttps:\u002F\u002Fgithub.com\u002FENSTA-U2IS-AI\u002Ftorch-uncertainty>)\n- Training-Free Uncertainty Estimation for Dense Regression: Sensitivity as a Surrogate [[AAAI2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1910.04858v3>)\n- Efficient Bayesian Uncertainty Estimation for nnU-Net [[MICCAI2022]](\u003Chttps:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-031-16452-1_51>)\n- Dropout Sampling for Robust Object Detection in Open-Set Conditions [[ICRA2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1710.06677>)\n- Test-time data augmentation for estimation of heteroscedastic aleatoric uncertainty in deep neural networks [[MIDL2018]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fforum?id=rJZz-knjz>)\n- Concrete Dropout [[NeurIPS2017]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1705.07832>)\n- Dropout as a Bayesian Approximation: Representing Model Uncertainty in Deep Learning [[ICML2016]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1506.02142>) - [[TorchUncertainty]](\u003Chttps:\u002F\u002Fgithub.com\u002FENSTA-U2IS-AI\u002Ftorch-uncertainty>)\n\n**Journal**\n\n- A General Framework for Uncertainty Estimation in Deep Learning [[Robotics and Automation Letters2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F1907.06890.pdf>)\n\n**Arxiv**\n\n- SoftDropConnect (SDC) – Effective and Efficient Quantification of the Network Uncertainty in Deep MR Image Analysis [[arXiv2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2201.08418>)\n\n## Post-hoc-Methods\u002FAuxiliary-Networks\n\n**Conference**\n\n- On the Limitations of Temperature Scaling for Distributions with Overlaps [[ICLR2024]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.00740)\n- Post-hoc Uncertainty Learning using a Dirichlet Meta-Model [[AAAI2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2212.07359>) - 
[[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fmaohaos2\u002FPosthocUQ>)\n- ProbVLM: Probabilistic Adapter for Frozen Vision-Language Models [[ICCV2023]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fhtml\u002FUpadhyay_ProbVLM_Probabilistic_Adapter_for_Frozen_Vison-Language_Models_ICCV_2023_paper.html>)\n- Out-of-Distribution Detection for Monocular Depth Estimation [[ICCV2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2308.06072>)\n- Detecting Misclassification Errors in Neural Networks with a Gaussian Process Model [[AAAI2022]](\u003Chttps:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F20773>)\n- Learning Structured Gaussians to Approximate Deep Ensembles [[CVPR2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2203.15485>)\n- Improving the reliability for confidence estimation [[ECCV2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2210.06776>)\n- Gradient-based Uncertainty for Monocular Depth Estimation [[ECCV2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2208.02005>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fjhornauer\u002FGrUMoDepth>)\n- BayesCap: Bayesian Identity Cap for Calibrated Uncertainty in Frozen Neural Networks [[ECCV2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2207.06873>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FExplainableML\u002FBayesCap>)\n- Learning Uncertainty For Safety-Oriented Semantic Segmentation In Autonomous Driving [[ICIP2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2105.13688>)\n- SLURP: Side Learning Uncertainty for Regression Problems [[BMVC2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2104.02395>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FxuanlongORZ\u002FSLURP_uncertainty_estimate>)\n- Triggering Failures: Out-Of-Distribution detection by learning from local adversarial attacks in Semantic Segmentation [[ICCV2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2108.01634>) 
- [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fvaleoai\u002Fobsnet>)\n- Learning to Predict Error for MRI Reconstruction [[MICCAI2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2002.05582>)\n- A Mathematical Analysis of Learning Loss for Active Learning in Regression [[CVPR Workshop2021]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021W\u002FTCV\u002Fhtml\u002FShukla_A_Mathematical_Analysis_of_Learning_Loss_for_Active_Learning_in_CVPRW_2021_paper.html>)\n- Real-time uncertainty estimation in computer vision via uncertainty-aware distribution distillation [[WACV2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2007.15857>)\n- On the uncertainty of self-supervised monocular depth estimation [[CVPR2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2005.06209>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fmattpoggi\u002Fmono-uncertainty>)\n- Quantifying Point-Prediction Uncertainty in Neural Networks via Residual Estimation with an I\u002FO Kernel [[ICLR2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1906.00588>) - [[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002Fcognizant-ai-labs\u002Frio-paper>)\n- Gradients as a Measure of Uncertainty in Neural Networks [[ICIP2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2008.08030>)\n- Learning Loss for Test-Time Augmentation [[NeurIPS2020]](\u003Chttps:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2020\u002Fhash\u002F2ba596643cbbbc20318224181fa46b28-Abstract.html>)\n- Learning loss for active learning [[CVPR2019]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1905.03677>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FMephisto405\u002FLearning-Loss-for-Active-Learning>) (unofficial codes)\n- Addressing failure prediction by learning model confidence [[NeurIPS2019]](\u003Chttps:\u002F\u002Fpapers.NeurIPS.cc\u002Fpaper\u002F2019\u002Ffile\u002F757f843a169cc678064d9530d12a1881-Paper.pdf>) - 
[[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fvaleoai\u002FConfidNet>)\n- Structured Uncertainty Prediction Networks [[CVPR2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1802.07079>) - [[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002FEra-Dorta\u002Ftf_mvg>)\n- Classification uncertainty of deep neural networks based on gradient information [[IAPR Workshop2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1805.08440>)\n\n**Journal**\n\n- Towards More Reliable Confidence Estimation [[TPAMI2023]](\u003Chttps:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10172026\u002F>)\n- Confidence Estimation via Auxiliary Models [[TPAMI2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2012.06508>)\n\n**Arxiv**\n\n- Instance-Aware Observer Network for Out-of-Distribution Object Segmentation [[arXiv2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2207.08782>)\n- DEUP: Direct Epistemic Uncertainty Prediction [[arXiv2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2102.08501>)\n- Learning Confidence for Out-of-Distribution Detection in Neural Networks [[arXiv2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1802.04865>)\n\n## Data-augmentation\u002FGeneration-based-methods\n\n**Conference**\n\n- Posterior Uncertainty Quantification in Neural Networks using Data Augmentation [[AISTATS2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2403.12729>)\n- Learning to Generate Training Datasets for Robust Semantic Segmentation [[WACV2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2308.02535>)\n- OpenMix: Exploring Outlier Samples for Misclassification Detection [[CVPR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2303.17093>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FImpression2805\u002FOpenMix>)\n- On the Pitfall of Mixup for Uncertainty  
[[CVPR2023]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FWang_On_the_Pitfall_of_Mixup_for_Uncertainty__CVPR_2023_paper.html>)\n- Diverse, Global and Amortised Counterfactual Explanations for Uncertainty Estimates [[AAAI2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2112.02646>)\n- Out-of-distribution Detection with Implicit Outlier Transformation [[ICLR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2303.05033>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fqizhouwang\u002Fdoe>)\n- PixMix: Dreamlike Pictures Comprehensively Improve Safety Measures [[CVPR2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2112.05135>)\n- Breaking Down Out-of-Distribution Detection: Many Methods Based on OOD Training Data Estimate a Combination of the Same Core Quantities [[ICML2022]](\u003Chttps:\u002F\u002Fproceedings.mlr.press\u002Fv162\u002Fbitterwolf22a.html>)\n- RegMixup: Mixup as a Regularizer Can Surprisingly Improve Accuracy & Out-of-Distribution Robustness [[NeurIPS2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2206.14502>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Ffrancescopinto\u002Fregmixup>)\n- Towards efficient feature sharing in MIMO architectures [[CVPR Workshop2022]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022W\u002FECV\u002Fhtml\u002FSun_Towards_Efficient_Feature_Sharing_in_MIMO_Architectures_CVPRW_2022_paper.html>)\n- Robust Semantic Segmentation with Superpixel-Mix [[BMVC2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2108.00968>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fgiannifranchi\u002Fdeeplabv3-superpixelmix>)\n- MixMo: Mixing Multiple Inputs for Multiple Outputs via Deep Subnetworks [[ICCV2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2103.06132>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Falexrame\u002Fmixmo-pytorch>)\n- Training independent subnetworks for robust prediction 
[[ICLR2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2010.06610>)\n- Regularizing Variational Autoencoder with Diversity and Uncertainty Awareness [[IJCAI2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2110.12381>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fsmilesdzgk\u002Fdu-vae>)\n- Uncertainty-aware GAN with Adaptive Loss for Robust MRI Image Enhancement  [[ICCV Workshop2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2110.03343.pdf>)\n- Uncertainty-Aware Deep Classifiers using Generative Models [[AAAI2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2006.04183>)\n- Synthesize then Compare: Detecting Failures and Anomalies for Semantic Segmentation [[ECCV2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2003.08440>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FYingdaXia\u002FSynthCP>)\n- Detecting the Unexpected via Image Resynthesis [[ICCV2019]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1904.07595>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fcvlab-epfl\u002Fdetecting-the-unexpected>)\n- Mix-n-match: Ensemble and compositional methods for uncertainty  in deep learning [[ICML2020]](\u003Chttp:\u002F\u002Fproceedings.mlr.press\u002Fv119\u002Fzhang20k\u002Fzhang20k.pdf>)\n- Deep Anomaly Detection with Outlier Exposure [[ICLR2019]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F1812.04606.pdf>)\n- On Mixup Training: Improved  and Predictive Uncertainty for Deep Neural Networks [[NeurIPS2019]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1905.11001>)\n\n**Arxiv**\n\n- Reliability in Semantic Segmentation: Can We Use Synthetic Data? 
[[arXiv2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.09231.pdf>)\n- Quantifying uncertainty with GAN-based priors [[arXiv2019]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fforum?id=HyeAPeBFwS>) - [[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002Fdhruvpatel108\u002FGANPriors>)\n\n## Output-Space-Modeling\u002FEvidential-deep-learning\n\nAwesome Evidential Deep Learning [[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002FMengyuanChen21\u002FAwesome-Evidential-Deep-Learning>)\n\n**Conference**\n\n- Vicinal Label Supervision for Reliable Aleatoric and Epistemic Uncertainty Estimation [[NeurIPS2025]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fforum?id=hPfICQIDOm>)\n- Multimodal Learning with Uncertainty Quantification based on Discounted Belief Fusion [[AISTATS2025]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2412.18024>)\n- Hyper-opinion Evidential Deep Learning for Out-of-Distribution Detection [[NeurIPS2024]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fforum?id=Te8vI2wGTh&referrer=%5Bthe%20profile%20of%20Yufei%20Chen%5D(%2Fprofile%3Fid%3D~Yufei_Chen1)>)\n- R-EDL: Relaxing Nonessential Settings of Evidential Deep Learning [[ICLR2024]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fforum?id=Si3YFA641c>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FMengyuanChen21\u002FICLR2024-REDL\u002Ftree\u002Fmain>)\n- Hyper Evidential Deep Learning to Quantify Composite Classification Uncertainty [[ICLR2024]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.10980)\n- Reliable conflictive multi-view learning [[AAAI2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2402.16897>) - [[Pytorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fjiajunsi\u002FRCML>)\n- The Evidence Contraction Issue in Deep Evidential Regression: Discussion and Solution [[AAAI2024]](\u003Chttps:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F30172>)\n- Discretization-Induced Dirichlet Posterior for Robust Uncertainty Quantification on Regression 
[[AAAI2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2308.09065>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FENSTA-U2IS-AI\u002FDIDO>)\n- The Unreasonable Effectiveness of Deep Evidential Regression [[AAAI2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2205.10060>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fpasteurlabs\u002Funreasonable_effective_der>) - [[TorchUncertainty]](https:\u002F\u002Fgithub.com\u002FENSTA-U2IS-AI\u002Ftorch-uncertainty)\n- Exploring and Exploiting Uncertainty for Incomplete Multi-View Classification [[CVPR2023]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.05165)\n- Plausible Uncertainties for Human Pose Regression [[ICCV2023]](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FBramlage_Plausible_Uncertainties_for_Human_Pose_Regression_ICCV_2023_paper.pdf) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fbiggzlar\u002Fplausible-uncertainties>)\n- Uncertainty Estimation by Fisher Information-based Evidential Deep Learning [[ICML2023]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2303.02045.pdf) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fdanruod\u002Fiedl>)\n- Improving Evidential Deep Learning via Multi-task Learning [[AAAI2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2112.09368>) - [[PyTorch]](https:\u002F\u002Fgithub.com\u002Fdeargen\u002FMT-ENet)\n- An Evidential Neural Network Model for Regression Based on Random Fuzzy Numbers [[BELIEF2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2208.00647>)\n- On the Pitfalls of Heteroscedastic Uncertainty Estimation with Probabilistic Neural Networks [[ICLR2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2203.09168>) - [[PyTorch]](https:\u002F\u002Fgithub.com\u002Fmartius-lab\u002Fbeta-nll)\n- Natural Posterior Network: Deep Bayesian Uncertainty for Exponential Family Distributions [[ICLR2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2105.04471>) - 
[[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fborchero\u002Fnatural-posterior-network>)\n- Pitfalls of Epistemic Uncertainty Quantification through Loss Minimisation [[NeurIPS2022]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fpdf?id=epjxT_ARZW5>)\n- Fast Predictive Uncertainty for Classification with Bayesian Deep Networks [[UAI2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2003.01227>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fmariushobbhahn\u002FLB_for_BNNs_official>)\n- Evaluating robustness of predictive uncertainty estimation: Are Dirichlet-based models reliable? [[ICML2021]](\u003Chttp:\u002F\u002Fproceedings.mlr.press\u002Fv139\u002Fkopetzki21a\u002Fkopetzki21a.pdf>)\n- Trustworthy multimodal regression with mixture of normal-inverse gamma distributions [[NeurIPS2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2111.08456>)\n- Misclassification Risk and Uncertainty Quantification in Deep Classifiers [[WACV2021]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FWACV2021\u002Fhtml\u002FSensoy_Misclassification_Risk_and_Uncertainty_Quantification_in_Deep_Classifiers_WACV_2021_paper.html>)\n- Ensemble Distribution Distillation [[ICLR2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1905.00076>)\n- Conservative Uncertainty Estimation By Fitting Prior Networks [[ICLR2020]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fforum?id=BJlahxHYDS>)\n- Being Bayesian about Categorical Probability [[ICML2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2002.07965>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Ftjoo512\u002Fbelief-matching-framework>)\n- Posterior Network: Uncertainty Estimation without OOD Samples via Density-Based Pseudo-Counts  [[NeurIPS2020]](\u003Chttps:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2020\u002Fhash\u002F0eac690d7059a8de4b48e90f14510391-Abstract.html>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fsharpenb\u002FPosterior-Network>)\n- Deep Evidential Regression 
[[NeurIPS2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1910.02600>) - [[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002Faamini\u002Fevidential-deep-learning>) - [[TorchUncertainty]](\u003Chttps:\u002F\u002Fgithub.com\u002FENSTA-U2IS-AI\u002Ftorch-uncertainty>)\n- Noise Contrastive Priors for Functional Uncertainty [[UAI2020]](\u003Chttps:\u002F\u002Fproceedings.mlr.press\u002Fv115\u002Fhafner20a.html>)\n- Towards Maximizing the Representation Gap between In-Domain & Out-of-Distribution Examples [[NeurIPS Workshop2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2010.10474>)\n- Uncertainty on Asynchronous Time Event Prediction [[NeurIPS2019]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1911.05503>) - [[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002Fsharpenb\u002FUncertainty-Event-Prediction>)\n- Reverse KL-Divergence Training of Prior Networks: Improved Uncertainty and Adversarial Robustness [[NeurIPS2019]](\u003Chttps:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2019\u002Fhash\u002F7dd2ae7db7d18ee7c9425e38df1af5e2-Abstract.html>)\n- Quantifying Classification Uncertainty using Regularized Evidential Neural Networks [[AAAI FSS2019]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1910.06864>)\n- Uncertainty estimates and multi-hypotheses networks for optical flow [[ECCV2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1802.07095>) - [[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002Flmb-freiburg\u002Fnetdef_models>)\n- Evidential Deep Learning to Quantify Classification Uncertainty [[NeurIPS2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1806.01768>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fdougbrion\u002Fpytorch-classification-uncertainty>)\n- Predictive uncertainty estimation via prior networks [[NeurIPS2018]](\u003Chttps:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2018\u002Fhash\u002F3ea2db50e62ceefceaf70a9d9a56a6f4-Abstract.html>)\n- What Uncertainties Do We Need in Bayesian Deep Learning for 
Computer Vision? [[NeurIPS2017]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1703.04977>)\n- Estimating the Mean and Variance of the Target Probability Distribution [[(ICNN1994)]](\u003Chttps:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F374138>)\n\n**Journal**\n\n- Prior and Posterior Networks: A Survey on Evidential Deep Learning Methods For Uncertainty Estimation [[TMLR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2110.03051>)\n- Region-Based Evidential Deep Learning to Quantify Uncertainty and Improve Robustness of Brain Tumor Segmentation [[NCA2022]](\u003Chttp:\u002F\u002Farxiv.org\u002Fabs\u002F2208.06038>)\n- An evidential classifier based on Dempster-Shafer theory and deep learning [[Neurocomputing2021]](\u003Chttps:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0925231221004525>) - [[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002Ftongzheng1992\u002FE-CNN-classifier>)\n- Evidential fully convolutional network for semantic segmentation [[AppliedIntelligence2021]](\u003Chttps:\u002F\u002Flink.springer.com\u002Farticle\u002F10.1007\u002Fs10489-021-02327-0>) - [[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002Ftongzheng1992\u002FE-FCN>)\n- Information Aware max-norm Dirichlet networks for predictive uncertainty estimation [[NeuralNetworks2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1910.04819#:~:text=Information%20Aware%20Max%2DNorm%20Dirichlet%20Networks%20for%20Predictive%20Uncertainty%20Estimation,-Theodoros%20Tsiligkaridis&text=Precise%20estimation%20of%20uncertainty%20in,prone%20to%20over%2Dconfident%20predictions>)\n- A neural network classifier based on Dempster-Shafer theory [[IEEETransSMC2000]](\u003Chttps:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F833094\u002F>)\n\n**Arxiv**\n\n- Deep evidential fusion with uncertainty quantification and contextual discounting for multimodal medical image segmentation 
[[arXiv2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2309.05919>)\n- Evidential Uncertainty Quantification: A Variance-Based Perspective [[arXiv2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.11367.pdf>)\n- Effective Uncertainty Estimation with Evidential Models for Open-World Recognition [[arXiv2022]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fpdf?id=NrB52z3eOTY>)\n- Multivariate Deep Evidential Regression [[arXiv2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2104.06135>)\n- Regression Prior Networks [[arXiv2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2006.11590>)\n- A Variational Dirichlet Framework for Out-of-Distribution Detection [[arXiv2019]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1811.07308>)\n- Uncertainty estimation in deep learning with application to spoken language assessment [[PhDThesis2019]](\u003Chttps:\u002F\u002Fwww.repository.cam.ac.uk\u002Fhandle\u002F1810\u002F298857>)\n- Inhibited softmax for uncertainty estimation in neural networks [[arXiv2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1810.01861>)\n- Quantifying Intrinsic Uncertainty in Classification via Deep Dirichlet Mixture Networks [[arXiv2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1906.04450>)\n\n## Deterministic-Uncertainty-Methods\n\n**Conference**\n- A Rate-Distortion View of Uncertainty Quantification [[ICML2024]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.10775) - [[Tensorflow]](https:\u002F\u002Fgithub.com\u002Fifiaposto\u002FDistance_Aware_Bottleneck)\n- Deep Deterministic Uncertainty: A Simple Baseline [[CVPR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2102.11582>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fomegafragger\u002FDDU>)\n- Gaussian Latent Representations for Uncertainty Estimation using Mahalanobis Distance in Deep Classifiers [[ICCV 
Workshop2023]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023W\u002FUnCV\u002Fpapers\u002FVenkataramanan_Gaussian_Latent_Representations_for_Uncertainty_Estimation_Using_Mahalanobis_Distance_in_ICCVW_2023_paper.pdf>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fvaishwarya96\u002FMAPLE-uncertainty-estimation>)\n- A Simple and Explainable Method for Uncertainty Estimation using Attribute Prototype Networks [[ICCV Workshop2023]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023W\u002FUnCV\u002Fpapers\u002FZelenka_A_Simple_and_Explainable_Method_for_Uncertainty_Estimation_Using_Attribute_ICCVW_2023_paper.pdf>)\n- Training, Architecture, and Prior for Deterministic Uncertainty Methods [[ICLR Workshop2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2303.05796>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Forientino\u002Fdum-components>)\n- Latent Discriminant deterministic Uncertainty [[ECCV2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2207.10130>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FENSTA-U2IS-AI\u002FLDU>)\n- On the Practicality of Deterministic Epistemic Uncertainty [[ICML2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2107.00649>)\n- Improving Deterministic Uncertainty Estimation in Deep Learning for Classification and Regression [[CoRR2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2102.11409>)\n- Uncertainty Estimation Using a Single Deep Deterministic Neural Network [[ICML2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2003.02037>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fy0ast\u002Fdeterministic-uncertainty-quantification>)\n- Training normalizing flows with the information bottleneck for competitive generative classification [[NeurIPS2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2001.06448>)\n- Simple and principled uncertainty estimation with deterministic deep learning via distance awareness 
[[NeurIPS2020]](\u003Chttps:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2020\u002Fhash\u002F543e83748234f7cbab21aa0ade66565f-Abstract.html>)\n- Revisiting One-vs-All Classifiers for Predictive Uncertainty and Out-of-Distribution Detection in Neural Networks [[ICML Workshop2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2007.05134>)\n- Sampling-Free Epistemic Uncertainty Estimation Using Approximated Variance Propagation [[ICCV2019]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fhtml\u002FPostels_Sampling-Free_Epistemic_Uncertainty_Estimation_Using_Approximated_Variance_Propagation_ICCV_2019_paper.html>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fjanisgp\u002FSampling-free-Epistemic-Uncertainty>)\n- Single-Model Uncertainties for Deep Learning [[NeurIPS2019]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1811.00908>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FSingleModelUncertainty\u002F>)\n\n**Journal**\n\n- ZigZag: Universal Sampling-free Uncertainty Estimation Through Two-Step Inference [[TMLR2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2211.11435>) - [[Pytorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fcvlab-epfl\u002Fzigzag>)\n- Density estimation in representation space [[EDSMLS2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1908.07235>)\n\n**Arxiv**\n\n- The Hidden Uncertainty in a Neural Network’s Activations [[arXiv2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2012.03082>)\n- A simple framework for uncertainty in contrastive learning [[arXiv2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2010.02038>)\n- Distance-based Confidence Score for Neural Network Classifiers [[arXiv2017]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1709.09844>)\n\n## Quantile-Regression\u002FPredicted-Intervals\n\n**Conference**\n\n- Image-to-Image Regression with Distribution-Free Uncertainty Quantification and Applications in Imaging 
[[ICML2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2202.05265>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Faangelopoulos\u002Fim2im-uq>)\n- Prediction Intervals: Split Normal Mixture from Quality-Driven Deep Ensembles [[UAI2020]](\u003Chttp:\u002F\u002Fproceedings.mlr.press\u002Fv124\u002Fsaleh-salem20a.html>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Ftarik\u002Fpi-snm-qde>)\n- Classification with Valid and Adaptive Coverage [[NeurIPS2020]](\u003Chttps:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2020\u002Fhash\u002F244edd7e85dc81602b7615cd705545f5-Abstract.html>)\n- Single-Model Uncertainties for Deep Learning [[NeurIPS2019]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1811.00908>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FSingleModelUncertainty\u002F>)\n- High-Quality Prediction Intervals for Deep Learning: A Distribution-Free, Ensembled Approach [[ICML2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1802.07167>) - [[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002FTeaPearce\u002FDeep_Learning_Prediction_Intervals>)\n\n**Journal**\n\n- Scalable Uncertainty Quantification for Deep Operator Networks using Randomized Priors [[CMAME2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2203.03048>)\n- Exploring uncertainty in regression neural networks for construction of prediction intervals [[Neurocomputing2022]](\u003Chttps:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fabs\u002Fpii\u002FS0925231222001102>)\n\n**Arxiv**\n\n- Interval Neural Networks: Uncertainty Scores [[arXiv2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2003.11566>)\n- Tight Prediction Intervals Using Expanded Interval Minimization [[arXiv2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1806.11222>)\n\n## Conformal Predictions\n\nAwesome Conformal Prediction [[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002Fvaleman\u002Fawesome-conformal-prediction>)\n\n\u003C!-- 
**Conference**\n\n- Testing for Outliers with Conformal p-values  [[Ann. Statist. 2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2104.08279>) - [[Python]](\u003Chttps:\u002F\u002Fgithub.com\u002Fmsesia\u002Fconditional-conformal-pvalues>)\n- Uncertainty sets for image classifiers using conformal prediction [[ICLR2021]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2009.14193.pdf) - [[GitHub]](https:\u002F\u002Fgithub.com\u002Faangelopoulos\u002Fconformal_classification)\n- Conformal Prediction Under Covariate Shift [[NeurIPS2019]](\u003Chttps:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2019\u002Fhash\u002F8fb21ee7a2207526da55a679f0332de2-Abstract.html>)\n- Conformalized Quantile Regression [[NeurIPS2019]](\u003Chttps:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2019\u002Fhash\u002F5103c3584b063c431bd1268e9b5e76fb-Abstract.html>) -->\n\n## Calibration\u002FEvaluation-Metrics\n\n**Conference**\n\n- Confidence Should Be Calibrated More Than One Turn Deep [[ACL2026]] (\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2604.05397>)\n- Grace: A generative approach to better confidence elicitation in large language models [[ACL2026]] (\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.09438>)\n- Improving Perturbation-based Explanations by Understanding the Role of Uncertainty Calibration [[NeurIPS2025]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fforum?id=AjOl3iahHd&referrer=%5Bthe%20profile%20of%20Volker%20Tresp%5D(%2Fprofile%3Fid%3D~Volker_Tresp1)>)\n- Uncertainty Weighted Gradients for Model Calibration [[CVPR2025]](\u003Chttps:\u002F\u002Fwww.arxiv.org\u002Fabs\u002F2503.22725>)\n- Smooth ECE: Principled Reliability Diagrams via Kernel Smoothing [[ICLR2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2309.12236>)\n- Calibrating Transformers via Sparse Gaussian Processes [[ICLR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2303.02444>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fchenw20\u002Fsgpa>)\n- Beyond calibration: 
estimating the grouping loss of modern neural networks [[ICLR2023]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fpdf?id=6w1k-IixnL8>) - [[Python]](\u003Chttps:\u002F\u002Fgithub.com\u002Faperezlebel\u002Fbeyond_calibration>)\n- Dual Focal Loss for Calibration [[ICML 2023]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.13665)\n- What Are Effective Labels for Augmented Data? Improving Calibration and Robustness with AutoLabel [[SaTML2023]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.11188)\n- The Devil is in the Margin: Margin-based Label Smoothing for Network Calibration [[CVPR2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2111.15430>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fby-liu\u002Fmbls>)\n- AdaFocal: Calibration-aware Adaptive Focal Loss [[NeurIPS2022]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.11838)\n- Calibrating Deep Neural Networks by Pairwise Constraints [[CVPR2022]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fhtml\u002FCheng_Calibrating_Deep_Neural_Networks_by_Pairwise_Constraints_CVPR_2022_paper.html>)\n- Top-label calibration and multiclass-to-binary reductions [[ICLR2022]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fforum?id=WqoBaaPHS->)\n- From label smoothing to label relaxation [[AAAI2021]](\u003Chttps:\u002F\u002Fwww.aaai.org\u002FAAAI21Papers\u002FAAAI-2191.LienenJ.pdf>)\n- Diagnostic Uncertainty Calibration: Towards Reliable Machine Predictions in Medical Domain [[AIStats2021]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2007.01659)\n- Rethinking Calibration of Deep Neural Networks: Do Not Be Afraid of Overconfidence [[NeurIPS2021]](\u003Chttps:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2021\u002Fhash\u002F61f3a6dbc9120ea78ef75544826c814e-Abstract.html>)\n- Beyond Pinball Loss: Quantile Methods for Calibrated Uncertainty Quantification [[NeurIPS2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2011.09588>)\n- Soft Calibration Objectives for Neural Networks 
[[NeurIPS2021]](\u003Chttps:\u002F\u002Fproceedings.neurips.cc\u002Fpaper_files\u002Fpaper\u002F2021\u002Ffile\u002Ff8905bd3df64ace64a68e154ba72f24c-Paper.pdf>) - [[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002Fgoogle\u002Funcertainty-baselines\u002Ftree\u002Fmain\u002Fexperimental\u002Fcaltrain>)\n- Confidence-Aware Learning for Deep Neural Networks [[ICML2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2007.01458>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fdaintlab\u002Fconfidence-aware-learning>)\n- Mix-n-match: Ensemble and compositional methods for uncertainty calibration in deep learning [[ICML2020]](\u003Chttp:\u002F\u002Fproceedings.mlr.press\u002Fv119\u002Fzhang20k\u002Fzhang20k.pdf>)\n- Regularization via structural label smoothing [[ICML2020]](\u003Chttps:\u002F\u002Fproceedings.mlr.press\u002Fv108\u002Fli20e.html>)\n- Well-Calibrated Regression Uncertainty in Medical Imaging with Deep Learning [[MIDL2020]](\u003Chttp:\u002F\u002Fproceedings.mlr.press\u002Fv121\u002Flaves20a.html>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fmlaves\u002Fwell-calibrated-regression-uncertainty>)\n- Calibrating Deep Neural Networks using Focal Loss [[NeurIPS2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2002.09437>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Ftorrvision\u002Ffocal_calibration>)\n- Stationary activations for uncertainty calibration in deep learning [[NeurIPS2020]](\u003Chttps:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2020\u002Fhash\u002F18a411989b47ed75a60ac69d9da05aa5-Abstract.html>)\n- Revisiting the evaluation of uncertainty estimation and its application to explore model complexity-uncertainty trade-off [[CVPR Workshop2020]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPRW_2020\u002Fhtml\u002Fw1\u002FDing_Revisiting_the_Evaluation_of_Uncertainty_Estimation_and_Its_Application_to_CVPRW_2020_paper.html>)\n- Evaluating Scalable Bayesian Deep Learning Methods for Robust Computer 
Vision [[CVPR Workshop2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1906.01620>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Ffregu856\u002Fevaluating_bdl>)\n- Bias-Reduced Uncertainty Estimation for Deep Neural Classifiers [[ICLR2019]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1805.08206>)\n- Beyond temperature scaling: Obtaining well-calibrated multiclass probabilities with Dirichlet calibration [[NeurIPS2019]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F1910.12656.pdf>) - [[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002Fdirichletcal>)\n- When does label smoothing help? [[NeurIPS2019]](\u003Chttps:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2019\u002Fhash\u002Ff1748d6b0fd9d439f71450117eba2725-Abstract.html>)\n- Verified Uncertainty Calibration [[NeurIPS2019]](\u003Chttps:\u002F\u002Fpapers.NeurIPS.cc\u002Fpaper\u002F2019\u002Fhash\u002Ff8c0c968632845cd133308b1a494967f-Abstract.html>) - [[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002Fp-lambda\u002Fverified_calibration>)\n- Measuring Calibration in Deep Learning [[CVPR Workshop2019]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1904.01685>)\n- Accurate Uncertainties for Deep Learning Using Calibrated Regression [[ICML2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1807.00263>)\n- Generalized zero-shot learning with deep calibration network [[NeurIPS2018]](\u003Chttps:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2018\u002Fhash\u002F1587965fb4d4b5afe8428a4a024feb0d-Abstract.html>)\n- On calibration of modern neural networks [[ICML2017]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1706.04599>) - [[TorchUncertainty]](https:\u002F\u002Fgithub.com\u002FENSTA-U2IS-AI\u002Ftorch-uncertainty)\n- On Fairness and Calibration [[NeurIPS2017]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1709.02012>)\n- Obtaining Well Calibrated Probabilities Using Bayesian Binning 
[[AAAI2015]](\u003Chttps:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F9602\u002F9461>)\n\n**Journal**\n\n- Meta-Calibration: Learning of Model Calibration Using Differentiable Expected Calibration Error [[TMLR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2106.09613>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fondrejbohdal\u002Fmeta-calibration>)\n- Evaluating and Calibrating Uncertainty Prediction in Regression Tasks [[Sensors2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1905.11659>)\n- Calibrated Prediction Intervals for Neural Network Regressors [[IEEE Access 2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1803.09546>) - [[Python]](\u003Chttps:\u002F\u002Fgithub.com\u002Fcruvadom\u002FPrediction_Intervals>)\n\n**Arxiv**\n\n- Towards Understanding Label Smoothing [[arXiv2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2006.11653>)\n- An Investigation of how Label Smoothing Affects Generalization [[arXiv2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2010.12648>)\n  \n## Misclassification Detection & Selective Classification\n\n**Conference**\n\n- Overcoming Common Flaws in the Evaluation of Selective Classification Systems [[NeurIPS2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2407.01032>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FIML-DKFZ\u002Ffd-shifts\u002Ftree\u002Fmain>)\n- A Data-Driven Measure of Relative Uncertainty for Misclassification Detection [[ICLR2024]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.01710)\n- Plugin estimators for selective classification with out-of-distribution detection [[ICLR2024]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.12386)\n- SURE: SUrvey REcipes for building reliable and robust deep networks [[CVPR2024]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.00543) - [[PyTorch]](https:\u002F\u002Fyutingli0606.github.io\u002FSURE\u002F)\n- RCL: Reliable Continual Learning for Unified Failure Detection 
[[CVPR2024]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fpapers\u002FZhu_RCL_Reliable_Continual_Learning_for_Unified_Failure_Detection_CVPR_2024_paper.pdf>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FImpression2805\u002FRCL>)\n-  A Call to Reflect on Evaluation Practices for Failure Detection in Image Classification [[ICLR2023]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fpdf?id=YnkGMIh0gvX>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FIML-DKFZ\u002Ffd-shifts\u002Ftree\u002Fmain>)\n- The Devil is in the Wrongly-classified Samples: Towards Unified Open-set Recognition [[ICLR2023]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fforum?id=xLr0I_xYGAs>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FJun-CEN\u002FUnified-Open-Set-Recognition>)\n- Augmenting Softmax Information for Selective Classification with Out-of-Distribution Data [[ACCV2022]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FACCV2022\u002Fhtml\u002FXia_Augmenting_Softmax_Information_for_Selective_Classification_with_Out-of-Distribution_Data_ACCV_2022_paper.html>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FGuoxoug\u002FSIRC>)\n- Anomaly Detection via Reverse Distillation from One-Class Embedding [[CVPR2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2201.10703>)\n- Rethinking Confidence Calibration for Failure Prediction [[ECCV2022]](\u003Chttps:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-031-19806-9_30>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FImpression2805\u002FFMFP>)\n- Selective Classification for Deep Neural Networks [[NeurIPS2017]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1705.08500>)\n\n**Journal**\n\n- A Unified Benchmark for the Unknown Detection Capability of Deep Neural Networks [[Expert Systems with Applications2023]](\u003Chttps:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fabs\u002Fpii\u002FS0957417423009636>)\n\n**ArXiv**\n\n- 
Similarity-Distance-Magnitude Universal Verification [[arXiv2025]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2502.20167>) - [[Python]](\u003Chttps:\u002F\u002Fgithub.com\u002FReexpressAI\u002Fsdm>)\n\n## Anomaly-detection and Out-of-Distribution-Detection\nAwesome Out-of-distribution Detection [[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002Fcontinuousml\u002FAwesome-Out-Of-Distribution-Detection>)\n\n**Conference**\n\n- Dual Energy-Based Model with Open-World Uncertainty Estimation for Out-of-distribution Detection [[CVPR2025]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fhtml\u002FChen_Dual_Energy-Based_Model_with_Open-World_Uncertainty_Estimation_for_Out-of-distribution_Detection_CVPR_2025_paper.html>)\n- Combining Statistical Depth and Fermat Distance for Uncertainty Quantification [[NeurIPS2024]](https:\u002F\u002Fopenreview.net\u002Fpdf?id=xeXRhTUmcf) - [[PyTorch]](https:\u002F\u002Fgithub.com\u002FHaiVyNGUYEN\u002Fld_official)\n- Learning Transferable Negative Prompts for Out-of-Distribution Detection [[CVPR2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2404.03248>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fmala-lab\u002Fnegprompt>)\n- Epistemic Uncertainty Quantification For Pre-trained Neural Networks [[CVPR2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2404.10124>)\n- NECO: NEural Collapse Based Out-of-distribution Detection [[ICLR2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2310.06823>)\n- When and How Does In-Distribution Label Help Out-of-Distribution Detection? 
[[ICML2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2405.18635>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fdeeplearning-wisc\u002Fid_label>)\n- Anomaly Detection under Distribution Shift [[ICCV2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2303.13845>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fmala-lab\u002FADShift>)\n- Normalizing Flows for Human Pose Anomaly Detection [[ICCV2023]](https:\u002F\u002Forhir.github.io\u002FSTG_NF\u002F) - [[PyTorch]](https:\u002F\u002Fgithub.com\u002Forhir\u002Fstg-nf)\n- RbA: Segmenting Unknown Regions Rejected by All [[ICCV2023]](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FNayal_RbA_Segmenting_Unknown_Regions_Rejected_by_All_ICCV_2023_paper.pdf) - [[PyTorch]](https:\u002F\u002Fgithub.com\u002FNazirNayal8\u002FRbA)\n- Uncertainty-Aware Optimal Transport for Semantically Coherent Out-of-Distribution Detection [[CVPR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2303.10449>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Flufan31\u002Fet-ood>)\n- Modeling the Distributional Uncertainty for Salient Object Detection Models [[CVPR2023]](https:\u002F\u002Fnpucvr.github.io\u002FDistributional_uncer\u002F) - [[PyTorch]](https:\u002F\u002Fgithub.com\u002Ftxynwpu\u002FDistributional_uncertainty_SOD)\n- SQUID: Deep Feature In-Painting for Unsupervised Anomaly Detection [[CVPR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2111.13495>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Ftiangexiang\u002FSQUID>)\n- How to Exploit Hyperspherical Embeddings for Out-of-Distribution Detection? 
[[ICLR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2203.04450.pdf>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fdeeplearning-wisc\u002Fcider>)\n- Modeling the Data-Generating Process is Necessary for Out-of-Distribution Generalization [[ICLR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2206.07837.pdf>)\n- Can CNNs Be More Robust Than Transformers? [[ICLR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2206.03452.pdf>)\n- A framework for benchmarking class-out-of-distribution detection and its application to ImageNet [[ICLR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2302.11893.pdf>)\n- Extremely Simple Activation Shaping for Out-of-Distribution Detection [[ICLR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2209.09858>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fandrijazz\u002Fash>)\n- Quantification of Uncertainty with Adversarial Models [[NeurIPS2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2307.03217>)\n- The Robust Semantic Segmentation UNCV2023 Challenge Results [[ICCV Workshop2023]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.15478)\n- Continual Evidential Deep Learning for Out-of-Distribution Detection [[ICCV Workshop2023]](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023W\u002FVCL\u002Fhtml\u002FAguilar_Continual_Evidential_Deep_Learning_for_Out-of-Distribution_Detection_ICCVW_2023_paper.html)\n- Far Away in the Deep Space: Nearest-Neighbor-Based Dense Out-of-Distribution Detection [[ICCV Workshop2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2211.06660>)\n- Gaussian Latent Representations for Uncertainty Estimation using Mahalanobis Distance in Deep Classifiers [[ICCV Workshop2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2305.13849>)\n- Calibrated Out-of-Distribution Detection with a Generic Representation [[ICCV Workshop2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2303.13148>) - 
[[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fvojirt\u002Fgrood>)\n- Detecting Misclassification Errors in Neural Networks with a Gaussian Process Model [[AAAI2022]](\u003Chttps:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F20773>)\n- Towards Total Recall in Industrial Anomaly Detection [[CVPR2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2106.08265>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fhcw-00\u002FPatchCore_anomaly_detection>)\n- POEM: Out-of-Distribution Detection with Posterior Sampling [[ICML2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2206.13687>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fdeeplearning-wisc\u002Fpoem>)\n- VOS: Learning What You Don't Know by Virtual Outlier Synthesis [[ICLR2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2202.01197>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fdeeplearning-wisc\u002Fvos>)\n- Fully Convolutional Cross-Scale-Flows for Image-based Defect Detection [[WACV2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2110.02855>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fmarco-rudolph\u002Fcs-flow>)\n- Out-of-Distribution Detection Using Union of 1-Dimensional Subspaces [[CVPR2021]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fhtml\u002FZaeemzadeh_Out-of-Distribution_Detection_Using_Union_of_1-Dimensional_Subspaces_CVPR_2021_paper.html>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fzaeemzadeh\u002FOOD>)\n- NAS-OoD: Neural Architecture Search for Out-of-Distribution Generalization [[ICCV2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2109.02038>)\n- On the Importance of Gradients for Detecting Distributional Shifts in the Wild [[NeurIPS2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2110.00218>)\n- Exploring the Limits of Out-of-Distribution Detection [[NeurIPS2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2106.03004>)\n- Detecting 
out-of-distribution image without learning from out-of-distribution data. [[CVPR2020]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2020\u002Fhtml\u002FHsu_Generalized_ODIN_Detecting_Out-of-Distribution_Image_Without_Learning_From_Out-of-Distribution_Data_CVPR_2020_paper.html>)\n- Learning Open Set Network with Discriminative Reciprocal Points [[ECCV2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2011.00178>)\n- Synthesize then Compare: Detecting Failures and Anomalies for Semantic Segmentation [[ECCV2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2003.08440>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FYingdaXia\u002FSynthCP>)\n- NADS: Neural Architecture Distribution Search for Uncertainty Awareness [[ICML2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2006.06646>)\n- PaDiM: a Patch Distribution Modeling Framework for Anomaly Detection and Localization [[ICPR2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2011.08785>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fopenvinotoolkit\u002Fanomalib>)\n- Energy-based Out-of-distribution Detection [[NeurIPS2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2010.03759?context=cs>)\n- Towards Maximizing the Representation Gap between In-Domain & Out-of-Distribution Examples [[NeurIPS Workshop2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2010.10474>)\n- Memorizing Normality to Detect Anomaly: Memory-Augmented Deep Autoencoder for Unsupervised Anomaly Detection [[ICCV2019]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1904.02639>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fdonggong1\u002Fmemae-anomaly-detection>)\n- Detecting the Unexpected via Image Resynthesis [[ICCV2019]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1904.07595>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fcvlab-epfl\u002Fdetecting-the-unexpected>)\n- Enhancing The Reliability of Out-of-distribution Image Detection in Neural Networks 
[[ICLR2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1706.02690>)\n- A Baseline for Detecting Misclassified and Out-of-Distribution Examples in Neural Networks [[ICLR2017]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1610.02136>) - [[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002Fhendrycks\u002Ferror-detection>)\n\n**Journal**\n\n- Foundation Models and Transformers for Anomaly Detection: A Survey [[Information Fusion2025]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2507.15905>)\n- Generalized out-of-distribution detection: A survey [[IJCV2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2110.11334>)\n- Revisiting Confidence Estimation: Towards Reliable Failure Prediction [[TPAMI2024]](https:\u002F\u002Fwww.computer.org\u002Fcsdl\u002Fjournal\u002Ftp\u002F5555\u002F01\u002F10356834\u002F1SQHDHvGg9i) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FImpression2805\u002FFMFP>)\n- One Versus all for deep Neural Network for uncertaInty (OVNNI) quantification [[IEEE Access2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2006.00954>)\n\n**Arxiv**\n\n- Neuron Activation Coverage: Rethinking Out-of-distribution Detection and Generalization [[arXiv2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2306.02879>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fbierone\u002Food_coverage>)\n- A Simple Fix to Mahalanobis Distance for Improving Near-OOD Detection [[arXiv2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2106.09022>)\n- Do We Really Need to Learn Representations from In-domain Data for Outlier Detection? 
[[arXiv2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2105.09270>)\n- Frequentist uncertainty estimates for deep learning [[arXiv2018]](\u003Chttp:\u002F\u002Fbayesiandeeplearning.org\u002F2018\u002Fpapers\u002F31.pdf>)\n\n## Uncertainty sources & Aleatoric and Epistemic Uncertainty Disentanglement\n\n**Conference**\n\n- Benchmarking Uncertainty Disentanglement: Specialized Uncertainties for Specialized Tasks [[NeurIPS2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2402.19460>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fbmucsanyi\u002Funtangle>)\n\n**ArXiv**\n\n- Sources of Uncertainty in Machine Learning - A Statisticians’ View [[ArXiv2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2305.16703>)\n- How disentangled are your classification uncertainties? [[ArXiv2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2408.12175>)\n\n## Uncertainty Quantification in Multimodal Models \u002F GenAI\n\n**Conference**\n\n- Towards Understanding and Quantifying Uncertainty for Text-to-Image Generation [[CVPR2025]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2412.03178>)\n- Hyperdimensional Uncertainty Quantification for Multimodal Uncertainty Fusion in Autonomous Vehicles Perception [[CVPR2025]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2503.20011>)\n\n**Journal**\n\n- Multimodal Out-of-Distribution Individual Uncertainty Quantification Enhances Binding Affinity Prediction for Polypharmacology [[Nature Machine Intelligence 2025]](\u003Chttps:\u002F\u002Fwww.nature.com\u002Farticles\u002Fs42256-025-01151-2>)\n\n## Applications\n\n### Classification and Semantic-Segmentation\n\n**Conference**\n\n- Modeling Multimodal Aleatoric Uncertainty in Segmentation with Mixture of Stochastic Experts [[ICLR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2212.07328>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fgaozhitong\u002Fmose-auseg>)\n- Anytime Dense Prediction with Confidence Adaptivity 
[[ICLR2022]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fforum?id=kNKFOXleuC>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fliuzhuang13\u002Fanytime>)\n- CRISP - Reliable Uncertainty Estimation for Medical Image Segmentation [[MICCAI2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2206.07664>)\n- TBraTS: Trusted Brain Tumor Segmentation [[MICCAI2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2206.09309>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fcocofeat\u002Ftbrats>)\n- Robust Semantic Segmentation with Superpixel-Mix [[BMVC2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2108.00968>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fgiannifranchi\u002Fdeeplabv3-superpixelmix>)\n- Deep Deterministic Uncertainty for Semantic Segmentation [[ICMLW2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2111.00079>)\n- DEAL: Difficulty-aware Active Learning for Semantic Segmentation [[ACCV2020]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FACCV2020\u002Fhtml\u002FXie_DEAL_Difficulty-aware_Active_Learning_for_Semantic_Segmentation_ACCV_2020_paper.html>)\n- Classification with Valid and Adaptive Coverage [[NeurIPS2020]](\u003Chttps:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2020\u002Fhash\u002F244edd7e85dc81602b7615cd705545f5-Abstract.html>)\n- Guided Curriculum Model Adaptation and Uncertainty-Aware Evaluation for Semantic Nighttime Image Segmentation [[ICCV2019]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fhtml\u002FSakaridis_Guided_Curriculum_Model_Adaptation_and_Uncertainty-Aware_Evaluation_for_Semantic_Nighttime_ICCV_2019_paper.html>)\n- Human Uncertainty Makes Classification More Robust [[ICCV2019]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fhtml\u002FPeterson_Human_Uncertainty_Makes_Classification_More_Robust_ICCV_2019_paper.html>)\n- Uncertainty-aware self-ensembling model for semi-supervised 3D left atrium segmentation 
[[MICCAI2019]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1806.05034>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fyulequan\u002FUA-MT>)\n- Lightweight Probabilistic Deep Networks [[CVPR2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1805.11327>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fezjong\u002Flightprobnets>)\n- A Probabilistic U-Net for Segmentation of Ambiguous Images [[NeurIPS2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1806.05034>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fstefanknegt\u002FProbabilistic-Unet-Pytorch>)\n- Evidential Deep Learning to Quantify Classification Uncertainty [[NeurIPS2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1806.01768>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fdougbrion\u002Fpytorch-classification-uncertainty>)\n- To Trust Or Not To Trust A Classifier [[NeurIPS2018]](\u003Chttps:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2018\u002Fhash\u002F7180cffd6a8e829dacfc2a31b3f72ece-Abstract.html>)\n- Classification uncertainty of deep neural networks based on gradient information [[IAPR Workshop2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1805.08440>)\n- Bayesian segnet: Model uncertainty in deep convolutional encoder-decoder architectures for scene understanding [[BMVC2017]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1511.02680>)\n\n**Journal**\n\n- Explainable machine learning in image classification models: An uncertainty quantification perspective. [[KnowledgeBased2022]](\u003Chttps:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS095070512200168X>)\n- Region-Based Evidential Deep Learning to Quantify Uncertainty and Improve Robustness of Brain Tumor Segmentation [[NCA2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2208.06038>)\n\n**Arxiv**\n\n- Leveraging Uncertainty Estimates to Improve Classifier Performance [[arXiv2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.11723.pdf>)\n- 
Evaluating Bayesian Deep Learning Methods for Semantic Segmentation [[arXiv2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1811.12709>)\n\n### Regression\n\n**Conference**\n\n- Learning the Distribution of Errors in Stereo Matching for Joint Disparity and Uncertainty Estimation [[CVPR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2304.00152>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Flly00412\u002Fsednet>)\n- Probabilistic MIMO U-Net: Efficient and Accurate Uncertainty Estimation for Pixel-wise Regression [[ICCV Workshop2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2308.07477>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fantonbaumann\u002Fmimo-unet>)\n- Training-Free Uncertainty Estimation for Dense Regression: Sensitivity as a Surrogate [[AAAI2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1910.04858v3>)\n- Learning Structured Gaussians to Approximate Deep Ensembles [[CVPR2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2203.15485>)\n- Uncertainty Quantification in Depth Estimation via Constrained Ordinal Regression [[ECCV2022]](\u003Chttps:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2022\u002Fpapers_ECCV\u002Fpapers\u002F136620229.pdf>)\n- On Monocular Depth Estimation and Uncertainty Quantification using Classification Approaches for Regression [[ICIP2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2202.12369>)\n- Anytime Dense Prediction with Confidence Adaptivity [[ICLR2022]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fforum?id=kNKFOXleuC>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fliuzhuang13\u002Fanytime>)\n- Variational Depth Networks: Uncertainty-Aware Monocular Self-supervised Depth Estimation [[ECCV Workshop2022]](\u003Chttps:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-031-25085-9_3>)\n- SLURP: Side Learning Uncertainty for Regression Problems [[BMVC2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2104.02395>) - 
[[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FxuanlongORZ\u002FSLURP_uncertainty_estimate>)\n- Robustness via Cross-Domain Ensembles [[ICCV2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2103.10919>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FEPFL-VILAB\u002FXDEnsembles>)\n- Learning to Predict Error for MRI Reconstruction [[MICCAI2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2002.05582>)\n- On the uncertainty of self-supervised monocular depth estimation [[CVPR2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2005.06209>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fmattpoggi\u002Fmono-uncertainty>)\n- Quantifying Point-Prediction Uncertainty in Neural Networks via Residual Estimation with an I\u002FO Kernel [[ICLR2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1906.00588>) - [[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002Fcognizant-ai-labs\u002Frio-paper>)\n- Fast Uncertainty Estimation for Deep Learning Based Optical Flow [[IROS2020]](\u003Chttps:\u002F\u002Fauthors.library.caltech.edu\u002F104758\u002F>)\n- Well-Calibrated Regression Uncertainty in Medical Imaging with Deep Learning [[MIDL2020]](\u003Chttp:\u002F\u002Fproceedings.mlr.press\u002Fv121\u002Flaves20a.html>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fmlaves\u002Fwell-calibrated-regression-uncertainty>)\n- Deep Evidential Regression [[NeurIPS2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1910.02600>) - [[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002Faamini\u002Fevidential-deep-learning>)\n- Inferring Distributions Over Depth from a Single Image [[IROS2019]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1912.06268>) - [[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002Fgengshan-y\u002Fmonodepth-uncertainty>)\n- Multi-Task Learning based on Separable Formulation of Depth Estimation and its Uncertainty [[CVPR 
Workshop2019]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPRW_2019\u002Fhtml\u002FUncertainty_and_Robustness_in_Deep_Visual_Learning\u002FAsai_Multi-Task_Learning_based_on_Separable_Formulation_of_Depth_Estimation_and_CVPRW_2019_paper.html>)\n- Lightweight Probabilistic Deep Networks [[CVPR2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1805.11327>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fezjong\u002Flightprobnets>)\n- Structured Uncertainty Prediction Networks [[CVPR2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1802.07079>) - [[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002FEra-Dorta\u002Ftf_mvg>)\n- Uncertainty estimates and multi-hypotheses networks for optical flow [[ECCV2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1802.07095>) - [[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002Flmb-freiburg\u002Fnetdef_models>)\n- Accurate Uncertainties for Deep Learning Using Calibrated Regression [[ICML2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1807.00263>)\n\n**Journal**\n\n- How Reliable is Your Regression Model's Uncertainty Under Real-World Distribution Shifts? 
[[TMLR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2302.03679>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Ffregu856\u002Fregression_uncertainty>)\n- Evaluating and Calibrating Uncertainty Prediction in Regression Tasks [[Sensors2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1905.11659>)\n- Exploring uncertainty in regression neural networks for construction of prediction intervals [[Neurocomputing2022]](\u003Chttps:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fabs\u002Fpii\u002FS0925231222001102>)\n- Wasserstein Dropout [[Machine Learning 2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2012.12687>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Ffraunhofer-iais\u002Fsecond-moment-loss>)\n- Deep Distribution Regression [[Computational Statistics & Data Analysis2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1903.06023>)\n- Calibrated Prediction Intervals for Neural Network Regressors [[IEEE Access 2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1803.09546>) - [[Python]](\u003Chttps:\u002F\u002Fgithub.com\u002Fcruvadom\u002FPrediction_Intervals>)\n- Learning a Confidence Measure for Optical Flow [[TPAMI2013]](\u003Chttps:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?arnumber=6261321&casa_token=fYVGhK2pa40AAAAA:XWJdS8zJ4JRw1brCIGiYpzEqMidXTTYVkcKTYnnhSl4ys5pUoHzHO6xsVeGZII9Ir1LAI_3YyfI&tag=1>)\n\n**Arxiv**\n\n- Understanding pathologies of deep heteroskedastic regression [[arxiv2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2306.16717>)\n- Measuring and Modeling Uncertainty Degree for Monocular Depth Estimation [[arXiv2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2307.09929>)\n- UncertaINR: Uncertainty Quantification of End-to-End Implicit Neural Representations for Computed Tomographaphy [[arXiv2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2202.10847>)\n- Efficient Gaussian Neural Processes for Regression 
[[arXiv2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2108.09676>)\n\n### Object detection\n\n**Conference**\n\n- Bridging Precision and Confidence: A Train-Time Loss for Calibrating Object Detection [[CVPR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2303.14404.pdf>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fakhtarvision\u002Fbpc_calibration?tab=readme-ov-file>)\n- Parametric and Multivariate Uncertainty Calibration for Regression and Object Detection [[ECCV Workshop2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2207.01242>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FEFS-OpenSource\u002Fcalibration-framework>)\n- Estimating and Evaluating Regression Predictive Uncertainty in Deep Object Detectors [[ICLR2021]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fforum?id=YLewtnvKgR7>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fasharakeh\u002Fprobdet?tab=readme-ov-file>)\n- Multivariate Confidence Calibration for Object Detection [[CVPR Workshop2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2004.13546>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FEFS-OpenSource\u002Fcalibration-framework>)\n- Gaussian YOLOv3: An Accurate and Fast Object Detector Using Localization Uncertainty for Autonomous Driving [[ICCV2019]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fpapers\u002FChoi_Gaussian_YOLOv3_An_Accurate_and_Fast_Object_Detector_Using_Localization_ICCV_2019_paper.pdf>) - [[CUDA]](\u003Chttps:\u002F\u002Fgithub.com\u002Fjwchoi384\u002FGaussian_YOLOv3>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fmotokimura\u002FPyTorch_Gaussian_YOLOv3>) - [[Keras]](\u003Chttps:\u002F\u002Fgithub.com\u002Fxuannianz\u002Fkeras-GaussianYOLOv3>)\n\n### Domain adaptation\n\n**Conference**\n\n- Guiding Pseudo-labels with Uncertainty Estimation for Source-free Unsupervised Domain Adaptation [[CVPR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2303.03770>) - 
[[PyTorch]](https:\u002F\u002Fgithub.com\u002Fmattialitrico\u002Fguiding-pseudo-labels-with-uncertainty-estimation-for-source-free-unsupervised-domain-adaptation)\n- Uncertainty-guided Source-free Domain Adaptation [[ECCV2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2208.07591.pdf>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Froysubhankar\u002Funcertainty-sfda>)\n\n### Semi-supervised and Active Learning\n\nAwesome Semi-Supervised Learning [[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002Fyassouali\u002Fawesome-semi-supervised-learning>)\nAwesome Active Learning [[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002Fbaifanxxx\u002Fawesome-active-learning>)\n\n**Conference**\n\n- Rethinking Epistemic and Aleatoric Uncertainty for Active Open-Set Annotation: An Energy-Based Approach [[CVPR2025]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2502.19691>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fchenchenzong\u002FEAOA>)\n- Uncertainty Meets Diversity: A Comprehensive Active Learning Framework for Indoor 3D Object Detection [[CVPR2025]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2503.16125>)\n- Joint Out-of-Distribution Filtering and Data Discovery Active Learning [[CVPR2025]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2503.02491>)\n- Towards Cost-Effective Learning: A Synergy of Semi-Supervised and Active Learning [[CVPR2025]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fpapers\u002FYin_Towards_Cost-Effective_Learning_A_Synergy_of_Semi-Supervised_and_Active_Learning_CVPR_2025_paper.pdf>)\n- Confidence Estimation Using Unlabeled Data [[ICLR2023]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fpdf?id=sOXU-PEJSgQ>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FTopoXLab\u002Fconsistency-ranking-loss>)\n\n### Natural Language Processing\n\nAwesome LLM Uncertainty, Reliability, & Robustness 
[[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002Fjxzhangjhu\u002FAwesome-LLM-Uncertainty-Reliability-Robustness>)\n\n\n**Conference**\n\n- R-U-SURE? Uncertainty-Aware Code Suggestions By Maximizing Utility Across Random User Intents [[ICML2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2303.00732.pdf>) - [[GitHub]](https:\u002F\u002Fgithub.com\u002Fgoogle-research\u002Fr_u_sure)\n- Strength in Numbers: Estimating Confidence of Large Language Models by Prompt Agreement [[TrustNLP2023]](\u003Chttps:\u002F\u002Faclanthology.org\u002F2023.trustnlp-1.28\u002F>) - [[GitHub]](https:\u002F\u002Fgithub.com\u002FJHU-CLSP\u002FConfidence-Estimation-TrustNLP2023)\n- Disentangling Uncertainty in Machine Translation Evaluation [[EMNLP2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2204.06546>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fdeep-spin\u002Funcertainties_mt_eval>)\n- Investigating Ensemble Methods for Model Robustness Improvement of Text Classifiers [[EMNLP2022 Findings]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2210.16298>)\n- DATE: Detecting Anomalies in Text via Self-Supervision of Transformers [[NAACL2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2104.05591>)\n- Calibrating Structured Output Predictors for Natural Language Processing [[ACL2020]](\u003Chttps:\u002F\u002Faclanthology.org\u002F2020.acl-main.188\u002F>)\n- Calibrated Language Model Fine-Tuning for In- and Out-of-Distribution Data [[EMNLP2020]](\u003Chttps:\u002F\u002Faclanthology.org\u002F2020.emnlp-main.102\u002F>) - [[PyTorch]](https:\u002F\u002Fgithub.com\u002FLingkai-Kong\u002FCalibrated-BERT-Fine-Tuning)\n\n**Journal**\n- How Can We Know When Language Models Know? 
On the Calibration of Language Models for Question Answering [[TACL2021]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2012.00955) - [[PyTorch]](https:\u002F\u002Fgithub.com\u002Fjzbjyb\u002Flm-calibration)\n\n**Arxiv**\n\n- DRIFT: Detecting Representational Inconsistencies for Factual Truthfulness [[arXiv2026]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2601.14210>)\n- Gaussian Stochastic Weight Averaging for Bayesian Low-Rank Adaptation of Large Language Models [[arXiv2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.03425>)\n- To Believe or Not to Believe Your LLM [[arXiv2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2406.02543>)\n- Decomposing Uncertainty for Large Language Models through Input Clarification Ensembling [[arXiv2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2311.08718>)\n\n### Others\n\n**Conference**\n\n- Epistemic Uncertainty for Generated Image Detection [[NeurIPS2025]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2412.05897>)\n- PaSCo: Urban 3D Panoptic Scene Completion with Uncertainty Awareness [[CVPR2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.02158.pdf>) - [[Website]](\u003Chttps:\u002F\u002Fastra-vision.github.io\u002FPaSCo\u002F>)\n- Uncertainty Quantification via Stable Distribution Propagation [[ICLR2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2402.08324>)\n- Assessing Uncertainty in Similarity Scoring: Performance & Fairness in Face Recognition [[ICLR2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2211.07245>)\n\n**Arxiv**\n\n- Shaving Weights with Occam's Razor: Bayesian Sparsification for Neural Networks Using the Marginal Likelihood - [[arxiv2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.15978>)\n- Urban 3D Panoptic Scene Completion with Uncertainty Awareness [[arXiv2023]](\u003Chttps:\u002F\u002Fastra-vision.github.io\u002FPaSCo\u002F>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fastra-vision\u002FPaSCo>)\n\n# Datasets and Benchmarks\n\n- SHIFT: 
A Synthetic Driving Dataset for Continuous Multi-Task Domain Adaptation [[CVPR2022]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fhtml\u002FSun_SHIFT_A_Synthetic_Driving_Dataset_for_Continuous_Multi-Task_Domain_Adaptation_CVPR_2022_paper.html>)\n- MUAD: Multiple Uncertainties for Autonomous Driving, a benchmark for multiple uncertainty types and tasks [[BMVC2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2203.01437>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FENSTA-U2IS-AI\u002FMUAD-Dataset>)\n- ACDC: The Adverse Conditions Dataset with Correspondences for Semantic Driving Scene Understanding [[ICCV2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2104.13395>)\n- The MVTec Anomaly Detection Dataset: A Comprehensive Real-World Dataset for Unsupervised Anomaly Detection [[IJCV2021]](\u003Chttps:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002Fs11263-020-01400-4.pdf>)\n- SegmentMeIfYouCan: A Benchmark for Anomaly Segmentation [[NeurIPS2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2104.14812>)\n- Uncertainty Baselines: Benchmarks for Uncertainty & Robustness in Deep Learning [[arXiv2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2106.04015>) - [[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002Fgoogle\u002Funcertainty-baselines>)\n- Curriculum Model Adaptation with Synthetic and Real Data for Semantic Foggy Scene Understanding [[IJCV2020]](\u003Chttps:\u002F\u002Fpeople.ee.ethz.ch\u002F~csakarid\u002FModel_adaptation_SFSU_dense\u002F>)\n- Benchmarking the Robustness of Semantic Segmentation Models [[CVPR2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1908.05005>)\n- Fishyscapes: A Benchmark for Safe Semantic Segmentation in Autonomous Driving [[ICCV Workshop2019]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCVW_2019\u002Fhtml\u002FADW\u002FBlum_Fishyscapes_A_Benchmark_for_Safe_Semantic_Segmentation_in_Autonomous_Driving_ICCVW_2019_paper.html>)\n- 
Benchmarking Robustness in Object Detection: Autonomous Driving when Winter is Coming [[NeurIPS Workshop2019]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1907.07484>) - [[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002Fbethgelab\u002Frobust-detection-benchmark>)\n- Semantic Foggy Scene Understanding with Synthetic Data [[IJCV2018]](\u003Chttps:\u002F\u002Fpeople.ee.ethz.ch\u002F~csakarid\u002FSFSU_synthetic\u002F>)\n- Lost and Found: Detecting Small Road Hazards for Self-Driving Vehicles [[IROS2016]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1609.04653>)\n\n# Libraries\n\n## Python\n\n- Uncertainty Calibration Library [[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002Fp-lambda\u002Fverified_calibration>)\n- MAPIE: Model Agnostic Prediction Interval Estimator [[Sklearn]](https:\u002F\u002Fgithub.com\u002Fscikit-learn-contrib\u002FMAPIE)\n- Uncertainty Toolbox [[GitHub]](\u003Chttps:\u002F\u002Funcertainty-toolbox.github.io\u002F>)\n- OpenOOD: Benchmarking Generalized OOD Detection [[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002Fjingkang50\u002Fopenood>)\n- Darts: Forecasting and anomaly detection on time series [[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002Funit8co\u002Fdarts>)\n- Mixture Density Networks (MDN) for distribution and uncertainty estimation [[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002Faxelbrando\u002FMixture-Density-Networks-for-distribution-and-uncertainty-estimation>)\n- UQLM: Uncertainty Quantification for Language Models [[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002Fcvs-health\u002Fuqlm>)\n\n## PyTorch\n\n- TorchUncertainty [[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002FENSTA-U2IS-AI\u002Ftorch-uncertainty>)\n- Bayesian Torch [[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002FIntelLabs\u002Fbayesian-torch>)\n- Blitz: A Bayesian Neural Network library for PyTorch [[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002FpiEsposito\u002Fblitz-bayesian-deep-learning>)\n\n## JAX\n\n- Fortuna [[GitHub - 
JAX]](\u003Chttps:\u002F\u002Fgithub.com\u002Fawslabs\u002Ffortuna>)\n\n## TensorFlow\n\n- TensorFlow Probability [[Website]](\u003Chttps:\u002F\u002Fwww.tensorflow.org\u002Fprobability>)\n\n# Lectures and tutorials\n\n- Dan Hendrycks: Intro to ML Safety course [[Website]](\u003Chttps:\u002F\u002Fcourse.mlsafety.org\u002F>)\n- Uncertainty and Robustness in Deep Learning Workshop in ICML (2020, 2021) [[SlidesLive]](\u003Chttps:\u002F\u002Fslideslive.com\u002Ficml-2020\u002Ficml-workshop-on-uncertainty-and-robustness-in-deep-learning-udl>)\n- Yarin Gal: Bayesian Deep Learning 101 [[Website]](\u003Chttp:\u002F\u002Fwww.cs.ox.ac.uk\u002Fpeople\u002Fyarin.gal\u002Fwebsite\u002Fbdl101\u002F>)\n- MIT 6.S191: Evidential Deep Learning and Uncertainty (2021) [[Youtube]](\u003Chttps:\u002F\u002Fwww.youtube.com\u002Fwatch?v=toTcf7tZK8c>)\n- Hands-on Bayesian Neural Networks - a Tutorial for Deep Learning Users [[IEEE Computational Intelligence Magazine]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2007.06823.pdf)\n\n# Books\n\n- The \"Probabilistic Machine-Learning\" book series by Kevin Murphy [[Book]](\u003Chttps:\u002F\u002Fprobml.github.io\u002Fpml-book\u002F>)\n\n# Other Resources\n\nUncertainty Quantification in Deep Learning [[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002Fahmedmalaa\u002Fdeep-learning-uncertainty>)\n\nAwesome Out-of-distribution Detection [[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002Fcontinuousml\u002FAwesome-Out-Of-Distribution-Detection>)\n\nAnomaly Detection Learning Resources [[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002Fyzhao062\u002Fanomaly-detection-resources>)\n\nAwesome Conformal Prediction [[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002Fvaleman\u002Fawesome-conformal-prediction>)\n\nAwesome LLM Uncertainty, Reliability, & Robustness [[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002Fjxzhangjhu\u002FAwesome-LLM-Uncertainty-Reliability-Robustness>)\n\nUQSay - Seminars on Uncertainty Quantification (UQ), Design and Analysis of 
Computer Experiments (DACE) and related topics @ Paris Saclay [[Website]](\u003Chttps:\u002F\u002Fwww.uqsay.org\u002Fp\u002Fwelcome.html\u002F>)\n\nProbAI summer school [[Website]](\u003Chttps:\u002F\u002Fprobabilistic.ai\u002F>)\n\nGaussian process summer school [[Website]](\u003Chttps:\u002F\u002Fgpss.cc\u002F>)\n","# 深度学习中的不确定性精选\n\n\u003Cdiv align=\"center\">\n\n[![MIT License](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Flicense-MIT-green.svg)](https:\u002F\u002Fopensource.org\u002Flicenses\u002FMIT)\n[![Awesome](https:\u002F\u002Fawesome.re\u002Fbadge.svg)](https:\u002F\u002Fawesome.re)\n\n\u003C\u002Fdiv>\n\n本仓库汇集了关于不确定性与深度学习的优秀论文、代码、书籍和博客。\n\n:star: 欢迎点赞和 fork。 :star:\n\n如果您认为我们遗漏了某篇论文，请提交 pull request 或在对应的 [GitHub 讨论区](https:\u002F\u002Fgithub.com\u002FENSTA-U2IS-AI\u002Fawesome-uncertainty-deeplearning\u002Fdiscussions) 中留言。请告知文章的发表地点和时间，并提供 GitHub 和 ArXiv 链接（如有）。\n\n我们也欢迎任何改进建议！\n\n\u003Ch2>\n目录\n\u003C\u002Fh2>\n\n- [深度学习中的不确定性精选](#深度学习中的不确定性精选)\n- [论文](#论文)\n  - [综述](#综述)\n  - [理论](#理论)\n  - [贝叶斯方法](#贝叶斯方法)\n  - [集成方法](#集成方法)\n  - [基于采样\u002F丢弃的方法](#基于采样丢弃的方法)\n  - [事后方法\u002F辅助网络](#事后方法辅助网络)\n  - [数据增强\u002F生成方法](#数据增强生成方法)\n  - [输出空间建模\u002F证据深度学习](#输出空间建模证据深度学习)\n  - [确定性不确定性方法](#确定性不确定性方法)\n  - [分位数回归\u002F预测区间](#分位数回归预测区间)\n  - [一致性预测](#一致性预测)\n  - [校准\u002F评估指标](#校准评估指标)\n  - [误分类检测与选择性分类](#误分类检测与选择性分类)\n  - [异常检测与分布外检测](#异常检测与分布外检测)\n  - [不确定性来源及随机性和认知性不确定性的解耦](#不确定性来源及随机性和认知性不确定性的解耦)\n  - [多模态模型与生成式 AI 中的不确定性量化](#多模态模型与生成式 AI 中的不确定性量化)\n  - [应用](#应用)\n    - [分类与语义分割](#分类与语义分割)\n    - [回归](#回归)\n    - [目标检测](#目标检测)\n    - [领域适应](#领域适应)\n    - [半监督学习与主动学习](#半监督学习与主动学习)\n    - [自然语言处理](#自然语言处理)\n    - [其他](#其他)\n- [数据集与基准测试](#数据集与基准测试)\n- [库](#库)\n  - [Python](#Python)\n  - [PyTorch](#PyTorch)\n  - [JAX](#JAX)\n  - [TensorFlow](#TensorFlow)\n- [讲座与教程](#讲座与教程)\n- [书籍](#书籍)\n- [其他资源](#其他资源)\n\n# 论文\n\n## 综述\n\n**会议**\n\n- 不确定性解耦基准测试：针对特定任务的专业化不确定性 [[NeurIPS2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2402.19460>) - 
[[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fbmucsanyi\u002Funtangle>)\n- 自动驾驶应用中深度学习组件中不确定性估计方法的比较 [[AISafety Workshop 2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2006.15172>)\n\n**期刊**\n\n- 深度神经网络中的不确定性综述 [[Artificial Intelligence Review 2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2107.03342>) - [[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002FJakobCode\u002FUncertaintyInNeuralNetworks_Resources>) \n- 先验网络与后验网络：证据深度学习方法在不确定性估计中的综述 [[TMLR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2110.03051>)\n- 从贝叶斯视角看深度学习分类系统中的不确定性估计综述 [[ACM2021]](\u003Chttps:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3477140?casa_token=6fozCYTovlIAAAAA:t5vcjuXCMem1b8iFwaMG4o_YJHTe0wArLtoy9KCbL8Cow0aGEoxSiJans2Kzpm2FSKOg-4ZCDkBa>)\n- 集成深度学习：综述 [[Engineering Applications of AI 2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2104.02395>)\n- 深度学习中不确定性量化综述：技术、应用与挑战 [[Information Fusion 2021]](\u003Chttps:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS1566253521001081>)\n- 机器学习中的随机性和认知性不确定性：概念与方法导论 [[Machine Learning 2021]](\u003Chttps:\u002F\u002Flink.springer.com\u002Farticle\u002F10.1007\u002Fs10994-021-05946-3>)\n- 使用 jackknife+ 进行预测推断 [[The Annals of Statistics 2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1905.02928>)\n- 大数据分析中的不确定性：综述、机遇与挑战 [[Journal of Big Data 2019]](\u003Chttps:\u002F\u002Fjournalofbigdata.springeropen.com\u002Farticles\u002F10.1186\u002Fs40537-019-0206-3?cv=1>)\n\n**Arxiv**\n\n- 机器人技术中分布外数据的系统级视角 [[arXiv2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2212.14020>)\n- 决策制定中不确定性推理与量化综述：信念理论与深度学习的结合 [[arXiv2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2206.05675>)\n\n## 理论\n\n**会议**\n\n- 在贝叶斯优化中探索与利用模型不确定性 [[NeurIPS2025]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fforum?id=p58mKXaeWC>)\n- 深度集成与（变分）贝叶斯方法之间的严格联系 [[NeurIPS2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2305.15027>)\n- 朝着理解深度学习中的集成、知识蒸馏和自蒸馏迈进 
[[ICLR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2012.09816.pdf>)\n- 揭秘彩票假设：获胜彩票的掩码中编码了什么？ [[ICLR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2210.03044.pdf>)\n- 概率对比学习能够恢复模糊输入的正确随机性不确定性 [[ICML2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2302.02865.pdf>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fmkirchhof\u002FProbabilistic_Contrastive_Learning>)\n- 关于用于认识论不确定性量化的二阶评分规则 [[ICML2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2301.12736.pdf>)\n- 神经变分梯度下降 [[AABI2022]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fforum?id=oG0vTBw58ic>)\n- 顶标签及多分类到二分类的约简 [[ICLR2022]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fforum?id=WqoBaaPHS->)\n- 贝叶斯模型选择、边际似然与泛化能力 [[ICML2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2202.11678>)\n- 对任何人都不怀恶意：通过均衡覆盖评估不确定性 [[AIES 2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1908.05428>)\n- 通过集成实现梯度提升树的不确定性估计 [[ICLR2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2006.10562>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fyandex-research\u002FGBDT-uncertainty>)\n- 排斥型深度集成是贝叶斯式的 [[NeurIPS2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2106.11642>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fratschlab\u002Frepulsive_ensembles>)\n- 高维输出的贝叶斯优化 [[NeurIPS2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2106.12997>)\n- 用于软等变约束的残差路径先验 [[NeurIPS2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2112.01388>)\n- 特征分布漂移下贝叶斯模型平均的风险 [[NeurIPS2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2106.11905>) - [[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002Fizmailovpavel\u002Fbnn_covariate_shift>)\n- 回归任务主动学习中学习损失的数学分析 [[CVPR Workshop2021]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021W\u002FTCV\u002Fhtml\u002FShukla_A_Mathematical_Analysis_of_Learning_Loss_for_Active_Learning_in_CVPRW_2021_paper.html>)\n- 为什么自助抽样的深度集成并不更好？ [[NeurIPS Workshop]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fforum?id=dTCir0ceyv0>)\n- 
深度卷积网络作为浅层高斯过程 [[ICLR2019]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1808.05587>)\n- 影响函数在衡量群体效应方面的准确性 [[NeurIPS2018]](\u003Chttps:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2019\u002Fhash\u002Fa78482ce76496fcf49085f2190e675b4-Abstract.html>)\n- 是否信任分类器 [[NeurIPS2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1805.11783>) - [[Python]](\u003Chttps:\u002F\u002Fgithub.com\u002Fgoogle\u002FTrustScore>)\n- 理解用于对抗样本检测的不确定性度量 [[UAI2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1803.08533>)\n\n**期刊**\n\n- 鞅后验分布 [[Royal Statistical Society Series B]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2103.15671>)\n- 集成学习中多样性的统一理论 [[JMLR2023]](\u003Chttps:\u002F\u002Fjmlr.org\u002Fpapers\u002Fvolume24\u002F23-0041\u002F23-0041.pdf>)\n- 深度学习中的多元不确定性 [[TNNLS2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1910.14215>)\n- 深度学习中不确定性估计的一般框架 [[RAL2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1907.06890>)\n- 自适应非参数置信集 [[Ann. Statist. 2006]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002Fmath\u002F0605473>)\n\n**Arxiv**\n\n- 用于不确定性估计的集成：先验函数与自助抽样的优势 [[arXiv2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2206.03633.pdf>)\n- 用于回归任务的有效高斯神经过程 [[arXiv2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2108.09676>)\n- 密集型不确定性估计 [[arXiv2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2110.06427>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FJingZhang617\u002FUncertaintyEstimation>)\n- 一种高阶瑞士军刀式无穷小刀切法 [[arXiv2019]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1907.12116>)\n\n## 贝叶斯方法\n\n**会议**\n\n- 在分布偏移存在下的不确定性量化 [[NeurIPS2025]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fforum?id=04p7u1gIsv&referrer=%5Bthe%20profile%20of%20Yuli%20Slavutsky%5D(%2Fprofile%3Fid%3D~Yuli_Slavutsky1)>)\n- 使用稀疏子空间变分推断训练贝叶斯神经网络 [[ICLR2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2402.11025>)\n- 变分贝叶斯最后一层 [[ICLR2024]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.11599)\n- 一种对称性感知的贝叶斯神经网络后验探索 
[[ICLR2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2310.08287>)\n- 不止于单模态：面向多模态不确定性估计的神经过程泛化 [[NeurIPS2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2304.01518>)\n- 基于不确定性的无监督视频哈希 [[AISTATS2023]](\u003Chttps:\u002F\u002Fproceedings.mlr.press\u002Fv206\u002Fwang23i.html>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fwangyucheng1234\u002FBerVAE>)\n- 基于梯度的不确定性归因用于可解释的贝叶斯深度学习 [[CVPR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2304.04824>)\n- 预训练贝叶斯神经网络中的抗噪声鲁棒性 [[ICLR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2206.12361.pdf>)\n- 不止于深度集成：分布偏移下贝叶斯深度学习的大规模评估 [[NeurIPS2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2306.12306>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FFeuermagier\u002FBeyond_Deep_Ensembles>)\n- Transformer 可以进行贝叶斯推断 [[ICLR2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2112.10510>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fautoml\u002FPFNs?tab=readme-ov-file>)\n- 多视图数据的不确定性估计：看清全局的力量 [[NeurIPS2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2210.02676>)\n- 关于近似贝叶斯推断中的批归一化 [[AABI2021]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fpdf?id=SH2tfpm_0LE>)\n- 深度神经网络中的激活级不确定性 [[ICLR2021]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fforum?id=UvBPbpvHRj->)\n- 拉普拉斯重装上阵——轻松实现贝叶斯深度学习 [[NeurIPS2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2106.14806>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FAlexImmer\u002FLaplace>)\n- 量化对贝叶斯神经网络模型不确定性的影响 [[UAI2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2102.11062>)\n- 拉普拉斯近似下的可学习不确定性 [[UAI2021]](\u003Chttps:\u002F\u002Fproceedings.mlr.press\u002Fv161\u002Fkristiadi21a.html>)\n- 具有软证据的贝叶斯神经网络 [[ICML Workshop2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2010.09570>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fedwardyu\u002Fsoft-evidence-bnn>)\n- TRADI：追踪深度神经网络权重分布以进行不确定性估计 [[ECCV2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1912.11316>) - 
[[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fgiannifranchi\u002FTRADI_Tracking_DNN_weights>)\n- 深度神经网络中的贝叶斯后验究竟有多好？[[ICML2020]](\u003Chttp:\u002F\u002Fproceedings.mlr.press\u002Fv119\u002Fwenzel20a.html>)\n- 基于秩-1因子的高效可扩展贝叶斯神经网络 [[ICML2020]](\u003Chttp:\u002F\u002Fproceedings.mlr.press\u002Fv119\u002Fdusenberry20a\u002Fdusenberry20a.pdf>) - [[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002Fgoogle\u002Fedward2>)\n- 即使只是稍微采用贝叶斯方法，也能解决 ReLU 网络中的过度自信问题 [[ICML2020]](\u003Chttp:\u002F\u002Fproceedings.mlr.press\u002Fv119\u002Fkristiadi20a\u002Fkristiadi20a.pdf>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FAlexImmer\u002FLaplace>)\n- 贝叶斯深度学习与泛化的概率视角 [[NeurIPS2020]](\u003Chttps:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2020\u002Ffile\u002F322f62469c5e3c7dc3e58f5a4d1ea399-Paper.pdf>)\n- 深度学习中贝叶斯不确定性的简单基线 [[NeurIPS2019]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1902.02476>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fwjmaddox\u002Fswa_gaussian>) - [[TorchUncertainty]](\u003Chttps:\u002F\u002Fgithub.com\u002FENSTA-U2IS-AI\u002Ftorch-uncertainty>)\n- 批归一化深度网络的贝叶斯不确定性估计 [[ICML2018]](\u003Chttp:\u002F\u002Fproceedings.mlr.press\u002Fv80\u002Fteye18a.html>) - [[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002Ficml-mcbn\u002Fmcbn>) - [[TorchUncertainty]](\u003Chttps:\u002F\u002Fgithub.com\u002FENSTA-U2IS-AI\u002Ftorch-uncertainty>)\n- 轻量级概率深度网络 [[CVPR2018]](\u003Chttps:\u002F\u002Fgithub.com\u002Fezjong\u002Flightprobnets>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fezjong\u002Flightprobnets>)\n- 针对神经网络的可扩展拉普拉斯近似 [[ICLR2018]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fpdf?id=Skdvd2xAZ>) - [[Theano]](\u003Chttps:\u002F\u002Fgithub.com\u002FBB-UCL\u002FLasagne>)\n- 贝叶斯深度学习中不确定性的分解以实现高效且风险敏感的学习 [[ICML2018]](\u003Chttp:\u002F\u002Fproceedings.mlr.press\u002Fv80\u002Fdepeweg18a.html>)\n- 神经网络中的权重不确定性 [[ICML2015]](\u003Chttps:\u002F\u002Fproceedings.mlr.press\u002Fv37\u002Fblundell15.html>)\n\n**期刊**\n\n- 基于采样假设检验的不确定性量化哈希 
[[TMLR2024]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fforum?id=cc4v6v310f>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FQianLab\u002FHashUQ>)\n- 贝叶斯神经网络中解析可处理的隐状态推断 [[JMLR2024]](\u003Chttps:\u002F\u002Fjmlr.org\u002Fpapers\u002Fv23\u002F21-0758.html>)\n- 为不确定性量化编码贝叶斯神经网络的潜在后验 [[TPAMI2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2012.02818>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fgiannifranchi\u002FLP_BNN>)\n- 低层视觉中不确定性的贝叶斯建模 [[IJCV1990]](\u003Chttps:\u002F\u002Flink.springer.com\u002Farticle\u002F10.1007%2FBF00126502>)\n\n**Arxiv**\n\n- 用于可靠不确定性估计的密度不确定性层 [[arXiv2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2306.12497>)\n\n## 集成方法\n\n**会议**\n\n- 神经网络集成的输入梯度空间粒子推断 [[ICLR2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2306.02775>)\n- 基于扩散薛定谔桥的快速集成 [[ICLR2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2404.15814>)\n- 深度集成中预测多样性的病理问题 [[ICLR2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2302.00704>)\n- 模型拉塔图伊：复用多样化模型以实现分布外泛化 [[ICML2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2212.10445.pdf>)\n- 基于随机集成的贝叶斯后验近似 [[CVPR2023]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fpapers\u002FBalabanov_Bayesian_Posterior_Approximation_With_Stochastic_Ensembles_CVPR_2023_paper.pdf>)\n- 用于丰富偶然性和认知不确定性建模的归一化流集成 [[AAAI2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2302.01312>)\n- 基于窗口的早期退出级联用于不确定性估计：当深度集成比单个模型更高效时 [[ICCV2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2303.08010>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fguoxoug\u002Fwindow-early-exit>)\n- 加权集成自监督学习 [[ICLR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2211.09981.pdf>)\n- 同意不同意：通过分歧实现多样性以提升迁移能力 [[ICLR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2202.04414.pdf>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fmpagli\u002FAgree-to-Disagree>)\n- 用于高效不确定性估计的打包集成 [[ICLR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2210.09184>) - 
[[TorchUncertainty]](\u003Chttps:\u002F\u002Fgithub.com\u002FENSTA-U2IS-AI\u002Ftorch-uncertainty>)\n- 用于神经网络中快速不确定性估计的子集成 [[ICCV Workshop2023]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023W\u002FLXCV\u002Fpapers\u002FValdenegro-Toro_Sub-Ensembles_for_Fast_Uncertainty_Estimation_in_Neural_Networks_ICCVW_2023_paper.pdf>)\n- 剪枝与调优集成：利用稀疏独立子网络进行低成本集成学习 [[AAAI2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2202.11782>)\n- 深度集成有效，但真的必要吗？[[NeurIPS2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2202.06985>)\n- FiLM集成：通过逐特征线性调制实现概率深度学习 [[NeurIPS2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2206.00050>)\n- 深度集成无需额外训练或测试开销：动态稀疏性的全方位优势 [[ICLR2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2106.14568>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FVITA-Group\u002FFreeTickets>)\n- 关于深度集成多样性在分布外检测中的有用性 [[ECCV Workshop2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2207.07517>)\n- 用于不确定性估计的Masksembles [[CVPR2021]](\u003Chttps:\u002F\u002Fnikitadurasov.github.io\u002Fprojects\u002Fmasksembles\u002F>) - [[PyTorch\u002FTensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002Fnikitadurasov\u002Fmasksembles>)\n- 通过跨域集成提升鲁棒性 [[ICCV2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2103.10919>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FEPFL-VILAB\u002FXDEnsembles>)\n- 通过集成实现梯度提升树的不确定性 [[ICLR2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2006.10562>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fyandex-research\u002FGBDT-uncertainty>)\n- 不确定性量化与深度集成 [[NeurIPS2021]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fforum?id=wg_kD_nyAF>)\n- 最大化整体多样性以改进深度集成中的不确定性估计 [[AAAI2020]](\u003Chttps:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F5849>)\n- 神经网络中的不确定性：近似贝叶斯集成 [[AISTATS2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1810.05546>)\n- 深度学习中域内不确定性估计和集成的陷阱 [[ICLR2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2002.06470>) - 
[[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FSamsungLabs\u002Fpytorch-ensembles>)\n- BatchEnsemble：一种高效的集成与终身学习替代方案 [[ICLR2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2002.06715>) - [[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002Fgoogle\u002Fedward2>) - [[TorchUncertainty]](\u003Chttps:\u002F\u002Fgithub.com\u002FENSTA-U2IS-AI\u002Ftorch-uncertainty>)\n- 用于鲁棒性和不确定性量化的超参数集成 [[NeurIPS2020]](\u003Chttps:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2020\u002Fhash\u002F481fbfa59da2581098e841b7afc122f1-Abstract.html>)\n- 基于神经正切核的贝叶斯深度集成 [[NeurIPS2020]](\u003Chttps:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2020\u002Fhash\u002F0b1ec366924b26fc98fa7b71a9c249cf-Abstract.html>)\n- 多样性与合作：用于少样本分类的集成方法 [[ICCV2019]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1903.11341>)\n- 集成学习中准确的不确定性估计与分解 [[NeurIPS2019]](\u003Chttps:\u002F\u002Fpapers.nips.cc\u002Fpaper\u002F2019\u002Fhash\u002F1cc8a8ea51cd0adddf5dab504a285915-Abstract.html>)\n- 深度学习的高质量预测区间：无分布假设的集成方法 [[ICML2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1802.07167>) - [[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002FTeaPearce\u002FDeep_Learning_Prediction_Intervals>)\n- 快照集成：训练一次，免费获得M个模型 [[ICLR2017]](https:\u002F\u002Farxiv.org\u002Fabs\u002F1704.00109) - [[TorchUncertainty]](\u003Chttps:\u002F\u002Fgithub.com\u002FENSTA-U2IS-AI\u002Ftorch-uncertainty>)\n- 使用深度集成进行简单且可扩展的预测不确定性估计 [[NeurIPS2017]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1612.01474>) - [[TorchUncertainty]](\u003Chttps:\u002F\u002Fgithub.com\u002FENSTA-U2IS-AI\u002Ftorch-uncertainty>)\n\n**期刊**\n\n- 一对一与多对一用于深度神经网络不确定性（OVNNI）量化 [[IEEE Access2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2006.00954>)\n\n**Arxiv**\n\n- 分割集成：通过任务和模型分割实现高效的OOD感知集成 [[arXiv2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2312.09148>)\n- 深度集成作为高斯过程近似后验 [[arXiv2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2205.00163>)\n- 顺序贝叶斯神经子网络集成 
[[arXiv2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2206.00794>)\n- 带有自助法深度集成的置信神经网络回归 [[arXiv2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2202.10903>) - [[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002FLaurensSluyterman\u002FBootstrapped_Deep_Ensembles>)\n- 基于集成的条件潜在变量模型实现密集不确定性估计 [[arXiv2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2111.11055>)\n- 深度集成：从损失景观的角度看 [[arXiv2019]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1912.02757>)\n- 检查点集成：来自单一训练过程的集成方法 [[arXiv2017]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1710.03282>) - [[TorchUncertainty]](\u003Chttps:\u002F\u002Fgithub.com\u002FENSTA-U2IS-AI\u002Ftorch-uncertainty>)\n\n## 基于采样\u002F丢弃的方法\n\n**会议**\n\n- Rate-In：信息驱动的自适应丢弃率，用于改进推理时的不确定性估计 [[CVPR2025]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2412.07169>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fcode-supplement-25\u002Frate-in>)\n- 在迭代神经网络中实现不确定性估计 [[ICML2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.16732>) - [[Pytorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fcvlab-epfl\u002Fiter_unc>)\n- 让我成为BNN：从预训练模型中估计贝叶斯不确定性的简单策略 [[CVPR2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2312.15297>) - [[TorchUncertainty]](\u003Chttps:\u002F\u002Fgithub.com\u002FENSTA-U2IS-AI\u002Ftorch-uncertainty>)\n- 用于密集回归的免训练不确定性估计：以敏感性作为替代指标 [[AAAI2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1910.04858v3>)\n- nnU-Net的高效贝叶斯不确定性估计 [[MICCAI2022]](\u003Chttps:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-031-16452-1_51>)\n- 开放集条件下鲁棒目标检测的丢弃采样 [[ICRA2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1710.06677>)\n- 深度神经网络中异方差随机不确定性的测试时数据增强估计 [[MIDL2018]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fforum?id=rJZz-knjz>)\n- 具体丢弃 [[NeurIPS2017]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1705.07832>)\n- 丢弃作为贝叶斯近似：在深度学习中表示模型不确定性 [[ICML2016]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1506.02142>) - 
[[TorchUncertainty]](\u003Chttps:\u002F\u002Fgithub.com\u002FENSTA-U2IS-AI\u002Ftorch-uncertainty>)\n\n**期刊**\n\n- 深度学习中不确定性估计的一般框架 [[Robotics and Automation Letters2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F1907.06890.pdf>)\n\n**Arxiv**\n\n- SoftDropConnect (SDC) – 有效且高效的深度MR图像分析中网络不确定性的量化 [[arXiv2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2201.08418>)\n\n## 后处理方法\u002F辅助网络\n\n**会议**\n\n- 关于温度缩放在具有重叠分布上的局限性 [[ICLR2024]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.00740)\n- 使用狄利克雷元模型进行后处理不确定性学习 [[AAAI2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2212.07359>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fmaohaos2\u002FPosthocUQ>)\n- ProbVLM：用于冻结视觉-语言模型的概率适配器 [[ICCV2023]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fhtml\u002FUpadhyay_ProbVLM_Probabilistic_Adapter_for_Frozen_Vison-Language_Models_ICCV_2023_paper.html>)\n- 单目深度估计中的分布外检测 [[ICCV2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2308.06072>)\n- 利用高斯过程模型检测神经网络中的误分类错误 [[AAAI2022]](\u003Chttps:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F20773>)\n- 学习结构化高斯分布以近似深度集成 [[CVPR2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2203.15485>)\n- 提高置信度估计的可靠性 [[ECCV2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2210.06776>)\n- 基于梯度的单目深度估计不确定性 [[ECCV2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2208.02005>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fjhornauer\u002FGrUMoDepth>)\n- BayesCap：用于冻结神经网络校准不确定性的贝叶斯身份帽 [[ECCV2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2207.06873>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FExplainableML\u002FBayesCap>)\n- 面向自动驾驶的安全语义分割不确定性学习 [[ICIP2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2105.13688>)\n- SLURP：回归问题的侧向不确定性学习 [[BMVC2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2104.02395>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FxuanlongORZ\u002FSLURP_uncertainty_estimate>)\n- 
触发故障：通过学习语义分割中的局部对抗攻击进行分布外检测 [[ICCV2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2108.01634>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fvaleoai\u002Fobsnet>)\n- 学习预测MRI重建误差 [[MICCAI2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2002.05582>)\n- 回归主动学习中学习损失的数学分析 [[CVPR Workshop2021]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021W\u002FTCV\u002Fhtml\u002FShukla_A_Mathematical_Analysis_of_Learning_Loss_for_Active_Learning_in_CVPRW_2021_paper.html>)\n- 通过不确定性感知的分布蒸馏实现实时计算机视觉不确定性估计 [[WACV2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2007.15857>)\n- 自监督单目深度估计的不确定性 [[CVPR2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2005.06209>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fmattpoggi\u002Fmono-uncertainty>)\n- 通过带有I\u002FO核的残差估计量化神经网络中的点预测不确定性 [[ICLR2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1906.00588>) - [[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002Fcognizant-ai-labs\u002Frio-paper>)\n- 梯度作为神经网络不确定性的一种度量 [[ICIP2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2008.08030>)\n- 测试时增强的学习损失 [[NeurIPS2020]](\u003Chttps:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2020\u002Fhash\u002F2ba596643cbbbc20318224181fa46b28-Abstract.html>)\n- 主动学习的学习损失 [[CVPR2019]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1905.03677>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FMephisto405\u002FLearning-Loss-for-Active-Learning>)（非官方代码）\n- 通过学习模型置信度来应对故障预测 [[NeurIPS2019]](\u003Chttps:\u002F\u002Fpapers.NeurIPS.cc\u002Fpaper\u002F2019\u002Ffile\u002F757f843a169cc678064d9530d12a1881-Paper.pdf>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fvaleoai\u002FConfidNet>)\n- 结构化不确定性预测网络 [[CVPR2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1802.07079>) - [[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002FEra-Dorta\u002Ftf_mvg>)\n- 基于梯度信息的深度神经网络分类不确定性 [[IAPR Workshop2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1805.08440>)\n\n**期刊**\n\n- 朝着更可靠的置信度估计方向 
[[TPAMI2023]](\u003Chttps:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10172026\u002F>)\n- 通过辅助模型进行置信度估计 [[TPAMI2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2012.06508>)\n\n**Arxiv**\n\n- 针对分布外目标分割的实例感知观察者网络 [[arXiv2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2207.08782>)\n- DEUP：直接的认识论不确定性预测 [[arXiv2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2102.08501>)\n- 神经网络中用于分布外检测的置信度学习 [[arXiv2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1802.04865>)\n\n## 数据增强\u002F基于生成的方法\n\n**会议**\n\n- 使用数据增强的神经网络后验不确定性量化 [[AISTATS2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2403.12729>)\n- 学习生成用于鲁棒语义分割的训练数据集 [[WACV2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2308.02535>)\n- OpenMix：探索异常样本以检测误分类 [[CVPR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2303.17093>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FImpression2805\u002FOpenMix>)\n- 关于 Mixup 在不确定性估计中的陷阱 [[CVPR2023]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FWang_On_the_Pitfall_of_Mixup_for_Uncertainty__CVPR_2023_paper.html>)\n- 针对不确定性估计的多样化、全局性和摊销型反事实解释 [[AAAI2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2112.02646>)\n- 基于隐式异常值变换的分布外检测 [[ICLR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2303.05033>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fqizhouwang\u002Fdoe>)\n- PixMix：如梦似幻的图像全面提升安全措施 [[CVPR2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2112.05135>)\n- 拆解分布外检测：许多基于 OOD 训练数据的方法都在估计相同的核心量的组合 [[ICML2022]](\u003Chttps:\u002F\u002Fproceedings.mlr.press\u002Fv162\u002Fbitterwolf22a.html>)\n- RegMixup：作为正则化的 Mixup 可以惊人地提高准确性和分布外鲁棒性 [[NeurIPS2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2206.14502>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Ffrancescopinto\u002Fregmixup>)\n- 朝着 MIMO 架构中高效的特征共享方向发展 [[CVPR 
Workshop2022]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022W\u002FECV\u002Fhtml\u002FSun_Towards_Efficient_Feature_Sharing_in_MIMO_Architectures_CVPRW_2022_paper.html>)\n- 基于超像素混合的鲁棒语义分割 [[BMVC2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2108.00968>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fgiannifranchi\u002Fdeeplabv3-superpixelmix>)\n- MixMo：通过深度子网络实现多输入到多输出的混合 [[ICCV2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2103.06132>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Falexrame\u002Fmixmo-pytorch>)\n- 训练独立子网络以实现鲁棒预测 [[ICLR2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2010.06610>)\n- 以多样性和不确定性意识来正则化变分自编码器 [[IJCAI2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2110.12381>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fsmilesdzgk\u002Fdu-vae>)\n- 具有自适应损失的不确定性感知 GAN，用于鲁棒的 MRI 图像增强 [[ICCV Workshop2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2110.03343.pdf>)\n- 使用生成模型的不确定性感知深度分类器 [[AAAI2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2006.04183>)\n- 先合成再比较：用于语义分割的故障和异常检测 [[ECCV2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2003.08440>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FYingdaXia\u002FSynthCP>)\n- 通过图像重合成检测意外情况 [[ICCV2019]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1904.07595>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fcvlab-epfl\u002Fdetecting-the-unexpected>)\n- 混搭：深度学习中用于不确定性的集成与组合方法 [[ICML2020]](\u003Chttp:\u002F\u002Fproceedings.mlr.press\u002Fv119\u002Fzhang20k\u002Fzhang20k.pdf>)\n- 基于异常暴露的深度异常检测 [[ICLR2019]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F1812.04606.pdf>)\n- 关于 Mixup 训练：改进并预测深度神经网络的不确定性 [[NeurIPS2019]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1905.11001>)\n\n**Arxiv**\n\n- 语义分割中的可靠性：我们能使用合成数据吗？[[arXiv2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.09231.pdf>)\n- 使用基于 GAN 的先验来量化不确定性 [[arXiv2019]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fforum?id=HyeAPeBFwS>) - 
[[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002Fdhruvpatel108\u002FGANPriors>)\n\n## 输出空间建模\u002F证据深度学习\n\n超赞的证据深度学习 [[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002FMengyuanChen21\u002FAwesome-Evidential-Deep-Learning>)\n\n**会议**\n\n- 用于可靠随机性和认知不确定性估计的邻近标签监督 [[NeurIPS2025]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fforum?id=hPfICQIDOm>)\n- 基于折扣信念融合的不确定性量化多模态学习 [[AISTATS2025]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2412.18024>)\n- 用于分布外检测的超观点证据深度学习 [[NeurIPS2024]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fforum?id=Te8vI2wGTh&referrer=%5Bthe%20profile%20of%20Yufei%20Chen%5D(%2Fprofile%3Fid%3D~Yufei_Chen1)>)\n- R-EDL：放松证据深度学习的非必要设置 [[ICLR2024]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fforum?id=Si3YFA641c>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FMengyuanChen21\u002FICLR2024-REDL\u002Ftree\u002Fmain>)\n- 超证据深度学习用于量化复合分类不确定性 [[ICLR2024]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.10980)\n- 可靠的冲突性多视图学习 [[AAAI2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2402.16897>) - [[Pytorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fjiajunsi\u002FRCML>)\n- 深度证据回归中的证据收缩问题：讨论与解决方案 [[AAAI2024]](\u003Chttps:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F30172>)\n- 针对回归任务的鲁棒不确定性量化之离散化诱导狄利克雷后验 [[AAAI2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2308.09065>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FENSTA-U2IS-AI\u002FDIDO>)\n- 深度证据回归的不合理有效性 [[AAAI2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2205.10060>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fpasteurlabs\u002Funreasonable_effective_der>) - [[TorchUncertainty]](https:\u002F\u002Fgithub.com\u002FENSTA-U2IS-AI\u002Ftorch-uncertainty)\n- 针对不完全多视图分类的不确定性探索与利用 [[CVPR2023]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.05165)\n- 人体姿态回归的合理不确定性 [[ICCV2023]](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FBramlage_Plausible_Uncertainties_for_Human_Pose_Regression_ICCV_2023_paper.pdf) - 
[[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fbiggzlar\u002Fplausible-uncertainties>)\n- 基于费舍尔信息的证据深度学习进行不确定性估计 [[ICML2023]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2303.02045.pdf) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fdanruod\u002Fiedl>)\n- 通过多任务学习改进证据深度学习 [[AAAI2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2112.09368>) - [[PyTorch]](https:\u002F\u002Fgithub.com\u002Fdeargen\u002FMT-ENet)\n- 基于随机模糊数的回归用证据神经网络模型 [[BELIEF2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2208.00647>)\n- 关于使用概率神经网络进行异方差不确定性估计的陷阱 [[ICLR2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2203.09168>) - [[PyTorch]](https:\u002F\u002Fgithub.com\u002Fmartius-lab\u002Fbeta-nll)\n- 自然后验网络：针对指数族分布的深度贝叶斯不确定性 [[ICLR2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2105.04471>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fborchero\u002Fnatural-posterior-network>)\n- 通过损失最小化进行认知不确定性量化之陷阱 [[NeurIPS2022]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fpdf?id=epjxT_ARZW5>)\n- 使用贝叶斯深度网络进行分类的快速预测不确定性 [[UAI2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2003.01227>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fmariushobbhahn\u002FLB_for_BNNs_official>)\n- 评估预测不确定性估计的鲁棒性：基于狄利克雷分布的模型是否可靠？ [[ICML2021]](\u003Chttp:\u002F\u002Fproceedings.mlr.press\u002Fv139\u002Fkopetzki21a\u002Fkopetzki21a.pdf>)\n- 使用正态逆伽马混合分布进行可信的多模态回归 [[NeurIPS2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2111.08456>)\n- 深度分类器中的误分类风险与不确定性量化 [[WACV2021]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FWACV2021\u002Fhtml\u002FSensoy_Misclassification_Risk_and_Uncertainty_Quantification_in_Deep_Classifiers_WACV_2021_paper.html>)\n- 集成分布蒸馏 [[ICLR2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1905.00076>)\n- 通过拟合先验网络实现保守的不确定性估计 [[ICLR2020]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fforum?id=BJlahxHYDS>)\n- 对类别概率采取贝叶斯态度 [[ICML2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2002.07965>) - 
[[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Ftjoo512\u002Fbelief-matching-framework>)\n- 后验网络：无需OOD样本，基于密度的伪计数进行不确定性估计 [[NeurIPS2020]](\u003Chttps:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2020\u002Fhash\u002F0eac690d7059a8de4b48e90f14510391-Abstract.html>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fsharpenb\u002FPosterior-Network>)\n- 深度证据回归 [[NeurIPS2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1910.02600>) - [[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002Faamini\u002Fevidential-deep-learning>) - [[TorchUncertainty]](\u003Chttps:\u002F\u002Fgithub.com\u002FENSTA-U2IS-AI\u002Ftorch-uncertainty>)\n- 面向功能不确定性的噪声对比先验 [[UAI2020]](\u003Chttps:\u002F\u002Fproceedings.mlr.press\u002Fv115\u002Fhafner20a.html>)\n- 朝着最大化域内与域外样本之间表示差距的方向努力 [[NeurIPS Workshop2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2010.10474>)\n- 异步时间事件预测中的不确定性 [[NeurIPS2019]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1911.05503>) - [[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002Fsharpenb\u002FUncertainty-Event-Prediction>)\n- 先验网络的反向KL散度训练：提升不确定性与对抗鲁棒性 [[NeurIPS2019]](\u003Chttps:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2019\u002Fhash\u002F7dd2ae7db7d18ee7c9425e38df1af5e2-Abstract.html>)\n- 使用正则化证据神经网络量化分类不确定性 [[AAAI FSS2019]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1910.06864>)\n- 光流的不确定性估计与多假设网络 [[ECCV2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1802.07095>) - [[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002Flmb-freiburg\u002Fnetdef_models>)\n- 证据深度学习用于量化分类不确定性 [[NeurIPS2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1806.01768>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fdougbrion\u002Fpytorch-classification-uncertainty>)\n- 通过先验网络进行预测不确定性估计 [[NeurIPS2018]](\u003Chttps:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2018\u002Fhash\u002F3ea2db50e62ceefceaf70a9d9a56a6f4-Abstract.html>)\n- 我们在面向计算机视觉的贝叶斯深度学习中需要哪些不确定性？ 
[[NeurIPS2017]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1703.04977>)\n- 估计目标概率分布的均值和方差 [[(ICNN1994)]](\u003Chttps:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F374138>)\n\n**期刊**\n\n- 先验与后验网络：不确定性估计的证据深度学习方法综述 [[TMLR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2110.03051>)\n- 基于区域的证据深度学习，用于量化不确定性并提升脑肿瘤分割的鲁棒性 [[NCA2022]](\u003Chttp:\u002F\u002Farxiv.org\u002Fabs\u002F2208.06038>)\n- 基于Dempster-Shafer理论和深度学习的证据分类器 [[Neurocomputing2021]](\u003Chttps:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0925231221004525>) - [[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002Ftongzheng1992\u002FE-CNN-classifier>)\n- 用于语义分割的证据全卷积网络 [[AppliedIntelligence2021]](\u003Chttps:\u002F\u002Flink.springer.com\u002Farticle\u002F10.1007\u002Fs10489-021-02327-0>) - [[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002Ftongzheng1992\u002FE-FCN>)\n- 面向预测性不确定度估计的信息感知最大范数狄利克雷网络 [[NeuralNetworks2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1910.04819#:~:text=Information%20Aware%20Max%2DNorm%20Dirichlet%20Networks%20for%20Predictive%20Uncertainty%20Estimation,-Theodoros%20Tsiligkaridis&text=Precise%20estimation%20of%20uncertainty%20in,prone%20to%20over%2Dconfident%20predictions>)\n- 基于Dempster-Shafer理论的神经网络分类器 [[IEEETransSMC2000]](\u003Chttps:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F833094\u002F>)\n\n**Arxiv**\n\n- 结合不确定性量化与上下文折扣的多模态医学图像分割深度证据融合 [[arXiv2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2309.05919>)\n- 证据不确定性量化：基于方差的视角 [[arXiv2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.11367.pdf>)\n- 开放世界识别中基于证据模型的有效不确定性估计 [[arXiv2022]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fpdf?id=NrB52z3eOTY>)\n- 多变量深度证据回归 [[arXiv2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2104.06135>)\n- 回归先验网络 [[arXiv2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2006.11590>)\n- 用于分布外检测的变分狄利克雷框架 [[arXiv2019]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1811.07308>)\n- 深度学习中的不确定性估计及其在口语评估中的应用 
[[PhDThesis2019]](\u003Chttps:\u002F\u002Fwww.repository.cam.ac.uk\u002Fhandle\u002F1810\u002F298857>)\n- 抑制型softmax用于神经网络中的不确定性估计 [[arXiv2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1810.01861>)\n- 通过深度狄利克雷混合网络量化分类中的内在不确定性 [[arXiv2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1906.04450>)\n\n\n\n## 确定性-不确定性方法\n\n**会议**\n- 不确定性量化的率失真视角 [[ICML2024]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.10775) - [[Tensorflow]](https:\u002F\u002Fgithub.com\u002Fifiaposto\u002FDistance_Aware_Bottleneck)\n- 深度确定性不确定性：一个简单的基线 [[CVPR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2102.11582>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fomegafragger\u002FDDU>)\n- 基于马氏距离的高斯潜在表示，用于深度分类器中的不确定性估计 [[ICCV Workshop2023]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023W\u002FUnCV\u002Fpapers\u002FVenkataramanan_Gaussian_Latent_Representations_for_Uncertainty_Estimation_Using_Mahalanobis_Distance_in_ICCVW_2023_paper.pdf>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fvaishwarya96\u002FMAPLE-uncertainty-estimation>)\n- 使用属性原型网络进行不确定性估计的简单且可解释的方法 [[ICCV Workshop2023]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023W\u002FUnCV\u002Fpapers\u002FZelenka_A_Simple_and_Explainable_Method_for_Uncertainty_Estimation_Using_Attribute_ICCVW_2023_paper.pdf>)\n- 确定性不确定性方法的训练、架构与先验设置 [[ICLR Workshop2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2303.05796>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Forientino\u002Fdum-components>)\n- 潜在判别式确定性不确定性 [[ECCV2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2207.10130>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FENSTA-U2IS-AI\u002FLDU>)\n- 关于确定性认识论不确定性实用性的一些思考 [[ICML2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2107.00649>)\n- 改进深度学习中用于分类与回归的确定性不确定性估计 [[CoRR2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2102.11409>)\n- 使用单个深度确定性神经网络进行不确定性估计 [[ICML2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2003.02037>) - 
[[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fy0ast\u002Fdeterministic-uncertainty-quantification>)\n- 利用信息瓶颈对归一化流进行训练，以实现具有竞争力的生成式分类 [[NeurIPS2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2001.06448>)\n- 通过距离感知实现简单而原则性的确定性深度学习不确定性估计 [[NeurIPS2020]](\u003Chttps:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2020\u002Fhash\u002F543e83748234f7cbab21aa0ade66565f-Abstract.html>)\n- 重新审视一对多分类器在神经网络中的预测性不确定性及分布外检测中的应用 [[ICML Workshop2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2007.05134>)\n- 使用近似方差传播进行无采样式的认识论不确定性估计 [[ICCV2019]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fhtml\u002FPostels_Sampling-Free_Epistemic_Uncertainty_Estimation_Using_Approximated_Variance_Propagation_ICCV_2019_paper.html>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fjanisgp\u002FSampling-free-Epistemic-Uncertainty>)\n- 深度学习中的单模型不确定性 [[NeurIPS2019]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1811.00908>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FSingleModelUncertainty\u002F>)\n\n**期刊**\n\n- ZigZag：通过两步推理实现的通用无采样不确定性估计 [[TMLR2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2211.11435>) - [[Pytorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fcvlab-epfl\u002Fzigzag>)\n- 表征空间中的密度估计 [[EDSMLS2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1908.07235>)\n\n**Arxiv**\n\n- 神经网络激活中的隐藏不确定性 [[arXiv2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2012.03082>)\n- 对比学习中不确定性的简单框架 [[arXiv2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2010.02038>)\n- 基于距离的神经网络分类器置信度分数 [[arXiv2017]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1709.09844>)\n\n## 分位数回归\u002F预测区间\n\n**会议**\n\n- 基于无分布假设的不确定性量化及其在图像处理中的应用的图像到图像回归 [[ICML2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2202.05265>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Faangelopoulos\u002Fim2im-uq>)\n- 预测区间：基于质量驱动的深度集成模型的分裂正态混合模型 
[[UAI2020]](\u003Chttp:\u002F\u002Fproceedings.mlr.press\u002Fv124\u002Fsaleh-salem20a.html>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Ftarik\u002Fpi-snm-qde>)\n- 具有有效且自适应覆盖率的分类任务 [[NeurIPS2020]](\u003Chttps:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2020\u002Fhash\u002F244edd7e85dc81602b7615cd705545f5-Abstract.html>)\n- 深度学习中的单模型不确定性 [[NeurIPS2019]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1811.00908>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FSingleModelUncertainty\u002F>)\n- 高质量的深度学习预测区间：一种无分布假设的集成方法 [[ICML2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1802.07167>) - [[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002FTeaPearce\u002FDeep_Learning_Prediction_Intervals>)\n\n**期刊**\n\n- 使用随机化先验对深度算子网络进行可扩展的不确定性量化 [[CMAME2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2203.03048>)\n- 探索回归神经网络中的不确定性以构建预测区间 [[Neurocomputing2022]](\u003Chttps:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fabs\u002Fpii\u002FS0925231222001102>)\n\n**Arxiv**\n\n- 区间神经网络：不确定性评分 [[arXiv2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2003.11566>)\n- 利用扩展区间最小化法获得紧致的预测区间 [[arXiv2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1806.11222>)\n\n## 合规性预测\n\n超赞的合规性预测 [[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002Fvaleman\u002Fawesome-conformal-prediction>)\n\n\u003C!-- **会议**\n\n- 使用合规性 p 值检测异常值 [[Ann. Statist. 
2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2104.08279>) - [[Python]](\u003Chttps:\u002F\u002Fgithub.com\u002Fmsesia\u002Fconditional-conformal-pvalues>)\n- 基于合规性预测的图像分类器不确定性集合 [[ICLR2021]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2009.14193.pdf) - [[GitHub]](https:\u002F\u002Fgithub.com\u002Faangelopoulos\u002Fconformal_classification)\n- 在协变量漂移下的合规性预测 [[NeurIPS2019]](\u003Chttps:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2019\u002Fhash\u002F8fb21ee7a2207526da55a679f0332de2-Abstract.html>)\n- 合规化的分位数回归 [[NeurIPS2019]](\u003Chttps:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2019\u002Fhash\u002F5103c3584b063c431bd1268e9b5e76fb-Abstract.html>) -->\n\n## 校准\u002F评估指标\n\n**会议**\n\n- 置信度应进行多轮校准 [[ACL2026]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2604.05397>)\n- Grace：一种用于大型语言模型中更好置信度获取的生成方法 [[ACL2026]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.09438>)\n- 通过理解不确定性校准的作用改进基于扰动的解释 [[NeurIPS2025]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fforum?id=AjOl3iahHd&referrer=%5Bthe%20profile%20of%20Volker%20Tresp%5D(%2Fprofile%3Fid%3D~Volker_Tresp1)>)\n- 用于模型校准的不确定性加权梯度 [[CVPR2025]](\u003Chttps:\u002F\u002Fwww.arxiv.org\u002Fabs\u002F2503.22725>)\n- 平滑ECE：通过核平滑构建原则性的可靠性图 [[ICLR2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2309.12236>)\n- 通过稀疏高斯过程校准Transformer模型 [[ICLR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2303.02444>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fchenw20\u002Fsgpa>)\n- 不止于校准：估计现代神经网络的分组损失 [[ICLR2023]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fpdf?id=6w1k-IixnL8>) - [[Python]](\u003Chttps:\u002F\u002Fgithub.com\u002Faperezlebel\u002Fbeyond_calibration>)\n- 双重焦点损失用于校准 [[ICML 2023]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.13665)\n- 增强数据的有效标签是什么？利用AutoLabel提升校准与鲁棒性 [[SaTML2023]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.11188)\n- 魔鬼藏在边界中：基于边界的标签平滑用于网络校准 [[CVPR2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2111.15430>) - 
[[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fby-liu\u002Fmbls>)\n- AdaFocal：感知校准的自适应焦点损失 [[NeurIPS2022]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.11838)\n- 通过成对约束校准深度神经网络 [[CVPR2022]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fhtml\u002FCheng_Calibrating_Deep_Neural_Networks_by_Pairwise_Constraints_CVPR_2022_paper.html>)\n- 最优标签校准与多分类到二分类的约简 [[ICLR2022]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fforum?id=WqoBaaPHS->)\n- 从标签平滑到标签松弛 [[AAAI2021]](\u003Chttps:\u002F\u002Fwww.aaai.org\u002FAAAI21Papers\u002FAAAI-2191.LienenJ.pdf>)\n- 诊断性不确定性校准：迈向医疗领域可靠的机器预测 [[AIStats2021]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2007.01659)\n- 重新思考深度神经网络的校准：不要害怕过度自信 [[NeurIPS2021]](\u003Chttps:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2021\u002Fhash\u002F61f3a6dbc9120ea78ef75544826c814e-Abstract.html>)\n- 不止于Pinball损失：用于校准式不确定性量化的分位数方法 [[NeurIPS2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2011.09588>)\n- 针对神经网络的软校准目标 [[NeurIPS2021]](\u003Chttps:\u002F\u002Fproceedings.neurips.cc\u002Fpaper_files\u002Fpaper\u002F2021\u002Ffile\u002Ff8905bd3df64ace64a68e154ba72f24c-Paper.pdf>) - [[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002Fgoogle\u002Funcertainty-baselines\u002Ftree\u002Fmain\u002Fexperimental\u002Fcaltrain>)\n- 面向深度神经网络的置信度感知学习 [[ICML2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2007.01458>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fdaintlab\u002Fconfidence-aware-learning>)\n- 混搭：集成与组合方法用于深度学习中的不确定性校准 [[ICML2020]](\u003Chttp:\u002F\u002Fproceedings.mlr.press\u002Fv119\u002Fzhang20k\u002Fzhang20k.pdf>)\n- 通过结构化标签平滑进行正则化 [[ICML2020]](\u003Chttps:\u002F\u002Fproceedings.mlr.press\u002Fv108\u002Fli20e.html>)\n- 医学影像中基于深度学习的校准良好的回归不确定性 [[MIDL2020]](\u003Chttp:\u002F\u002Fproceedings.mlr.press\u002Fv121\u002Flaves20a.html>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fmlaves\u002Fwell-calibrated-regression-uncertainty>)\n- 使用焦点损失校准深度神经网络 
[[NeurIPS2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2002.09437>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Ftorrvision\u002Ffocal_calibration>)\n- 深度学习中用于不确定性校准的平稳激活函数 [[NeurIPS2020]](\u003Chttps:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2020\u002Fhash\u002F18a411989b47ed75a60ac69d9da05aa5-Abstract.html>)\n- 重新审视不确定性估计的评估及其在探索模型复杂度-不确定性权衡中的应用 [[CVPR Workshop2020]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPRW_2020\u002Fhtml\u002Fw1\u002FDing_Revisiting_the_Evaluation_of_Uncertainty_Estimation_and_Its_Application_to_CVPRW_2020_paper.html>)\n- 评估可扩展的贝叶斯深度学习方法以实现稳健的计算机视觉 [[CVPR Workshop2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1906.01620>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Ffregu856\u002Fevaluating_bdl>)\n- 针对深度神经网络分类器的偏差减少型不确定性估计 [[ICLR2019]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1805.08206>)\n- 不止于温度缩放：使用狄利克雷校准获得校准良好的多分类概率 [[NeurIPS2019]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F1910.12656.pdf>) - [[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002Fdirichletcal>)\n- 标签平滑何时有效？[[NeurIPS2019]](\u003Chttps:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2019\u002Fhash\u002Ff1748d6b0fd9d439f71450117eba2725-Abstract.html>)\n- 经验证的不确定性校准 [[NeurIPS2019]](\u003Chttps:\u002F\u002Fpapers.NeurIPS.cc\u002Fpaper\u002F2019\u002Fhash\u002Ff8c0c968632845cd133308b1a494967f-Abstract.html>) - [[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002Fp-lambda\u002Fverified_calibration>)\n- 深度学习中的校准测量 [[CVPR Workshop2019]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1904.01685>)\n- 利用校准回归为深度学习提供准确的不确定性 [[ICML2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1807.00263>)\n- 基于深度校准网络的广义零样本学习 [[NeurIPS2018]](\u003Chttps:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2018\u002Fhash\u002F1587965fb4d4b5afe8428a4a024feb0d-Abstract.html>)\n- 关于现代神经网络的校准 [[ICML2017]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1706.04599>) - 
[[TorchUncertainty]](https:\u002F\u002Fgithub.com\u002FENSTA-U2IS-AI\u002Ftorch-uncertainty)\n- 关于公平与校准 [[NeurIPS2017]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1709.02012>)\n- 利用贝叶斯分箱获得良好校准的概率 [[AAAI2015]](\u003Chttps:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F9602\u002F9461>)\n\n**期刊**\n\n- 元校准：利用可微期望校准误差学习模型校准 [[TMLR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2106.09613>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fondrejbohdal\u002Fmeta-calibration>)\n- 回归任务中不确定性预测的评估与校准 [[Sensors2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1905.11659>)\n- 针对神经网络回归器的校准预测区间 [[IEEE Access 2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1803.09546>) - [[Python]](\u003Chttps:\u002F\u002Fgithub.com\u002Fcruvadom\u002FPrediction_Intervals>)\n\n**Arxiv**\n\n- 向理解标签平滑迈进 [[arXiv2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2006.11653>)\n- 标签平滑如何影响泛化能力的探究 [[arXiv2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2010.12648>)\n\n## 错分类检测与选择性分类\n\n**会议**\n\n- 克服选择性分类系统评估中的常见缺陷 [[NeurIPS2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2407.01032>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FIML-DKFZ\u002Ffd-shifts\u002Ftree\u002Fmain>)\n- 用于错分类检测的数据驱动相对不确定性度量 [[ICLR2024]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.01710)\n- 带有分布外检测的选择性分类插件估计器 [[ICLR2024]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.12386)\n- SURE：构建可靠且鲁棒深度网络的调查指南 [[CVPR2024]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.00543) - [[PyTorch]](https:\u002F\u002Fyutingli0606.github.io\u002FSURE\u002F)\n- RCL：用于统一故障检测的可靠持续学习 [[CVPR2024]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fpapers\u002FZhu_RCL_Reliable_Continual_Learning_for_Unified_Failure_Detection_CVPR_2024_paper.pdf>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FImpression2805\u002FRCL>)\n- 对图像分类中故障检测评估实践的反思呼吁 [[ICLR2023]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fpdf?id=YnkGMIh0gvX>) - 
[[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FIML-DKFZ\u002Ffd-shifts\u002Ftree\u002Fmain>)\n- 魔鬼藏在错分类样本中：迈向统一的开放集识别 [[ICLR2023]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fforum?id=xLr0I_xYGAs>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FJun-CEN\u002FUnified-Open-Set-Recognition>)\n- 利用分布外数据增强软最大值信息以实现选择性分类 [[ACCV2022]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FACCV2022\u002Fhtml\u002FXia_Augmenting_Softmax_Information_for_Selective_Classification_with_Out-of-Distribution_Data_ACCV_2022_paper.html>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FGuoxoug\u002FSIRC>)\n- 基于单类嵌入的反向蒸馏异常检测 [[CVPR2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2201.10703>)\n- 重新思考用于故障预测的置信度校准 [[ECCV2022]](\u003Chttps:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-031-19806-9_30>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FImpression2805\u002FFMFP>)\n- 深度神经网络的选择性分类 [[NeurIPS2017]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1705.08500>)\n\n**期刊**\n\n- 深度神经网络未知检测能力的统一基准 [[Expert Systems with Applications2023]](\u003Chttps:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fabs\u002Fpii\u002FS0957417423009636>)\n\n**ArXiv**\n\n- 相似性-距离-量级通用验证 [[arXiv2025]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2502.20167>) - [[Python]](\u003Chttps:\u002F\u002Fgithub.com\u002FReexpressAI\u002Fsdm>)\n\n## 异常检测与分布外检测\n超赞的分布外检测资源 [[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002Fcontinuousml\u002FAwesome-Out-Of-Distribution-Detection>)\n\n**会议**\n\n- 基于双能量的模型结合开放世界不确定性估计用于分布外检测 [[CVPR2025]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fhtml\u002FChen_Dual_Energy-Based_Model_with_Open-World_Uncertainty_Estimation_for_Out-of-distribution_Detection_CVPR_2025_paper.html>)\n- 结合统计深度与费马距离进行不确定性量化 [[NeurIPS2024]](https:\u002F\u002Fopenreview.net\u002Fpdf?id=xeXRhTUmcf) - [[PyTorch]](https:\u002F\u002Fgithub.com\u002FHaiVyNGUYEN\u002Fld_official)\n- 学习可迁移的负提示用于分布外检测 
[[CVPR2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2404.03248>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fmala-lab\u002Fnegprompt>)\n- 预训练神经网络的认识论不确定性量化 [[CVPR2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2404.10124>)\n- NECO：基于神经坍缩的分布外检测 [[ICLR2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2310.06823>)\n- 在什么情况下以及如何利用分布内标签来帮助分布外检测？[[ICML2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2405.18635>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fdeeplearning-wisc\u002Fid_label>)\n- 分布偏移下的异常检测 [[ICCV2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2303.13845>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fmala-lab\u002FADShift>)\n- 用于人体姿态异常检测的归一化流 [[ICCV2023]](https:\u002F\u002Forhir.github.io\u002FSTG_NF\u002F) - [[PyTorch]](https:\u002F\u002Fgithub.com\u002Forhir\u002Fstg-nf)\n- RbA：分割被所有模型拒绝的未知区域 [[ICCV2023]](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FNayal_RbA_Segmenting_Unknown_Regions_Rejected_by_All_ICCV_2023_paper.pdf) - [[PyTorch]](https:\u002F\u002Fgithub.com\u002FNazirNayal8\u002FRbA)\n- 基于不确定性的最优传输用于语义一致的分布外检测 [[CVPR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2303.10449>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Flufan31\u002Fet-ood>)\n- 为显著性目标检测模型建模分布不确定性 [[CVPR2023]](https:\u002F\u002Fnpucvr.github.io\u002FDistributional_uncer\u002F) - [[PyTorch]](https:\u002F\u002Fgithub.com\u002Ftxynwpu\u002FDistributional_uncertainty_SOD)\n- SQUID：无监督异常检测中的深度特征修复 [[CVPR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2111.13495>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Ftiangexiang\u002FSQUID>)\n- 如何利用超球面嵌入进行分布外检测？[[ICLR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2203.04450.pdf>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fdeeplearning-wisc\u002Fcider>)\n- 为了实现分布外泛化，必须对数据生成过程建模 [[ICLR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2206.07837.pdf>)\n- CNN 是否比 Transformer 
更加鲁棒？[[ICLR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2206.03452.pdf>)\n- 用于评估类别分布外检测的基准框架及其在 ImageNet 上的应用 [[ICLR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2302.11893.pdf>)\n- 极其简单的激活形状调整用于分布外检测 [[ICLR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2209.09858>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fandrijazz\u002Fash>)\n- 使用对抗模型进行不确定性量化 [[NeurIPS2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2307.03217>)\n- 鲁棒语义分割 UNCV2023 挑战赛结果 [[ICCV Workshop2023]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.15478)\n- 连续证据深度学习用于分布外检测 [[ICCV Workshop2023]](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023W\u002FVCL\u002Fhtml\u002FAguilar_Continual_Evidential_Deep_Learning_for_Out-of-Distribution_Detection_ICCVW_2023_paper.html)\n- 深空中的遥远之地：基于最近邻的密集分布外检测 [[ICCV Workshop2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2211.06660>)\n- 使用马氏距离在深度分类器中进行不确定性估计的高斯潜在表示 [[ICCV Workshop2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2305.13849>)\n- 基于通用表示的校准型分布外检测 [[ICCV Workshop2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2303.13148>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fvojirt\u002Fgrood>)\n- 使用高斯过程模型检测神经网络中的误分类错误 [[AAAI2022]](\u003Chttps:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F20773>)\n- 朝着工业异常检测的完全召回率迈进 [[CVPR2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2106.08265>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fhcw-00\u002FPatchCore_anomaly_detection>)\n- POEM：通过后验采样进行分布外检测 [[ICML2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2206.13687>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fdeeplearning-wisc\u002Fpoem>)\n- VOS：通过虚拟异常值合成学习你所不知道的东西 [[ICLR2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2202.01197>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fdeeplearning-wisc\u002Fvos>)\n- 用于基于图像的缺陷检测的全卷积跨尺度流 [[WACV2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2110.02855>) - 
[[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fmarco-rudolph\u002Fcs-flow>)\n- 使用一维子空间并集进行分布外检测 [[CVPR2021]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fhtml\u002FZaeemzadeh_Out-of-Distribution_Detection_Using_Union_of_1-Dimensional_Subspaces_CVPR_2021_paper.html>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fzaeemzadeh\u002FOOD>)\n- NAS-OoD：用于分布外泛化的神经架构搜索 [[ICCV2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2109.02038>)\n- 关于梯度在野外检测分布偏移中的重要性 [[NeurIPS2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2110.00218>)\n- 探索分布外检测的极限 [[NeurIPS2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2106.03004>)\n- 在不使用分布外数据的情况下检测分布外图像。[[CVPR2020]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2020\u002Fhtml\u002FHsu_Generalized_ODIN_Detecting_Out-of-Distribution_Image_Without_Learning_From_Out-of-Distribution_Data_CVPR_2020_paper.html>)\n- 使用判别互反点学习开放集网络 [[ECCV2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2011.00178>)\n- 先合成再比较：用于语义分割的故障与异常检测 [[ECCV2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2003.08440>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FYingdaXia\u002FSynthCP>)\n- NADS：面向不确定性感知的神经架构分布搜索 [[ICML2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2006.06646>)\n- PaDiM：一种用于异常检测与定位的补丁分布建模框架 [[ICPR2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2011.08785>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fopenvinotoolkit\u002Fanomalib>)\n- 基于能量的分布外检测 [[NeurIPS2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2010.03759?context=cs>)\n- 朝着最大化域内与分布外样本之间表示差距的方向努力 [[NeurIPS Workshop2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2010.10474>)\n- 记住正常以检测异常：用于无监督异常检测的记忆增强型深度自编码器 [[ICCV2019]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1904.02639>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fdonggong1\u002Fmemae-anomaly-detection>)\n- 通过图像重合成来检测意外情况 [[ICCV2019]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1904.07595>) - 
[[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fcvlab-epfl\u002Fdetecting-the-unexpected>)\n- 提升神经网络中分布外图像检测的可靠性 [[ICLR2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1706.02690>)\n- 用于检测神经网络中误分类和分布外样本的基线 [[ICLR2017]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1610.02136>) - [[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002Fhendrycks\u002Ferror-detection>)\n\n**期刊**\n\n- 用于异常检测的基础模型与Transformer：综述 [[Information Fusion2025]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2507.15905>)\n- 广义的分布外检测：综述 [[IJCV2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2110.11334>)\n- 重访置信度估计：迈向可靠的故障预测 [[TPAMI2024]](https:\u002F\u002Fwww.computer.org\u002Fcsdl\u002Fjournal\u002Ftp\u002F5555\u002F01\u002F10356834\u002F1SQHDHvGg9i) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FImpression2805\u002FFMFP>)\n- 针对深度神经网络不确定性量化的一对多方法（OVNNI）[[IEEE Access2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2006.00954>)\n\n**ArXiv**\n\n- 神经元激活覆盖率：重新思考分布外检测与泛化能力 [[arXiv2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2306.02879>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fbierone\u002Food_coverage>)\n- 改进近似分布外检测的马氏距离简单修正方案 [[arXiv2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2106.09022>)\n- 我们真的需要从域内数据中学习表征来进行异常检测吗？[[arXiv2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2105.09270>)\n- 深度学习中的频率学不确定性估计 [[arXiv2018]](\u003Chttp:\u002F\u002Fbayesiandeeplearning.org\u002F2018\u002Fpapers\u002F31.pdf>)\n\n\n\n## 不确定性来源及随机性与认知性不确定性的解耦\n\n**会议**\n\n- 不确定性解耦基准测试：面向特定任务的专业化不确定性 [[NeurIPS2024](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2402.19460>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fbmucsanyi\u002Funtangle>)\n\n**ArXiv**\n\n- 机器学习中的不确定性来源——统计学家视角 [[ArXiv2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2305.16703>)\n- 你的分类不确定性到底有多解耦？[[ArXiv2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2408.12175>)\n\n## 多模态模型\u002F生成式AI中的不确定性量化\n\n**会议**\n\n- 向文本到图像生成的不确定性理解与量化迈进 
[[CVPR2025]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2412.03178>)\n- 面向自动驾驶车辆感知中多模态不确定性融合的超维度不确定性量化 [[CVPR2025]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2503.20011>)\n\n**期刊**\n\n- 多模态分布外个体不确定性量化提升多药理学结合亲和力预测 [[Nature Machine Intelligence 2025]](\u003Chttps:\u002F\u002Fwww.nature.com\u002Farticles\u002Fs42256-025-01151-2>)\n\n## 应用\n\n### 分类与语义分割\n\n**会议**\n\n- 使用随机专家混合模型建模分割中的多模态随机性不确定性 [[ICLR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2212.07328>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fgaozhitong\u002Fmose-auseg>)\n- 具有置信度自适应的任意时间稠密预测 [[ICLR2022]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fforum?id=kNKFOXleuC>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fliuzhuang13\u002Fanytime>)\n- CRISP——医学图像分割的可靠不确定性估计 [[MICCAI2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2206.07664>)\n- TBraTS：可信的脑肿瘤分割 [[MICCAI2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2206.09309>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fcocofeat\u002Ftbrats>)\n- 基于超像素混合的鲁棒语义分割 [[BMVC2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2108.00968>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fgiannifranchi\u002Fdeeplabv3-superpixelmix>)\n- 语义分割中的深度确定性不确定性 [[ICMLW2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2111.00079>)\n- DEAL：面向语义分割的难度感知主动学习 [[ACCV2020]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FACCV2020\u002Fhtml\u002FXie_DEAL_Difficulty-aware_Active_Learning_for_Semantic_Segmentation_ACCV_2020_paper.html>)\n- 具有有效且自适应覆盖的分类 [[NeurIPS2020]](\u003Chttps:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2020\u002Fhash\u002F244edd7e85dc81602b7615cd705545f5-Abstract.html>)\n- 面向语义夜间图像分割的引导式课程模型适配与不确定性感知评估 [[ICCV2019]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fhtml\u002FSakaridis_Guided_Curriculum_Model_Adaptation_and_Uncertainty-Aware_Evaluation_for_Semantic_Nighttime_ICCV_2019_paper.html>)\n- 人类不确定性使分类更加 robust 
[[ICCV2019]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fhtml\u002FPeterson_Human_Uncertainty_Makes_Classification_More_Robust_ICCV_2019_paper.html>)\n- 半监督3D左心房分割的不确定性感知自集成模型 [[MICCAI2019]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1806.05034>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fyulequan\u002FUA-MT>)\n- 轻量级概率深度网络 [[CVPR2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1805.11327>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fezjong\u002Flightprobnets>)\n- 用于模糊图像分割的概率U-Net [[NeurIPS2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1806.05034>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fstefanknegt\u002FProbabilistic-Unet-Pytorch>)\n- 证据深度学习用于量化分类不确定性 [[NeurIPS2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1806.01768>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fdougbrion\u002Fpytorch-classification-uncertainty>)\n- 相信还是不相信一个分类器 [[NeurIPS2018]](\u003Chttps:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2018\u002Fhash\u002F7180cffd6a8e829dacfc2a31b3f72ece-Abstract.html>)\n- 基于梯度信息的深度神经网络分类不确定性 [[IAPR Workshop2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1805.08440>)\n- 贝叶斯SegNet：场景理解中深度卷积编码器-解码器架构的模型不确定性 [[BMVC2017]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1511.02680>)\n\n**期刊**\n\n- 图像分类模型中的可解释机器学习：基于不确定性量化的视角。[[KnowledgeBased2022]](\u003Chttps:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS095070512200168X>)\n- 基于区域的证据深度学习用于量化不确定性并提升脑肿瘤分割的鲁棒性 [[NCA2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2208.06038>)\n\n**ArXiv**\n\n- 利用不确定性估计提升分类器性能 [[arXiv2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.11723.pdf>)\n- 评估用于语义分割的贝叶斯深度学习方法 [[arXiv2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1811.12709>)\n\n### 回归\n\n**会议**\n\n- 用于联合视差与不确定性估计的立体匹配中误差分布学习 [[CVPR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2304.00152>) - 
[[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Flly00412\u002Fsednet>)\n- 概率MIMO U-Net：面向像素级回归的高效且精确的不确定性估计 [[ICCV Workshop2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2308.07477>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fantonbaumann\u002Fmimo-unet>)\n- 无训练的密集回归不确定性估计：以敏感性作为替代指标 [[AAAI2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1910.04858v3>)\n- 学习结构化高斯分布以近似深度集成模型 [[CVPR2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2203.15485>)\n- 基于约束序数回归的深度估计不确定性量化 [[ECCV2022]](\u003Chttps:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2022\u002Fpapers_ECCV\u002Fpapers\u002F136620229.pdf>)\n- 使用分类方法进行单目深度估计及不确定性量化 [[ICIP2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2202.12369>)\n- 具有置信度自适应的任意时刻密集预测 [[ICLR2022]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fforum?id=kNKFOXleuC>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fliuzhuang13\u002Fanytime>)\n- 变分深度网络：基于自监督的单目深度估计及其不确定性感知 [[ECCV Workshop2022]](\u003Chttps:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-031-25085-9_3>)\n- SLURP：回归问题中的侧向学习不确定性 [[BMVC2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2104.02395>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FxuanlongORZ\u002FSLURP_uncertainty_estimate>)\n- 通过跨域集成提升鲁棒性 [[ICCV2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2103.10919>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FEPFL-VILAB\u002FXDEnsembles>)\n- 学习预测MRI重建中的误差 [[MICCAI2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2002.05582>)\n- 关于自监督单目深度估计的不确定性 [[CVPR2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2005.06209>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fmattpoggi\u002Fmono-uncertainty>)\n- 通过带有输入输出核的残差估计来量化神经网络中的点预测不确定性 [[ICLR2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1906.00588>) - [[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002Fcognizant-ai-labs\u002Frio-paper>)\n- 基于深度学习的光流快速不确定性估计 
[[IROS2020]](\u003Chttps:\u002F\u002Fauthors.library.caltech.edu\u002F104758\u002F>)\n- 医学影像中基于深度学习的校准良好的回归不确定性 [[MIDL2020]](\u003Chttp:\u002F\u002Fproceedings.mlr.press\u002Fv121\u002Flaves20a.html>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fmlaves\u002Fwell-calibrated-regression-uncertainty>)\n- 深度证据回归 [[NeurIPS2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1910.02600>) - [[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002Faamini\u002Fevidential-deep-learning>)\n- 从单张图像推断深度分布 [[IROS2019]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1912.06268>) - [[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002Fgengshan-y\u002Fmonodepth-uncertainty>)\n- 基于可分离形式的深度估计及其不确定性多任务学习 [[CVPR Workshop2019]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPRW_2019\u002Fhtml\u002FUncertainty_and_Robustness_in_Deep_Visual_Learning\u002FAsai_Multi-Task_Learning_based_on_Separable_Formulation_of_Depth_Estimation_and_CVPRW_2019_paper.html>)\n- 轻量级概率深度网络 [[CVPR2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1805.11327>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fezjong\u002Flightprobnets>)\n- 结构化不确定性预测网络 [[CVPR2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1802.07079>) - [[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002FEra-Dorta\u002Ftf_mvg>)\n- 光流的不确定性估计与多假设网络 [[ECCV2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1802.07095>) - [[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002Flmb-freiburg\u002Fnetdef_models>)\n- 使用校准回归实现深度学习的准确不确定性 [[ICML2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1807.00263>)\n\n**期刊**\n\n- 在现实世界分布偏移下，你的回归模型的不确定性有多可靠？[[TMLR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2302.03679>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Ffregu856\u002Fregression_uncertainty>)\n- 回归任务中不确定性预测的评估与校准 [[Sensors2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1905.11659>)\n- 探索回归神经网络中的不确定性以构建预测区间 
[[Neurocomputing2022]](\u003Chttps:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fabs\u002Fpii\u002FS0925231222001102>)\n- Wasserstein Dropout [[Machine Learning 2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2012.12687>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Ffraunhofer-iais\u002Fsecond-moment-loss>)\n- 深度分布回归 [[Computational Statistics & Data Analysis2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1903.06023>)\n- 神经网络回归器的校准预测区间 [[IEEE Access 2018]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1803.09546>) - [[Python]](\u003Chttps:\u002F\u002Fgithub.com\u002Fcruvadom\u002FPrediction_Intervals>)\n- 学习光流的置信度度量 [[TPAMI2013]](\u003Chttps:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?arnumber=6261321&casa_token=fYVGhK2pa40AAAAA:XWJdS8zJ4JRw1brCIGiYpzEqMidXTTYVkcKTYnnhSl4ys5pUoHzHO6xsVeGZII9Ir1LAI_3YyfI&tag=1>)\n\n**Arxiv**\n\n- 理解深度异方差回归的病理机制 [[arxiv2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2306.16717>)\n- 单目深度估计的不确定性程度测量与建模 [[arXiv2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2307.09929>)\n- UncertaINR：面向计算机断层扫描的端到端隐式神经表示的不确定性量化 [[arXiv2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2202.10847>)\n- 面向回归的高效高斯神经过程 [[arXiv2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2108.09676>)\n\n### 目标检测\n\n**会议**\n\n- 桥接精度与置信度：一种用于目标检测校准的训练时损失 [[CVPR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2303.14404.pdf>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fakhtarvision\u002Fbpc_calibration?tab=readme-ov-file>)\n- 面向回归与目标检测的参数化及多元不确定性校准 [[ECCV Workshop2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2207.01242>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FEFS-OpenSource\u002Fcalibration-framework>)\n- 深度目标检测器中回归预测不确定性的估计与评估 [[ICLR2021]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fforum?id=YLewtnvKgR7>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fasharakeh\u002Fprobdet?tab=readme-ov-file>)\n- 面向目标检测的多元置信度校准 [[CVPR 
Workshop2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2004.13546>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FEFS-OpenSource\u002Fcalibration-framework>)\n- 高斯YOLOv3：一种利用定位不确定性进行自动驾驶的精准快速目标检测器 [[ICCV2019]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fpapers\u002FChoi_Gaussian_YOLOv3_An_Accurate_and_Fast_Object_Detector_Using_Localization_ICCV_2019_paper.pdf>) - [[CUDA]](\u003Chttps:\u002F\u002Fgithub.com\u002Fjwchoi384\u002FGaussian_YOLOv3>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fmotokimura\u002FPyTorch_Gaussian_YOLOv3>) - [[Keras]](\u003Chttps:\u002F\u002Fgithub.com\u002Fxuannianz\u002Fkeras-GaussianYOLOv3>)\n\n### 领域自适应\n\n**会议**\n\n- 基于不确定性估计的伪标签引导用于无源无监督领域自适应 [[CVPR2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2303.03770>) - [[PyTorch]](https:\u002F\u002Fgithub.com\u002Fmattialitrico\u002Fguiding-pseudo-labels-with-uncertainty-estimation-for-source-free-unsupervised-domain-adaptation)\n- 不确定性引导的无源领域自适应 [[ECCV2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2208.07591.pdf>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Froysubhankar\u002Funcertainty-sfda>)\n\n### 半监督学习与主动学习\n\n超赞的半监督学习 [[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002Fyassouali\u002Fawesome-semi-supervised-learning>)\n超赞的主动学习 [[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002Fbaifanxxx\u002Fawesome-active-learning>)\n\n**会议**\n\n- 重新思考主动开放集标注中的认识论与随机性不确定性：一种基于能量的方法 [[CVPR2025]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2502.19691>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fchenchenzong\u002FEAOA>)\n- 不确定性与多样性相结合：面向室内3D目标检测的综合主动学习框架 [[CVPR2025]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2503.16125>)\n- 联合分布外过滤与数据发现的主动学习 [[CVPR2025]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2503.02491>)\n- 通往低成本学习之路：半监督与主动学习的协同效应 
[[CVPR2025]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fpapers\u002FYin_Towards_Cost-Effective_Learning_A_Synergy_of_Semi-Supervised_and_Active_Learning_CVPR_2025_paper.pdf>)\n- 利用未标记数据进行置信度估计 [[ICLR2023]](\u003Chttps:\u002F\u002Fopenreview.net\u002Fpdf?id=sOXU-PEJSgQ>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FTopoXLab\u002Fconsistency-ranking-loss>)\n\n### 自然语言处理\n\n超赞的大模型不确定性、可靠性与鲁棒性 [[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002Fjxzhangjhu\u002FAwesome-LLM-Uncertainty-Reliability-Robustness>)\n\n\n**会议**\n\n- R-U-SURE？通过最大化跨随机用户意图的效用来实现不确定性感知的代码建议 [[ICML2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2303.00732.pdf>) - [[GitHub]](https:\u002F\u002Fgithub.com\u002Fgoogle-research\u002Fr_u_sure)\n- 团结就是力量：通过提示一致性估计大语言模型的置信度 [[TrustNLP2023]](\u003Chttps:\u002F\u002Faclanthology.org\u002F2023.trustnlp-1.28\u002F>) - [[GitHub]](https:\u002F\u002Fgithub.com\u002FJHU-CLSP\u002FConfidence-Estimation-TrustNLP2023)\n- 解耦机器翻译评估中的不确定性 [[EMNLP2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2204.06546>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fdeep-spin\u002Funcertainties_mt_eval>)\n- 探讨集成方法在提升文本分类器模型鲁棒性中的应用 [[EMNLP2022 Findings]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2210.16298>)\n- DATE：通过Transformer的自监督检测文本异常 [[NAACL2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2104.05591>)\n- 为自然语言处理校准结构化输出预测器 [[ACL2020]](\u003Chttps:\u002F\u002Faclanthology.org\u002F2020.acl-main.188\u002F>)\n- 针对分布内和分布外数据的校准型语言模型微调 [[EMNLP2020]](\u003Chttps:\u002F\u002Faclanthology.org\u002F2020.emnlp-main.102\u002F>) - [[PyTorch]](https:\u002F\u002Fgithub.com\u002FLingkai-Kong\u002FCalibrated-BERT-Fine-Tuning)\n\n**期刊**\n- 我们如何知道语言模型何时真正“知道”？关于问答任务中语言模型的校准问题 [[TACL2021]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2012.00955) - [[PyTorch]](https:\u002F\u002Fgithub.com\u002Fjzbjyb\u002Flm-calibration)\n\n**Arxiv**\n\n- DRIFT：检测表征不一致以确保事实真实性 
[[arXiv2026]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2601.14210>)\n- 高斯随机权重平均法用于大语言模型的贝叶斯低秩适配 [[arXiv2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.03425>)\n- 相信还是不相信你的大语言模型？[[arXiv2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2406.02543>)\n- 通过输入澄清集成分解大语言模型的不确定性 [[arXiv2023]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2311.08718>)\n\n### 其他\n\n**会议**\n\n- 生成图像检测中的认识论不确定性 [[NeurIPS2025]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2412.05897>)\n- PaSCo：具有不确定性意识的城市3D全景场景补全 [[CVPR2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.02158.pdf>) - [[官网]](\u003Chttps:\u002F\u002Fastra-vision.github.io\u002FPaSCo\u002F>)\n- 基于稳定分布传播的不确定性量化 [[ICLR2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2402.08324>)\n- 评估相似度评分中的不确定性：人脸识别中的性能与公平性 [[ICLR2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2211.07245>)\n\n**Arxiv**\n\n- 用奥卡姆剃刀削减权重：利用边际似然对神经网络进行贝叶斯稀疏化 - [[arxiv2024]](\u003Chttps:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.15978>)\n- 城市3D全景场景补全，具备不确定性意识 [[arXiv2023]](\u003Chttps:\u002F\u002Fastra-vision.github.io\u002FPaSCo\u002F>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002Fastra-vision\u002FPaSCo>)\n\n# 数据集与基准测试\n\n- SHIFT：用于连续多任务领域自适应的合成驾驶数据集 [[CVPR2022]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fhtml\u002FSun_SHIFT_A_Synthetic_Driving_Dataset_for_Continuous_Multi-Task_Domain_Adaptation_CVPR_2022_paper.html>)\n- MUAD：自动驾驶中的多种不确定性，一个针对多种不确定性和任务的基准测试 [[BMVC2022]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2203.01437>) - [[PyTorch]](\u003Chttps:\u002F\u002Fgithub.com\u002FENSTA-U2IS-AI\u002FMUAD-Dataset>)\n- ACDC：具有对应关系的恶劣条件数据集，用于语义驾驶场景理解 [[ICCV2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2104.13395>)\n- MVTec异常检测数据集：一个全面的真实世界数据集，用于无监督异常检测 [[IJCV2021]](\u003Chttps:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002Fs11263-020-01400-4.pdf>)\n- SegmentMeIfYouCan：异常分割的基准测试 
[[NeurIPS2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2104.14812>)\n- 不确定性基准：深度学习中不确定性和鲁棒性的基准测试 [[arXiv2021]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F2106.04015>) - [[TensorFlow]](\u003Chttps:\u002F\u002Fgithub.com\u002Fgoogle\u002Funcertainty-baselines>)\n- 结合合成与真实数据的课程式模型适配，用于语义雾天场景理解 [[IJCV2020]](\u003Chttps:\u002F\u002Fpeople.ee.ethz.ch\u002F~csakarid\u002FModel_adaptation_SFSU_dense\u002F>)\n- 语义分割模型鲁棒性的基准测试 [[CVPR2020]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1908.05005>)\n- Fishyscapes：自动驾驶中安全语义分割的基准测试 [[ICCV Workshop2019]](\u003Chttps:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCVW_2019\u002Fhtml\u002FADW\u002FBlum_Fishyscapes_A_Benchmark_for_Safe_Semantic_Segmentation_in_Autonomous_Driving_ICCVW_2019_paper.html>)\n- 对象检测鲁棒性的基准测试：寒冬将至时的自动驾驶 [[NeurIPS Workshop2019]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1907.07484>) - [[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002Fbethgelab\u002Frobust-detection-benchmark>)\n- 使用合成数据进行语义雾天场景理解 [[IJCV2018]](\u003Chttps:\u002F\u002Fpeople.ee.ethz.ch\u002F~csakarid\u002FSFSU_synthetic\u002F>)\n- 丢失与发现：为自动驾驶车辆检测小型道路隐患 [[IROS2016]](\u003Chttps:\u002F\u002Farxiv.org\u002Fabs\u002F1609.04653>)\n\n# 库\n\n## Python\n\n- 不确定性校准库 [[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002Fp-lambda\u002Fverified_calibration>)\n- MAPIE：模型无关的预测区间估计器 [[Sklearn]](https:\u002F\u002Fgithub.com\u002Fscikit-learn-contrib\u002FMAPIE)\n- 不确定性工具箱 [[GitHub]](\u003Chttps:\u002F\u002Funcertainty-toolbox.github.io\u002F>)\n- OpenOOD：广义分布外检测基准测试 [[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002Fjingkang50\u002Fopenood>)\n- Darts：时间序列的预测与异常检测 [[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002Funit8co\u002Fdarts>)\n- 用于分布和不确定性估计的混合密度网络（MDN） [[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002Faxelbrando\u002FMixture-Density-Networks-for-distribution-and-uncertainty-estimation>)\n- UQLM：语言模型的不确定性量化 [[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002Fcvs-health\u002Fuqlm>)\n\n## PyTorch\n\n- TorchUncertainty 
[[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002FENSTA-U2IS-AI\u002Ftorch-uncertainty>)\n- 贝叶斯PyTorch [[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002FIntelLabs\u002Fbayesian-torch>)\n- Blitz：PyTorch的贝叶斯神经网络库 [[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002FpiEsposito\u002Fblitz-bayesian-deep-learning>)\n\n## JAX\n\n- Fortuna [[GitHub - JAX]](\u003Chttps:\u002F\u002Fgithub.com\u002Fawslabs\u002Ffortuna>)\n\n## TensorFlow\n\n- TensorFlow Probability [[官网]](\u003Chttps:\u002F\u002Fwww.tensorflow.org\u002Fprobability>)\n\n# 讲座与教程\n\n- 丹·亨德里克斯：机器学习安全入门课程 [[官网]](\u003Chttps:\u002F\u002Fcourse.mlsafety.org\u002F>)\n- ICML深度学习中的不确定性与鲁棒性研讨会（2020、2021年） [[SlidesLive]](\u003Chttps:\u002F\u002Fslideslive.com\u002Ficml-2020\u002Ficml-workshop-on-uncertainty-and-robustness-in-deep-learning-udl>)\n- 亚林·加尔：贝叶斯深度学习入门 [[官网]](\u003Chttp:\u002F\u002Fwww.cs.ox.ac.uk\u002Fpeople\u002Fyarin.gal\u002Fwebsite\u002Fbdl101\u002F>)\n- MIT 6.S191：证据深度学习与不确定性（2021年） [[Youtube]](\u003Chttps:\u002F\u002Fwww.youtube.com\u002Fwatch?v=toTcf7tZK8c>)\n- 面向深度学习用户的贝叶斯神经网络实践教程 [[IEEE计算智能杂志]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2007.06823.pdf)\n\n# 书籍\n\n- 凯文·墨菲的“概率机器学习”系列丛书 [[图书]](\u003Chttps:\u002F\u002Fprobml.github.io\u002Fpml-book\u002F>)\n\n# 其他资源\n\n深度学习中的不确定性量化 [[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002Fahmedmalaa\u002Fdeep-learning-uncertainty>)\n优秀的分布外检测资源 [[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002Fcontinuousml\u002FAwesome-Out-Of-Distribution-Detection>)\n异常检测学习资源 [[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002Fyzhao062\u002Fanomaly-detection-resources>)\n优秀的共形预测资源 [[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002Fvaleman\u002Fawesome-conformal-prediction>)\n优秀的LLM不确定性、可靠性与鲁棒性资源 [[GitHub]](\u003Chttps:\u002F\u002Fgithub.com\u002Fjxzhangjhu\u002FAwesome-LLM-Uncertainty-Reliability-Robustness>)\nUQSay——巴黎萨克雷大学关于不确定性量化（UQ）、计算机实验设计与分析（DACE）及相关主题的研讨会 [[官网]](\u003Chttps:\u002F\u002Fwww.uqsay.org\u002Fp\u002Fwelcome.html\u002F>)\nProbAI暑期学校 
[[官网]](\u003Chttps:\u002F\u002Fprobabilistic.ai\u002F>)\n高斯过程暑期学校 [[官网]](\u003Chttps:\u002F\u002Fgpss.cc\u002F>)","# awesome-uncertainty-deeplearning 快速上手指南\n\n`awesome-uncertainty-deeplearning` 不是一个可直接安装的单一软件库，而是一个精选的**资源合集仓库**。它汇集了关于深度学习不确定性（Uncertainty in Deep Learning）的论文、代码实现、书籍、教程和基准测试。\n\n本指南将帮助你如何利用该仓库快速找到适合你项目的工具库（Libraries）和代码实现。\n\n## 环境准备\n\n由于该仓库包含多种不同方法（如贝叶斯神经网络、集成学习、证据深度学习等）的实现，具体的系统要求取决于你选择使用的具体子项目。但大多数现代深度学习不确定性工具都基于以下主流框架：\n\n*   **操作系统**: Linux, macOS, Windows (推荐 Linux)\n*   **Python 版本**: Python 3.8 或更高版本\n*   **核心依赖**:\n    *   PyTorch (最常用)\n    *   TensorFlow \u002F Keras\n    *   JAX\n*   **通用工具**:\n    *   `git`: 用于克隆仓库\n    *   `pip` 或 `conda`: 用于管理依赖\n\n建议预先安装好基础的深度学习环境（如 `pytorch` 或 `tensorflow`）。\n\n## 安装步骤\n\n### 1. 克隆资源仓库\n首先，将该合集仓库克隆到本地，以便浏览目录和查找特定任务的代码链接。\n\n```bash\ngit clone https:\u002F\u002Fgithub.com\u002FENSTA-U2IS-AI\u002Fawesome-uncertainty-deeplearning.git\ncd awesome-uncertainty-deeplearning\n```\n\n### 2. 选择并安装具体工具库\n根据 README 中的 **[Libraries](#libraries)** 章节，选择适合你需求的工具库进行安装。以下是几个热门库的安装示例（推荐使用国内镜像源加速）：\n\n#### 方案 A: 使用 Laplace (基于 PyTorch 的贝叶斯深度学习)\n适用于想要轻松为现有神经网络添加不确定性估计的开发者。\n\n```bash\n# 使用清华源安装\npip install laplace-torch -i https:\u002F\u002Fpypi.tuna.tsinghua.edu.cn\u002Fsimple\n```\n\n#### 方案 B: 使用 TorchUncertainty (综合工具箱)\n如果需要一个包含多种不确定性方法的综合库（注：需确认具体库名，部分论文代码需单独克隆）。\n\n对于 README 中列出的特定论文代码（例如 `Beyond_Deep_Ensembles`），通常需要单独克隆其 GitHub 仓库：\n\n```bash\n# 示例：克隆某个具体论文的代码实现\ngit clone https:\u002F\u002Fgithub.com\u002FFeuermagier\u002FBeyond_Deep_Ensembles.git\ncd Beyond_Deep_Ensembles\npip install -r requirements.txt -i https:\u002F\u002Fpypi.tuna.tsinghua.edu.cn\u002Fsimple\n```\n\n> **提示**: 请在克隆后的仓库根目录查看该具体项目的 `README.md` 以获取准确的依赖列表。\n\n## 基本使用\n\n由于这是一个资源列表，\"使用\"通常指调用其中推荐的某个库。以下以 **Laplace** 库为例，展示如何为一个预训练模型添加不确定性估计（这是该领域最经典的上手场景）。\n\n### 示例：使用 Laplace 进行后验近似\n\n假设你已经有一个训练好的 PyTorch 分类模型。\n\n```python\nimport torch\nimport torch.nn as nn\nfrom laplace import Laplace\n\n# 1. 
加载你现有的预训练模型 (示例为一个简单的 ResNet)\nmodel = torch.hub.load('pytorch\u002Fvision', 'resnet18', weights='IMAGENET1K_V1')\nmodel.eval()\n\n# 2. 初始化 Laplace 近似\n# 'last-layer' 表示仅对最后一层进行贝叶斯推断，计算效率高\nla = Laplace(model, 'classification', subset_of_weights='last-layer', hessian_structure='diag')\n\n# 3. 拟合后验分布\n# 需要传入训练数据加载器 (train_loader) 来计算海森矩阵\n# la.fit(train_loader) \n# 注意：实际使用时请取消上一行注释并传入你的 DataLoader\n\n# 4. 进行预测 (返回预测均值和方差\u002F不确定性)\n# dummy_input = torch.randn(1, 3, 224, 224)\n# pred_mean, pred_var = la(dummy_input)\n# print(f\"Prediction: {pred_mean}, Uncertainty: {pred_var}\")\n```\n\n### 如何查找更多示例？\n1. 打开本地克隆的 `awesome-uncertainty-deeplearning` 文件夹。\n2. 阅读 `README.md` 文件。\n3. 定位到 **Papers** 部分，找到与你任务相关的方法（例如 `Bayesian-Methods` 或 `Ensemble-Methods`）。\n4. 点击论文标题旁边的 `[[PyTorch]]` 或 `[[GitHub]]` 链接，跳转到具体代码仓库。\n5. 在具体代码仓库中查找 `examples\u002F` 文件夹或 `demo.ipynb` 文件获取针对该算法的详细教程。","某自动驾驶初创团队的算法工程师正在优化感知模型，急需让车辆在面对罕见路况或传感器故障时，能准确判断自身预测的可信度，以避免盲目决策引发事故。\n\n### 没有 awesome-uncertainty-deeplearning 时\n- **文献调研如大海捞针**：团队需手动在 arXiv 和谷歌学术中筛选“不确定性估计”相关论文，难以区分贝叶斯方法、集成学习或证据深度学习等不同技术路线的优劣。\n- **复现成本极高**：找到理论后，往往找不到对应的开源代码或标准数据集，工程师需从零编写复杂的采样或校准逻辑，耗时数周且容易出错。\n- **评估指标缺失**：缺乏统一的校准误差（ECE）或误分类检测基准，导致无法量化模型在“未知场景”下的表现，只能凭经验瞎猜。\n- **技术选型盲目**：由于缺乏系统的综述对比，团队可能选用了不适合实时推理的重型方法，导致车载芯片算力不足。\n\n### 使用 awesome-uncertainty-deeplearning 后\n- **技术路线一目了然**：直接查阅库中分类清晰的综述（Surveys）和理论（Theory）板块，快速锁定了适合车载环境的“基于 Dropout 的采样方法”。\n- **代码与数据即取即用**：通过 Libraries 和 Datasets 章节，直接集成了成熟的 PyTorch 实现和专用基准测试，将原型开发周期从数周缩短至两天。\n- **评估体系标准化**：利用库中推荐的 Calibration\u002FEvaluation-Metrics 工具，建立了科学的不确定性量化指标，精准识别出模型在雨雾天气的低置信度区域。\n- **应用案例有章可循**：参考 Object detection 和 Domain adaptation 等具体应用场景的论文，成功将不确定性模块迁移到现有的目标检测模型中，提升了系统安全性。\n\nawesome-uncertainty-deeplearning 将分散的前沿研究转化为结构化的工程资产，帮助开发者低成本地为深度学习模型装上“自知之明”的安全阀。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FENSTA-U2IS-AI_awesome-uncertainty-deeplearning_5bbde268.png","ENSTA-U2IS-AI","ENSTA Paris - U2IS - 
AI","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002FENSTA-U2IS-AI_c1ae6821.jpg","Deep Learning Research at U2IS",null,"http:\u002F\u002Fu2is.ensta-paris.fr\u002F","https:\u002F\u002Fgithub.com\u002FENSTA-U2IS-AI",799,78,"2026-04-19T09:47:05","MIT",1,"未说明",{"notes":86,"python":84,"dependencies":87},"该仓库是一个关于深度学习不确定性（Uncertainty in Deep Learning）的论文、代码、书籍和博客的资源列表（Awesome List），本身不是一个可直接运行的单一软件工具。因此没有统一的运行环境需求。具体的环境要求取决于列表中引用的各个独立项目（如 Laplace, TRADI, PFNs 等），用户需参考对应子项目的 README 获取详细的依赖、GPU 及版本信息。",[88,89,90,91],"PyTorch","TensorFlow","JAX","Python",[14],[94,95,96,97,98,99,100,101,102,103],"deep-learning","uncertainty-quantification","uncertainty-estimation","uncertainty-neural-networks","uncertainty-analysis","deep-neural-networks","deep-learning-tutorials","machine-learning","awesome","awesome-resources","2026-03-27T02:49:30.150509","2026-04-20T07:06:05.092683",[],[]]