[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-subeeshvasu--Awesome-Deblurring":3,"tool-subeeshvasu--Awesome-Deblurring":61},[4,18,26,36,44,53],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":17},4358,"openclaw","openclaw\u002Fopenclaw","OpenClaw 是一款专为个人打造的本地化 AI 助手，旨在让你在自己的设备上拥有完全可控的智能伙伴。它打破了传统 AI 助手局限于特定网页或应用的束缚，能够直接接入你日常使用的各类通讯渠道，包括微信、WhatsApp、Telegram、Discord、iMessage 等数十种平台。无论你在哪个聊天软件中发送消息，OpenClaw 都能即时响应，甚至支持在 macOS、iOS 和 Android 设备上进行语音交互，并提供实时的画布渲染功能供你操控。\n\n这款工具主要解决了用户对数据隐私、响应速度以及“始终在线”体验的需求。通过将 AI 部署在本地，用户无需依赖云端服务即可享受快速、私密的智能辅助，真正实现了“你的数据，你做主”。其独特的技术亮点在于强大的网关架构，将控制平面与核心助手分离，确保跨平台通信的流畅性与扩展性。\n\nOpenClaw 非常适合希望构建个性化工作流的技术爱好者、开发者，以及注重隐私保护且不愿被单一生态绑定的普通用户。只要具备基础的终端操作能力（支持 macOS、Linux 及 Windows WSL2），即可通过简单的命令行引导完成部署。如果你渴望拥有一个懂你",349277,3,"2026-04-06T06:32:30",[13,14,15,16],"Agent","开发框架","图像","数据工具","ready",{"id":19,"name":20,"github_repo":21,"description_zh":22,"stars":23,"difficulty_score":10,"last_commit_at":24,"category_tags":25,"status":17},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,"2026-04-05T11:01:52",[14,15,13],{"id":27,"name":28,"github_repo":29,"description_zh":30,"stars":31,"difficulty_score":32,"last_commit_at":33,"category_tags":34,"status":17},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",150720,2,"2026-04-11T11:33:10",[14,13,35],"语言模型",{"id":37,"name":38,"github_repo":39,"description_zh":40,"stars":41,"difficulty_score":32,"last_commit_at":42,"category_tags":43,"status":17},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",108322,"2026-04-10T11:39:34",[14,15,13],{"id":45,"name":46,"github_repo":47,"description_zh":48,"stars":49,"difficulty_score":32,"last_commit_at":50,"category_tags":51,"status":17},6121,"gemini-cli","google-gemini\u002Fgemini-cli","gemini-cli 是一款由谷歌推出的开源 AI 命令行工具，它将强大的 Gemini 大模型能力直接集成到用户的终端环境中。对于习惯在命令行工作的开发者而言，它提供了一条从输入提示词到获取模型响应的最短路径，无需切换窗口即可享受智能辅助。\n\n这款工具主要解决了开发过程中频繁上下文切换的痛点，让用户能在熟悉的终端界面内直接完成代码理解、生成、调试以及自动化运维任务。无论是查询大型代码库、根据草图生成应用，还是执行复杂的 Git 操作，gemini-cli 
都能通过自然语言指令高效处理。\n\n它特别适合广大软件工程师、DevOps 人员及技术研究人员使用。其核心亮点包括支持高达 100 万 token 的超长上下文窗口，具备出色的逻辑推理能力；内置 Google 搜索、文件操作及 Shell 命令执行等实用工具；更独特的是，它支持 MCP（模型上下文协议），允许用户灵活扩展自定义集成，连接如图像生成等外部能力。此外，个人谷歌账号即可享受免费的额度支持，且项目基于 Apache 2.0 协议完全开源，是提升终端工作效率的理想助手。",100752,"2026-04-10T01:20:03",[52,13,15,14],"插件",{"id":54,"name":55,"github_repo":56,"description_zh":57,"stars":58,"difficulty_score":32,"last_commit_at":59,"category_tags":60,"status":17},4721,"markitdown","microsoft\u002Fmarkitdown","MarkItDown 是一款由微软 AutoGen 团队打造的轻量级 Python 工具，专为将各类文件高效转换为 Markdown 格式而设计。它支持 PDF、Word、Excel、PPT、图片（含 OCR）、音频（含语音转录）、HTML 乃至 YouTube 链接等多种格式的解析，能够精准提取文档中的标题、列表、表格和链接等关键结构信息。\n\n在人工智能应用日益普及的今天，大语言模型（LLM）虽擅长处理文本，却难以直接读取复杂的二进制办公文档。MarkItDown 恰好解决了这一痛点，它将非结构化或半结构化的文件转化为模型“原生理解”且 Token 效率极高的 Markdown 格式，成为连接本地文件与 AI 分析 pipeline 的理想桥梁。此外，它还提供了 MCP（模型上下文协议）服务器，可无缝集成到 Claude Desktop 等 LLM 应用中。\n\n这款工具特别适合开发者、数据科学家及 AI 研究人员使用，尤其是那些需要构建文档检索增强生成（RAG）系统、进行批量文本分析或希望让 AI 助手直接“阅读”本地文件的用户。虽然生成的内容也具备一定可读性，但其核心优势在于为机器",93400,"2026-04-06T19:52:38",[52,14],{"id":62,"github_repo":63,"name":64,"description_en":65,"description_zh":66,"ai_summary_zh":66,"readme_en":67,"readme_zh":68,"quickstart_zh":69,"use_case_zh":70,"hero_image_url":71,"owner_login":72,"owner_name":73,"owner_avatar_url":74,"owner_bio":75,"owner_company":76,"owner_location":77,"owner_email":78,"owner_twitter":79,"owner_website":80,"owner_url":81,"languages":79,"stars":82,"forks":83,"last_commit_at":84,"license":79,"difficulty_score":85,"env_os":86,"env_gpu":87,"env_ram":87,"env_deps":88,"category_tags":91,"github_topics":93,"view_count":32,"oss_zip_url":79,"oss_zip_packed_at":79,"status":17,"created_at":108,"updated_at":109,"faqs":110,"releases":146},6621,"subeeshvasu\u002FAwesome-Deblurring","Awesome-Deblurring","A curated list of resources for Image and Video Deblurring","Awesome-Deblurring 是一个专注于图像与视频去模糊技术的精选资源库。它旨在解决因相机抖动、物体快速运动或对焦不准导致的画面模糊问题，帮助恢复清晰锐利的视觉内容。\n\n该资源库系统性地整理了从 2006 年至今的经典学术论文、开源代码实现以及权威基准数据集。其内容覆盖广泛，既包含早期的传统非深度学习算法，也收录了基于深度学习的最新单图盲去模糊、多图\u002F视频去模糊、深度感知去模糊及散焦去模糊等前沿方案。通过提供按年份和发表会议分类的详细列表，Awesome-Deblurring 极大地降低了查找和复现相关技术的门槛。\n\n无论是从事计算机视觉研究的学者、需要优化算法的开发者，还是希望了解底层原理的摄影师与设计师，都能从中获益。研究人员可借此追踪技术演进脉络并获取实验数据；开发者能快速找到可用的代码库进行二次开发；而普通技术爱好者也能通过此窗口一窥 AI 如何“化模糊为神奇”。作为一个开放维护的项目，它还鼓励社区共同贡献新成果，是进入去模糊领域不可或缺的入门指南与参考手册。","\u003C!--A curated list of resources for Image and Video Deblurring-->\n\u003C!-- PROJECT LOGO -->\n\u003Cp align=\"center\">\n  \u003Ch3 align=\"center\">Image and Video Deblurring\u003C\u002Fh3>\n  \u003Cp align=\"center\">A curated list of resources for Image and Video Deblurring\n    \u003Cbr \u002F>\n    \u003Cbr \u002F>\n    \u003Cbr \u002F>\n    \u003Ca href=\"https:\u002F\u002Fgithub.com\u002Fsubeeshvasu\u002FAwesome-Deblurring\u002Fpulls\u002Fnew\">Suggest new item\u003C\u002Fa>\n    \u003Cbr \u002F>\n    \u003Ca href=\"https:\u002F\u002Fgithub.com\u002Fsubeeshvasu\u002FAwesome-Deblurring\u002Fissues\u002Fnew\">Report Bug\u003C\u002Fa>\n  \u003C\u002Fp>\n\u003C\u002Fp>\n\n## Table of contents\n\n- [Single-Image-Blind-Motion-Deblurring (non-DL)](#single-image-blind-motion-deblurring-non-dl)\n- [Single-Image-Blind-Motion-Deblurring (DL)](#single-image-blind-motion-deblurring-dl)\n- [Non-Blind-Deblurring](#non-blind-deblurring)\n- [(Multi-image\u002FVideo)-Motion-Deblurring](#multi-imagevideo-motion-deblurring)\n- [Depth-Aware Motion deblurring](#depth-aware-motion-deblurring)\n- [Other Closely Related Works](#other-closely-related-works)\n- [Defocus Deblurring and Potential Datasets](#defocus-deblurring-and-potential-datasets)\n- [Benchmark Datasets on 
Motion Deblurring](#benchmark-datasets-on-motion-deblurring)\n- [AI Photo Enhancer Apps](#AI-Photo-Enhancer-Apps)\n\n## Single-Image-Blind-Motion-Deblurring (non-DL)\n|Year|Pub|Paper|Repo|\n|:---:|:---:|:---:|:---:|\n|2006|TOG|[Removing camera shake from a single photograph](https:\u002F\u002Fcs.nyu.edu\u002F~fergus\u002Fpapers\u002Fdeblur_fergus.pdf)|[Code & Project page](https:\u002F\u002Fcs.nyu.edu\u002F~fergus\u002Fresearch\u002Fdeblur.html)|\n|2007|CVPR|[Single image motion deblurring using transparency](http:\u002F\u002Fjiaya.me\u002Fall_final_papers\u002Fmotion_deblur_cvpr07.pdf)||\n|2008|CVPR|[Psf estimation using sharp edge prediction](http:\u002F\u002Fvision.ucsd.edu\u002Fkriegman-grp\u002Fresearch\u002Fpsf_estimation\u002Fpsf_estimation.pdf)|[Project page](http:\u002F\u002Fvision.ucsd.edu\u002Fkriegman-grp\u002Fresearch\u002Fpsf_estimation\u002F)|\n|2008|TOG|[High-quality motion deblurring from a single image](http:\u002F\u002Fwww.cse.cuhk.edu.hk\u002F~leojia\u002Fprojects\u002Fmotion_deblurring\u002Fdeblur_siggraph08.pdf)|[Code & Project page](http:\u002F\u002Fwww.cse.cuhk.edu.hk\u002F~leojia\u002Fprojects\u002Fmotion_deblurring\u002Findex.html)|\n|2009|TOG|[Fast motion deblurring](https:\u002F\u002Fvclab.dgist.ac.kr\u002Fdownload\u002Ffast_motion_deblurring\u002Fpaper.pdf)||\n|2009|CVPR|[Image deblurring and denoising using color priors](http:\u002F\u002Fneelj.com\u002Fprojects\u002Ftwocolordeconvolution\u002Ftwo_color_deconvolution.pdf)|[Project page](http:\u002F\u002Fneelj.com\u002Fprojects\u002Ftwocolordeconvolution\u002F)|\n|2010|CVPR|[Efficient filter flow for space-variant multiframe blind deconvolution](https:\u002F\u002Fpure.mpg.de\u002Frest\u002Fitems\u002Fitem_1789030\u002Fcomponent\u002Ffile_3009627\u002Fcontent)||\n|2010|CVPR|[Non-uniform deblurring for shaken images](http:\u002F\u002Fwww.di.ens.fr\u002Fwillow\u002Fpdfs\u002Fcvpr10d.pdf)|[Code & Project page](https:\u002F\u002Fwww.di.ens.fr\u002Fwillow\u002Fresearch\u002Fdeblurring\u002F)|\n|2010|CVPR|[Denoising vs. 
deblurring: HDR imaging techniques using moving cameras](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F5540171)||\n|2010|ECCV|[Single image deblurring using motion density functions](http:\u002F\u002Fgrail.cs.washington.edu\u002Fprojects\u002Fmdf_deblurring\u002Fgupta_mdf_deblurring.pdf)|[Project page](http:\u002F\u002Fgrail.cs.washington.edu\u002Fprojects\u002Fmdf_deblurring\u002F)|\n|2010|ECCV|[Two-phase kernel estimation for robust motion deblurring](http:\u002F\u002Fwww.cse.cuhk.edu.hk\u002F~leojia\u002Fprojects\u002Frobust_deblur\u002Frobust_motion_deblurring.pdf)|[Code & Project page](http:\u002F\u002Fwww.cse.cuhk.edu.hk\u002F~leojia\u002Fprojects\u002Frobust_deblur\u002Findex.html)|\n|2010|NIPS|[Space-variant single-image blind deconvolution for removing camera shake](https:\u002F\u002Fpapers.nips.cc\u002Fpaper\u002F4007-space-variant-single-image-blind-deconvolution-for-removing-camera-shake.pdf)||\n|2011|CVPR|[Blind deconvolution using a normalized sparsity measure](https:\u002F\u002Fdilipkay.files.wordpress.com\u002F2019\u002F04\u002Fpriors_cvpr11.pdf)|[Code & Project page](https:\u002F\u002Fdilipkay.wordpress.com\u002Fblind-deconvolution\u002F)|\n|2011|CVPR|[Blur kernel estimation using the radon transform](http:\u002F\u002Fpeople.csail.mit.edu\u002Fsparis\u002Fpubli\u002F2011\u002Fcvpr_radon\u002FCho_11_Blur_Kernel_Estimation.pdf)|[Code](http:\u002F\u002Fpeople.csail.mit.edu\u002Ftaegsang\u002FThesis.html)|\n|2011|CVPR|[Exploring aligned complementary image pair for blind motion deblurring](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F5995351)||\n|2011|ICCV|[Fast removal of non-uniform camera shake](http:\u002F\u002Fpixel.kyb.tuebingen.mpg.de\u002Ffast_removal_of_camera_shake\u002Ffiles\u002FHirsch_ICCV2011_Fast%20removal%20of%20non-uniform%20camera%20shake.pdf)||\n|2011|IJCV|[The non-parametric sub-pixel local point spread function estimation is a well posed problem](https:\u002F\u002Flink.springer.com\u002Farticle\u002F10.1007\u002Fs11263-011-0460-0)||\n|2012|ECCV|[Blur-kernel estimation from spectral irregularities](http:\u002F\u002Fciteseerx.ist.psu.edu\u002Fviewdoc\u002Fdownload?doi=10.1.1.646.4404&rep=rep1&type=pdf)||\n|2012|ACCV|[MRF-based Blind Image Deconvolution](http:\u002F\u002Fimagine.enpc.fr\u002F~komodakn\u002Fpublications\u002Fdocs\u002Faccv2012.pdf)||\n|2012|TIP|[Framelet-based Blind Motion deblurring from a single Image](https:\u002F\u002Fwww.math.hkust.edu.hk\u002F~jfcai\u002Fpaper\u002FCJLS_TIP_11.pdf)||\n|2013|CVPR|[Unnatural L0 sparse representation for natural image deblurring](http:\u002F\u002Fwww.cse.cuhk.edu.hk\u002F~leojia\u002Fprojects\u002Fl0deblur\u002Fl0deblur_cvpr13.pdf)|[Code & Project page](http:\u002F\u002Fwww.cse.cuhk.edu.hk\u002F~leojia\u002Fprojects\u002Fl0deblur\u002F)|\n|2013|CVPR|[Handling noise in single image deblurring using directional filters](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2013\u002Fhtml\u002FZhong_Handling_Noise_in_2013_CVPR_paper.html)|\n|2013|NIPS|[Non-Uniform Camera Shake Removal Using a Spatially-Adaptive Sparse Penalty](https:\u002F\u002Fpapers.nips.cc\u002Fpaper\u002F4864-non-uniform-camera-shake-removal-using-a-spatially-adaptive-sparse-penalty)|[Project page](https:\u002F\u002Fsites.google.com\u002Fsite\u002Fhczhang1\u002Fprojects\u002Fnon-uniform-camera-shake-removal)|\n|2013|ICCV|[Dynamic Scene Deblurring](https:\u002F\u002Fcv.snu.ac.kr\u002Fpublication\u002Fconf\u002F2013\u002FDSD_ICCV2013.pdf)||\n|2013|ICCP|[Edge-based blur kernel estimation using patch 
priors](http:\u002F\u002Fcs.brown.edu\u002F~lbsun\u002Fdeblur2013\u002Fpatchdeblur_iccp2013.pdf)|[Project page & Results & Dataset](http:\u002F\u002Fcs.brown.edu\u002F~lbsun\u002Fdeblur2013\u002Fdeblur2013iccp.html)|\n|2014|CVPR|[Deblurring Text Images via L0 -Regularized Intensity and Gradient Prior](https:\u002F\u002Feng.ucmerced.edu\u002Fpeople\u002Fzhu\u002FCVPR14_deblurtext.pdf)|[Code & Project page](https:\u002F\u002Fsites.google.com\u002Fsite\u002Fjspanhomepage\u002Fl0rigdeblur)|\n|2014|CVPR|[Segmentation-Free Dynamic Scene Deblurring](https:\u002F\u002Fcv.snu.ac.kr\u002Fpublication\u002Fconf\u002F2014\u002FSFDSD_CVPR2014.pdf)||\n|2014|CVPR|[Separable Kernel for Image Deblurring](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2014\u002Fhtml\u002FFang_Separable_Kernel_for_2014_CVPR_paper.html)|\n|2014|CVPR|[Deblurring Low-light Images with Light Streaks](https:\u002F\u002Feng.ucmerced.edu\u002Fpeople\u002Fzhu\u002FCVPR14_lightstreak.pdf)|[Code & Project page](https:\u002F\u002Feng.ucmerced.edu\u002Fpeople\u002Fzhu\u002FCVPR14_lightstreak.html)|\n|2014|CVPR|[Joint depth estimation and camera shake removal from single blurry image](https:\u002F\u002Feng.ucmerced.edu\u002Fpeople\u002Fzhu\u002FCVPR14_deblurdepth.pdf)||\n|2014|ECCV|[Hybrid Image Deblurring by Fusing Edge and Power Spectrum Information](http:\u002F\u002Fwww.juew.org\u002Fpublication\u002FECCV14-hybridDeblur.pdf)||\n|2014|ECCV|[Deblurring Face Images with Exemplars](https:\u002F\u002Ffaculty.ucmerced.edu\u002Fmhyang\u002Fpapers\u002Feccv14_deblur.pdf)|[Code & Project page](https:\u002F\u002Feng.ucmerced.edu\u002Fpeople\u002Fzhu\u002FECCV14_facedeblur.html)|\n|2014|ECCV|[Blind deblurring using internal patch recurrence](http:\u002F\u002Fwww.wisdom.weizmann.ac.il\u002F~vision\u002FBlindDeblur\u002FMichaeli_Irani_ECCV2014.pdf)|[Code & Project page](http:\u002F\u002Fwww.wisdom.weizmann.ac.il\u002F~vision\u002FBlindDeblur.html)|\n|2014|NIPS|[Scale Adaptive Blind Deblurring](https:\u002F\u002Fpapers.nips.cc\u002Fpaper\u002F5566-scale-adaptive-blind-deblurring)|[Project page](https:\u002F\u002Fsites.google.com\u002Fsite\u002Fhczhang1\u002Fprojects\u002Fscale-adaptive-blind-deblurring)|\n|2015|CVPR|[Burst Deblurring: Removing Camera Shake Through Fourier Burst Accumulation](http:\u002F\u002Fdev.ipol.im\u002F~mdelbra\u002Ffba\u002FFBA_cvpr2015_preprint.pdf)|[Project page](http:\u002F\u002Fiie.fing.edu.uy\u002F~mdelbra\u002Ffba\u002F)|\n|2015|CVPR|[Kernel Fusion for Better Image Deblurring](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2015\u002Fhtml\u002FMai_Kernel_Fusion_for_2015_CVPR_paper.html)|[Project page](http:\u002F\u002Fweb.cecs.pdx.edu\u002F~fliu\u002Fproject\u002Fkernelfusion\u002F)|\n|2015|ICCV|[Class-Specific Image Deblurring](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_iccv_2015\u002Fhtml\u002FAnwar_Class-Specific_Image_Deblurring_ICCV_2015_paper.html)|[Code & Project page](https:\u002F\u002Fgithub.com\u002Fsaeed-anwar\u002FClass_Specific_Deblurring)|\n|2015|TIP|[Coupled Learning for Facial Deblur](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1904.08671.pdf)||\n|2016|CVPR|[Blind image deblurring using dark channel prior](http:\u002F\u002Fvllab1.ucmerced.edu\u002F~jinshan\u002Fprojects\u002Fdark-channel-deblur\u002Fdark-channel-deblur\u002Fcvpr16-dark-channel-deblur.pdf)|[Code & Project page](http:\u002F\u002Fvllab1.ucmerced.edu\u002F~jinshan\u002Fprojects\u002Fdark-channel-deblur\u002F)|\n|2016|CVPR|[Robust Kernel Estimation with Outliers Handling for Image 
Deblurring](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2016\u002Fhtml\u002FPan_Robust_Kernel_Estimation_CVPR_2016_paper.html)|[Code](https:\u002F\u002Fwww.dropbox.com\u002Fs\u002Fhz9qmi8ar1k1zn0\u002Fpcode.zip?dl=0)|\n|2016|CVPR|[Blind image deconvolution by automatic gradient activation](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2016\u002Fpapers\u002FGong_Blind_Image_Deconvolution_CVPR_2016_paper.pdf)||\n|2017|CVPR|[Image deblurring via extreme channels prior](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fhtml\u002FYan_Image_Deblurring_via_CVPR_2017_paper.html)|[Code & Project page](https:\u002F\u002Fsites.google.com\u002Fsite\u002Frenwenqi888\u002Fresearch\u002Fdeblurring\u002Fecp)|\n|2017|CVPR|[From local to global: Edge profiles to camera motion in blurred images](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fhtml\u002FVasu_From_Local_to_CVPR_2017_paper.html)|[Project page & Results-on-benchmark-datasets](https:\u002F\u002Fsubeeshvasu.github.io\u002F2017_subeesh_from_cvpr\u002F)|\n|2017|ICCV|[Blind Image Deblurring with Outlier Handling](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fpapers\u002FDong_Blind_Image_Deblurring_ICCV_2017_paper.pdf)|[Code](https:\u002F\u002Fwww.dropbox.com\u002Fs\u002Fqmxkkwgnmuwrfoj\u002Fcode_iccv2017_outlier.zip?dl=0)|\n|2017|ICCV|[Self-paced Kernel Estimation for Robust Blind Image Deblurring](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fpapers\u002FGong_Self-Paced_Kernel_Estimation_ICCV_2017_paper.pdf)|[Code](https:\u002F\u002Fdonggong1.github.io\u002Fpublications.html),[Results](https:\u002F\u002Fdrive.google.com\u002Fopen?id=1gP_s-87js7KKFrIzAlushc1HJqEogR1L)|\n|2017|ICCV|[Convergence Analysis of MAP based Blur Kernel Estimation](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_iccv_2017\u002Fhtml\u002FCho_Convergence_Analysis_of_ICCV_2017_paper.html)||\n|2018|ECCV|[Normalized Blind Deconvolution](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fhtml\u002FMeiguang_Jin_Normalized_Blind_Deconvolution_ECCV_2018_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002FMeiguangJin\u002FNBD)|\n|2018|ECCV|[Deblurring Natural Image Using Super-Gaussian Fields](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fhtml\u002FYuhang_Liu_Deblurring_Natural_Image_ECCV_2018_paper.html)|[Code](https:\u002F\u002Fdonggong1.github.io\u002Fpublications.html)|\n|2019|CVPR|[Blind Image Deblurring With Local Maximum Gradient Prior](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fhtml\u002FChen_Blind_Image_Deblurring_With_Local_Maximum_Gradient_Prior_CVPR_2019_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002Fcuiyixin555\u002FLMG)|\n|2019|CVPR|[Phase-Only Image Based Kernel Estimation for Single Image Blind Deblurring](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fhtml\u002FPan_Phase-Only_Image_Based_Kernel_Estimation_for_Single_Image_Blind_Deblurring_CVPR_2019_paper.html)|[Results-on-benchmark-datasets](https:\u002F\u002Fgithub.com\u002Fpanpanfei\u002FPhase-only-Image-Based-Kernel-Estimation-for-Blind-Motion-Deblurring\u002Ftree\u002Fmaster\u002Fresult)|\n|2019|CVPR|[A Variational EM Framework With Adaptive Edge Selection for Blind Motion Deblurring](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fhtml\u002FYang_A_Variational_EM_Framework_With_Adaptive_Edge_Selection_for_Blind_CVPR_2019_paper.html)||\n|2019|TIP|[Graph-Based Blind Image Deblurring From a Single 
Photograph](https:\u002F\u002Farxiv.org\u002Fabs\u002F1802.07929)|[Code](https:\u002F\u002Fgithub.com\u002FBYchao100\u002FGraph-Based-Blind-Image-Deblurring)|\n|2019|TPAMI|[Surface-aware Blind Image Deblurring](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F8839600)||\n|2019|TCSVT|[Single Image Blind Deblurring Using Multi-Scale Latent Structure Prior](https:\u002F\u002Farxiv.org\u002Fabs\u002F1906.04442)||\n|2020|ECCV|[OID: Outlier Identifying and Discarding in Blind Image Deblurring](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2020\u002Fpapers_ECCV\u002Fhtml\u002F5134_ECCV_2020_paper.php)|[Code&Data](https:\u002F\u002Fliangchen527.github.io\u002F)|\n|2020|ECCV|[Enhanced Sparse Model for Blind Deblurring](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2020\u002Fpapers_ECCV\u002Fpapers\u002F123700630.pdf)|[Code](https:\u002F\u002Fliangchen527.github.io\u002F)|\n|2021|CVPR|[Blind Deblurring for Saturated Images](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fpapers\u002FChen_Blind_Deblurring_for_Saturated_Images_CVPR_2021_paper.pdf)|[Code&Data](https:\u002F\u002Fliangchen527.github.io\u002F)|\n|2021|TCI|[Polyblur: Removing mild blur by polynomial reblurring](https:\u002F\u002Farxiv.org\u002Fabs\u002F2012.09322)||\n|2021|SPIC|[Fast blind deconvolution using a deeper sparse patch-wise maximum gradient prior](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0923596520301910)||\n|2021|TCSVT|[Blind Image Deblurring Using Patch-Wise Minimal Pixels Regularization](https:\u002F\u002F128.84.21.199\u002Fabs\u002F1906.06642v3)|[Code](https:\u002F\u002Fgithub.com\u002FFWen\u002Fdeblur-pmp)|\n|2022|CVPR|[Pixel Screening Based Intermediate Correction for Blind Deblurring](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fhtml\u002FZhang_Pixel_Screening_Based_Intermediate_Correction_for_Blind_Deblurring_CVPR_2022_paper.html)||\n\n\n## Single-Image-Blind-Motion-Deblurring (DL)\n|Year|Pub|Paper|Repo|\n|:---:|:---:|:---:|:---:|\n|2015|CVPR|[Learning a convolutional neural network for non-uniform motion blur removal](https:\u002F\u002Fwww.cv-foundation.org\u002Fopenaccess\u002Fcontent_cvpr_2015\u002Fpapers\u002FSun_Learning_a_Convolutional_2015_CVPR_paper.pdf)|[Code 1](http:\u002F\u002Fgr.xjtu.edu.cn\u002Fc\u002Fdocument_library\u002Fget_file?folderId=2076150&name=DLFE-78101.zip),[Code 2](https:\u002F\u002Fgithub.com\u002FSibozhu\u002FMotionBlur-detection-by-CNN)|\n|2015|BMVC|[Convolutional neural networks for direct text deblurring](http:\u002F\u002Fwww.bmva.org\u002Fbmvc\u002F2015\u002Fpapers\u002Fpaper006\u002Findex.html)|[Code and Project Page](http:\u002F\u002Fwww.fit.vutbr.cz\u002F~ihradis\u002FCNN-Deblur\u002F)|\n|2016|ECCV|[A neural approach to blind motion deblurring](https:\u002F\u002Farxiv.org\u002Fabs\u002F1603.04771)|[Code](https:\u002F\u002Fgithub.com\u002Fayanc\u002Fndeblur)|\n|2016|PAMI|[Learning to deblur](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1406.7444.pdf)||\n|2017|CVPR|[Deep multi-scale convolutional neural network for dynamic scene deblurring](http:\u002F\u002Fzpascal.net\u002Fcvpr2017\u002FNah_Deep_Multi-Scale_Convolutional_CVPR_2017_paper.pdf)|[Code](https:\u002F\u002Fgithub.com\u002FSeungjunNah\u002FDeepDeblur_release)|\n|2017|CVPR|[From Motion Blur to Motion Flow: A Deep Learning Solution for Removing Heterogeneous Motion Blur](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fpapers\u002FGong_From_Motion_Blur_CVPR_2017_paper.pdf)|[Code & Project 
page](https:\u002F\u002Fdonggong1.github.io\u002Fblur2mflow.html)|\n|2017|ICCV|[Blur-Invariant Deep Learning for Blind Deblurring](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fpapers\u002FNimisha_Blur-Invariant_Deep_Learning_ICCV_2017_paper.pdf)||\n|2017|ICCV|[Learning to Super-resolve Blurry Face and Text Images](http:\u002F\u002Ffaculty.ucmerced.edu\u002Fmhyang\u002Fpapers\u002Ficcv2017_gan_super_deblur.pdf)|[Code & Project page](https:\u002F\u002Fsites.google.com\u002Fview\u002Fxiangyuxu\u002Fdeblursr_iccv17)|\n|2017|ICCV|[Learning Discriminative Data Fitting Functions for Blind Image Deblurring](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fpapers\u002FPan_Learning_Discriminative_Data_ICCV_2017_paper.pdf)|[Code](https:\u002F\u002Fwww.dropbox.com\u002Fs\u002Foavk46q521fiowr\u002Ficcv17_learning_deblur_code.zip?dl=0)|\n|2018|ICIP|[Semi-supervised Learning of Camera Motion from a Blurred Image](https:\u002F\u002Fapvijay.github.io\u002Fpdf\u002F2018_icip.pdf)||\n|2018|TIP|[Motion blur kernel estimation via deep learning](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F8039224)|[Code & Project page](https:\u002F\u002Fsites.google.com\u002Fview\u002Fxiangyuxu\u002Fdeepedge_tip)|\n|2018|CVPR|[Deep Semantic Face Deblurring](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fhtml\u002FShen_Deep_Semantic_Face_CVPR_2018_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002Fjoanshen0508\u002FDeep-Semantic-Face-Deblurring)|\n|2018|CVPR|[Learning a Discriminative Prior for Blind Image Deblurring](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fhtml\u002FLi_Learning_a_Discriminative_CVPR_2018_paper.html)|[Code & Project page](https:\u002F\u002Fsites.google.com\u002Fview\u002Flerenhanli\u002Fhomepage\u002Flearn_prior_deblur)|\n|2018|CVPR|[Dynamic Scene Deblurring Using Spatially Variant Recurrent Neural Networks](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fhtml\u002FZhang_Dynamic_Scene_Deblurring_CVPR_2018_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002Fzhjwustc\u002Fcvpr18_rnn_deblur_matcaffe)|\n|2018|CVPR|[Scale-recurrent network for deep image deblurring](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fhtml\u002FTao_Scale-Recurrent_Network_for_CVPR_2018_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002Fjiangsutx\u002FSRN-Deblur)|\n|2018|CVPR|[Deblurgan: Blind motion deblurring using conditional adversarial networks](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fhtml\u002FKupyn_DeblurGAN_Blind_Motion_CVPR_2018_paper.html)|[Code-Pytorch](https:\u002F\u002Fgithub.com\u002FKupynOrest\u002FDeblurGAN)|\n|2018|ECCV|[Unsupervised Class-Specific Deblurring](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fhtml\u002FNimisha_T_M_Unsupervised_Class-Specific_Deblurring_ECCV_2018_paper.html)||\n|2018|BMVC|[Gated Fusion Network for Joint Image Deblurring and Super-Resolution](https:\u002F\u002Farxiv.org\u002Fabs\u002F1807.10806)|[Code](https:\u002F\u002Fgithub.com\u002Fjacquelinelala\u002FGFN)|[Project page](http:\u002F\u002Fxinyizhang.tech\u002Fbmvc2018\u002F)|\n|2019|WACV|[Gyroscope-Aided Motion Deblurring with Deep Networks](https:\u002F\u002Farxiv.org\u002Fabs\u002F1810.00986)|[Code](https:\u002F\u002Fgithub.com\u002Fjannemus\u002FDeepGyro)|\n|2019|CVPR|[Dynamic Scene Deblurring With Parameter Selective Sharing and Nested Skip 
Connections](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fhtml\u002FGao_Dynamic_Scene_Deblurring_With_Parameter_Selective_Sharing_and_Nested_Skip_CVPR_2019_paper.html)||\n|2019|CVPR|[Deep Stacked Hierarchical Multi-Patch Network for Image Deblurring](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fhtml\u002FZhang_Deep_Stacked_Hierarchical_Multi-Patch_Network_for_Image_Deblurring_CVPR_2019_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002FHongguangZhang\u002FDMPHN-cvpr19-master)|\n|2019|CVPR|[Unsupervised Domain-Specific Deblurring via Disentangled Representations](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fhtml\u002FLu_Unsupervised_Domain-Specific_Deblurring_via_Disentangled_Representations_CVPR_2019_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002Fustclby\u002FUnsupervised-Domain-Specific-Deblurring)|\n|2019|CVPR|[Bringing Alive Blurred Moments](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fhtml\u002FPurohit_Bringing_Alive_Blurred_Moments_CVPR_2019_paper.html)|[Project page & Results-on-benchmark-datasets](https:\u002F\u002Fgithub.com\u002Fanshulbshah\u002FBlurred-Image-to-Video)|\n|2019|CVPR|[Douglas-Rachford Networks: Learning Both the Image Prior and Data Fidelity Terms for Blind Image Deconvolution](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fhtml\u002FAljadaany_Douglas-Rachford_Networks_Learning_Both_the_Image_Prior_and_Data_Fidelity_CVPR_2019_paper.html)||\n|2019|ICCV|[DeblurGAN-v2: Deblurring (Orders-of-Magnitude) Faster and Better](https:\u002F\u002Farxiv.org\u002Fabs\u002F1908.03826)|[Code](https:\u002F\u002Fgithub.com\u002FTAMU-VITA\u002FDeblurGANv2)|\n|2019|ICCV (HIDE)|[Human-Aware Motion Deblurring](https:\u002F\u002Fpdfs.semanticscholar.org\u002F20a4\u002Fb3353579525f0b76ec42e17a2284b4453f9a.pdf)||\n|2019|BMVC|[Blind image deconvolution using deep generative priors](https:\u002F\u002Farxiv.org\u002Fabs\u002F1802.04073)||\n|2019|ACMMM|[Tell Me Where It is Still Blurry: Adversarial Blurred Region Mining and Refining](https:\u002F\u002Fwww.iis.sinica.edu.tw\u002Fpapers\u002Fliutyng\u002F22871-F.pdf)||\n|2019|IJCV|[Joint Face Hallucination and Deblurring via Structure Generation and Detail Enhancement](https:\u002F\u002Farxiv.org\u002Fabs\u002F1811.09019)|[Code](https:\u002F\u002Fgithub.com\u002FTAMU-VITA\u002FDeblurGANv2)|\n|2020|AAAI|[Learning to Deblur Face Images via Sketch Synthesis](https:\u002F\u002Faaai.org\u002Fojs\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F6818\u002F6672)||\n|2020|AAAI|[Region-Adaptive Dense Network for Efficient Motion Deblurring](https:\u002F\u002Farxiv.org\u002Fabs\u002F1903.11394)||\n|2020|WACV|[DAVID: Dual-Attentional Video Deblurring](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_WACV_2020\u002Fhtml\u002FWu_DAVID_Dual-Attentional_Video_Deblurring_WACV_2020_paper.html)||\n|2020|CVPR|[Neural Blind Deconvolution Using Deep Priors](https:\u002F\u002Farxiv.org\u002Fabs\u002F1908.02197)|[Code](https:\u002F\u002Fgithub.com\u002Fcsdwren\u002FSelfDeblur)|\n|2020|CVPR|[Spatially-Attentive Patch-Hierarchical Network for Adaptive Motion Deblurring](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2004.05343.pdf)||\n|2020|CVPR|[Deblurring by Realistic Blurring](https:\u002F\u002Farxiv.org\u002Fabs\u002F2004.01860)|[Code](https:\u002F\u002Fgithub.com\u002FHDCVLab\u002FDeblurring-by-Realistic-Blurring)|\n|2020|CVPR|[Learning Event-Based Motion Deblurring](https:\u002F\u002Farxiv.org\u002Fabs\u002F2004.05794)||\n|2020|CVPR|[Efficient 
Dynamic Scene Deblurring Using Spatially Variant Deconvolution Network With Optical Flow Guided Training](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2020\u002Fhtml\u002FYuan_Efficient_Dynamic_Scene_Deblurring_Using_Spatially_Variant_Deconvolution_Network_With_CVPR_2020_paper.html)||\n|2020|CVPR|[Deblurring using Analysis-Synthesis Networks Pair](https:\u002F\u002Farxiv.org\u002Fabs\u002F2004.02956)||\n|2020|ECCV|[Multi-Temporal Recurrent Neural Networks For Progressive Non-Uniform Single Image Deblurring With Incremental Temporal Training](https:\u002F\u002Farxiv.org\u002Fabs\u002F1911.07410)||\n|2020|TIP|[Efficient and Interpretable Deep Blind Image Deblurring Via Algorithm Unrolling](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1902.03493.pdf)||\n|2020|TIP|[Deblurring Face Images using Uncertainty Guided Multi-Stream Semantic Networks](https:\u002F\u002Farxiv.org\u002Fabs\u002F1907.13106)|[Code](https:\u002F\u002Fgithub.com\u002Frajeevyasarla\u002FUMSN-Face-Deblurring)|\n|2020|TIP|[Dark and bright channel prior embedded network for dynamic scene deblurring](https:\u002F\u002Fwww4.comp.polyu.edu.hk\u002F~cslzhang\u002Fpaper\u002FDBCPeNet_TIP.pdf)|[Code](https:\u002F\u002Fgithub.com\u002Fcsjcai\u002FDBCPeNet)|\n|2020|TIP|[Dynamic Scene Deblurring by Depth Guided Model](https:\u002F\u002Ffaculty.ucmerced.edu\u002Fmhyang\u002Fpapers\u002Ftip2020_dynamic_scene_deblurring.pdf)|[Project Page](https:\u002F\u002Fsites.google.com\u002Fview\u002Flerenhanli\u002Fhomepage\u002Fdepth_deblurring)|\n|2020|IEEEAccess|[Scale-Iterative Upscaling Network for Image Deblurring](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F8963625)|[Code](https:\u002F\u002Fgithub.com\u002Fminyuanye\u002FSIUN)|\n|2020|ACCV|[Human Motion Deblurring using Localized Body Prior](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FACCV2020\u002Fhtml\u002FLumentut_Human_Motion_Deblurring_using_Localized_Body_Prior_ACCV_2020_paper.html)||\n|2020|TPAMI|[Physics-Based Generative Adversarial Models for Image Restoration and Beyond](https:\u002F\u002Farxiv.org\u002Fabs\u002F1808.00605)|[Code](https:\u002F\u002Fjspan.github.io\u002Fprojects\u002Fphysicsgan\u002F)|\n|2020|TCI|[Blind Image Deconvolution using Deep Generative Priors](https:\u002F\u002Farxiv.org\u002Fabs\u002F1802.04073)||\n|2020|TMM|[Raw Image Deblurring](https:\u002F\u002Farxiv.org\u002Fabs\u002F2012.04264)|[Dataset](https:\u002F\u002Fgithub.com\u002Fbob831009\u002Fraw_image_deblurring)|\n|2020|Arxiv|[Blur Invariant Kernel-Adaptive Network for Single Image Blind deblurring](https:\u002F\u002Farxiv.org\u002Fabs\u002F2007.04543)||\n|2021|TPAMI|[Exposure Trajectory Recovery from Motion Blur](https:\u002F\u002Farxiv.org\u002Fabs\u002F2010.02484)|[Code](https:\u002F\u002Fgithub.com\u002Fyjzhang96\u002FMotion-ETR)|\n|2021|Arxiv|[BANet: Blur-aware Attention Networks for Dynamic Scene Deblurring](https:\u002F\u002Farxiv.org\u002Fabs\u002F2101.07518)|[Code](https:\u002F\u002Fgithub.com\u002Fpp00704831\u002FBANet)|\n|2021|CVPR|[Multi-Stage Progressive Image Restoration](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2102.02808.pdf)|[Code](https:\u002F\u002Fgithub.com\u002Fswz30\u002FMPRNet)|\n|2021|CVPR|[DeFMO: Deblurring and Shape Recovery of Fast Moving Objects](https:\u002F\u002Farxiv.org\u002Fabs\u002F2012.00595)|[Code](https:\u002F\u002Fgithub.com\u002Frozumden\u002FDeFMO)|\n|2021|CVPR|[Blind Deblurring for Saturated 
Images](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fhtml\u002FChen_Blind_Deblurring_for_Saturated_Images_CVPR_2021_paper.html)||\n|2021|CVPR|[Test-Time Fast Adaptation for Dynamic Scene Deblurring via Meta-Auxiliary Learning](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fhtml\u002FChi_Test-Time_Fast_Adaptation_for_Dynamic_Scene_Deblurring_via_Meta-Auxiliary_Learning_CVPR_2021_paper.html)||\n|2021|CVPR|[Explore Image Deblurring via Encoded Blur Kernel Space](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.00317)|[Code](https:\u002F\u002Fgithub.com\u002FVinAIResearch\u002Fblur-kernel-space-exploring)|\n|2021|CVPR|[Pre-trained image processing transformer](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fpapers\u002FChen_Pre-Trained_Image_Processing_Transformer_CVPR_2021_paper.pdf)|[Code](https:\u002F\u002Fgithub.com\u002Fhuawei-noah\u002FPretrained-IPT)|\n|2021|CVPR|[Multi-stage progressive image restoration](https:\u002F\u002Farxiv.org\u002Fabs\u002F2102.02808)|[Code](https:\u002F\u002Fgithub.com\u002Fswz30\u002FMPRNet)|\n|2021|CVPRW|[Hinet: Half instance normalization network for image restoration](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021W\u002FNTIRE\u002Fpapers\u002FChen_HINet_Half_Instance_Normalization_Network_for_Image_Restoration_CVPRW_2021_paper.pdf)|[Code](https:\u002F\u002Fgithub.com\u002Fmegvii-model\u002FHINet)|\n|2021|ICCV|[Spatially-Adaptive Image Restoration using Distortion-Guided Networks](https:\u002F\u002Farxiv.org\u002Fabs\u002F2108.08617)|[Code](https:\u002F\u002Fgithub.com\u002Fhuman-analysis\u002Fspatially-adaptive-image-restoration\u002F)|\n|2021|ICCV|[Rethinking Coarse-To-Fine Approach in Single Image Deblurring](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fhtml\u002FCho_Rethinking_Coarse-To-Fine_Approach_in_Single_Image_Deblurring_ICCV_2021_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002Fchosj95\u002Fmimo-unet)|\n|2021|ICCV|[Perceptual Variousness Motion Deblurring With Light Global Context Refinement](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fhtml\u002FLi_Perceptual_Variousness_Motion_Deblurring_With_Light_Global_Context_Refinement_ICCV_2021_paper.html)||\n|2021|ICCV|[Pyramid Architecture Search for Real-Time Image Deblurring](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fhtml\u002FHu_Pyramid_Architecture_Search_for_Real-Time_Image_Deblurring_ICCV_2021_paper.html)||\n|2021|ICCV|[Searching for Controllable Image Restoration Networks](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fhtml\u002FKim_Searching_for_Controllable_Image_Restoration_Networks_ICCV_2021_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002Fghimhw\u002FTASNet)|\n|2021|ICCVW|[Sdwnet: A straight dilated network with wavelet transformation for image deblurring](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021W\u002FAIM\u002Fpapers\u002FZou_SDWNet_A_Straight_Dilated_Network_With_Wavelet_Transformation_for_Image_ICCVW_2021_paper.pdf)|[Code](https:\u002F\u002Fgithub.com\u002FFlyEgle\u002FSDWNet)|\n|2021|TIP|[Structure-Aware Motion Deblurring Using Multi-Adversarial Optimized CycleGAN](http:\u002F\u002Fgraphics.csie.ncku.edu.tw\u002FTIP_cycle_2021\u002FTIP2021.pdf)|\n|2021|JSTS|[Degradation Aware Approach to Image Restoration Using Knowledge Distillation](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9288928)||\n|2021|Arxiv|[Non-uniform Blur Kernel Estimation via 
Adaptive Basis Decomposition](https:\u002F\u002Farxiv.org\u002Fabs\u002F2102.01026)|[Code](https:\u002F\u002Fgithub.com\u002FGuillermoCarbajal\u002FNonUniformBlurKernelEstimation)|\n|2021|Arxiv|[Clean Images are Hard to Reblur: A New Clue for Deblurring](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2104.12665.pdf)||\n|2021|Arxiv|[Deep residual fourier transformation for single image deblurring](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.11745)|[Code](https:\u002F\u002Fgithub.com\u002FINVOKERer\u002FDeepRFT)|\n|2021|CVIU|[Single-image deblurring with neural networks: A comparative survey](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fabs\u002Fpii\u002FS1077314220301533?dgcid=rss_sd_all)||\n|2021|TIP|[Blind Motion Deblurring Super-Resolution: When Dynamic Spatio-Temporal Learning Meets Static Image Understanding](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2105.13077.pdf)||\n|2021|NC|[Deep Robust Image Deblurring via Blur Distilling and Information Comparison in Latent Space](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fabs\u002Fpii\u002FS0925231221013771)||\n|2022|IJCV|[Deep Image Deblurring: A Survey](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2201.10700.pdf)||\n|2022|WACV|[Deep Feature Prior Guided Face Deblurring](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FWACV2022\u002Fhtml\u002FJung_Deep_Feature_Prior_Guided_Face_Deblurring_WACV_2022_paper.html)||\n|2022|CVPR|[Restormer: Efficient transformer for high-resolution image restoration](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FZamir_Restormer_Efficient_Transformer_for_High-Resolution_Image_Restoration_CVPR_2022_paper.pdf)|[Code](https:\u002F\u002Fgithub.com\u002Fswz30\u002FRestormer)|\n|2022|CVPR|[Maxim: Multi-axis mlp for image processing](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FTu_MAXIM_Multi-Axis_MLP_for_Image_Processing_CVPR_2022_paper.pdf)|[Code](https:\u002F\u002Fgithub.com\u002Fgoogle-research\u002Fmaxim)|\n|2022|CVPR|[Uformer: A general u-shaped transformer for image restoration](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FWang_Uformer_A_General_U-Shaped_Transformer_for_Image_Restoration_CVPR_2022_paper.pdf)|[Code](https:\u002F\u002Fgithub.com\u002FZhendongWang6\u002FUformer)|\n|2022|CVPR|[Deblurring via Stochastic Refinement](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FWhang_Deblurring_via_Stochastic_Refinement_CVPR_2022_paper.pdf)||\n|2022|CVPR|[XYDeblur: Divide and Conquer for Single Image Deblurring](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FJi_XYDeblur_Divide_and_Conquer_for_Single_Image_Deblurring_CVPR_2022_paper.pdf)||\n|2022|CVPR|[All-In-One Image Restoration for Unknown Corruption](http:\u002F\u002Fpengxi.me\u002Fwp-content\u002Fuploads\u002F2022\u002F03\u002FAll-In-One-Image-Restoration-for-Unknown-Corruption.pdf)|[Code](https:\u002F\u002Fgithub.com\u002FXLearning-SCU\u002F2022-CVPR-AirNet)|\n|2022|CVPR|[Exploring and Evaluating Image Restoration Potential in Dynamic Scenes](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2203.11754.pdf)||\n|2022|CVPR|[Deep Generalized Unfolding Networks for Image Restoration](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.13348)|[Code](https:\u002F\u002Fgithub.com\u002FMC-E\u002FDeep-Generalized-Unfolding-Networks-for-Image-Restoration)|\n|2022|CVPR|[GIQE: Generic Image Quality Enhancement via Nth Order Iterative 
Degradation](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FShyam_GIQE_Generic_Image_Quality_Enhancement_via_Nth_Order_Iterative_Degradation_CVPR_2022_paper.pdf)||\n|2022|CVPRW|[Blind Non-Uniform Motion Deblurring Using Atrous Spatial Pyramid Deformable Convolution and Deblurring-Reblurring Consistency](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022W\u002FNTIRE\u002Fhtml\u002FHuo_Blind_Non-Uniform_Motion_Deblurring_Using_Atrous_Spatial_Pyramid_Deformable_Convolution_CVPRW_2022_paper.html)||\n|2022|CVPRW|[Motion Aware Double Attention Network for Dynamic Scene Deblurring](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022W\u002FNTIRE\u002Fhtml\u002FYang_Motion_Aware_Double_Attention_Network_for_Dynamic_Scene_Deblurring_CVPRW_2022_paper.html)||\n|2022|ECCV|[Stripformer: Strip Transformer for Fast Image Deblurring](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.04627)|[Code](https:\u002F\u002Fgithub.com\u002Fpp00704831\u002FStripformer)|\n|2022|ECCV|[Simple baselines for image restoration](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.04676)|[Code](https:\u002F\u002Fgithub.com\u002Fmegvii-research\u002FNAFNet)|\n|2022|ECCV|[D2HNet: Joint Denoising and Deblurring with Hierarchical Network for Robust Night Image Restoration](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.03294)|[Code](https:\u002F\u002Fgithub.com\u002Fzhaoyuzhi\u002Fd2hnet)|\n|2022|ECCV|[Improving Image Restoration by Revisiting Global Information Aggregation](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.04491)|[Code](https:\u002F\u002Fgithub.com\u002Fmegvii-research\u002FTLC)|\n|2022|ECCV|[Animation from Blur: Multi-modal Blur Decomposition with Motion Guidance](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.10123)|[Code](https:\u002F\u002Fgithub.com\u002Fzzh-tech\u002FAnimation-from-Blur)|\n|2022|ECCV|[Learning Degradation Representations for Image Deblurring](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.05244)|[Code](https:\u002F\u002Fgithub.com\u002Fdasongli1\u002FLearning_degradation)|\n|2022|ECCV|[Realistic Blur Synthesis for Learning Image Deblurring](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2022\u002Fpapers_ECCV\u002Fhtml\u002F6325_ECCV_2022_paper.php)||\n|2022|ECCV|[Event-based Fusion for Motion Deblurring with Cross-modal Attention](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.00167)|[Code](https:\u002F\u002Fgithub.com\u002FAHupuJR\u002FEFNet)|\n|2022|ACCV|[Learning to Predict Decomposed Dynamic Filters for Single Image Motion Deblurring](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FACCV2022\u002Fpapers\u002FHu_Learning_to_Predict_Decomposed_Dynamic_Filters_for_Single_Image_Motion_ACCV_2022_paper.pdf)|[Code](https:\u002F\u002Fgithub.com\u002FZHIQIANGHU2021\u002FDecomposedDynamicFilters)|\n|2022|Arxiv|[Multi-scale-stage network for single image deblurring](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.09652)||\n|2023|AAAI|[Real-world deep local motion deblurring](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.08179)|[Code&Dataset](https:\u002F\u002Fgithub.com\u002FLeiaLi\u002FReLoBlur)|\n|2023|AAAI|[Intriguing Findings of Frequency Selection for Image Deblurring](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.11745)|[Code](https:\u002F\u002Fgithub.com\u002FDeepMed-Lab-ECNU\u002FDeepRFT-AAAI2023)|\n|2023|AAAI|[Dual-domain Attention for Image Deblurring](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F25122\u002F24894)||\n|2023|CVPR|[Self-Supervised Non-Uniform Kernel Estimation 
With Flow-Based Motion Prior for Blind Image Deblurring](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FFang_Self-Supervised_Non-Uniform_Kernel_Estimation_With_Flow-Based_Motion_Prior_for_Blind_CVPR_2023_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002FFangzhenxuan\u002FUFPDeblur)|\n|2023|CVPR|[Efficient Frequency Domain-Based Transformers for High-Quality Image Deblurring](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FKong_Efficient_Frequency_Domain-Based_Transformers_for_High-Quality_Image_Deblurring_CVPR_2023_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002Fkkkls\u002FFFTformer)|\n|2023|CVPR|[Self-Supervised Blind Motion Deblurring With Deep Expectation Maximization](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FLi_Self-Supervised_Blind_Motion_Deblurring_With_Deep_Expectation_Maximization_CVPR_2023_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002FChilie\u002FDeblur_MCEM)|\n|2023|ICCV|[Multiscale Structure Guided Diffusion for Image Deblurring](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FRen_Multiscale_Structure_Guided_Diffusion_for_Image_Deblurring_ICCV_2023_paper.pdf)||\n|2023|ICCV|[Multi-Scale Residual Low-Pass Filter Network for Image Deblurring](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FDong_Multi-Scale_Residual_Low-Pass_Filter_Network_for_Image_Deblurring_ICCV_2023_paper.pdf)||\n|2023|ICCV|[DiffIR: Efficient Diffusion Model for Image Restoration](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FXia_DiffIR_Efficient_Diffusion_Model_for_Image_Restoration_ICCV_2023_paper.pdf)||\n|2023|NeurIPS|[Hierarchical Integration Diffusion Model for Realistic Image Deblurring](https:\u002F\u002Fpapers.neurips.cc\u002Fpaper_files\u002Fpaper\u002F2023\u002Ffile\u002F5cebc89b113920dbff7c79854ba765a3-Paper-Conference.pdf)|[Code](https:\u002F\u002Fgithub.com\u002Fzhengchen1999\u002FHI-Diff)|\n|2023|Arxiv|[LaKDNet: Revisiting Image Deblurring with an Efficient ConvNet](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2302.02234.pdf)|[Code](https:\u002F\u002Fgithub.com\u002Flingyanruan\u002FLaKDNet)|\n|2024|CVPR|[Unsupervised Blind Image Deblurring Based on Self-Enhancement](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FChen_Unsupervised_Blind_Image_Deblurring_Based_on_Self-Enhancement_CVPR_2024_paper.html)||\n|2024|CVPR|[Blur2Blur: Blur Conversion for Unsupervised Image Deblurring on Unknown Domains](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FPham_Blur2Blur_Blur_Conversion_for_Unsupervised_Image_Deblurring_on_Unknown_Domains_CVPR_2024_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002FVinAIResearch\u002FBlur2Blur)|\n|2024|CVPR|[Motion-adaptive Separable Collaborative Filters for Blind Motion Deblurring](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FLiu_Motion-adaptive_Separable_Collaborative_Filters_for_Blind_Motion_Deblurring_CVPR_2024_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002FChengxuLiu\u002FMISCFilter)|\n|2024|CVPR|[Efficient Multi-scale Network with Learnable Discrete Wavelet Transform for Blind Motion 
Deblurring](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FGao_Efficient_Multi-scale_Network_with_Learnable_Discrete_Wavelet_Transform_for_Blind_CVPR_2024_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002Fthqiu0419\u002FMLWNet)|\n|2024|CVPR|[Fourier Priors-Guided Diffusion for Zero-Shot Joint Low-Light Enhancement and Deblurring](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FLv_Fourier_Priors-Guided_Diffusion_for_Zero-Shot_Joint_Low-Light_Enhancement_and_Deblurring_CVPR_2024_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002Faipixel\u002FFourierDiff)|\n|2024|CVPR|[Real-World Efficient Blind Motion Deblurring via Blur Pixel Discretization](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FKim_Real-World_Efficient_Blind_Motion_Deblurring_via_Blur_Pixel_Discretization_CVPR_2024_paper.html)||\n|2024|CVPR|[AdaRevD: Adaptive Patch Exiting Reversible Decoder Pushes the Limit of Image Deblurring](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FMao_AdaRevD_Adaptive_Patch_Exiting_Reversible_Decoder_Pushes_the_Limit_of_CVPR_2024_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002FINVOKERer\u002FAdaRevD)|\n|2024|CVPR|[FMA-Net: Flow-Guided Dynamic Filtering and Iterative Feature Refinement with Multi-Attention for Joint Video Super-Resolution and Deblurring](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FYouk_FMA-Net_Flow-Guided_Dynamic_Filtering_and_Iterative_Feature_Refinement_with_Multi-Attention_CVPR_2024_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002FKAIST-VICLab\u002FFMA-Net)|\n|2024|CVPR|[ID-Blau: Image Deblurring by Implicit Diffusion-based reBLurring AUgmentation](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FWu_ID-Blau_Image_Deblurring_by_Implicit_Diffusion-based_reBLurring_AUgmentation_CVPR_2024_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002Fplusgood-steven\u002FID-Blau)|\n|2024|CVPR|[Residual Denoising Diffusion Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2308.13712)|[Code](https:\u002F\u002Fgithub.com\u002Fnachifur\u002FRDDM)|\n|2024|IJCV|[Blind Image Deblurring with Unknown Kernel Size and Substantial Noise](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2208.09483.pdf)|[Project Page](https:\u002F\u002Fgithub.com\u002Fsun-umn\u002FBlind-Image-Deblurring)|\n|2024|ECCV|[Motion Aware Event Representation-driven Image Deblurring](https:\u002F\u002Fgithub.com\u002FZhijingS\u002FDA_event_deblur)|[Code](https:\u002F\u002Fgithub.com\u002FZhijingS\u002FDA_event_deblur)|\n|2024|Arxiv|[Gyroscope-Assisted Motion Deblurring Network](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.06854)||\n|2024|Arxiv|[Efficient Image Deblurring Networks based on Diffusion Models](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.05907)|[Code](https:\u002F\u002Fgithub.com\u002Fbnm6900030\u002Fswintormer)|\n|2025|CVPR|[Gyro-based Neural Single Image Deblurring](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2404.00916)|[Code](https:\u002F\u002Fgithub.com\u002Fhmyang0727\u002FGyroDeblurNet)|\n\n\n## Non-Blind-Deblurring\n|Year|Pub|Paper|Repo|\n|:---:|:---:|:---:|:---:|\n|2006|IJCV|[Image deblurring in the presence of impulsive noise](https:\u002F\u002Flink.springer.com\u002Farticle\u002F10.1007\u002Fs11263-006-6468-1)||\n|2009|NIPS|[Fast image deconvolution using hyper-laplacian priors](http:\u002F\u002Fcs.nyu.edu\u002F~dilip\u002Fresearch\u002Fpapers\u002Ffid_nips09.pdf)|[Code & Project 
page](https:\u002F\u002Fdilipkay.wordpress.com\u002Ffast-deconvolution\u002F)|\n|2011|PAMI|[Richardson-Lucy Deblurring for Scenes under a Projective Motion Path](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F5674049)||\n|2011|ICCV|[Handling outliers in non-blind image deconvolution](http:\u002F\u002Fcg.postech.ac.kr\u002Fpapers\u002Fdeconv_outliers.pdf)|[Code](https:\u002F\u002Fgithub.com\u002FCoupeLibrary\u002Fhandleoutlier)|\n|2011|ICCV|[From learning models of natural image patches to whole image restoration](http:\u002F\u002Fpeople.ee.duke.edu\u002F~lcarin\u002FEPLICCVCameraReady.pdf)|[Code](http:\u002F\u002Fpeople.csail.mit.edu\u002Fdanielzoran\u002F)|\n|2012|TIP|[Bm3d frames and variational image deblurring](https:\u002F\u002Fwww.cs.tut.fi\u002F~foi\u002FGCF-BM3D\u002FBM3DframesDeblur-Danielyan.pdf)||\n|2012|TIP|[Robust image deblurring with an inaccurate blur kernel](http:\u002F\u002Fciteseerx.ist.psu.edu\u002Fviewdoc\u002Fdownload?doi=10.1.1.716.1055&rep=rep1&type=pdf) [Code](https:\u002F\u002Fblog.nus.edu.sg\u002Fmatjh\u002Fdownload\u002F)|\n|2013|CVPR|[A machine learning approach for non-blind image deconvolution](https:\u002F\u002Fwww.cv-foundation.org\u002Fopenaccess\u002Fcontent_cvpr_2013\u002Fpapers\u002FSchuler_A_Machine_Learning_2013_CVPR_paper.pdf)|[Code & Project page](http:\u002F\u002Fwebdav.is.mpg.de\u002Fpixel\u002Fneural_deconvolution\u002F)|\n|2013|CVPR|[Discriminative non-blind deblurring](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2013\u002Fpapers\u002FSchmidt_Discriminative_Non-blind_Deblurring_2013_CVPR_paper.pdf)|[Code](https:\u002F\u002Fwww.visinf.tu-darmstadt.de\u002Fvi_research\u002Fcode\u002Findex.en.jsp#discriminative_deblurring)|\n|2014|TIP|[A general framework for regularized, similarity-based image restoration](http:\u002F\u002Fwww.academia.edu\u002Fdownload\u002F42621942\u002FA_General_Framework_for_Regularized_Simi20160212-19526-i3txol.pdf) [Code & Project page](http:\u002F\u002Falumni.soe.ucsc.edu\u002F~aminkh\u002FKernelRestoration.html)|\n|2014|NIPS|[Deep convolutional neural network for image deconvolution](http:\u002F\u002Fwww.cse.cuhk.edu.hk\u002Fleojia\u002Fpapers\u002Fdeconv_nips14.pdf)|[Code & Project page](http:\u002F\u002Flxu.me\u002Fprojects\u002Fdcnn\u002F)|\n|2014|CVPR|[Shrinkage fields for effective image restoration](http:\u002F\u002Fresearch.uweschmidt.org\u002Fpubs\u002Fcvpr14schmidt.pdf)|[Code](https:\u002F\u002Fgithub.com\u002Fuschmidt83\u002Fshrinkage-fields)|\n|2014|ECCV|[Good Image Priors for Non-blind Deconvolution: Generic vs Specific](http:\u002F\u002Fcs.brown.edu\u002F~lbsun\u002FGoodPriors2014\u002Fgoodpriors_eccv2014.pdf)|[Project page](http:\u002F\u002Fcs.brown.edu\u002F~lbsun\u002FGoodPriors2014\u002Fgoodpriors2014eccv.html)|\n|2016|CVIP|[Fast Non-Blind Image De-blurring With Sparse Priors](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-981-10-2104-6_56)||\n|2017|TIP|[Partial Deconvolution With Inaccurate Blur Kernel](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F8071032)||\n|2017|ICCP|[Fast non-blind deconvolution via regularized residual networks with long\u002Fshort skip-connections](http:\u002F\u002Fcg.postech.ac.kr\u002Fpapers\u002FskipConnect.pdf)|[Code](https:\u002F\u002Fgithub.com\u002FHyeongseokSon1\u002FCNN_deconvolution), [Project Page](http:\u002F\u002Fcg.postech.ac.kr\u002Fresearch\u002Fresnet_deconvolution\u002F)|\n|2017|CVPR|[Noise-Blind Image 
Deblurring](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fhtml\u002FJin_Noise-Blind_Image_Deblurring_CVPR_2017_paper.html)||\n|2017|CVPR|[Learning Deep CNN Denoiser Prior for Image Restoration](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fhtml\u002FZhang_Learning_Deep_CNN_CVPR_2017_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002Fcszn\u002Fircnn)|\n|2017|CVPR|[Learning Fully Convolutional Networks for Iterative Non-blind Deconvolution](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1611.06495)|[Code](https:\u002F\u002Fgithub.com\u002Fzhjwustc\u002Fcvpr17_iter_deblur_testing_matconvnet)|\n|2017|ICCV|[Learning proximal operators: Using denoising networks for regularizing inverse imaging problems](https:\u002F\u002Farxiv.org\u002Fabs\u002F1704.03488)||\n|2017|ICCV|[Learning to push the limits of efficient fft-based image deconvolution](http:\u002F\u002Fresearch.uweschmidt.org\u002Fpubs\u002Ficcv17kruse.pdf)|[Code](https:\u002F\u002Fgithub.com\u002Fuschmidt83\u002Ffourier-deconvolution-network)|\n|2017|NIPS|[Deep Mean-Shift Priors for Image Restoration](https:\u002F\u002Fpapers.nips.cc\u002Fpaper\u002F6678-deep-mean-shift-priors-for-image-restoration.pdf)|[Code](https:\u002F\u002Fgithub.com\u002Fsiavashbigdeli\u002FDMSP)|\n|2018|ICIP|[Modeling Realistic Degradations in Non-Blind Deconvolution](https:\u002F\u002Farxiv.org\u002Fabs\u002F1806.01097)||\n|2018|CVPR|[Non-blind Deblurring: Handling Kernel Uncertainty with CNNs](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fhtml\u002FVasu_Non-Blind_Deblurring_Handling_CVPR_2018_paper.html)|[Project page & Results-on-benchmark-datasets](https:\u002F\u002Fgithub.com\u002Fsubeeshvasu\u002F2018_subeesh_nbd_cvpr)|\n|2018|CVPR|[Deep image prior](https:\u002F\u002Farxiv.org\u002Fabs\u002F1711.10925)|[Code](https:\u002F\u002Fgithub.com\u002FDmitryUlyanov\u002Fdeep-image-prior)|\n|2018|ECCV|[Learning Data Terms for Non-blind Deblurring](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fhtml\u002FJiangxin_Dong_Learning_Data_Terms_ECCV_2018_paper.html)||\n|2018|NIPS|[Deep Non-Blind Deconvolution via Generalized Low-Rank Approximation](https:\u002F\u002Fpapers.nips.cc\u002Fpaper\u002F7313-deep-non-blind-deconvolution-via-generalized-low-rank-approximation.pdf)|[Code](https:\u002F\u002Fgithub.com\u002Frwenqi\u002FNBD-GLRA)|\n|2019|ICLR|[Deep decoder: Concise image representations from untrained non-convolutional networks](https:\u002F\u002Farxiv.org\u002Fabs\u002F1810.03982)|[Code](https:\u002F\u002Fgithub.com\u002Freinhardh\u002Fsupplement_deep_decoder)|\n|2019|CVPR|[Deep Plug-And-Play Super-Resolution for Arbitrary Blur Kernels](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fhtml\u002FZhang_Deep_Plug-And-Play_Super-Resolution_for_Arbitrary_Blur_Kernels_CVPR_2019_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002Fcszn\u002FDPSR)|\n|2019|ICCVW|[Image deconvolution with deep image and kernel priors](https:\u002F\u002Farxiv.org\u002Fabs\u002F1910.08386)||\n|2019|TPAMI|[Denoising prior driven deep neural network for image restoration](https:\u002F\u002Farxiv.org\u002Fabs\u002F1801.06756)||\n|2020|CVPR|[Variational-EM-Based Deep Learning for Noise-Blind Image Deblurring](https:\u002F\u002Fgithub.com\u002Fysnan\u002FVEM-NBD\u002Fblob\u002Fmaster\u002Fpaper\u002Fvem_deconv.pdf)|[Project page & Results-on-benchmark-datasets](https:\u002F\u002Fgithub.com\u002Fysnan\u002FVEM-NBD)|\n|2020|CVPR|[Deep Learning for Handling Kernel\u002Fmodel Uncertainty in Image 
Deconvolution](https:\u002F\u002Fgithub.com\u002Fysnan\u002FNBD_KerUnc\u002Fblob\u002Fmaster\u002Fpaper\u002Fkn.pdf)|[Project page & Results-on-benchmark-datasets](https:\u002F\u002Fgithub.com\u002Fysnan\u002FNBD_KerUnc)|\n|2020|ECCV|[End-to-end interpretable learning of non-blind image deblurring](https:\u002F\u002Farxiv.org\u002Fabs\u002F2007.01769)||\n|2020|EUSIPCO|[Bp-dip: A backprojection based deep image prior](https:\u002F\u002Farxiv.org\u002Fabs\u002F2003.05417)|[Code](https:\u002F\u002Fgithub.com\u002Fjennyzu\u002FBP-DIP-deblurring)|\n|2020|NIPS|[Deep Wiener Deconvolution: Wiener Meets Deep Learning for Image Deblurring](https:\u002F\u002Fpapers.nips.cc\u002Fpaper\u002F2020\u002Ffile\u002F0b8aff0438617c055eb55f0ba5d226fa-Paper.pdf)|[Code](https:\u002F\u002Fgitlab.mpi-klsb.mpg.de\u002Fjdong\u002Fdwdn)|\n|2020|TNLS|[Learning deep gradient descent optimization for image deconvolution](https:\u002F\u002Farxiv.org\u002Fabs\u002F1804.03368)|[Code](https:\u002F\u002Fgithub.com\u002Fdonggong1\u002Flearn-optimizer-rgdn)|\n|2020|TCI|[Neumann networks for linear inverse problems in imaging](https:\u002F\u002Farxiv.org\u002Fabs\u002F1901.03707)|[Code](https:\u002F\u002Fgithub.com\u002Fdgilton\u002Fneumann_networks_code)|\n|2020|Arxiv|[The Maximum Entropy on the Mean Method for Image Deblurring](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2002.10434.pdf)||\n|2021|CVPR|[Learning Spatially-Variant MAP Models for Non-Blind Image Deblurring](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fhtml\u002FDong_Learning_Spatially-Variant_MAP_Models_for_Non-Blind_Image_Deblurring_CVPR_2021_paper.html)||\n|2021|CVPR|[Learning a Non-Blind Deblurring Network for Night Blurry Images](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fhtml\u002FChen_Learning_a_Non-Blind_Deblurring_Network_for_Night_Blurry_Images_CVPR_2021_paper.html)|[Code&Data](https:\u002F\u002Fliangchen527.github.io\u002F)|\n|2021|TNNLS|[Nonblind Image Deblurring via Deep Learning in Complex Field](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F9404870)||\n|2022|WACV|[Non-Blind Deblurring for Fluorescence: A Deformable Latent Space Approach With Kernel Parameterization](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FWACV2022\u002Fpapers\u002FGuan_Non-Blind_Deblurring_for_Fluorescence_A_Deformable_Latent_Space_Approach_With_WACV_2022_paper.pdf)|\n|2022|CVPR|[Deep Constrained Least Squares for Blind Image Super-Resolution](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.07508)|[Project Page](https:\u002F\u002Fgithub.com\u002FAlgolzw\u002FDCLS)|\n|2022|CVPRW|[A Robust Non-Blind Deblurring Method Using Deep Denoiser Prior](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022W\u002FNTIRE\u002Fhtml\u002FFang_A_Robust_Non-Blind_Deblurring_Method_Using_Deep_Denoiser_Prior_CVPRW_2022_paper.html)||\n|2022|SPIC|[Black-box image deblurring and defiltering](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0923596522001242)|[CodeMatlab](https:\u002F\u002Fgithub.com\u002Ffayolle\u002FbbDeblur), [CodePy](https:\u002F\u002Fgithub.com\u002Ffayolle\u002FbbDeblur_py)|\n|2022|TPAMI|[DWDN: Deep Wiener Deconvolution Network for Non-Blind Image Deblurring](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9664009)||\n|2022|TCI|[Photon Limited Non-Blind Deblurring Using Algorithm 
Unrolling](https:\u002F\u002Farxiv.org\u002Fabs\u002F2110.15314)|[Code](https:\u002F\u002Fgithub.com\u002Fsanghviyashiitb\u002Fpoisson-deblurring)|\n|2023|WACV|[Wiener Guided DIP for Unsupervised Blind Image Deconvolution](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.10271)|[Code](https:\u002F\u002Fgithub.com\u002Fgbredell\u002FW_DIP)|\n|2023|CVPR|[Uncertainty-Aware Unsupervised Image Deblurring with Deep Residual Prior](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FTang_Uncertainty-Aware_Unsupervised_Image_Deblurring_With_Deep_Residual_Prior_CVPR_2023_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002Fxl-tang3\u002FUAUDeblur)|\n|2023|ICCV|[Leveraging Classic Deconvolution and Feature Extraction in Zero-Shot Image Restoration](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.02097)||\n|2023|SIVP|[Reverse image filtering with clean and noisy filters](https:\u002F\u002Flink.springer.com\u002Farticle\u002F10.1007\u002Fs11760-022-02236-w)|[Code](https:\u002F\u002Fgithub.com\u002Ffayolle\u002Fclean_noisy_defilter)|\n|2023|TIP|[INFWIDE: Image and Feature Space Wiener Deconvolution Network for Non-blind Image Deblurring in Low-Light Conditions](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.08201)|[Code](https:\u002F\u002Fgithub.com\u002Fzhihongz\u002FINFWIDE)|\n|2023|TPAMI|[Blind Image Deconvolution Using Variational Deep Image Prior](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.00179)|[Code](https:\u002F\u002Fgithub.com\u002FDong-Huo\u002FVDIP-Deconvolution)|\n|2024|WACV|[Deep Plug-and-Play Nighttime Non-Blind Deblurring With Saturated Pixel Handling Schemes](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FWACV2024\u002Fpapers\u002FShu_Deep_Plug-and-Play_Nighttime_Non-Blind_Deblurring_With_Saturated_Pixel_Handling_Schemes_WACV_2024_paper.pdf)||\n|2024|TCI|[The Secrets of Non-Blind Poisson Deconvolution](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.03105)||\n|2024|ACM MM|[LoFormer: Local Frequency Transformer for Image Deblurring](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.16993)|[Code](https:\u002F\u002Fgithub.com\u002FINVOKERer\u002FLoFormer)|\n|2024|IJCV|[Deep Richardson-Lucy Deconvolution for Low-Light Image Deblurring](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.05543)||\n|2025|JMI|[Deep learning CT image restoration using system blur and noise models](https:\u002F\u002Fwww.spiedigitallibrary.org\u002Fjournals\u002Fjournal-of-medical-imaging\u002Fvolume-12\u002Fissue-1\u002F014003\u002FDeep-learning-CT-image-restoration-using-system-blur-and-noise\u002F10.1117\u002F1.JMI.12.1.014003.short)||\n\n## (Multi-image\u002FVideo)-Motion-Deblurring\n|Year|Pub|Paper|Repo|\n|:---:|:---:|:---:|:---:|\n|2007|TOG|[Image Deblurring with Blurred\u002FNoisy Image Pairs](https:\u002F\u002Fwww.microsoft.com\u002Fen-us\u002Fresearch\u002Fwp-content\u002Fuploads\u002F2016\u002F11\u002FDeblurring_SIGGRAPH07.pdf)||\n|2008|CVPR|[Robust dual motion deblurring](http:\u002F\u002Fciteseerx.ist.psu.edu\u002Fviewdoc\u002Fdownload?doi=10.1.1.443.6370&rep=rep1&type=pdf)||\n|2009|JCP|[Blind motion deblurring using multiple images](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0021999109001867)||\n|2010|CVPR|[Robust flash deblurring](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F5539941)||\n|2010|CVPR|[Efficient filter flow for space-variant multiframe blind deconvolution](http:\u002F\u002Fsuvrit.de\u002Fpapers\u002Fcvpr10.pdf)||\n|2012|ECCV|[Deconvolving PSFs for A Better Motion Deblurring using Multiple 
Images](http:\u002F\u002Fciteseerx.ist.psu.edu\u002Fviewdoc\u002Fdownload?doi=10.1.1.259.6526&rep=rep1&type=pdf)||\n|2012|TIP|[Robust multichannel blind deconvolution via fast alternating minimization](https:\u002F\u002Fusers.soe.ucsc.edu\u002F~milanfar\u002Fpublications\u002Fjournal\u002FMCBD.pdf)||\n|2012|CGF|[Registration Based Non-uniform Motion Deblurring](http:\u002F\u002Fcg.postech.ac.kr\u002Fpapers\u002Fregistration.pdf)||\n|2012|TOG|[Video deblurring for hand-held cameras using patch-based synthesis](https:\u002F\u002Fwww.juew.org\u002Fpublication\u002Fvideo_deblur.pdf)|[Project page](http:\u002F\u002Fcg.postech.ac.kr\u002Fresearch\u002Fvideo_deblur\u002F)|\n|2013|CVPR|[Multi-image Blind Deblurring Using a Coupled Adaptive Sparse Prior](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2013\u002Fhtml\u002FZhang_Multi-image_Blind_Deblurring_2013_CVPR_paper.html)|[Code & Project page](https:\u002F\u002Fsites.google.com\u002Fsite\u002Fhczhang1\u002Fprojects\u002Fsparse-blind-deblurring)|\n|2014|CVPR|[Multi-Shot Imaging: Joint Alignment, Deblurring and Resolution Enhancement](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2014\u002Fhtml\u002FZhang_Multi-Shot_Imaging_Joint_2014_CVPR_paper.html)|[Project page](https:\u002F\u002Fsites.google.com\u002Fsite\u002Fhczhang1\u002Fprojects\u002Fmulti-shot-imaging)|\n|2014|CVPR|[Gyro-Based Multi-Image Deconvolution for Removing Handshake Blur](http:\u002F\u002Fgraphics.stanford.edu\u002Fpapers\u002Fgyrodeblur\u002Fgyrodeblur_park_cvpr14.pdf)|[Project Page](http:\u002F\u002Fgraphics.stanford.edu\u002Fpapers\u002Fgyrodeblur\u002F)|\n|2014|ECCV|[Modeling Blurred Video with Layers](http:\u002F\u002Ffiles.is.tue.mpg.de\u002Fblack\u002Fpapers\u002FWulffECCV2014.pdf)|[Project page, Results & Dataset](http:\u002F\u002Fps.is.tuebingen.mpg.de\u002Fresearch_projects\u002Fmotion-blur-in-layers)|\n|2015|CVPR|[Burst Deblurring: Removing Camera Shake Through Fourier Burst Accumulation](http:\u002F\u002Fdev.ipol.im\u002F~mdelbra\u002Ffba\u002FFBA_cvpr2015_preprint.pdf)||\n|2015|TCI|[Hand-held video deblurring via efficient fourier aggregation](http:\u002F\u002Farxiv.org\u002Fpdf\u002F1509.05251)|[Project page & Results](http:\u002F\u002Fiie.fing.edu.uy\u002F~mdelbra\u002FvideoFA\u002F)||\n|2015|TIP|[Removing camera shake via weighted fourier burst accumulation](https:\u002F\u002Farxiv.org\u002Fabs\u002F1505.02731)||\n|2015|CVPR|[Generalized Video Deblurring for Dynamic Scenes](http:\u002F\u002Fcv.snu.ac.kr\u002Fpublication\u002Fconf\u002F2015\u002FVD_CVPR2015.pdf)|[Code & Project page](https:\u002F\u002Fcv.snu.ac.kr\u002Fresearch\u002F~VD\u002F)||\n|2015|CVPR|[Intra-Frame Deblurring by Leveraging Inter-Frame Camera Motion](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2015\u002Fhtml\u002FZhang_Intra-Frame_Deblurring_by_2015_CVPR_paper.html)|[Project page](https:\u002F\u002Fsites.google.com\u002Fsite\u002Fhczhang1\u002Fprojects\u002Fvideo_deblur)|\n|2016|ECCV|[Stereo video deblurring](https:\u002F\u002Farxiv.org\u002Fabs\u002F1607.08421)||\n|2017|CVPR|[Simultaneous stereo video deblurring and scene flow estimation](https:\u002F\u002Farxiv.org\u002Fabs\u002F1704.03273)||\n|2017|CVPR|[Deep Video Deblurring for Hand-Held Cameras](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fhtml\u002FSu_Deep_Video_Deblurring_CVPR_2017_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002Fshuochsu\u002FDeepVideoDeblurring)|[Project 
page](http:\u002F\u002Fwww.cs.ubc.ca\u002Flabs\u002Fimager\u002Ftr\u002F2017\u002FDeepVideoDeblurring\u002F)|\n|2017|CVPR|[Light Field Blind Motion Deblurring](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fhtml\u002FSrinivasan_Light_Field_Blind_CVPR_2017_paper.html)|[code](https:\u002F\u002Fgithub.com\u002Fpratulsrinivasan\u002FLight_Field_Blind_Motion_Deblurring)|\n|2017|ICCV|[Video Deblurring via Semantic Segmentation and Pixel-Wise Non-Linear Kernel](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fpapers\u002FRen_Video_Deblurring_via_ICCV_2017_paper.pdf)|[Project page](https:\u002F\u002Fsites.google.com\u002Fsite\u002Frenwenqi888\u002Fresearch\u002Fdeblurring\u002Fpwnlk)|\n|2017|ICCV|[Online Video Deblurring via Dynamic Temporal Blending Network](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fpapers\u002FKim_Online_Video_Deblurring_ICCV_2017_paper.pdf)|[Code](https:\u002F\u002Fsites.google.com\u002Fsite\u002Flliger9\u002Fpublications)|\n|2018|ECCV|[Burst Image Deblurring Using Permutation Invariant Convolutional Neural Networks](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fhtml\u002FMiika_Aittala_Burst_Image_Deblurring_ECCV_2018_paper.html)|[Project page](http:\u002F\u002Fpeople.csail.mit.edu\u002Fmiika\u002Feccv18_deblur\u002F)|\n|2018|ECCV|[Joint Blind Motion Deblurring and Depth Estimation of Light Field](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fhtml\u002FDongwoo_Lee_Joint_Blind_Motion_ECCV_2018_paper.html)||\n|2018|TPAMI|[Dynamic Video Deblurring using a Locally Adaptive Linear Blur Model](https:\u002F\u002Fcv.snu.ac.kr\u002Fpublication\u002Fjour\u002F2018\u002Fthkim_pami2018_dynamic.pdf)||\n|2018|ICCP|[Reblur2deblur: Deblurring videos via self-supervised learning](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1801.05117.pdf)||\n|2018|Arxiv|[LSD-Joint Denoising and Deblurring of Short and Long Exposure Images with Convolutional Neural Networks](https:\u002F\u002Farxiv.org\u002Fabs\u002F1811.09485)||\n|2019|TIP|[Adversarial Spatio-Temporal Learning for Video Deblurring](https:\u002F\u002Farxiv.org\u002Fabs\u002F1804.00533)|[Code](https:\u002F\u002Fgithub.com\u002Fthemathgeek13\u002FSTdeblur)|[Project page](https:\u002F\u002Fgithub.com\u002FJLtwoP\u002FAdversarial-Spatio-Temporal-Learning-for-Video-Deblurring)|\n|2019|CVPR|[Recurrent Neural Networks With Intra-Frame Iterations for Video Deblurring](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fhtml\u002FNah_Recurrent_Neural_Networks_With_Intra-Frame_Iterations_for_Video_Deblurring_CVPR_2019_paper.html)||\n|2019|CVPR|[DAVANet: Stereo Deblurring With View Aggregation](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fhtml\u002FZhou_DAVANet_Stereo_Deblurring_With_View_Aggregation_CVPR_2019_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002Fsczhou\u002FDAVANet)|\n|2019|CVPR_W|[A Deep Motion Deblurring Network based on Per-Pixel Adaptive Kernels with Residual Down-Up and Up-Down Modules](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPRW_2019\u002Fhtml\u002FNTIRE\u002FSim_A_Deep_Motion_Deblurring_Network_Based_on_Per-Pixel_Adaptive_Kernels_CVPRW_2019_paper.html)||\n|2019|ICCV|[Spatio-Temporal Filter Adaptive Network for Video Deblurring](https:\u002F\u002Farxiv.org\u002Fabs\u002F1904.12257)|[Project page](https:\u002F\u002Fshangchenzhou.com\u002Fprojects\u002Fstfan\u002F), [Code](https:\u002F\u002Fgithub.com\u002Fsczhou\u002FSTFAN)|\n|2019|ICCV|[Face Video Deblurring using 3D Facial 
Priors](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fhtml\u002FRen_Face_Video_Deblurring_Using_3D_Facial_Priors_ICCV_2019_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002Frwenqi\u002F3Dfacedeblurring)|\n|2019|SPL|[Deep Recurrent Network for Fast and Full-Resolution Light Field Deblurring](https:\u002F\u002Farxiv.org\u002Fabs\u002F1904.00352)||\n|2019|ICCV_W|[Deep Video Deblurring: The Devil is in the Details](https:\u002F\u002Farxiv.org\u002Fabs\u002F1909.12196)|[Code](https:\u002F\u002Fgithub.com\u002Fvisinf\u002Fdeblur-devil)|\n|2020|CVPR|[Cascaded Deep Video Deblurring Using Temporal Sharpness Prior](https:\u002F\u002Farxiv.org\u002Fabs\u002F2004.02501)|[Code](https:\u002F\u002Fgithub.com\u002Fcsbhr\u002FCDVD-TSP)|[Project Page](https:\u002F\u002Fbaihaoran.xyz\u002Fprojects\u002Fcdvd-tsp\u002Findex.html)|\n|2020|CVPR|[Blurry Video Frame Interpolation](https:\u002F\u002Farxiv.org\u002Fabs\u002F2002.12259)|[Code](https:\u002F\u002Fgithub.com\u002Flaomao0\u002FBIN)|\n|2020|ECCV|[Efficient Spatio-Temporal Recurrent Neural Network for Video Deblurring](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2020\u002Fpapers_ECCV\u002Fhtml\u002F5116_ECCV_2020_paper.php)|[Code](https:\u002F\u002Fgithub.com\u002Fzzh-tech\u002FESTRNN)|\n|2020|ECCV|[Learning Event-Driven Video Deblurring and Interpolation](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2020\u002Fpapers_ECCV\u002Fhtml\u002F671_ECCV_2020_paper.php)||\n|2020|TIP|[Blur Removal Via Blurred-Noisy Image Pair](https:\u002F\u002Farxiv.org\u002Fabs\u002F1903.10667)||\n|2020|TCSVT|[Recursive Neural Network for Video Deblurring](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F9247314)||\n|2021|AAAI|[Motion-blurred Video Interpolation and Extrapolation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2103.02984.pdf)||\n|2021|CVPR|[Gated Spatio-Temporal Attention-Guided Video Deblurring](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fpapers\u002FSuin_Gated_Spatio-Temporal_Attention-Guided_Video_Deblurring_CVPR_2021_paper.pdf)||\n|2021|CVPR|[ARVo: Learning All-Range Volumetric Correspondence for Video Deblurring](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.04260)||\n|2021|TOG|[Recurrent Video Deblurring with Blur-Invariant Motion Estimation and Pixel Volumes](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3453720)|[Code](https:\u002F\u002Fgithub.com\u002Fcodeslake\u002FPVDNet)|\n|2021|CVIU|[Video Deblurring via Spatiotemporal Pyramid Network and Adversarial Gradient Prior](https:\u002F\u002Fwhluo.github.io\u002Fpapers\u002Fcviu103135_final.pdf)|\n|2021|ICCV|[Multi-Scale Separable Network for Ultra-High-Definition Video Deblurring](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fhtml\u002FDeng_Multi-Scale_Separable_Network_for_Ultra-High-Definition_Video_Deblurring_ICCV_2021_paper.html)||\n|2022|AAAI|[Deep Recurrent Neural Network with Multi-Scale Bi-Directional Propagation for Video Deblurring](https:\u002F\u002Faaai-2022.virtualchair.net\u002Fposter_aaai3124)||\n|2022|CVPR|[Deblur-NeRF: Neural Radiance Fields From Blurry Images](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FMa_Deblur-NeRF_Neural_Radiance_Fields_From_Blurry_Images_CVPR_2022_paper.pdf)|[Code](https:\u002F\u002Fgithub.com\u002Flimacv\u002FDeblur-NeRF)|\n|2022|ECCV|[Improving Image Restoration by Revisiting Global Information 
Aggregation](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.04491)|[Code](https:\u002F\u002Fgithub.com\u002Fmegvii-research\u002FTLC)|\n|2022|ECCV|[Animation from Blur: Multi-modal Blur Decomposition with Motion Guidance](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.10123)|[Code](https:\u002F\u002Fgithub.com\u002Fzzh-tech\u002FAnimation-from-Blur)|\n|2022|ECCV|[Efficient Video Deblurring Guided by Motion Magnitude](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.13374)|[Code](https:\u002F\u002Fgithub.com\u002Fsollynoay\u002FMMP-RNN)|\n|2022|ECCV|[Spatio-Temporal Deformable Attention Network for Video Deblurring](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.10852)|[Code](https:\u002F\u002Fgithub.com\u002Fhuicongzhang\u002FSTDAN)|\n|2022|ECCV|[ERDN: Equivalent Receptive Field Deformable Network for Video Deblurring](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2022\u002Fpapers_ECCV\u002Fhtml\u002F4085_ECCV_2022_paper.php)|[Code](https:\u002F\u002Fgithub.com\u002FTencentCloud\u002FERDN)|\n|2022|ECCV|[DeMFI: Deep Joint Deblurring and Multi-Frame Interpolation with Flow-Guided Attentive Correlation and Recursive Boosting](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.09985)|[Code](https:\u002F\u002Fgithub.com\u002FJihyongOh\u002FDeMFI)|\n|2022|ECCVW|[Towards Real-World Video Deblurring by Exploring Blur Formation Process](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.13184)||\n|2022|CGF|[Real-Time Video Deblurring via Lightweight Motion Compensation](https:\u002F\u002Fdiglib.eg.org\u002Fbitstream\u002Fhandle\u002F10.1111\u002Fcgf14667\u002Fv41i7pp177-188.pdf?sequence=1&isAllowed=y)|[Code](https:\u002F\u002Fgithub.com\u002Fcodeslake\u002FRealTime_VDBLR)|\n|2022|IJCV|[Real-world Video Deblurring: A Benchmark Dataset and An Efficient Recurrent Neural Network](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.16028)|[Code](https:\u002F\u002Fgithub.com\u002Fzzh-tech\u002FESTRNN)|\n|2024|WACV|[Sharp-NeRF: Grid-Based Fast Deblurring Neural Radiance Fields Using Sharpness Prior](https:\u002F\u002Fgithub.com\u002Fradimspetlik\u002FSI-DDPM-FMO)|[Code](https:\u002F\u002Fgithub.com\u002FbenhenryL\u002FSharpNeRF)|\n|2024|WACV|[Deblur-NSFF: Neural Scene Flow Fields for Blurry Dynamic Scenes](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FWACV2024\u002Fpapers\u002FLuthra_Deblur-NSFF_Neural_Scene_Flow_Fields_for_Blurry_Dynamic_Scenes_WACV_2024_paper.pdf)||\n|2023|CVPR|[Blur Interpolation Transformer for Real-World Motion from Blur](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.11423)|[Code](https:\u002F\u002Fgithub.com\u002Fzzh-tech\u002FBiT)|\n|2023|CVPR|[DP-NeRF: Deblurred Neural Radiance Field with Physical Scene Priors](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fpapers\u002FLee_DP-NeRF_Deblurred_Neural_Radiance_Field_With_Physical_Scene_Priors_CVPR_2023_paper.pdf)|[Code](https:\u002F\u002Fgithub.com\u002Fdogyoonlee\u002FDP-NeRF)|\n|2023|CVPR|[BAD-NeRF: Bundle Adjusted Deblur Neural Radiance Fields](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fpapers\u002FWang_BAD-NeRF_Bundle_Adjusted_Deblur_Neural_Radiance_Fields_CVPR_2023_paper.pdf)|[Code&Dataset](https:\u002F\u002Fgithub.com\u002FWU-CVGL\u002FBAD-NeRF)|\n|2023|CVPR|[Joint Video Multi-Frame Interpolation and Deblurring Under Unknown Exposure 
Time](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FShang_Joint_Video_Multi-Frame_Interpolation_and_Deblurring_Under_Unknown_Exposure_Time_CVPR_2023_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002Fshangwei5\u002FVIDUE)|\n|2023|CVPR|[Deep Discriminative Spatial and Temporal Network for Efficient Video Deblurring](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fpapers\u002FPan_Deep_Discriminative_Spatial_and_Temporal_Network_for_Efficient_Video_Deblurring_CVPR_2023_paper.pdf)|[Code](https:\u002F\u002Fgithub.com\u002Fxuboming8\u002FDSTNet)|\n|2023|ICCV|[Exploring Temporal Frequency Spectrum in Deep Video Deblurring](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FZhu_Exploring_Temporal_Frequency_Spectrum_in_Deep_Video_Deblurring_ICCV_2023_paper.pdf)||\n|2023|ICCV|[E2NeRF: Event Enhanced Neural Radiance Fields from Blurry Images](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FQi_E2NeRF_Event_Enhanced_Neural_Radiance_Fields_from_Blurry_Images_ICCV_2023_paper.pdf)|[Code](https:\u002F\u002Fgithub.com\u002FiCVTEAM\u002FE2NeRF)|\n|2024|CVPR|[Blur-aware Spatio-temporal Sparse Transformer for Video Deblurring](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FZhang_Blur-aware_Spatio-temporal_Sparse_Transformer_for_Video_Deblurring_CVPR_2024_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002Fhuicongzhang\u002FBSSTNet)|\n|2024|CVPR|[Mitigating Motion Blur in Neural Radiance Fields with Events and Frames](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FCannici_Mitigating_Motion_Blur_in_Neural_Radiance_Fields_with_Events_and_CVPR_2024_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002Fuzh-rpg\u002FEvDeblurNeRF)|\n|2024|CVPR|[DyBluRF: Dynamic Neural Radiance Fields from Blurry Monocular Video](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FSun_DyBluRF_Dynamic_Neural_Radiance_Fields_from_Blurry_Monocular_Video_CVPR_2024_paper.html)||\n|2024|ECCV|[Rethinking video deblurring with wavelet-aware dynamic transformer and diffusion model](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.13459)|[Code](https:\u002F\u002Fgithub.com\u002FChen-Rao\u002FVD-Diff)|\n|2024|NeurIPS|[Learning Truncated Causal History Model for Video Restoration](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.03936)|[Code](https:\u002F\u002Fgithub.com\u002FAscend-Research\u002FTurtle)|\n\n## Challenges on Motion Deblurring\n\n|Year|Pub|Paper|Repo|\n|:---:|:---:|:---:|:---:|\n|2019|CVPR_W|[NTIRE 2019 Challenge on Video Deblurring: Methods and Results](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPRW_2019\u002Fhtml\u002FNTIRE\u002FNah_NTIRE_2019_Challenge_on_Video_Deblurring_Methods_and_Results_CVPRW_2019_paper.html)||\n|2019|CVPR_W|[NTIRE 2019 Challenge on Video Deblurring and Super-Resolution: Dataset and Study](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPRW_2019\u002Fhtml\u002FNTIRE\u002FNah_NTIRE_2019_Challenge_on_Video_Deblurring_and_Super-Resolution_Dataset_and_CVPRW_2019_paper.html)||\n|2019|CVPR_W|[EDVR: Video Restoration with Enhanced Deformable Convolutional Networks](https:\u002F\u002Farxiv.org\u002Fabs\u002F1905.02716)|[Code-Pytorch](https:\u002F\u002Fgithub.com\u002Fxinntao\u002FEDVR), [Project page](https:\u002F\u002Fxinntao.github.io\u002Fprojects\u002FEDVR)|\n|2020|CVPR_W|[Ntire 2020 challenge on image and video 
deblurring](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPRW_2020\u002Fpapers\u002Fw31\u002FNah_NTIRE_2020_Challenge_on_Image_and_Video_Deblurring_CVPRW_2020_paper.pdf)||\n|2020|CVPR_W|[Deploying Image Deblurring across Mobile Devices: A Perspective of Quality and Latency](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPRW_2020\u002Fpapers\u002Fw31\u002FChiang_Deploying_Image_Deblurring_Across_Mobile_Devices_A_Perspective_of_Quality_CVPRW_2020_paper.pdf)||\n|2020|CVPR_W|[High-Resolution Dual-Stage Multi-Level Feature Aggregation for Single Image and Video Deblurring](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPRW_2020\u002Fpapers\u002Fw31\u002FBrehm_High-Resolution_Dual-Stage_Multi-Level_Feature_Aggregation_for_Single_Image_and_Video_CVPRW_2020_paper.pdf)||\n\n## Depth-Aware Motion Deblurring\n|Year|Pub|Paper|Repo|\n|:---:|:---:|:---:|:---:|\n|2012|ICCP|[Depth-aware motion deblurring](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F6215220)||\n|2014|CVPR|[Joint Depth Estimation and Camera Shake Removal from Single Blurry Image](https:\u002F\u002Fwww.cv-foundation.org\u002Fopenaccess\u002Fcontent_cvpr_2014\u002Fhtml\u002FHu_Joint_Depth_Estimation_2014_CVPR_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002Fchaehonglee\u002FJoint_Depth_Esimation_and_Deblur)|\n|2019|WACV|[Single Image Deblurring and Camera Motion Estimation With Depth Map](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F8658686)||\n|2019|CVPR|[DAVANet: Stereo Deblurring With View Aggregation](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fhtml\u002FZhou_DAVANet_Stereo_Deblurring_With_View_Aggregation_CVPR_2019_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002Fsczhou\u002FDAVANet)|\n|2020|TIP|[Dynamic Scene Deblurring by Depth Guided Model](https:\u002F\u002Ffaculty.ucmerced.edu\u002Fmhyang\u002Fpapers\u002Ftip2020_dynamic_scene_deblurring.pdf)|[Project Page](https:\u002F\u002Fsites.google.com\u002Fview\u002Flerenhanli\u002Fhomepage\u002Fdepth_deblurring)|\n|2020|TCSVT|[Depth-Aware Motion Deblurring Using Loopy Belief Propagation](https:\u002F\u002Fwww4.comp.polyu.edu.hk\u002F~pinli\u002FCoRR\u002FTCSVT\u002FTCSVT2020_2.pdf)||\n|2022|ICME|[Dast-Net: Depth-Aware Spatio-Temporal Network for Video Deblurring](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9858929)||\n|2023|SCIA|[Depth-Aware Image Compositing Model for Parallax Camera Motion Blur](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.09334)|[Code & Project Page](https:\u002F\u002Fgermanftv.github.io\u002FParallaxICB.github.io\u002F)|\n|2023|WACV|[Fast and Accurate: Video Enhancement Using Sparse Depth](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FWACV2023\u002Fhtml\u002FFeng_Fast_and_Accurate_Video_Enhancement_Using_Sparse_Depth_WACV_2023_paper.html)||\n|2024|ECCV_W|[DAVIDE: Depth-Aware Video Deblurring](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.01274)|[Code & Project Page](https:\u002F\u002Fgermanftv.github.io\u002FDAVIDE.github.io\u002F)|\n|2024||[Deep Lidar-guided Image Deblurring](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.07262v1)|[Code](https:\u002F\u002Fgithub.com\u002Fdiegovalsesia\u002Flidardeblurring)|\n\n## Other Closely Related Works\n|Year|Pub|Paper|Repo|\n|:---:|:---:|:---:|:---:|\n|2000||[Multiframe Restoration Methods for Image Synthesis and Recovery, Joseph J. Green, Univ. 
of Arizona, PhD thesis](https:\u002F\u002Frepository.arizona.edu\u002Fhandle\u002F10150\u002F284110)|[Code](https:\u002F\u002Fgithub.com\u002Fnasa-jpl\u002Fpmapper)|\n|2013|TOG|[A No-Reference Metric for Evaluating The Quality of Motion Deblurring](https:\u002F\u002Fgfx.cs.princeton.edu\u002Fpubs\u002FLiu_2013_ANM\u002Fsa13.pdf)|[Code & Project Page](https:\u002F\u002Fgfx.cs.princeton.edu\u002Fpubs\u002FLiu_2013_ANM\u002Findex.php)|\n|2018|CVPR|[Learning to extract a video sequence from a single motion-blurred image](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fhtml\u002FJin_Learning_to_Extract_CVPR_2018_paper.html)||[Code](https:\u002F\u002Fgithub.com\u002FMeiguangJin\u002FLearning-to-Extract-a-Video-Sequence-from-a-Single-Motion-Blurred-Image)|\n|2019|CVPR|[Bringing a Blurry Frame Alive at High Frame-Rate With an Event Camera](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fhtml\u002FPan_Bringing_a_Blurry_Frame_Alive_at_High_Frame-Rate_With_an_CVPR_2019_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002Fpanpanfei\u002FBringing-a-Blurry-Frame-Alive-at-High-Frame-Rate-with-an-Event-Camera)|\n|2019|CVPR|[Learning to Extract Flawless Slow Motion From Blurry Videos](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fhtml\u002FJin_Learning_to_Extract_Flawless_Slow_Motion_From_Blurry_Videos_CVPR_2019_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002FMeiguangJin\u002Fslow-motion)|\n|2019|CVPR|[Learning to Synthesize Motion Blur](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fhtml\u002FBrooks_Learning_to_Synthesize_Motion_Blur_CVPR_2019_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002Fgoogle-research\u002Fgoogle-research\u002Ftree\u002Fmaster\u002Fmotion_blur), [Project page](http:\u002F\u002Ftimothybrooks.com\u002Ftech\u002Fmotion-blur\u002F)|\n|2019|CVPR|[World from blur](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FQiu_World_From_Blur_CVPR_2019_paper.pdf)||\n|2019|ICCV|[FAB: A Robust Facial Landmark Detection Framework for Motion-Blurred Videos](https:\u002F\u002Farxiv.org\u002Fabs\u002F1910.12100)|[Code](https:\u002F\u002Fgithub.com\u002FKeqiangSun\u002FFAB)|\n|2019|ICCV|[Visual Deprojection: Probabilistic Recovery of Collapsed Dimensions](https:\u002F\u002Farxiv.org\u002Fabs\u002F1909.00475)||\n|2020|CVPR-W|[Photosequencing of Motion Blur using Short and Long Exposures](https:\u002F\u002Farxiv.org\u002Fabs\u002F1912.06102)|[Project Page](https:\u002F\u002Fapvijay.github.io\u002Fphotoseq_blur.html)|\n|2020|ACM-MM|[Every Moment Matters: Detail-Aware Networks to Bring a Blurry Image Alive](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3394171.3413929)||\n|2020|NIPS|[Watch out! 
Motion is Blurring the Vision of Your Deep Neural Networks](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2020\u002Ffile\u002F0a73de68f10e15626eb98701ecf03adb-Paper.pdf)|[Code](https:\u002F\u002Fgithub.com\u002Ftsingqguo\u002FABBA)|\n|2021|Arxiv|[Geometric Moment Invariants to Motion Blur](https:\u002F\u002Farxiv.org\u002Fabs\u002F2101.08647v2)||\n|2021|AAAI|[Optical Flow Estimation from a Single Motion-blurred Image](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2103.02996.pdf)||\n|2021|CVPR|[Towards Rolling Shutter Correction and Deblurring in Dynamic Scenes](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.01601)|[Code](https:\u002F\u002Fgithub.com\u002Fzzh-tech\u002FRSCD)|\n|2021|CVPR|[Improved Handling of Motion Blur in Online Object Detection](https:\u002F\u002Farxiv.org\u002Fabs\u002F2011.14448)||\n|2021|CVPR|[Blur, Noise, and Compression Robust Generative Adversarial Networks](https:\u002F\u002Farxiv.org\u002Fabs\u002F2003.07849)||\n|2021|ICCV|[Motion Deblurring With Real Events](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fhtml\u002FXu_Motion_Deblurring_With_Real_Events_ICCV_2021_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002Fxufangchn\u002FMotion-Deblurring-with-Real-Events)|\n|2021|ICCV|[Bringing Events Into Video Deblurring With Non-Consecutively Blurry Frames](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fhtml\u002FShang_Bringing_Events_Into_Video_Deblurring_With_Non-Consecutively_Blurry_Frames_ICCV_2021_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002Fshangwei5\u002FD2Net)|\n|2021|IEEEAccess|[Robust Single Image Deblurring Using Gyroscope Sensor](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9444479)||\n|2022|ECCV|[Animation from Blur: Multi-modal Blur Decomposition with Motion Guidance](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.10123)|[Code](https:\u002F\u002Fgithub.com\u002Fzzh-tech\u002FAnimation-from-Blur)|\n|2022|ECCV|[Realistic Blur Synthesis for Learning Image Deblurring](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.08771)|[Code & Dataset](https:\u002F\u002Fgithub.com\u002Frimchang\u002FRSBlur)|\n|2022|ECCV|[Event-Guided Deblurring of Unknown Exposure Time Videos](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2022\u002Fpapers_ECCV\u002Fhtml\u002F3601_ECCV_2022_paper.php)||\n|2023|CVPR|[Blur Interpolation Transformer for Real-World Motion from Blur](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.11423)|[Code](https:\u002F\u002Fgithub.com\u002Fzzh-tech\u002FBiT)|\n|2023|CVPR|[Improving Robustness of Semantic Segmentation to Motion-Blur Using Class-Centric Augmentation](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FAakanksha_Improving_Robustness_of_Semantic_Segmentation_to_Motion-Blur_Using_Class-Centric_Augmentation_CVPR_2023_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002Faka-discover\u002FCCMBA_CVPR23)|\n|2023|CVPR|[Recovering 3D Hand Mesh Sequence From a Single Blurry Image: A New Dataset and Temporal Unfolding](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FOh_Recovering_3D_Hand_Mesh_Sequence_From_a_Single_Blurry_Image_CVPR_2023_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002FJaehaKim97\u002FBlurHand_RELEASE)|\n|2023|CVPR|[Blur Interpolation Transformer for Real-World Motion from 
Blur](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fpapers\u002FZhong_Blur_Interpolation_Transformer_for_Real-World_Motion_From_Blur_CVPR_2023_paper.pdf)|[Code](https:\u002F\u002Fgithub.com\u002Fzzh-tech\u002FBiT?tab=readme-ov-file)|\n|2023|CVPR|[Event-Based Frame Interpolation with Ad-hoc Deblurring](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FSun_Event-Based_Frame_Interpolation_With_Ad-Hoc_Deblurring_CVPR_2023_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002FAHupuJR\u002FREFID)|\n|2023|CVPR|[DartBlur: Privacy Preservation With Detection Artifact Suppression](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FJiang_DartBlur_Privacy_Preservation_With_Detection_Artifact_Suppression_CVPR_2023_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002FJaNg2333\u002FDartBlur.)|\n|2023|CVPR|[HyperCUT: Video Sequence From a Single Blurry Image Using Unsupervised Ordering](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FPham_HyperCUT_Video_Sequence_From_a_Single_Blurry_Image_Using_Unsupervised_CVPR_2023_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002FVinAIResearch\u002FHyperCUT.git)|\n|2023|CVPR|[Hybrid Neural Rendering for Large-Scale Scenes With Motion Blur](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FDai_Hybrid_Neural_Rendering_for_Large-Scale_Scenes_With_Motion_Blur_CVPR_2023_paper.html)|[Code](https:\u002F\u002Fdaipengwa.github.io\u002FHybrid-Rendering-ProjectPage)|\n|2023|CVPR|[Event-Based Blurry Frame Interpolation Under Blind Exposure](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FWeng_Event-Based_Blurry_Frame_Interpolation_Under_Blind_Exposure_CVPR_2023_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002FWarranWeng\u002FEBFI-BE)|\n|2023|ICCV|[Non-Coaxial Event-Guided Motion Deblurring with Spatial Alignment](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FCho_Non-Coaxial_Event-Guided_Motion_Deblurring_with_Spatial_Alignment_ICCV_2023_paper.pdf)||\n|2023|ICCV|[Generalizing Event-Based Motion Deblurring in Real-World Scenarios](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FZhang_Generalizing_Event-Based_Motion_Deblurring_in_Real-World_Scenarios_ICCV_2023_paper.pdf)|[Code](https:\u002F\u002Fgithub.com\u002FXiangZ-0\u002FGEM)|\n|2024|WACV|[Single-Image Deblurring, Trajectory and Shape Recovery of Fast Moving Objects with Denoising Diffusion Probabilistic Models](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FWACV2024\u002Fpapers\u002FSpetlik_Single-Image_Deblurring_Trajectory_and_Shape_Recovery_of_Fast_Moving_Objects_WACV_2024_paper.pdf)|[Code](https:\u002F\u002Fgithub.com\u002Fradimspetlik\u002FSI-DDPM-FMO)|\n|2024|CVPR|[Spike-guided Motion Deblurring with Unknown Modal Spatiotemporal Alignment](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FZhang_Spike-guided_Motion_Deblurring_with_Unknown_Modal_Spatiotemporal_Alignment_CVPR_2024_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002FLeozhangjiyuan\u002FUaSDN)|\n|2024|CVPR|[Latency Correction for Event-guided Deblurring and Frame Interpolation](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FYang_Latency_Correction_for_Event-guided_Deblurring_and_Frame_Interpolation_CVPR_2024_paper.html)||\n|2024|CVPR|[Frequency-aware Event-based Video Deblurring for Real-World Motion 
Blur](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FKim_Frequency-aware_Event-based_Video_Deblurring_for_Real-World_Motion_Blur_CVPR_2024_paper.html)||\n|2024|CVPR|[EVS-assisted Joint Deblurring Rolling-Shutter Correction and Video Frame Interpolation through Sensor Inverse Modeling](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FJiang_EVS-assisted_Joint_Deblurring_Rolling-Shutter_Correction_and_Video_Frame_Interpolation_through_CVPR_2024_paper.html)||\n|2024|CVPR|[Motion Blur Decomposition with Cross-shutter Guidance](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FJi_Motion_Blur_Decomposition_with_Cross-shutter_Guidance_CVPR_2024_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002Fjixiang2016\u002FdualBR)|\n\n## Defocus Deblurring and Potential Datasets\n|Year|Pub|Paper|Repo|\n|:---:|:---:|:---:|:---:|\n|2009|ICCP|[What are Good Apertures for Defocus Deblurring?](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F5559018)||\n|2009|ICIP|[Single image defocus map estimation using local contrast prior](https:\u002F\u002Fwww.eecs.yorku.ca\u002F~mbrown\u002Fpdf\u002Ficip09_defocus.pdf)||\n|2011|PR|[Defocus map estimation from a single image](https:\u002F\u002Fwww.comp.nus.edu.sg\u002F~tsim\u002Fdocuments\u002FdefocusEstimation-published.pdf)||\n|2012|ICASSP|[Spatially-varying out-of-focus image deblurring with L1-2 optimization and a guided blur map](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F6288071)||\n|2013|ICASSP|[Removing out-of-focus blur from similar image pairs](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F6637925)||\n|2014|CVPR|[Discriminative Blur Detection Features](http:\u002F\u002Fwww.shijianping.me\u002Fblur_cvpr14.pdf)|[Project Page](http:\u002F\u002Fwww.cse.cuhk.edu.hk\u002F~leojia\u002Fprojects\u002Fdblurdetect\u002Findex.html)|\n|2015|CVPR|[Just Noticeable Defocus Blur Detection and Estimation](http:\u002F\u002Fshijianping.me\u002Fjnb\u002Fpapers\u002Fjnbdetection_final.pdf)|[Project Page](http:\u002F\u002Fshijianping.me\u002Fjnb\u002Findex.html)|\n|2016||[Spatially Variant Defocus Blur Map Estimation and Deblurring from a Single Image](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS1047320316000031)|[Code](https:\u002F\u002Fgithub.com\u002FZHANGXinxinPKU\u002Fdefocus-deblurring)|\n|2017|BMVC|[Depth Estimation and Blur Removal from a Single Out-of-focus Image](https:\u002F\u002Fsaeed-anwar.github.io\u002Fpapers\u002FBMVC17-depth.pdf)||\n|2017|CVPR|[Spatially-Varying Blur Detection Based on Multiscale Fused and Sorted Transform Coefficients of Gradient Magnitudes](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fhtml\u002FGolestaneh_Spatially-Varying_Blur_Detection_CVPR_2017_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002Fisalirezag\u002FHiFST)|\n|2017|CVPR|[A unified approach of multi-scale deep and hand-crafted features for defocus estimation](https:\u002F\u002Farxiv.org\u002Fabs\u002F1704.08992)|[Code](https:\u002F\u002Fgithub.com\u002Fzzangjinsun\u002FDHDE_CVPR17)|\n|2017|ICCV|[Learning to Synthesize a 4D RGBD Light Field from a Single Image](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_iccv_2017\u002Fhtml\u002FSrinivasan_Learning_to_Synthesize_ICCV_2017_paper.html)|[Dataset and Project Page](https:\u002F\u002Fgithub.com\u002Fpratulsrinivasan\u002FLocal_Light_Field_Synthesis)|\n|2018|ECCV|[Refocusgan: Scene refocusing using a single 
image](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fpapers\u002FParikshit_Sakurikar_Single_Image_Scene_ECCV_2018_paper.pdf)||\n|2018|ECCV_W|[Deep Depth from Defocus: how can defocus blur improve 3D estimation using dense neural networks?](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_eccv_2018_workshops\u002Fw3\u002Fhtml\u002FCarvalho_Deep_Depth_from_Defocus_how_can_defocus_blur_improve_3D_ECCVW_2018_paper.html)|[Code & Dataset](https:\u002F\u002Fgithub.com\u002Fmarcelampc\u002Fd3net_depth_estimation)|\n|2018|PG|[Defocus and Motion Blur Detection with Deep Contextual Features](http:\u002F\u002Fcg.postech.ac.kr\u002Fpapers\u002FKim2018Defocus.pdf)|[Code & Dataset](https:\u002F\u002Fgithub.com\u002FHyeongseokSon1\u002Fdeep_blur_detection_and_classification)|\n|2018|TIP|[Edge-based defocus blur estimation with adaptive scale selection](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F8101511)|[Code](https:\u002F\u002Fgithub.com\u002Falikaraali\u002FTIP2018-Edge-Based-Defocus-Blur-Estimation-With-Adaptive-Scale-Selection)|\n|2019|CVPR|[Deep Defocus Map Estimation using Domain Adaptation](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fhtml\u002FLee_Deep_Defocus_Map_Estimation_Using_Domain_Adaptation_CVPR_2019_paper.html)|[Code & Dataset](https:\u002F\u002Fgithub.com\u002Fcodeslake\u002FDMENet)|\n|2019|CVPR|[DeFusionNET: Defocus Blur Detection via Recurrently Fusing and Refining Multi-Scale Deep Features](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FTang_DeFusionNET_Defocus_Blur_Detection_via_Recurrently_Fusing_and_Refining_Multi-Scale_CVPR_2019_paper.pdf)||\n|2020|ECCV|[Defocus Deblurring Using Dual-Pixel Data](https:\u002F\u002Farxiv.org\u002Fabs\u002F2005.00305)|[Code & Dataset](https:\u002F\u002Fgithub.com\u002FAbdullah-Abuolaim\u002Fdefocus-deblurring-dual-pixel)|\n|2020|ECCV|[Rethinking the Defocus Blur Detection Problem and A Real-Time Deep DBD Model](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2020\u002Fpapers_ECCV\u002Fhtml\u002F1182_ECCV_2020_paper.php)||\n|2020|ECCV|[Defocus Blur Detection via Depth Distillation](https:\u002F\u002Farxiv.org\u002Fabs\u002F2007.08113)|[Code](https:\u002F\u002Fgithub.com\u002Fvinthony\u002Fdepth-distillation)|\n|2020|TCI|[AIFNet: All-in-focus Image Restoration Network using a Light Field-based Dataset](https:\u002F\u002Fsweb.cityu.edu.hk\u002Fmiullam\u002FAIFNET\u002F)|[Code](https:\u002F\u002Fgithub.com\u002Fbinorchen\u002FAIFNET),[Dataset](https:\u002F\u002Fsweb.cityu.edu.hk\u002Fmiullam\u002FAIFNET\u002Fdataset\u002FLFDOF.zip)|\n|2020|Arxiv|[CycleGAN with a Blur Kernel for Deconvolution Microscopy: Optimal Transport Geometry](https:\u002F\u002Farxiv.org\u002Fabs\u002F1908.09414)||\n|2020|Arxiv|[Deep Multi-Scale Feature Learning for Defocus Blur Estimation](https:\u002F\u002Farxiv.org\u002Fabs\u002F2009.11939)||\n|2020|TCSVT|[Estimating Generalized Gaussian Blur Kernels for Out-of-Focus Image Deblurring](http:\u002F\u002Fivlab.org\u002Fpublications\u002FTCSVT2021-GGdeblurring.pdf)||\n|2021|Arxiv|[Defocus Blur Detection via Salient Region Detection Prior](https:\u002F\u002Farxiv.org\u002Fabs\u002F2011.09677)||\n|2021|Arxiv|[Learning to Estimate Kernel Scale and Orientation of Defocus Blur with Asymmetric Coded Aperture](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.05843)||\n|2021|CVPR|[Iterative Filter Adaptive Network for Single Image Defocus 
Deblurring](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fpapers\u002FLee_Iterative_Filter_Adaptive_Network_for_Single_Image_Defocus_Deblurring_CVPR_2021_paper.pdf)|[Code & Dataset](https:\u002F\u002Fgithub.com\u002Fcodeslake\u002FIFAN)|\n|2021|CVPR|[Self-Generated Defocus Blur Detection via Dual Adversarial Discriminators](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fhtml\u002FZhao_Self-Generated_Defocus_Blur_Detection_via_Dual_Adversarial_Discriminators_CVPR_2021_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002Fshangcai1\u002FSG)|\n|2021|CVPR|[Dual Pixel Exploration: Simultaneous Depth Estimation and Image Restoration](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fpapers\u002FPan_Dual_Pixel_Exploration_Simultaneous_Depth_Estimation_and_Image_Restoration_CVPR_2021_paper.pdf)|[Code](https:\u002F\u002Fgithub.com\u002Fpanpanfei\u002FDual-Pixel-Exploration-Simultaneous-Depth-Estimation-and-Image-Restoration)|\n|2021|CVPRW|[NTIRE 2021 Challenge for Defocus Deblurring Using Dual-pixel Images: Methods and Results](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021W\u002FNTIRE\u002Fpapers\u002FAbuolaim_NTIRE_2021_Challenge_for_Defocus_Deblurring_Using_Dual-Pixel_Images_Methods_CVPRW_2021_paper.pdf)||\n|2021|CVPRW|[Attention! Stay Focus!](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.07925)|[Code](https:\u002F\u002Fgithub.com\u002Ftuvovan\u002FATTSF)|\n|2021|ICCV|[Single Image Defocus Deblurring Using Kernel-Sharing Parallel Atrous Convolutions](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2108.09108.pdf)|[Code](https:\u002F\u002Fgithub.com\u002FHyeongseokSon1\u002FKPAC)|\n|2021|ICCV|[Learning To Reduce Defocus Blur by Realistically Modeling Dual-Pixel Data](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fhtml\u002FAbuolaim_Learning_To_Reduce_Defocus_Blur_by_Realistically_Modeling_Dual-Pixel_Data_ICCV_2021_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002FAbdullah-Abuolaim\u002Frecurrent-defocus-deblurring-synth-dual-pixel)|\n|2022|WACV|[Improving Single-Image Defocus Deblurring: How Dual-Pixel Images Help Through Multi-Task Learning](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FWACV2022\u002Fhtml\u002FAbuolaim_Improving_Single-Image_Defocus_Deblurring_How_Dual-Pixel_Images_Help_Through_Multi-Task_WACV_2022_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002FAbdullah-Abuolaim\u002Fmulti-task-defocus-deblurring-dual-pixel-nimat)|\n|2022|CVPR|[Learning to Deblur Using Light Field Generated and Real Defocus Images](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fhtml\u002FRuan_Learning_to_Deblur_Using_Light_Field_Generated_and_Real_Defocus_CVPR_2022_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002Flingyanruan\u002FDRBNet)|\n|2022|CVPR|[AR-NeRF: Unsupervised Learning of Depth and Defocus Effects From Natural Images With Aperture Rendering Neural Radiance Fields](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fhtml\u002FKaneko_AR-NeRF_Unsupervised_Learning_of_Depth_and_Defocus_Effects_From_Natural_CVPR_2022_paper.html)||\n|2022|ECCV|[United Defocus Blur Detection and Deblurring via Adversarial Promoting Learning](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2022\u002Fpapers_ECCV\u002Fhtml\u002F3308_ECCV_2022_paper.php)|[Code](https:\u002F\u002Fgithub.com\u002Fwdzhao123\u002FAPL)|\n|2023|AAAI|[Learning Single Image Defocus Deblurring with Misaligned Training 
Pairs](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.14502)|[Code](https:\u002F\u002Fgithub.com\u002Fliyucs\u002FJDRL)|\n|2023|CVPR|[K3DN: Disparity-Aware Kernel Estimation for Dual-Pixel Defocus Deblurring](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FYang_K3DN_Disparity-Aware_Kernel_Estimation_for_Dual-Pixel_Defocus_Deblurring_CVPR_2023_paper.html)||\n|2023|CVPR|[Better \"CMOS\" Produces Clearer Images: Learning Space-Variant Blur Estimation for Blind Image Super-Resolution](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FChen_Better_CMOS_Produces_Clearer_Images_Learning_Space-Variant_Blur_Estimation_for_CVPR_2023_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002FByChelsea\u002FCMOS)|\n|2023|CVPR|[Neumann Network With Recursive Kernels for Single Image Defocus Deblurring](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FQuan_Neumann_Network_With_Recursive_Kernels_for_Single_Image_Defocus_Deblurring_CVPR_2023_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002FcsZcWu\u002FNRKNet)|\n|2023|CVPR|[DP-NeRF: Deblurred Neural Radiance Field With Physical Scene Priors](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FLee_DP-NeRF_Deblurred_Neural_Radiance_Field_With_Physical_Scene_Priors_CVPR_2023_paper.html)|[Code](https:\u002F\u002Fdogyoonlee.github.io\u002Fdpnerf\u002F)|\n|2023|ICCV|[Single Image Defocus Deblurring via Implicit Neural Inverse Kernels](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FQuan_Single_Image_Defocus_Deblurring_via_Implicit_Neural_Inverse_Kernels_ICCV_2023_paper.pdf)||\n|2023|IJCV|[End-to-end Alternating Optimization for Real-World Blind Super Resolution](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.08816)|[Code](https:\u002F\u002Fgithub.com\u002Fgreatlog\u002FRealDAN.git)|\n|2023|Arxiv|[LaKDNet: Revisiting Image Deblurring with an Efficient ConvNet](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2302.02234.pdf)|[Code](https:\u002F\u002Fgithub.com\u002Flingyanruan\u002FLaKDNet)|\n|2024|WACV|[Camera-Independent Single Image Depth Estimation From Defocus Blur](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FWACV2024\u002Fpapers\u002FWijayasingha_Camera-Independent_Single_Image_Depth_Estimation_From_Defocus_Blur_WACV_2024_paper.pdf)||\n|2024|CVPR|[A Unified Framework for Microscopy Defocus Deblur with Multi-Pyramid Transformer and Contrastive Learning](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FZhang_A_Unified_Framework_for_Microscopy_Defocus_Deblur_with_Multi-Pyramid_Transformer_CVPR_2024_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002FPieceZhang\u002FMPT-CataBlur)|\n|2024|CVPR|[LDP: Language-driven Dual-Pixel Image Defocus Deblurring Network](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FYang_LDP_Language-driven_Dual-Pixel_Image_Defocus_Deblurring_Network_CVPR_2024_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002Fnoxsine\u002FLDP)|\n\n\n## Benchmark Datasets on Motion Deblurring\n|Year|Pub|Paper|Repo|\n|:---:|:---:|:---:|:---:|\n|2009|CVPR|[Understanding and evaluating blind deconvolution algorithms](http:\u002F\u002Fwebee.technion.ac.il\u002Fpeople\u002Fanat.levin\u002Fpapers\u002FdeconvLevinEtalCVPR09.pdf)|[Dataset](http:\u002F\u002Fwebee.technion.ac.il\u002Fpeople\u002Fanat.levin\u002Fpapers\u002FLevinEtalCVPR09Data.rar)|\n|2012|ECCV|[Recording and playback of camera shake: benchmarking blind 
deconvolution with a real-world database](http:\u002F\u002Fwebdav.is.mpg.de\u002Fpixel\u002Fbenchmark4camerashake\u002Fsrc_files\u002FPdf\u002FKoehler_ECCV2012_Benchmark.pdf)|[Dataset](http:\u002F\u002Fwebdav.is.mpg.de\u002Fpixel\u002Fbenchmark4camerashake\u002F)|\n|2013|ICCP|[Edge-based blur kernel estimation using patch priors](http:\u002F\u002Fcs.brown.edu\u002F~lbsun\u002Fdeblur2013\u002Fpatchdeblur_iccp2013.pdf)|[Dataset](http:\u002F\u002Fcs.brown.edu\u002F~lbsun\u002Fdeblur2013\u002Fdeblur2013iccp.html)|\n|2016|CVPR|[A Comparative Study for Single Image Blind Deblurring](http:\u002F\u002Fvllab.ucmerced.edu\u002Fwlai24\u002Fcvpr16_deblur_study\u002Fpaper\u002Fcvpr16_deblur_study.pdf)|[Dataset](http:\u002F\u002Fvllab.ucmerced.edu\u002Fwlai24\u002Fcvpr16_deblur_study\u002F)|\n|2017|CVPR (GOPRO)|[Deep multi-scale convolutional neural network for dynamic scene deblurring](http:\u002F\u002Fzpascal.net\u002Fcvpr2017\u002FNah_Deep_Multi-Scale_Convolutional_CVPR_2017_paper.pdf)|[Dataset](https:\u002F\u002Fgithub.com\u002FSeungjunNah\u002FDeepDeblur_release)|\n|2017|CVPR (DVD)|[Deep Video Deblurring for Hand-Held Cameras](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fhtml\u002FSu_Deep_Video_Deblurring_CVPR_2017_paper.html)|[Dataset](http:\u002F\u002Fwww.cs.ubc.ca\u002Flabs\u002Fimager\u002Ftr\u002F2017\u002FDeepVideoDeblurring\u002F)|\n|2017|GCPR|[Motion deblurring in the wild](https:\u002F\u002Farxiv.org\u002Fabs\u002F1701.01486)||\n|2019|CVPR (Stereo Blur Dataset)|[Stereo Deblurring With View Aggregation](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fhtml\u002FZhou_DAVANet_Stereo_Deblurring_With_View_Aggregation_CVPR_2019_paper.html)|[Dataset](https:\u002F\u002Fstereoblur.shangchenzhou.com\u002F)|\n|2019|CVPR_W (REDS)|[NTIRE 2019 Challenge on Video Deblurring and Super-Resolution: Dataset and Study](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPRW_2019\u002Fhtml\u002FNTIRE\u002FNah_NTIRE_2019_Challenge_on_Video_Deblurring_and_Super-Resolution_Dataset_and_CVPRW_2019_paper.html)|[Dataset](https:\u002F\u002Fseungjunnah.github.io\u002FDatasets\u002Freds)|\n|2019|ICCV (HIDE)|[Human-Aware Motion Deblurring](https:\u002F\u002Fpdfs.semanticscholar.org\u002F20a4\u002Fb3353579525f0b76ec42e17a2284b4453f9a.pdf)|[Dataset](https:\u002F\u002Fgithub.com\u002Fjoanshen0508\u002FHA_deblur)|\n|2020|CVPR|[Deblurring by Realistic Blurring](https:\u002F\u002Farxiv.org\u002Fabs\u002F2004.01860)|[Dataset](https:\u002F\u002Fgithub.com\u002FHDCVLab\u002FDeblurring-by-Realistic-Blurring)|\n|2020|CVPR|[Learning Event-Based Motion Deblurring](https:\u002F\u002Farxiv.org\u002Fabs\u002F2004.05794)||\n|2020|ECCV (BSD)|[Efficient Spatio-Temporal Recurrent Neural Network for Video Deblurring](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2020\u002Fpapers_ECCV\u002Fpapers\u002F123510188.pdf)|[Dataset](https:\u002F\u002Fgithub.com\u002Fzzh-tech\u002FESTRNN)|\n|2020|ECCV|[Real-World Blur Dataset for Learning and Benchmarking Deblurring Algorithms](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2020\u002Fpapers_ECCV\u002Fpapers\u002F123700188.pdf)|[Code & Dataset](http:\u002F\u002Fcg.postech.ac.kr\u002Fresearch\u002Frealblur\u002F)|\n|2021|CVPR (BS-RSCD)|[Towards Rolling Shutter Correction and Deblurring in Dynamic Scenes](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.01601)|[Dataset](https:\u002F\u002Fgithub.com\u002Fzzh-tech\u002FRSCD)||\n|2021|Arxiv|[MC-Blur: A Comprehensive Benchmark for Image 
Deblurring](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2112.00234.pdf)|[Dataset](https:\u002F\u002Fgithub.com\u002FHDCVLab\u002FMC-Blur-Dataset)||\n|2022|ECCV|[Realistic Blur Synthesis for Learning Image Deblurring](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.08771)|[Code & Dataset](https:\u002F\u002Fgithub.com\u002Frimchang\u002FRSBlur)||\n|2022|IJCV (BSD)|[Real-world Video Deblurring: A Benchmark Dataset and An Efficient Recurrent Neural Network](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.16028)|[Dataset](https:\u002F\u002Fgithub.com\u002Fzzh-tech\u002FESTRNN)|\n|2023|CVPR (RBI)|[Blur Interpolation Transformer for Real-World Motion from Blur](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.11423)|[Code & Dataset](https:\u002F\u002Fgithub.com\u002Fzzh-tech\u002FBiT)|\n|2023|AAAI|[Real-world deep local motion deblurring](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.08179)|[Code&Dataset](https:\u002F\u002Fgithub.com\u002FLeiaLi\u002FReLoBlur)|\n|2024|ECCV_W|[DAVIDE: Depth-Aware Video Deblurring](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.01274)|[Code & Dataset](https:\u002F\u002Fgermanftv.github.io\u002FDAVIDE.github.io\u002F)|\n\nAbbreviations:\n\n+ DL -> Deep Learning\n+ non-DL -> non-Deep Learning\n\n## AI-Photo-Enhancer-Apps\n+ [HitPaw Photo Enhancer](https:\u002F\u002Fwww.hitpaw.com\u002Fphoto-enhancer.html)\n\n","\u003C!--图像和视频去模糊资源精选列表-->\n\u003C!-- 项目Logo -->\n\u003Cp align=\"center\">\n  \u003Ch3 align=\"center\">图像和视频去模糊\u003C\u002Fh3>\n  \u003Cp align=\"center\">图像和视频去模糊资源精选列表\n    \u003Cbr \u002F>\n    \u003Cbr \u002F>\n    \u003Cbr \u002F>\n    \u003Ca href=\"https:\u002F\u002Fgithub.com\u002Fsubeeshvasu\u002FAwesome-Deblurring\u002Fpulls\u002Fnew\">建议新增条目\u003C\u002Fa>\n    \u003Cbr \u002F>\n    \u003Ca href=\"https:\u002F\u002Fgithub.com\u002Fsubeeshvasu\u002FAwesome-Deblurring\u002Fissues\u002Fnew\">报告Bug\u003C\u002Fa>\n  \u003C\u002Fp>\n\u003C\u002Fp>\n\n## 目录\n\n- [单张图像盲运动去模糊（非深度学习）](#single-image-blind-motion-deblurring-non-dl)\n- [单张图像盲运动去模糊（深度学习）](#single-image-blind-motion-deblurring-dl)\n- [非盲去模糊](#non-blind-deblurring)\n- [(多图像\u002F视频)运动去模糊](#multi-imagevideo-motion-deblurring)\n- [深度感知运动去模糊](#depth-aware-motion-deblurring)\n- [其他密切相关的工作](#other-closely-related-works)\n- [散焦去模糊及潜在数据集](#defocus-deblurring-and-potential-datasets)\n- [运动去模糊基准数据集](#benchmark-datasets-on-motion-deblurring)\n- [AI照片增强应用](#AI-Photo-Enhancer-Apps)\n\n## 
单张图像盲运动去模糊（非深度学习）\n|年份|期刊\u002F会议|论文|代码及项目页面|\n|:---:|:---:|:---:|:---:|\n|2006|TOG|[从单张照片中去除相机抖动](https:\u002F\u002Fcs.nyu.edu\u002F~fergus\u002Fpapers\u002Fdeblur_fergus.pdf)|[代码与项目页面](https:\u002F\u002Fcs.nyu.edu\u002F~fergus\u002Fresearch\u002Fdeblur.html)|\n|2007|CVPR|[利用透明度进行单幅图像运动去模糊](http:\u002F\u002Fjiaya.me\u002Fall_final_papers\u002Fmotion_deblur_cvpr07.pdf)|||\n|2008|CVPR|[基于清晰边缘预测的点扩散函数估计](http:\u002F\u002Fvision.ucsd.edu\u002Fkriegman-grp\u002Fresearch\u002Fpsf_estimation\u002Fpsf_estimation.pdf)|[项目页面](http:\u002F\u002Fvision.ucsd.edu\u002Fkriegman-grp\u002Fresearch\u002Fpsf_estimation\u002F)|\n|2008|TOG|[高质量的单幅图像运动去模糊](http:\u002F\u002Fwww.cse.cuhk.edu.hk\u002F~leojia\u002Fprojects\u002Fmotion_deblurring\u002Fdeblur_siggraph08.pdf)|[代码与项目页面](http:\u002F\u002Fwww.cse.cuhk.edu.hk\u002F~leojia\u002Fprojects\u002Fmotion_deblurring\u002Findex.html)|\n|2009|TOG|[快速运动去模糊](https:\u002F\u002Fvclab.dgist.ac.kr\u002Fdownload\u002Ffast_motion_deblurring\u002Fpaper.pdf)|||\n|2009|CVPR|[利用颜色先验进行图像去模糊与去噪](http:\u002F\u002Fneelj.com\u002Fprojects\u002Ftwocolordeconvolution\u002Ftwo_color_deconvolution.pdf)|[项目页面](http:\u002F\u002Fneelj.com\u002Fprojects\u002Ftwocolordeconvolution\u002F)|\n|2010|CVPR|[用于空变多帧盲反卷积的高效滤波流](https:\u002F\u002Fpure.mpg.de\u002Frest\u002Fitems\u002Fitem_1789030\u002Fcomponent\u002Ffile_3009627\u002Fcontent)|||\n|2010|CVPR|[针对抖动图像的非均匀去模糊](http:\u002F\u002Fwww.di.ens.fr\u002Fwillow\u002Fpdfs\u002Fcvpr10d.pdf)|[代码与项目页面](https:\u002F\u002Fwww.di.ens.fr\u002Fwillow\u002Fresearch\u002Fdeblurring\u002F)|\n|2010|CVPR|[去噪与去模糊：使用移动相机的HDR成像技术](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F5540171)|||\n|2010|ECCV|[利用运动密度函数进行单幅图像去模糊](http:\u002F\u002Fgrail.cs.washington.edu\u002Fprojects\u002Fmdf_deblurring\u002Fgupta_mdf_deblurring.pdf)|[项目页面](http:\u002F\u002Fgrail.cs.washington.edu\u002Fprojects\u002Fmdf_deblurring\u002F)|\n|2010|ECCV|[两阶段核估计用于鲁棒运动去模糊](http:\u002F\u002Fwww.cse.cuhk.edu.hk\u002F~leojia\u002Fprojects\u002Frobust_deblur\u002Frobust_motion_deblurring.pdf)|[代码与项目页面](http:\u002F\u002Fwww.cse.cuhk.edu.hk\u002F~leojia\u002Fprojects\u002Frobust_deblur\u002Findex.html)|\n|2010|NIPS|[用于去除相机抖动的空变单幅图像盲反卷积](https:\u002F\u002Fpapers.nips.cc\u002Fpaper\u002F4007-space-variant-single-image-blind-deconvolution-for-removing-camera-shake.pdf)|||\n|2011|CVPR|[利用归一化稀疏度量进行盲反卷积](https:\u002F\u002Fdilipkay.files.wordpress.com\u002F2019\u002F04\u002Fpriors_cvpr11.pdf)|[代码与项目页面](https:\u002F\u002Fdilipkay.wordpress.com\u002Fblind-deconvolution\u002F)|\n|2011|CVPR|[利用拉东变换进行模糊核估计](http:\u002F\u002Fpeople.csail.mit.edu\u002Fsparis\u002Fpubli\u002F2011\u002Fcvpr_radon\u002FCho_11_Blur_Kernel_Estimation.pdf)|[代码](http:\u002F\u002Fpeople.csail.mit.edu\u002Ftaegsang\u002FThesis.html)|\n|2011|CVPR|[探索对齐的互补图像对以进行盲运动去模糊](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F5995351)|||\n|2011|ICCV|[快速去除非均匀相机抖动](http:\u002F\u002Fpixel.kyb.tuebingen.mpg.de\u002Ffast_removal_of_camera_shake\u002Ffiles\u002FHirsch_ICCV2011_Fast%20removal%20of%20non-uniform%20camera%20shake.pdf)|||\n|2011|IJCV|[非参数化的亚像素局部点扩散函数估计是一个适定问题](https:\u002F\u002Flink.springer.com\u002Farticle\u002F10.1007\u002Fs11263-011-0460-0)|||\n|2012|ECCV|[从光谱不规则性中估计模糊核](http:\u002F\u002Fciteseerx.ist.psu.edu\u002Fviewdoc\u002Fdownload?doi=10.1.1.646.4404&rep=rep1&type=pdf)|||\n|2012|ACCV|[基于MRF的盲图像反卷积](http:\u002F\u002Fimagine.enpc.fr\u002F~komodakn\u002Fpublications\u002Fdocs\u002Faccv2012.pdf)|||\n|2012|TIP|[基于框架的小波的单幅图像盲运动去模糊](https:\u002F\u002Fwww.math.hkust.edu.hk\u002F~jfcai\u002Fpaper\u002FCJLS_TIP_11.pdf)|||\n|201
3|CVPR|[针对自然图像去模糊的非自然L0稀疏表示](http:\u002F\u002Fwww.cse.cuhk.edu.hk\u002F~leojia\u002Fprojects\u002Fl0deblur\u002Fl0deblur_cvpr13.pdf)|[代码与项目页面](http:\u002F\u002Fwww.cse.cuhk.edu.hk\u002F~leojia\u002Fprojects\u002Fl0deblur\u002F)|\n|2013|CVPR|[利用方向滤波器处理单幅图像去模糊中的噪声](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2013\u002Fhtml\u002FZhong_Handling_Noise_in_2013_CVPR_paper.html)|||\n|2013|NIPS|[利用空间自适应稀疏惩罚去除非均匀相机抖动](https:\u002F\u002Fpapers.nips.cc\u002Fpaper\u002F4864-non-uniform-camera-shake-removal-using-a-spatially-adaptive-sparse-penalty)|[项目页面](https:\u002F\u002Fsites.google.com\u002Fsite\u002Fhczhang1\u002Fprojects\u002Fnon-uniform-camera-shake-removal)|\n|2013|ICCV|[动态场景去模糊](https:\u002F\u002Fcv.snu.ac.kr\u002Fpublication\u002Fconf\u002F2013\u002FDSD_ICCV2013.pdf)|||\n|2013|ICCP|[基于边缘和补丁先验的模糊核估计](http:\u002F\u002Fcs.brown.edu\u002F~lbsun\u002Fdeblur2013\u002Fpatchdeblur_iccp2013.pdf)|[项目页面、结果与数据集](http:\u002F\u002Fcs.brown.edu\u002F~lbsun\u002Fdeblur2013\u002Fdeblur2013iccp.html)|\n|2014|CVPR|[通过L0正则化的强度和梯度先验对文本图像去模糊](https:\u002F\u002Feng.ucmerced.edu\u002Fpeople\u002Fzhu\u002FCVPR14_deblurtext.pdf)|[代码与项目页面](https:\u002F\u002Fsites.google.com\u002Fsite\u002Fjspanhomepage\u002Fl0rigdeblur)|\n|2014|CVPR|[无分割的动态场景去模糊](https:\u002F\u002Fcv.snu.ac.kr\u002Fpublication\u002Fconf\u002F2014\u002FSFDSD_CVPR2014.pdf)|||\n|2014|CVPR|[用于图像去模糊的可分离核](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2014\u002Fhtml\u002FFang_Separable_Kernel_for_2014_CVPR_paper.html)|||\n|2014|CVPR|[带有光条纹的低光照图像去模糊](https:\u002F\u002Feng.ucmerced.edu\u002Fpeople\u002Fzhu\u002FCVPR14_lightstreak.pdf)|[代码与项目页面](https:\u002F\u002Feng.ucmerced.edu\u002Fpeople\u002Fzhu\u002FCVPR14_lightstreak.html)|\n|2014|CVPR|[从单张模糊图像中联合估计深度与去除相机抖动](https:\u002F\u002Feng.ucmerced.edu\u002Fpeople\u002Fzhu\u002FCVPR14_deblurdepth.pdf)|||\n|2014|ECCV|[通过融合边缘和功率谱信息进行混合图像去模糊](http:\u002F\u002Fwww.juew.org\u002Fpublication\u002FECCV14-hybridDeblur.pdf)|||\n|2014|ECCV|[利用范例进行人脸图像去模糊](https:\u002F\u002Ffaculty.ucmerced.edu\u002Fmhyang\u002Fpapers\u002Feccv14_deblur.pdf)|[代码与项目页面](https:\u002F\u002Feng.ucmerced.edu\u002Fpeople\u002Fzhu\u002FECCV14_facedeblur.html)|\n|2014|ECCV|[利用内部补丁重复进行盲去模糊](http:\u002F\u002Fwww.wisdom.weizmann.ac.il\u002F~vision\u002FBlindDeblur\u002FMichaeli_Irani_ECCV2014.pdf)|[代码与项目页面](http:\u002F\u002Fwww.wisdom.weizmann.ac.il\u002F~vision\u002FBlindDeblur.html)|\n|2014|NIPS|[尺度自适应的盲去模糊](https:\u002F\u002Fpapers.nips.cc\u002Fpaper\u002F5566-scale-adaptive-blind-deblurring)|[项目页面](https:\u002F\u002Fsites.google.com\u002Fsite\u002Fhczhang1\u002Fprojects\u002Fscale-adaptive-blind-deblurring)|\n|2015|CVPR|[突发去模糊：通过傅里叶突发累积去除相机抖动](http:\u002F\u002Fdev.ipol.im\u002F~mdelbra\u002Ffba\u002FFBA_cvpr2015_preprint.pdf)|[项目页面](http:\u002F\u002Fiie.fing.edu.uy\u002F~mdelbra\u002Ffba\u002F)|\n|2015|CVPR|[为更好图像去模糊而进行的核融合](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2015\u002Fhtml\u002FMai_Kernel_Fusion_for_2015_CVPR_paper.html)|[项目页面](http:\u002F\u002Fweb.cecs.pdx.edu\u002F~fliu\u002Fproject\u002Fkernelfusion\u002F)|\n|2015|ICCV|[特定类别的图像去模糊](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_iccv_2015\u002Fhtml\u002FAnwar_Class-Specific_Image_Deblurring_ICCV_2015_paper.html)|[代码与项目页面](https:\u002F\u002Fgithub.com\u002Fsaeed-anwar\u002FClass_Specific_Deblurring)|\n|2015|TIP|[用于面部去模糊的耦合学习](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1904.08671.pdf)|||\n|2016|CVPR|[利用暗通道先验进行盲图像去模糊](http:\u002F\u002Fvllab1.ucmerced.edu\u002F~jinshan\u002Fprojects\u002Fdark-channel-deblur\u002Fdark-channel-deblur\u002Fcvpr16-dark-ch
annel-deblur.pdf)|[代码与项目页面](http:\u002F\u002Fvllab1.ucmerced.edu\u002F~jinshan\u002Fprojects\u002Fdark-channel-deblur\u002F)|\n|2016|CVPR|[具有异常值处理的鲁棒核估计用于图像去模糊](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2016\u002Fhtml\u002FPan_Robust_Kernel_Estimation_CVPR_2016_paper.html)|[代码](https:\u002F\u002Fwww.dropbox.com\u002Fs\u002Fhz9qmi8ar1k1zn0\u002Fpcode.zip?dl=0)|\n|2016|CVPR|[通过自动梯度激活进行盲图像反卷积](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2016\u002Fpapers\u002FGong_Blind_Image_Deconvolution_CVPR_2016_paper.pdf)|||\n|2017|CVPR|[通过极端通道先验进行图像去模糊](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fhtml\u002FYan_Image_Deblurring_via_CVPR_2017_paper.html)|[代码与项目页面](https:\u002F\u002Fsites.google.com\u002Fsite\u002Frenwenqi888\u002Fresearch\u002Fdeblurring\u002Fecp)|\n|2017|CVPR|[从局部到全局：模糊图像中的边缘轮廓与相机运动](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fhtml\u002FVasu_From_Local_to_CVPR_2017_paper.html)|[项目页面与基准数据集上的结果](https:\u002F\u002Fsubeeshvasu.github.io\u002F2017_subeesh_from_cvpr\u002F)|\n|2017|ICCV|[具有异常值处理的盲图像去模糊](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fpapers\u002FDong_Blind_Image_Deblurring_ICCV_2017_paper.pdf)|[代码](https:\u002F\u002Fwww.dropbox.com\u002Fs\u002Fqmxkkwgnmuwrfoj\u002Fcode_iccv2017_outlier.zip?dl=0)|\n|2017|ICCV|[用于鲁棒盲图像去模糊的自步核估计](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fpapers\u002FGong_Self-Paced_Kernel_Estimation_ICCV_2017_paper.pdf)|[代码](https:\u002F\u002Fdonggong1.github.io\u002Fpublications.html)，[结果](https:\u002F\u002Fdrive.google.com\u002Fopen?id=1gP_s-87js7KKFrIzAlushc1HJqEogR1L)|\n|2017|ICCV|[基于MAP的模糊核估计的收敛性分析](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_iccv_2017\u002Fhtml\u002FCho_Convergence_Analysis_of_ICCV_2017_paper.html)|||\n|2018|ECCV|[归一化的盲反卷积](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fhtml\u002FMeiguang_Jin_Normalized_Blind_Deconvolution_ECCV_2018_paper.html)|[代码](https:\u002F\u002Fgithub.com\u002FMeiguangJin\u002FNBD)|\n|2018|ECCV|[利用超高斯场进行自然图像去模糊](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fhtml\u002FYuhang_Liu_Deblurring_Natural_Image_ECCV_2018_paper.html)|[代码](https:\u002F\u002Fdonggong1.github.io\u002Fpublications.html)|\n|2019|CVPR|[具有局部最大梯度先验的盲图像去模糊](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fhtml\u002FChen_Blind_Image_Deblurring_With_Local_Maximum_Gradient_Prior_CVPR_2019_paper.html)|[代码](https:\u002F\u002Fgithub.com\u002Fcuiyixin555\u002FLMG)|\n|2019|CVPR|[基于仅相位图像的核估计用于单幅图像盲去模糊](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fhtml\u002FPan_Phase-Only_Image_Based_Kernel_Estimation_for_Single_Image_Blind_Deblurring_CVPR_2019_paper.html)|[基准数据集上的结果](https:\u002F\u002Fgithub.com\u002Fpanpanfei\u002FPhase-only-Image-Based-Kernel-Estimation-for-Blind-Motion-Deblurring\u002Ftree\u002Fmaster\u002Fresult)|\n|2019|CVPR|[具有自适应边缘选择的变分EM框架用于盲运动去模糊](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fhtml\u002FYang_A_Variational_EM_Framework_With_Adaptive_Edge_Selection_for_Blind_CVPR_2019_paper.html)|||\n|2019|TIP|[基于图的单幅图像盲去模糊](https:\u002F\u002Farxiv.org\u002Fabs\u002F1802.07929)|[代码](https:\u002F\u002Fgithub.com\u002FBYchao100\u002FGraph-Based-Blind-Image-Deblurring)|\n|2019|TPAMI|[表面感知的盲图像去模糊](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F8839600)|||\n|2019|TCSVT|[利用多尺度潜在结构先验进行单幅图像盲去模糊](https:\u002F\u002Farxiv.org\u002Fabs\u002F1906.04442)|||\n|2020|ECCV|[OID：在盲图像去模糊中识别并丢弃异常值](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002F
eccv_2020\u002Fpapers_ECCV\u002Fhtml\u002F5134_ECCV_2020_paper.php)|[代码与数据](https:\u002F\u002Fliangchen527.github.io\u002F)|\n|2020|ECCV|[增强的稀疏模型用于盲去模糊](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2020\u002Fpapers_ECCV\u002Fpapers\u002F123700630.pdf)|[代码](https:\u002F\u002Fliangchen527.github.io\u002F)|\n|2021|CVPR|[饱和图像的盲去模糊](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fpapers\u002FChen_Blind_Deblurring_for_Saturated_Images_CVPR_2021_paper.pdf)|[代码与数据](https:\u002F\u002Fliangchen527.github.io\u002F)|\n|2021|TCI|[Polyblur：通过多项式再模糊去除轻微模糊](https:\u002F\u002Farxiv.org\u002Fabs\u002F2012.09322)|||\n|2021|SPIC|[利用更深的稀疏补丁式最大梯度先验进行快速盲反卷积](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0923596520301910)|||\n|2021|TCSVT|[利用补丁式最小像素正则化进行盲图像去模糊](https:\u002F\u002F128.84.21.199\u002Fabs\u002F1906.06642v3)|[代码](https:\u002F\u002Fgithub.com\u002FFWen\u002Fdeblur-pmp)|\n|2022|CVPR|[基于像素筛选的中间校正用于盲去模糊](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fhtml\u002FZhang_Pixel_Screening_Based_Intermediate_Correction_for_Blind_Deblurring_CVPR_2022_paper.html)||\n\n\n\n## Single-Image-Blind-Motion-Deblurring (DL)\n|Year|Pub|Paper|Repo|\n|:---:|:---:|:---:|:---:|\n|2015|CVPR|[Learning a convolutional neural network for non-uniform motion blur removal](https:\u002F\u002Fwww.cv-foundation.org\u002Fopenaccess\u002Fcontent_cvpr_2015\u002Fpapers\u002FSun_Learning_a_Convolutional_2015_CVPR_paper.pdf)|[Code 1](http:\u002F\u002Fgr.xjtu.edu.cn\u002Fc\u002Fdocument_library\u002Fget_file?folderId=2076150&name=DLFE-78101.zip),[Code 2](https:\u002F\u002Fgithub.com\u002FSibozhu\u002FMotionBlur-detection-by-CNN)|\n|2015|BMVC|[Convolutional neural networks for direct text deblurring](http:\u002F\u002Fwww.bmva.org\u002Fbmvc\u002F2015\u002Fpapers\u002Fpaper006\u002Findex.html)|[Code and Project Page](http:\u002F\u002Fwww.fit.vutbr.cz\u002F~ihradis\u002FCNN-Deblur\u002F)|\n|2016|ECCV|[A neural approach to blind motion deblurring](https:\u002F\u002Farxiv.org\u002Fabs\u002F1603.04771)|[Code](https:\u002F\u002Fgithub.com\u002Fayanc\u002Fndeblur)|\n|2016|PAMI|[Learning to deblur](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1406.7444.pdf)||\n|2017|CVPR|[Deep multi-scale convolutional neural network for dynamic scene deblurring](http:\u002F\u002Fzpascal.net\u002Fcvpr2017\u002FNah_Deep_Multi-Scale_Convolutional_CVPR_2017_paper.pdf)|[Code](https:\u002F\u002Fgithub.com\u002FSeungjunNah\u002FDeepDeblur_release)|\n|2017|CVPR|[From Motion Blur to Motion Flow: A Deep Learning Solution for Removing Heterogeneous Motion Blur](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fpapers\u002FGong_From_Motion_Blur_CVPR_2017_paper.pdf)|[Code & Project page](https:\u002F\u002Fdonggong1.github.io\u002Fblur2mflow.html)|\n|2017|ICCV|[Blur-Invariant Deep Learning for Blind Deblurring](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fpapers\u002FNimisha_Blur-Invariant_Deep_Learning_ICCV_2017_paper.pdf)||\n|2017|ICCV|[Learning to Super-resolve Blurry Face and Text Images](http:\u002F\u002Ffaculty.ucmerced.edu\u002Fmhyang\u002Fpapers\u002Ficcv2017_gan_super_deblur.pdf)|[Code & Project page](https:\u002F\u002Fsites.google.com\u002Fview\u002Fxiangyuxu\u002Fdeblursr_iccv17)|\n|2017|ICCV|[Learning Discriminative Data Fitting Functions for Blind Image 
Deblurring](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fpapers\u002FPan_Learning_Discriminative_Data_ICCV_2017_paper.pdf)|[Code](https:\u002F\u002Fwww.dropbox.com\u002Fs\u002Foavk46q521fiowr\u002Ficcv17_learning_deblur_code.zip?dl=0)|\n|2018|ICIP|[Semi-supervised Learning of Camera Motion from a Blurred Image](https:\u002F\u002Fapvijay.github.io\u002Fpdf\u002F2018_icip.pdf)||\n|2018|TIP|[Motion blur kernel estimation via deep learning](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F8039224)|[Code & Project page](https:\u002F\u002Fsites.google.com\u002Fview\u002Fxiangyuxu\u002Fdeepedge_tip)|\n|2018|CVPR|[Deep Semantic Face Deblurring](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fhtml\u002FShen_Deep_Semantic_Face_CVPR_2018_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002Fjoanshen0508\u002FDeep-Semantic-Face-Deblurring)|\n|2018|CVPR|[Learning a Discriminative Prior for Blind Image Deblurring](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fhtml\u002FLi_Learning_a_Discriminative_CVPR_2018_paper.html)|[Code & Project page](https:\u002F\u002Fsites.google.com\u002Fview\u002Flerenhanli\u002Fhomepage\u002Flearn_prior_deblur)|\n|2018|CVPR|[Dynamic Scene Deblurring Using Spatially Variant Recurrent Neural Networks](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fhtml\u002FZhang_Dynamic_Scene_Deblurring_CVPR_2018_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002Fzhjwustc\u002Fcvpr18_rnn_deblur_matcaffe)|\n|2018|CVPR|[Scale-recurrent network for deep image deblurring](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fhtml\u002FTao_Scale-Recurrent_Network_for_CVPR_2018_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002Fjiangsutx\u002FSRN-Deblur)|\n|2018|CVPR|[Deblurgan: Blind motion deblurring using conditional adversarial networks](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fhtml\u002FKupyn_DeblurGAN_Blind_Motion_CVPR_2018_paper.html)|[Code-Pytorch](https:\u002F\u002Fgithub.com\u002FKupynOrest\u002FDeblurGAN)|\n|2018|ECCV|[Unsupervised Class-Specific Deblurring](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fhtml\u002FNimisha_T_M_Unsupervised_Class-Specific_Deblurring_ECCV_2018_paper.html)||\n|2018|BMVC|[Gated Fusion Network for Joint Image Deblurring and Super-Resolution](https:\u002F\u002Farxiv.org\u002Fabs\u002F1807.10806)|[Code](https:\u002F\u002Fgithub.com\u002Fjacquelinelala\u002FGFN)|[Project page](http:\u002F\u002Fxinyizhang.tech\u002Fbmvc2018\u002F)|\n|2019|WACV|[Gyroscope-Aided Motion Deblurring with Deep Networks](https:\u002F\u002Farxiv.org\u002Fabs\u002F1810.00986)|[Code](https:\u002F\u002Fgithub.com\u002Fjannemus\u002FDeepGyro)|\n|2019|CVPR|[Dynamic Scene Deblurring With Parameter Selective Sharing and Nested Skip Connections](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fhtml\u002FGao_Dynamic_Scene_Deblurring_With_Parameter_Selective_Sharing_and_Nested_Skip_CVPR_2019_paper.html)||\n|2019|CVPR|[Deep Stacked Hierarchical Multi-Patch Network for Image Deblurring](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fhtml\u002FZhang_Deep_Stacked_Hierarchical_Multi-Patch_Network_for_Image_Deblurring_CVPR_2019_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002FHongguangZhang\u002FDMPHN-cvpr19-master)|\n|2019|CVPR|[Unsupervised Domain-Specific Deblurring via Disentangled 
Representations](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fhtml\u002FLu_Unsupervised_Domain-Specific_Deblurring_via_Disentangled_Representations_CVPR_2019_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002Fustclby\u002FUnsupervised-Domain-Specific-Deblurring)|\n|2019|CVPR|[Bringing Alive Blurred Moments](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fhtml\u002FPurohit_Bringing_Alive_Blurred_Moments_CVPR_2019_paper.html)|[Project page & Results-on-benchmark-datasets](https:\u002F\u002Fgithub.com\u002Fanshulbshah\u002FBlurred-Image-to-Video)|\n|2019|CVPR|[Douglas-Rachford Networks: Learning Both the Image Prior and Data Fidelity Terms for Blind Image Deconvolution](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fhtml\u002FAljadaany_Douglas-Rachford_Networks_Learning_Both_the_Image_Prior_and_Data_Fidelity_CVPR_2019_paper.html)||\n|2019|ICCV|[DeblurGAN-v2: Deblurring (Orders-of-Magnitude) Faster and Better](https:\u002F\u002Farxiv.org\u002Fabs\u002F1908.03826)|[Code](https:\u002F\u002Fgithub.com\u002FTAMU-VITA\u002FDeblurGANv2)|\n|2019|ICCV (HIDE)|[Human-Aware Motion Deblurring](https:\u002F\u002Fpdfs.semanticscholar.org\u002F20a4\u002Fb3353579525f0b76ec42e17a2284b4453f9a.pdf)||\n|2019|BMVC|[Blind image deconvolution using deep generative priors](https:\u002F\u002Farxiv.org\u002Fabs\u002F1802.04073)||\n|2019|ACMMM|[Tell Me Where It is Still Blurry: Adversarial Blurred Region Mining and Refining](https:\u002F\u002Fwww.iis.sinica.edu.tw\u002Fpapers\u002Fliutyng\u002F22871-F.pdf)||\n|2019|IJCV|[Joint Face Hallucination and Deblurring via Structure Generation and Detail Enhancement](https:\u002F\u002Farxiv.org\u002Fabs\u002F1811.09019)|[Code](https:\u002F\u002Fgithub.com\u002FTAMU-VITA\u002FDeblurGANv2)|\n|2020|AAAI|[Learning to Deblur Face Images via Sketch Synthesis](https:\u002F\u002Faaai.org\u002Fojs\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F6818\u002F6672)||\n|2020|AAAI|[Region-Adaptive Dense Network for Efficient Motion Deblurring](https:\u002F\u002Farxiv.org\u002Fabs\u002F1903.11394)||\n|2020|WACV|[DAVID: Dual-Attentional Video Deblurring](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_WACV_2020\u002Fhtml\u002FWu_DAVID_Dual-Attentional_Video_Deblurring_WACV_2020_paper.html)||\n|2020|CVPR|[Neural Blind Deconvolution Using Deep Priors](https:\u002F\u002Farxiv.org\u002Fabs\u002F1908.02197)|[Code](https:\u002F\u002Fgithub.com\u002Fcsdwren\u002FSelfDeblur)|\n|2020|CVPR|[Spatially-Attentive Patch-Hierarchical Network for Adaptive Motion Deblurring](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2004.05343.pdf)||\n|2020|CVPR|[Deblurring by Realistic Blurring](https:\u002F\u002Farxiv.org\u002Fabs\u002F2004.01860)|[Code](https:\u002F\u002Fgithub.com\u002FHDCVLab\u002FDeblurring-by-Realistic-Blurring)|\n|2020|CVPR|[Learning Event-Based Motion Deblurring](https:\u002F\u002Farxiv.org\u002Fabs\u002F2004.05794)||\n|2020|CVPR|[Efficient Dynamic Scene Deblurring Using Spatially Variant Deconvolution Network With Optical Flow Guided Training](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2020\u002Fhtml\u002FYuan_Efficient_Dynamic_Scene_Deblurring_Using_Spatially_Variant_Deconvolution_Network_With_CVPR_2020_paper.html)||\n|2020|CVPR|[Deblurring using Analysis-Synthesis Networks Pair](https:\u002F\u002Farxiv.org\u002Fabs\u002F2004.02956)||\n|2020|ECCV|[Multi-Temporal Recurrent Neural Networks For Progressive Non-Uniform Single Image Deblurring With Incremental Temporal 
Training](https:\u002F\u002Farxiv.org\u002Fabs\u002F1911.07410)||\n|2020|TIP|[Efficient and Interpretable Deep Blind Image Deblurring Via Algorithm Unrolling](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1902.03493.pdf)||\n|2020|TIP|[Deblurring Face Images using Uncertainty Guided Multi-Stream Semantic Networks](https:\u002F\u002Farxiv.org\u002Fabs\u002F1907.13106)|[Code](https:\u002F\u002Fgithub.com\u002Frajeevyasarla\u002FUMSN-Face-Deblurring)|\n|2020|TIP|[Dark and bright channel prior embedded network for dynamic scene deblurring](https:\u002F\u002Fwww4.comp.polyu.edu.hk\u002F~cslzhang\u002Fpaper\u002FDBCPeNet_TIP.pdf)|[Code](https:\u002F\u002Fgithub.com\u002Fcsjcai\u002FDBCPeNet)|\n|2020|TIP|[Dynamic Scene Deblurring by Depth Guided Model](https:\u002F\u002Ffaculty.ucmerced.edu\u002Fmhyang\u002Fpapers\u002Ftip2020_dynamic_scene_deblurring.pdf)|[Project Page](https:\u002F\u002Fsites.google.com\u002Fview\u002Flerenhanli\u002Fhomepage\u002Fdepth_deblurring)|\n|2020|IEEEAccess|[Scale-Iterative Upscaling Network for Image Deblurring](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F8963625)|[Code](https:\u002F\u002Fgithub.com\u002Fminyuanye\u002FSIUN)|\n|2020|ACCV|[Human Motion Deblurring using Localized Body Prior](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FACCV2020\u002Fhtml\u002FLumentut_Human_Motion_Deblurring_using_Localized_Body_Prior_ACCV_2020_paper.html)||\n|2020|TPAMI|[Physics-Based Generative Adversarial Models for Image Restoration and Beyond](https:\u002F\u002Farxiv.org\u002Fabs\u002F1808.00605)|[Code](https:\u002F\u002Fjspan.github.io\u002Fprojects\u002Fphysicsgan\u002F)|\n|2020|TCI|[Blind Image Deconvolution using Deep Generative Priors](https:\u002F\u002Farxiv.org\u002Fabs\u002F1802.04073)||\n|2020|TMM|[Raw Image Deblurring](https:\u002F\u002Farxiv.org\u002Fabs\u002F2012.04264)|[Dataset](https:\u002F\u002Fgithub.com\u002Fbob831009\u002Fraw_image_deblurring)|\n|2020|Arxiv|[Blur Invariant Kernel-Adaptive Network for Single Image Blind deblurring](https:\u002F\u002Farxiv.org\u002Fabs\u002F2007.04543)||\n|2021|TPAMI|[Exposure Trajectory Recovery from Motion Blur](https:\u002F\u002Farxiv.org\u002Fabs\u002F2010.02484)|[Code](https:\u002F\u002Fgithub.com\u002Fyjzhang96\u002FMotion-ETR)|\n|2021|Arxiv|[BANet: Blur-aware Attention Networks for Dynamic Scene Deblurring](https:\u002F\u002Farxiv.org\u002Fabs\u002F2101.07518)|[Code](https:\u002F\u002Fgithub.com\u002Fpp00704831\u002FBANet)|\n|2021|CVPR|[Multi-Stage Progressive Image Restoration](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2102.02808.pdf)|[Code](https:\u002F\u002Fgithub.com\u002Fswz30\u002FMPRNet)|\n|2021|CVPR|[DeFMO: Deblurring and Shape Recovery of Fast Moving Objects](https:\u002F\u002Farxiv.org\u002Fabs\u002F2012.00595)|[Code](https:\u002F\u002Fgithub.com\u002Frozumden\u002FDeFMO)|\n|2021|CVPR|[Blind Deblurring for Saturated Images](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fhtml\u002FChen_Blind_Deblurring_for_Saturated_Images_CVPR_2021_paper.html)||\n|2021|CVPR|[Test-Time Fast Adaptation for Dynamic Scene Deblurring via Meta-Auxiliary Learning](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fhtml\u002FChi_Test-Time_Fast_Adaptation_for_Dynamic_Scene_Deblurring_via_Meta-Auxiliary_Learning_CVPR_2021_paper.html)||\n|2021|CVPR|[Explore Image Deblurring via Encoded Blur Kernel Space](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.00317)|[Code](https:\u002F\u002Fgithub.com\u002FVinAIResearch\u002Fblur-kernel-space-exploring)|\n|2021|CVPR|[Pre-trained image 
processing transformer](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fpapers\u002FChen_Pre-Trained_Image_Processing_Transformer_CVPR_2021_paper.pdf)|[Code](https:\u002F\u002Fgithub.com\u002Fhuawei-noah\u002FPretrained-IPT)|\n|2021|CVPR|[Multi-stage progressive image restoration](https:\u002F\u002Farxiv.org\u002Fabs\u002F2102.02808)|[Code](https:\u002F\u002Fgithub.com\u002Fswz30\u002FMPRNet)|\n|2021|CVPRW|[Hinet: Half instance normalization network for image restoration](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021W\u002FNTIRE\u002Fpapers\u002FChen_HINet_Half_Instance_Normalization_Network_for_Image_Restoration_CVPRW_2021_paper.pdf)|[Code](https:\u002F\u002Fgithub.com\u002Fmegvii-model\u002FHINet)|\n|2021|ICCV|[Spatially-Adaptive Image Restoration using Distortion-Guided Networks](https:\u002F\u002Farxiv.org\u002Fabs\u002F2108.08617)|[Code](https:\u002F\u002Fgithub.com\u002Fhuman-analysis\u002Fspatially-adaptive-image-restoration\u002F)|\n|2021|ICCV|[Rethinking Coarse-To-Fine Approach in Single Image Deblurring](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fhtml\u002FCho_Rethinking_Coarse-To-Fine_Approach_in_Single_Image_Deblurring_ICCV_2021_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002Fchosj95\u002Fmimo-unet)|\n|2021|ICCV|[Perceptual Variousness Motion Deblurring With Light Global Context Refinement](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fhtml\u002FLi_Perceptual_Variousness_Motion_Deblurring_With_Light_Global_Context_Refinement_ICCV_2021_paper.html)||\n|2021|ICCV|[Pyramid Architecture Search for Real-Time Image Deblurring](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fhtml\u002FHu_Pyramid_Architecture_Search_for_Real-Time_Image_Deblurring_ICCV_2021_paper.html)||\n|2021|ICCV|[Searching for Controllable Image Restoration Networks](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fhtml\u002FKim_Searching_for_Controllable_Image_Restoration_Networks_ICCV_2021_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002Fghimhw\u002FTASNet)|\n|2021|ICCVW|[Sdwnet: A straight dilated network with wavelet transformation for image deblurring](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021W\u002FAIM\u002Fpapers\u002FZou_SDWNet_A_Straight_Dilated_Network_With_Wavelet_Transformation_for_Image_ICCVW_2021_paper.pdf)|[Code](https:\u002F\u002Fgithub.com\u002FFlyEgle\u002FSDWNet)|\n|2021|TIP|[Structure-Aware Motion Deblurring Using Multi-Adversarial Optimized CycleGAN](http:\u002F\u002Fgraphics.csie.ncku.edu.tw\u002FTIP_cycle_2021\u002FTIP2021.pdf)|\n|2021|JSTS|[Degradation Aware Approach to Image Restoration Using Knowledge Distillation](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9288928)||\n|2021|Arxiv|[Non-uniform Blur Kernel Estimation via Adaptive Basis Decomposition](https:\u002F\u002Farxiv.org\u002Fabs\u002F2102.01026)|[Code](https:\u002F\u002Fgithub.com\u002FGuillermoCarbajal\u002FNonUniformBlurKernelEstimation)|\n|2021|Arxiv|[Clean Images are Hard to Reblur: A New Clue for Deblurring](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2104.12665.pdf)||\n|2021|Arxiv|[Deep residual fourier transformation for single image deblurring](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.11745)|[Code](https:\u002F\u002Fgithub.com\u002FINVOKERer\u002FDeepRFT)|\n|2021|CVIU|[Single-image deblurring with neural networks: A comparative 
survey](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fabs\u002Fpii\u002FS1077314220301533?dgcid=rss_sd_all)||\n|2021|TIP|[Blind Motion Deblurring Super-Resolution: When Dynamic Spatio-Temporal Learning Meets Static Image Understanding](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2105.13077.pdf)||\n|2021|NC|[Deep Robust Image Deblurring via Blur Distilling and Information Comparison in Latent Space](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fabs\u002Fpii\u002FS0925231221013771)||\n|2022|IJCV|[Deep Image Deblurring: A Survey](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2201.10700.pdf)||\n|2022|WACV|[Deep Feature Prior Guided Face Deblurring](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FWACV2022\u002Fhtml\u002FJung_Deep_Feature_Prior_Guided_Face_Deblurring_WACV_2022_paper.html)||\n|2022|CVPR|[Restormer: Efficient transformer for high-resolution image restoration](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FZamir_Restormer_Efficient_Transformer_for_High-Resolution_Image_Restoration_CVPR_2022_paper.pdf)|[Code](https:\u002F\u002Fgithub.com\u002Fswz30\u002FRestormer)|\n|2022|CVPR|[Maxim: Multi-axis mlp for image processing](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FTu_MAXIM_Multi-Axis_MLP_for_Image_Processing_CVPR_2022_paper.pdf)|[Code](https:\u002F\u002Fgithub.com\u002Fgoogle-research\u002Fmaxim)|\n|2022|CVPR|[Uformer: A general u-shaped transformer for image restoration](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FWang_Uformer_A_General_U-Shaped_Transformer_for_Image_Restoration_CVPR_2022_paper.pdf)|[Code](https:\u002F\u002Fgithub.com\u002FZhendongWang6\u002FUformer)|\n|2022|CVPR|[Deblurring via Stochastic Refinement](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FWhang_Deblurring_via_Stochastic_Refinement_CVPR_2022_paper.pdf)||\n|2022|CVPR|[XYDeblur: Divide and Conquer for Single Image Deblurring](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FJi_XYDeblur_Divide_and_Conquer_for_Single_Image_Deblurring_CVPR_2022_paper.pdf)||\n|2022|CVPR|[All-In-One Image Restoration for Unknown Corruption](http:\u002F\u002Fpengxi.me\u002Fwp-content\u002Fuploads\u002F2022\u002F03\u002FAll-In-One-Image-Restoration-for-Unknown-Corruption.pdf)|[Code](https:\u002F\u002Fgithub.com\u002FXLearning-SCU\u002F2022-CVPR-AirNet)|\n|2022|CVPR|[Exploring and Evaluating Image Restoration Potential in Dynamic Scenes](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2203.11754.pdf)||\n|2022|CVPR|[Deep Generalized Unfolding Networks for Image Restoration](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.13348)|[Code](https:\u002F\u002Fgithub.com\u002FMC-E\u002FDeep-Generalized-Unfolding-Networks-for-Image-Restoration)|\n|2022|CVPR|[GIQE: Generic Image Quality Enhancement via Nth Order Iterative Degradation](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FShyam_GIQE_Generic_Image_Quality_Enhancement_via_Nth_Order_Iterative_Degradation_CVPR_2022_paper.pdf)||\n|2022|CVPRW|[Blind Non-Uniform Motion Deblurring Using Atrous Spatial Pyramid Deformable Convolution and Deblurring-Reblurring Consistency](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022W\u002FNTIRE\u002Fhtml\u002FHuo_Blind_Non-Uniform_Motion_Deblurring_Using_Atrous_Spatial_Pyramid_Deformable_Convolution_CVPRW_2022_paper.html)||\n|2022|CVPRW|[Motion Aware Double Attention 
Network for Dynamic Scene Deblurring](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022W\u002FNTIRE\u002Fhtml\u002FYang_Motion_Aware_Double_Attention_Network_for_Dynamic_Scene_Deblurring_CVPRW_2022_paper.html)||\n|2022|ECCV|[Stripformer: Strip Transformer for Fast Image Deblurring](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.04627)|[Code](https:\u002F\u002Fgithub.com\u002Fpp00704831\u002FStripformer)|\n|2022|ECCV|[Simple baselines for image restoration](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.04676)|[Code](https:\u002F\u002Fgithub.com\u002Fmegvii-research\u002FNAFNet)|\n|2022|ECCV|[D2HNet: Joint Denoising and Deblurring with Hierarchical Network for Robust Night Image Restoration](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.03294)|[Code](https:\u002F\u002Fgithub.com\u002Fzhaoyuzhi\u002Fd2hnet)|\n|2022|ECCV|[Improving Image Restoration by Revisiting Global Information Aggregation](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.04491)|[Code](https:\u002F\u002Fgithub.com\u002Fmegvii-research\u002FTLC)|\n|2022|ECCV|[Animation from Blur: Multi-modal Blur Decomposition with Motion Guidance](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.10123)|[Code](https:\u002F\u002Fgithub.com\u002Fzzh-tech\u002FAnimation-from-Blur)|\n|2022|ECCV|[Learning Degradation Representations for Image Deblurring](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.05244)|[Code](https:\u002F\u002Fgithub.com\u002Fdasongli1\u002FLearning_degradation)|\n|2022|ECCV|[Realistic Blur Synthesis for Learning Image Deblurring](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2022\u002Fpapers_ECCV\u002Fhtml\u002F6325_ECCV_2022_paper.php)||\n|2022|ECCV|[Event-based Fusion for Motion Deblurring with Cross-modal Attention](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.00167)|[Code](https:\u002F\u002Fgithub.com\u002FAHupuJR\u002FEFNet)|\n|2022|ACCV|[Learning to Predict Decomposed Dynamic Filters for Single Image Motion Deblurring](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FACCV2022\u002Fpapers\u002FHu_Learning_to_Predict_Decomposed_Dynamic_Filters_for_Single_Image_Motion_ACCV_2022_paper.pdf)|[Code](https:\u002F\u002Fgithub.com\u002FZHIQIANGHU2021\u002FDecomposedDynamicFilters)|\n|2022|Arxiv|[Multi-scale-stage network for single image deblurring](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.09652)||\n|2023|AAAI|[Real-world deep local motion deblurring](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.08179)|[Code&Dataset](https:\u002F\u002Fgithub.com\u002FLeiaLi\u002FReLoBlur)|\n|2023|AAAI|[Intriguing Findings of Frequency Selection for Image Deblurring](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.11745)|[Code](https:\u002F\u002Fgithub.com\u002FDeepMed-Lab-ECNU\u002FDeepRFT-AAAI2023)|\n|2023|AAAI|[Dual-domain Attention for Image Deblurring](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F25122\u002F24894)||\n|2023|CVPR|[Self-Supervised Non-Uniform Kernel Estimation With Flow-Based Motion Prior for Blind Image Deblurring](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FFang_Self-Supervised_Non-Uniform_Kernel_Estimation_With_Flow-Based_Motion_Prior_for_Blind_CVPR_2023_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002FFangzhenxuan\u002FUFPDeblur)|\n|2023|CVPR|[Efficient Frequency Domain-Based Transformers for High-Quality Image 
Deblurring](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FKong_Efficient_Frequency_Domain-Based_Transformers_for_High-Quality_Image_Deblurring_CVPR_2023_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002Fkkkls\u002FFFTformer)|\n|2023|CVPR|[Self-Supervised Blind Motion Deblurring With Deep Expectation Maximization](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FLi_Self-Supervised_Blind_Motion_Deblurring_With_Deep_Expectation_Maximization_CVPR_2023_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002FChilie\u002FDeblur_MCEM)|\n|2023|ICCV|[Multiscale Structure Guided Diffusion for Image Deblurring](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FRen_Multiscale_Structure_Guided_Diffusion_for_Image_Deblurring_ICCV_2023_paper.pdf)||\n|2023|ICCV|[Multi-Scale Residual Low-Pass Filter Network for Image Deblurring](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FDong_Multi-Scale_Residual_Low-Pass_Filter_Network_for_Image_Deblurring_ICCV_2023_paper.pdf)||\n|2023|ICCV|[DiffIR: Efficient Diffusion Model for Image Restoration](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FXia_DiffIR_Efficient_Diffusion_Model_for_Image_Restoration_ICCV_2023_paper.pdf)||\n|2023|NeurIPS|[Hierarchical Integration Diffusion Model for Realistic Image Deblurring](https:\u002F\u002Fpapers.neurips.cc\u002Fpaper_files\u002Fpaper\u002F2023\u002Ffile\u002F5cebc89b113920dbff7c79854ba765a3-Paper-Conference.pdf)|[Code](https:\u002F\u002Fgithub.com\u002Fzhengchen1999\u002FHI-Diff)|\n|2023|Arxiv|[LaKDNet: Revisiting Image Deblurring with an Efficient ConvNet](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2302.02234.pdf)|[Code](https:\u002F\u002Fgithub.com\u002Flingyanruan\u002FLaKDNet)|\n|2024|CVPR|[Unsupervised Blind Image Deblurring Based on Self-Enhancement](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FChen_Unsupervised_Blind_Image_Deblurring_Based_on_Self-Enhancement_CVPR_2024_paper.html)||\n|2024|CVPR|[Blur2Blur: Blur Conversion for Unsupervised Image Deblurring on Unknown Domains](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FPham_Blur2Blur_Blur_Conversion_for_Unsupervised_Image_Deblurring_on_Unknown_Domains_CVPR_2024_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002FVinAIResearch\u002FBlur2Blur)|\n|2024|CVPR|[Motion-adaptive Separable Collaborative Filters for Blind Motion Deblurring](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FLiu_Motion-adaptive_Separable_Collaborative_Filters_for_Blind_Motion_Deblurring_CVPR_2024_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002FChengxuLiu\u002FMISCFilter)|\n|2024|CVPR|[Efficient Multi-scale Network with Learnable Discrete Wavelet Transform for Blind Motion Deblurring](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FGao_Efficient_Multi-scale_Network_with_Learnable_Discrete_Wavelet_Transform_for_Blind_CVPR_2024_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002Fthqiu0419\u002FMLWNet)|\n|2024|CVPR|[Fourier Priors-Guided Diffusion for Zero-Shot Joint Low-Light Enhancement and Deblurring](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FLv_Fourier_Priors-Guided_Diffusion_for_Zero-Shot_Joint_Low-Light_Enhancement_and_Deblurring_CVPR_2024_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002Faipixel\u002FFourierDiff)|\n|2024|CVPR|[Real-World 
Efficient Blind Motion Deblurring via Blur Pixel Discretization](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FKim_Real-World_Efficient_Blind_Motion_Deblurring_via_Blur_Pixel_Discretization_CVPR_2024_paper.html)||\n|2024|CVPR|[AdaRevD: Adaptive Patch Exiting Reversible Decoder Pushes the Limit of Image Deblurring](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FMao_AdaRevD_Adaptive_Patch_Exiting_Reversible_Decoder_Pushes_the_Limit_of_CVPR_2024_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002FINVOKERer\u002FAdaRevD)|\n|2024|CVPR|[FMA-Net: Flow-Guided Dynamic Filtering and Iterative Feature Refinement with Multi-Attention for Joint Video Super-Resolution and Deblurring](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FYouk_FMA-Net_Flow-Guided_Dynamic_Filtering_and_Iterative_Feature_Refinement_with_Multi-Attention_CVPR_2024_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002FKAIST-VICLab\u002FFMA-Net)|\n|2024|CVPR|[ID-Blau: Image Deblurring by Implicit Diffusion-based reBLurring AUgmentation](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FWu_ID-Blau_Image_Deblurring_by_Implicit_Diffusion-based_reBLurring_AUgmentation_CVPR_2024_paper.html)|[Code](https:\u002F\u002Fgithub.com\u002Fplusgood-steven\u002FID-Blau)|\n|2024|CVPR|[Residual Denoising Diffusion Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2308.13712)|[Code](https:\u002F\u002Fgithub.com\u002Fnachifur\u002FRDDM)|\n|2024|IJCV|[Blind Image Deblurring with Unknown Kernel Size and Substantial Noise](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2208.09483.pdf)|[Project Page](https:\u002F\u002Fgithub.com\u002Fsun-umn\u002FBlind-Image-Deblurring)|\n|2024|ECCV|[Motion Aware Event Representation-driven Image Deblurring](https:\u002F\u002Fgithub.com\u002FZhijingS\u002FDA_event_deblur)|[Code](https:\u002F\u002Fgithub.com\u002FZhijingS\u002FDA_event_deblur)|\n|2024|Arxiv|[Gyroscope-Assisted Motion Deblurring Network](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.06854)||\n|2024|Arxiv|[Efficient Image Deblurring Networks based on Diffusion Models](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.05907)|[Code](https:\u002F\u002Fgithub.com\u002Fbnm6900030\u002Fswintormer)|\n|2025|CVPR|[Gyro-based Neural Single Image Deblurring](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2404.00916)|[Code](https:\u002F\u002Fgithub.com\u002Fhmyang0727\u002FGyroDeblurNet)|\n\n\n\n## 
非盲去模糊\n|年份|期刊\u002F会议|论文|代码库|\n|:---:|:---:|:---:|:---:|\n|2006|IJCV|[脉冲噪声下的图像去模糊](https:\u002F\u002Flink.springer.com\u002Farticle\u002F10.1007\u002Fs11263-006-6468-1)|||\n|2009|NIPS|[基于超拉普拉斯先验的快速图像反卷积](http:\u002F\u002Fcs.nyu.edu\u002F~dilip\u002Fresearch\u002Fpapers\u002Ffid_nips09.pdf)|[代码及项目页面](https:\u002F\u002Fdilipkay.wordpress.com\u002Ffast-deconvolution\u002F)|\n|2011|PAMI|[在投影运动路径下的Richardson-Lucy去模糊](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F5674049)|||\n|2011|ICCV|[非盲图像反卷积中的异常值处理](http:\u002F\u002Fcg.postech.ac.kr\u002Fpapers\u002Fdeconv_outliers.pdf)|[代码](https:\u002F\u002Fgithub.com\u002FCoupeLibrary\u002Fhandleoutlier)|\n|2011|ICCV|[从自然图像块模型学习到整幅图像恢复](http:\u002F\u002Fpeople.ee.duke.edu\u002F~lcarin\u002FEPLICCVCameraReady.pdf)|[代码](http:\u002F\u002Fpeople.csail.mit.edu\u002Fdanielzoran\u002F)|\n|2012|TIP|[Bm3d帧与变分图像去模糊](https:\u002F\u002Fwww.cs.tut.fi\u002F~foi\u002FGCF-BM3D\u002FBM3DframesDeblur-Danielyan.pdf)|||\n|2012|TIP|[使用不精确模糊核的鲁棒图像去模糊](http:\u002F\u002Fciteseerx.ist.psu.edu\u002Fviewdoc\u002Fdownload?doi=10.1.1.716.1055&rep=rep1&type=pdf) [代码](https:\u002F\u002Fblog.nus.edu.sg\u002Fmatjh\u002Fdownload\u002F)|\n|2013|CVPR|[一种用于非盲图像反卷积的机器学习方法](https:\u002F\u002Fwww.cv-foundation.org\u002Fopenaccess\u002Fcontent_cvpr_2013\u002Fpapers\u002FSchuler_A_Machine_Learning_2013_CVPR_paper.pdf)|[代码及项目页面](http:\u002F\u002Fwebdav.is.mpg.de\u002Fpixel\u002Fneural_deconvolution\u002F)|\n|2013|CVPR|[判别式非盲去模糊](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2013\u002Fpapers\u002FSchmidt_Discriminative_Non-blind_Deblurring_2013_CVPR_paper.pdf)|[代码](https:\u002F\u002Fwww.visinf.tu-darmstadt.de\u002Fvi_research\u002Fcode\u002Findex.en.jsp#discriminative_deblurring)|\n|2014|TIP|[基于正则化和相似性的一般图像恢复框架](http:\u002F\u002Fwww.academia.edu\u002Fdownload\u002F42621942\u002FA_General_Framework_for_Regularized_Simi20160212-19526-i3txol.pdf) 
[代码及项目页面](http:\u002F\u002Falumni.soe.ucsc.edu\u002F~aminkh\u002FKernelRestoration.html)|\n|2014|NIPS|[用于图像反卷积的深度卷积神经网络](http:\u002F\u002Fwww.cse.cuhk.edu.hk\u002Fleojia\u002Fpapers\u002Fdeconv_nips14.pdf)|[代码及项目页面](http:\u002F\u002Flxu.me\u002Fprojects\u002Fdcnn\u002F)|\n|2014|CVPR|[收缩场用于高效图像恢复](http:\u002F\u002Fresearch.uweschmidt.org\u002Fpubs\u002Fcvpr14schmidt.pdf)|[代码](https:\u002F\u002Fgithub.com\u002Fuschmidt83\u002Fshrinkage-fields)|\n|2014|ECCV|[非盲反卷积中的良好图像先验：通用与特定](http:\u002F\u002Fcs.brown.edu\u002F~lbsun\u002FGoodPriors2014\u002Fgoodpriors_eccv2014.pdf)|[项目页面](http:\u002F\u002Fcs.brown.edu\u002F~lbsun\u002FGoodPriors2014\u002Fgoodpriors2014eccv.html)|\n|2016|CVIP|[具有稀疏先验的快速非盲图像去模糊](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-981-10-2104-6_56)|||\n|2017|TIP|[使用不精确模糊核的部分反卷积](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F8071032)|||\n|2017|ICCP|[通过带有长短跳跃连接的正则化残差网络进行快速非盲反卷积](http:\u002F\u002Fcg.postech.ac.kr\u002Fpapers\u002FskipConnect.pdf)|[代码](https:\u002F\u002Fgithub.com\u002FHyeongseokSon1\u002FCNN_deconvolution)，[项目页面](http:\u002F\u002Fcg.postech.ac.kr\u002Fresearch\u002Fresnet_deconvolution\u002F)|\n|2017|CVPR|[无噪图像去模糊](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fhtml\u002FJin_Noise-Blind_Image_Deblurring_CVPR_2017_paper.html)|||\n|2017|CVPR|[为图像恢复学习深度CNN去噪先验](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fhtml\u002FZhang_Learning_Deep_CNN_CVPR_2017_paper.html)|[代码](https:\u002F\u002Fgithub.com\u002Fcszn\u002Fircnn)|\n|2017|CVPR|[学习全卷积网络用于迭代非盲反卷积](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1611.06495)|[代码](https:\u002F\u002Fgithub.com\u002Fzhjwustc\u002Fcvpr17_iter_deblur_testing_matconvnet)|\n|2017|ICCV|[学习邻近算子：利用去噪网络正则化逆成像问题](https:\u002F\u002Farxiv.org\u002Fabs\u002F1704.03488)|||\n|2017|ICCV|[学习如何突破高效FFT基图像反卷积的极限](http:\u002F\u002Fresearch.uweschmidt.org\u002Fpubs\u002Ficcv17kruse.pdf)|[代码](https:\u002F\u002Fgithub.com\u002Fuschmidt83\u002Ffourier-deconvolution-network)|\n|2017|NIPS|[用于图像恢复的深度均值漂移先验](https:\u002F\u002Fpapers.nips.cc\u002Fpaper\u002F6678-deep-mean-shift-priors-for-image-restoration.pdf)|[代码](https:\u002F\u002Fgithub.com\u002Fsiavashbigdeli\u002FDMSP)|\n|2018|ICIP|[非盲反卷积中真实退化的建模](https:\u002F\u002Farxiv.org\u002Fabs\u002F1806.01097)|||\n|2018|CVPR|[非盲去模糊：用CNN处理模糊核不确定性](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fhtml\u002FVasu_Non-Blind_Deblurring_Handling_CVPR_2018_paper.html)|[项目页面及基准数据集上的结果](https:\u002F\u002Fgithub.com\u002Fsubeeshvasu\u002F2018_subeesh_nbd_cvpr)|\n|2018|CVPR|[深度图像先验](https:\u002F\u002Farxiv.org\u002Fabs\u002F1711.10925)|[代码](https:\u002F\u002Fgithub.com\u002FDmitryUlyanov\u002Fdeep-image-prior)|\n|2018|ECCV|[为非盲去模糊学习数据项](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fhtml\u002FJiangxin_Dong_Learning_Data_Terms_ECCV_2018_paper.html)|||\n|2018|NIPS|[通过广义低秩逼近的深度非盲反卷积](https:\u002F\u002Fpapers.nips.cc\u002Fpaper\u002F7313-deep-non-blind-deconvolution-via-generalized-low-rank-approximation.pdf)|[代码](https:\u002F\u002Fgithub.com\u002Frwenqi\u002FNBD-GLRA)|\n|2019|ICLR|[深度解码器：来自未训练非卷积网络的简洁图像表示](https:\u002F\u002Farxiv.org\u002Fabs\u002F1810.03982)|[代码](https:\u002F\u002Fgithub.com\u002Freinhardh\u002Fsupplement_deep_decoder)|\n|2019|CVPR|[适用于任意模糊核的深度即插即用超分辨率](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fhtml\u002FZhang_Deep_Plug-And-Play_Super-Resolution_for_Arbitrary_Blur_Kernels_CVPR_2019_paper.html)|[代码](https:\u002F\u002Fgithub.com\u002Fcszn\u002FDPSR)|\n|2019|ICCVW|[结合深度图像和模糊核先验的图像反卷积](https:\u
002F\u002Farxiv.org\u002Fabs\u002F1910.08386)|||\n|2019|TPAMI|[由去噪先验驱动的深度神经网络用于图像恢复](https:\u002F\u002Farxiv.org\u002Fabs\u002F1801.06756)|||\n|2020|CVPR|[基于变分EM的深度学习用于噪声盲图像去模糊](https:\u002F\u002Fgithub.com\u002Fysnan\u002FVEM-NBD\u002Fblob\u002Fmaster\u002Fpaper\u002Fvem_deconv.pdf)|[项目页面及基准数据集上的结果](https:\u002F\u002Fgithub.com\u002Fysnan\u002FVEM-NBD)|\n|2020|CVPR|[深度学习用于处理图像反卷积中的模糊核\u002F模型不确定性](https:\u002F\u002Fgithub.com\u002Fysnan\u002FNBD_KerUnc\u002Fblob\u002Fmaster\u002Fpaper\u002Fkn.pdf)|[项目页面及基准数据集上的结果](https:\u002F\u002Fgithub.com\u002Fysnan\u002FNBD_KerUnc)|\n|2020|ECCV|[端到端可解释的非盲图像去模糊学习](https:\u002F\u002Farxiv.org\u002Fabs\u002F2007.01769)|||\n|2020|EUSIPCO|[Bp-dip：基于反投影的深度图像先验](https:\u002F\u002Farxiv.org\u002Fabs\u002F2003.05417)|[代码](https:\u002F\u002Fgithub.com\u002Fjennyzu\u002FBP-DIP-deblurring)|\n|2020|NIPS|[深度维纳反卷积：维纳滤波与深度学习结合用于图像去模糊](https:\u002F\u002Fpapers.nips.cc\u002Fpaper\u002F2020\u002Ffile\u002F0b8aff0438617c055eb55f0ba5d226fa-Paper.pdf)|[代码](https:\u002F\u002Fgitlab.mpi-klsb.mpg.de\u002Fjdong\u002Fdwdn)|\n|2020|TNLS|[为图像反卷积学习深度梯度下降优化](https:\u002F\u002Farxiv.org\u002Fabs\u002F1804.03368)|[代码](https:\u002F\u002Fgithub.com\u002Fdonggong1\u002Flearn-optimizer-rgdn)|\n|2020|TCI|[用于成像中线性逆问题的诺伊曼网络](https:\u002F\u002Farxiv.org\u002Fabs\u002F1901.03707)|[代码](https:\u002F\u002Fgithub.com\u002Fdgilton\u002Fneumann_networks_code)|\n|2020|Arxiv|[最大熵均值法用于图像去模糊](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2002.10434.pdf)|||\n|2021|CVPR|[为非盲图像去模糊学习空间变异性MAP模型](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fhtml\u002FDong_Learning_Spatially-Variant_MAP_Models_for_Non-Blind_Image_Deblurring_CVPR_2021_paper.html)|||\n|2021|CVPR|[为夜间模糊图像学习非盲去模糊网络](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR2021\u002Fhtml\u002FChen_Learning_a_Non-Blind_Deblurring_Network_for_Night_Blurry_Images_CVPR_2021_paper.html)|[代码及数据](https:\u002F\u002Fliangchen527.github.io\u002F)|\n|2021|TNNLS|[通过复数域深度学习进行非盲图像去模糊](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F9404870)|||\n|2022|WACV|[荧光成像的非盲去模糊：带模糊核参数化的可变形潜在空间方法](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FWACV2022\u002Fpapers\u002FGuan_Non-Blind_Deblurring_for_Fluorescence_A_Deformable_Latent_Space_Approach_With_WACV_2022_paper.pdf)|\n|2022|CVPR|[用于盲图像超分辨率的深度约束最小二乘法](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.07508)|[项目页面](https:\u002F\u002Fgithub.com\u002FAlgolzw\u002FDCLS)|\n|2022|CVPRW|[一种使用深度去噪先验的鲁棒非盲去模糊方法](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR2022W\u002FNTIRE\u002Fhtml\u002FFang_A_Robust_Non-Blind_Deblurring_Method_Using_Deep_Denoiser_Prior_CVPRW_2022_paper.html)|||\n|2022|SPIC|[黑盒图像去模糊与去滤波](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0923596522001242)|[Matlab代码](https:\u002F\u002Fgithub.com\u002Ffayolle\u002FbbDeblur)，[Python代码](https:\u002F\u002Fgithub.com\u002Ffayolle\u002FbbDeblur_py)|\n|2022|TPAMI|[DWDN：用于非盲图像去模糊的深度维纳反卷积网络](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9664009)|||\n|2022|TCI|[使用算法展开进行光子受限的非盲去模糊](https:\u002F\u002Farxiv.org\u002Fabs\u002F2110.15314)|[代码](https:\u002F\u002Fgithub.com\u002Fsanghviyashiitb\u002Fpoisson-deblurring)|\n|2023|WACV|[维纳引导的DIP用于无监督盲图像反卷积](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.10271)|[代码](https:\u002F\u002Fgithub.com\u002Fgbredell\u002FW_DIP)|\n|2023|CVPR|[具有深度残差先验的不确定性感知无监督图像去模糊](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR2023\u002Fhtml\u002FTang_Uncertainty-Aware_Unsupervised_Image_Deblurring_With_Deep_Residual_Prior_CVPR_2023_paper.h
tml)|[代码](https:\u002F\u002Fgithub.com\u002Fxl-tang3\u002FUAUDeblur)|\n|2023|ICCV|[零样本图像恢复中经典反卷积和特征提取的应用](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.02097)|||\n|2023|SIVP|[使用干净和噪声滤波器进行图像反滤波](https:\u002F\u002Flink.springer.com\u002Farticle\u002F10.1007\u002Fs11760-022-02236-w)|[代码](https:\u002F\u002Fgithub.com\u002Ffayolle\u002Fclean_noisy_defilter)|\n|2023|TIP|[INFWIDE：用于低光照条件下非盲图像去模糊的图像和特征空间维纳反卷积网络](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.08201)|[代码](https:\u002F\u002Fgithub.com\u002Fzhihongz\u002FINFWIDE)|\n|2023|TPAMI|[使用变分深度图像先验进行盲图像反卷积](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.00179)|[代码](https:\u002F\u002Fgithub.com\u002FDong-Huo\u002FVDIP-Deconvolution)|\n|2024|WACV|[具有饱和像素处理方案的深度即插即用夜间非盲去模糊](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FWACV2024\u002Fpapers\u002FShu_Deep_Plug-and-Play_Nighttime_Non-Blind_Deblurring_With_Saturated_Pixel_Handling_Schemes_WACV_2024_paper.pdf)|||\n|2024|TCI|[非盲泊松反卷积的奥秘](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.03105)|||\n|2024|ACM MM|[LoFormer：用于图像去模糊的局部频率变换器](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.16993)|[代码](https:\u002F\u002Fgithub.com\u002FINVOKERer\u002FLoFormer)|\n|2024|IJCV|[用于低光照图像去模糊的深度Richardson-Lucy反卷积](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.05543)|||\n|2025|JMI|[使用系统模糊和噪声模型进行深度学习CT图像恢复](https:\u002F\u002Fwww.spiedigitallibrary.org\u002Fjournals\u002Fjournal-of-medical-imaging\u002Fvolume-12\u002Fissue-1\u002F014003\u002FDeep-learning-CT-image-restoration-using-system-blur-and-noise\u002F10.1117\u002F1.JMI.12.1.014003.short)||\n\n## （多图像\u002F视频）运动模糊去除\n|年份|期刊\u002F会议|论文|代码库|\n|:---:|:---:|:---:|:---:|\n|2007|TOG|[利用模糊\u002F噪声图像对进行图像去模糊](https:\u002F\u002Fwww.microsoft.com\u002Fen-us\u002Fresearch\u002Fwp-content\u002Fuploads\u002F2016\u002F11\u002FDeblurring_SIGGRAPH07.pdf)|||\n|2008|CVPR|[鲁棒的双通道运动模糊去除](http:\u002F\u002Fciteseerx.ist.psu.edu\u002Fviewdoc\u002Fdownload?doi=10.1.1.443.6370&rep=rep1&type=pdf)|||\n|2009|JCP|[基于多张图像的盲运动模糊去除](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0021999109001867)|||\n|2010|CVPR|[鲁棒的闪光灯去模糊](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F5539941)|||\n|2010|CVPR|[用于空变多帧盲反卷积的高效滤波流](http:\u002F\u002Fsuvrit.de\u002Fpapers\u002Fcvpr10.pdf)|||\n|2012|ECCV|[利用多张图像解卷积点扩散函数以实现更好的运动模糊去除](http:\u002F\u002Fciteseerx.ist.psu.edu\u002Fviewdoc\u002Fdownload?doi=10.1.1.259.6526&rep=rep1&type=pdf)|||\n|2012|TIP|[通过快速交替最小化实现鲁棒的多通道盲反卷积](https:\u002F\u002Fusers.soe.ucsc.edu\u002F~milanfar\u002Fpublications\u002Fjournal\u002FMCBD.pdf)|||\n|2012|CGF|[基于配准的非均匀运动模糊去除](http:\u002F\u002Fcg.postech.ac.kr\u002Fpapers\u002Fregistration.pdf)|||\n|2012|TOG|[使用基于块的合成技术对手持相机拍摄的视频进行去模糊](https:\u002F\u002Fwww.juew.org\u002Fpublication\u002Fvideo_deblur.pdf)|[项目页面](http:\u002F\u002Fcg.postech.ac.kr\u002Fresearch\u002Fvideo_deblur\u002F)|\n|2013|CVPR|[利用耦合自适应稀疏先验进行多图像盲运动模糊去除](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2013\u002Fhtml\u002FZhang_Multi-image_Blind_Deblurring_2013_CVPR_paper.html)|[代码及项目页面](https:\u002F\u002Fsites.google.com\u002Fsite\u002Fhczhang1\u002Fprojects\u002Fsparse-blind-deblurring)|\n|2014|CVPR|[多帧成像：联合对齐、去模糊与分辨率增强](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2014\u002Fhtml\u002FZhang_Multi-Shot_Imaging_Joint_2014_CVPR_paper.html)|[项目页面](https:\u002F\u002Fsites.google.com\u002Fsite\u002Fhczhang1\u002Fprojects\u002Fmulti-shot-imaging)|\n|2014|CVPR|[基于陀螺仪的多图像反卷积以去除手抖模糊](http:\u002F\u002Fgraphics.stanford.edu\u002Fpapers\u002Fgyrodeblur\u002Fgyrodeblur_park_cvpr14.pdf)|[项目页面](http:\u002F\u002Fgraphics
.stanford.edu\u002Fpapers\u002Fgyrodeblur\u002F)|\n|2014|ECCV|[用分层模型建模模糊视频](http:\u002F\u002Ffiles.is.tue.mpg.de\u002Fblack\u002Fpapers\u002FWulffECCV2014.pdf)|[项目页面、结果及数据集](http:\u002F\u002Fps.is.tuebingen.mpg.de\u002Fresearch_projects\u002Fmotion-blur-in-layers)|\n|2015|CVPR|[突发去模糊：通过傅里叶突发累积去除相机抖动](http:\u002F\u002Fdev.ipol.im\u002F~mdelbra\u002Ffba\u002FFBA_cvpr2015_preprint.pdf)|||\n|2015|TCI|[通过高效的傅里叶聚合实现手持视频去模糊](http:\u002F\u002Farxiv.org\u002Fpdf\u002F1509.05251)|[项目页面及结果](http:\u002F\u002Fiie.fing.edu.uy\u002F~mdelbra\u002FvideoFA\u002F)|\n|2015|TIP|[通过加权傅里叶突发累积去除相机抖动](https:\u002F\u002Farxiv.org\u002Fabs\u002F1505.02731)|||\n|2015|CVPR|[面向动态场景的通用视频去模糊](http:\u002F\u002Fcv.snu.ac.kr\u002Fpublication\u002Fconf\u002F2015\u002FVD_CVPR2015.pdf)|[代码及项目页面](https:\u002F\u002Fcv.snu.ac.kr\u002Fresearch\u002F~VD\u002F)|\n|2015|CVPR|[利用帧间相机运动进行帧内去模糊](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2015\u002Fhtml\u002FZhang_Intra-Frame_Deblurring_by_2015_CVPR_paper.html)|[项目页面](https:\u002F\u002Fsites.google.com\u002Fsite\u002Fhczhang1\u002Fprojects\u002Fvideo_deblur)|\n|2016|ECCV|[立体视频去模糊](https:\u002F\u002Farxiv.org\u002Fabs\u002F1607.08421)|||\n|2017|CVPR|[同时进行立体视频去模糊和场景光流估计](https:\u002F\u002Farxiv.org\u002Fabs\u002F1704.03273)|||\n|2017|CVPR|[用于手持相机的深度视频去模糊](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fhtml\u002FSu_Deep_Video_Deblurring_CVPR_2017_paper.html)|[代码](https:\u002F\u002Fgithub.com\u002Fshuochsu\u002FDeepVideoDeblurring)|[项目页面](http:\u002F\u002Fwww.cs.ubc.ca\u002Flabs\u002Fimager\u002Ftr\u002F2017\u002FDeepVideoDeblurring\u002F)|\n|2017|CVPR|[光场盲运动去模糊](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fhtml\u002FSrinivasan_Light_Field_Blind_CVPR_2017_paper.html)|[代码](https:\u002F\u002Fgithub.com\u002Fpratulsrinivasan\u002FLight_Field_Blind_Motion_Deblurring)|\n|2017|ICCV|[通过语义分割和像素级非线性核进行视频去模糊](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fpapers\u002FRen_Video_Deblurring_via_ICCV_2017_paper.pdf)|[项目页面](https:\u002F\u002Fsites.google.com\u002Fsite\u002Frenwenqi888\u002Fresearch\u002Fdeblurring\u002Fpwnlk)|\n|2017|ICCV|[通过动态时间混合网络实现在线视频去模糊](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fpapers\u002FKim_Online_Video_Deblurring_ICCV_2017_paper.pdf)|[代码](https:\u002F\u002Fsites.google.com\u002Fsite\u002Flliger9\u002Fpublications)|\n|2018|ECCV|[利用排列不变卷积神经网络进行突发图像去模糊](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fhtml\u002FMiika_Aittala_Burst_Image_Deblurring_ECCV_2018_paper.html)|[项目页面](http:\u002F\u002Fpeople.csail.mit.edu\u002Fmiika\u002Feccv18_deblur\u002F)|\n|2018|ECCV|[光场的联合盲运动去模糊与深度估计](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fhtml\u002FDongwoo_Lee_Joint_Blind_Motion_ECCV_2018_paper.html)|||\n|2018|TPAMI|[利用局部自适应线性模糊模型进行动态视频去模糊](https:\u002F\u002Fcv.snu.ac.kr\u002Fpublication\u002Fjour\u002F2018\u002Fthkim_pami2018_dynamic.pdf)|||\n|2018|ICCP|[Reblur2deblur：通过自监督学习实现视频去模糊](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1801.05117.pdf)|||\n|2018|Arxiv|[LSD—利用卷积神经网络对短曝光和长曝光图像进行联合去噪与去模糊](https:\u002F\u002Farxiv.org\u002Fabs\u002F1811.09485)|||\n|2019|TIP|[用于视频去模糊的对抗性时空学习](https:\u002F\u002Farxiv.org\u002Fabs\u002F1804.00533)|[代码](https:\u002F\u002Fgithub.com\u002Fthemathgeek13\u002FSTdeblur)|[项目页面](https:\u002F\u002Fgithub.com\u002FJLtwoP\u002FAdversarial-Spatio-Temporal-Learning-for-Video-Deblurring)|\n|2019|CVPR|[具有帧内迭代的循环神经网络用于视频去模糊](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fhtml\u002FNah_Recurrent_Neural_Networks_With_Intra-F
rame_Iterations_for_Video_Deblurring_CVPR_2019_paper.html)|||\n|2019|CVPR|[DAVANet：基于视图聚合的立体去模糊](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fhtml\u002FZhou_DAVANet_Stereo_Deblurring_With_View_Aggregation_CVPR_2019_paper.html)|[代码](https:\u002F\u002Fgithub.com\u002Fsczhou\u002FDAVANet)|\n|2019|CVPR_W|[基于逐像素自适应核的深层运动去模糊网络，包含残差下采样和上采样模块](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPRW_2019\u002Fhtml\u002FNTIRE\u002FSim_A_Deep_Motion_Deblurring_Network_Based_on_Per-Pixel_Adaptive_Kernels_CVPRW_2019_paper.html)|||\n|2019|ICCV|[用于视频去模糊的时空滤波自适应网络](https:\u002F\u002Farxiv.org\u002Fabs\u002F1904.12257)|[项目页面](https:\u002F\u002Fshangchenzhou.com\u002Fprojects\u002Fstfan\u002F)，[代码](https:\u002F\u002Fgithub.com\u002Fsczhou\u002FSTFAN)|\n|2019|ICCV|[利用3D人脸先验进行人脸视频去模糊](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fhtml\u002FRen_Face_Video_Deblurring_Using_3D_Facial_Priors_ICCV_2019_paper.html)|[代码](https:\u002F\u002Fgithub.com\u002Frwenqi\u002F3Dfacedeblurring)|\n|2019|SPL|[用于快速且全分辨率光场去模糊的深度循环网络](https:\u002F\u002Farxiv.org\u002Fabs\u002F1904.00352)|||\n|2019|ICCV_W|[深度视频去模糊：细节决定成败](https:\u002F\u002Farxiv.org\u002Fabs\u002F1909.12196)|[代码](https:\u002F\u002Fgithub.com\u002Fvisinf\u002Fdeblur-devil)|\n|2020|CVPR|[利用时间锐度先验进行级联深度视频去模糊](https:\u002F\u002Farxiv.org\u002Fabs\u002F2004.02501)|[代码](https:\u002F\u002Fgithub.com\u002Fcsbhr\u002FCDVD-TSP)|[项目页面](https:\u002F\u002Fbaihaoran.xyz\u002Fprojects\u002Fcdvd-tsp\u002Findex.html)|\n|2020|CVPR|[模糊视频帧插值](https:\u002F\u002Farxiv.org\u002Fabs\u002F2002.12259)|[代码](https:\u002F\u002Fgithub.com\u002Flaomao0\u002FBIN)|\n|2020|ECCV|[用于视频去模糊的高效时空循环神经网络](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2020\u002Fpapers_ECCV\u002Fhtml\u002F5116_ECCV_2020_paper.php)|[代码](https:\u002F\u002Fgithub.com\u002Fzzh-tech\u002FESTRNN)|\n|2020|ECCV|[学习事件驱动的视频去模糊与插值](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2020\u002Fpapers_ECCV\u002Fhtml\u002F671_ECCV_2020_paper.php)|||\n|2020|TIP|[通过模糊-噪声图像对去除模糊](https:\u002F\u002Farxiv.org\u002Fabs\u002F1903.10667)|||\n|2020|TCSVT|[用于视频去模糊的递归神经网络](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F9247314)|||\n|2021|AAAI|[运动模糊视频的插值与外推](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2103.02984.pdf)|||\n|2021|CVPR|[门控时空注意力引导的视频去模糊](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fpapers\u002FSuin_Gated_Spatio-Temporal_Attention-Guided_Video_Deblurring_CVPR_2021_paper.pdf)|||\n|2021|CVPR|[ARVo：学习全范围体积对应关系以实现视频去模糊](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.04260)|||\n|2021|TOG|[具有模糊不变运动估计和像素体积的循环视频去模糊](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3453720)|[代码](https:\u002F\u002Fgithub.com\u002Fcodeslake\u002FPVDNet)|\n|2021|CVIU|[通过时空金字塔网络和对抗梯度先验进行视频去模糊](https:\u002F\u002Fwhluo.github.io\u002Fpapers\u002Fcviu103135_final.pdf)|||\n|2021|ICCV|[用于超高清视频去模糊的多尺度可分离网络](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fhtml\u002FDeng_Multi-Scale_Separable_Network_for_Ultra-High-Definition_Video_Deblurring_ICCV_2021_paper.html)|||\n|2022|AAAI|[具有多尺度双向传播的深度循环神经网络用于视频去模糊](https:\u002F\u002Faaai-2022.virtualchair.net\u002Fposter_aaai3124)|||\n|2022|CVPR|[Deblur-NeRF：从模糊图像重建神经辐射场](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FMa_Deblur-NeRF_Neural_Radiance_Fields_From_Blurry_Images_CVPR_2022_paper.pdf)|[代码](https:\u002F\u002Fgithub.com\u002Flimacv\u002FDeblur-NeRF)|\n|2022|ECCV|[通过重新审视全局信息聚合来提升图像修复效果](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.04491)|[代码](https:\u
002F\u002Fgithub.com\u002Fmegvii-research\u002FTLC)|\n|2022|ECCV|[从模糊中生成动画：结合运动指导的多模态模糊分解](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.10123)|[代码](https:\u002F\u002Fgithub.com\u002Fzzh-tech\u002FAnimation-from-Blur)|\n|2022|ECCV|[由运动幅度引导的高效视频去模糊](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.13374)|[代码](https:\u002F\u002Fgithub.com\u002Fsollynoay\u002FMMP-RNN)|\n|2022|ECCV|[用于视频去模糊的时空可变形注意力网络](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.10852)|[代码](https:\u002F\u002Fgithub.com\u002Fhuicongzhang\u002FSTDAN)|\n|2022|ECCV|[ERDN：等效感受野可变形网络用于视频去模糊](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2022\u002Fpapers_ECCV\u002Fhtml\u002F4085_ECCV_2022_paper.php)|[代码](https:\u002F\u002Fgithub.com\u002FTencentCloud\u002FERDN)|\n|2022|ECCV|[DeMFI：基于流引导注意相关性和递归增强的深度联合去模糊与多帧插值](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.09985)|[代码](https:\u002F\u002Fgithub.com\u002FJihyongOh\u002FDeMFI)|\n|2022|ECCVW|[通过探索模糊形成过程迈向真实世界的视频去模糊](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.13184)|||\n|2022|CGF|[通过轻量级运动补偿实现实时视频去模糊](https:\u002F\u002Fdiglib.eg.org\u002Fbitstream\u002Fhandle\u002F10.1111\u002Fcgf14667\u002Fv41i7pp177-188.pdf?sequence=1&isAllowed=y)|[代码](https:\u002F\u002Fgithub.com\u002Fcodeslake\u002FRealTime_VDBLR)|\n|2022|IJCV|[真实世界视频去模糊：基准数据集与高效循环神经网络](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.16028)|[代码](https:\u002F\u002Fgithub.com\u002Fzzh-tech\u002FESTRNN)|\n|2024|WACV|[Sharp-NeRF：基于网格的快速去模糊神经辐射场，利用锐度先验](https:\u002F\u002Fgithub.com\u002Fradimspetlik\u002FSI-DDPM-FMO)|[代码](https:\u002F\u002Fgithub.com\u002FbenhenryL\u002FSharpNeRF)|\n|2024|WACV|[Deblur-NSFF：用于模糊动态场景的神经场景流场](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FWACV2024\u002Fpapers\u002FLuthra_Deblur-NSFF_Neural_Scene_Flow_Fields_for_Blurry_Dynamic_Scenes_WACV_2024_paper.pdf)|||\n|2023|CVPR|[来自模糊的真实运动的模糊插值变换器](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.11423)|[代码](https:\u002F\u002Fgithub.com\u002Fzzh-tech\u002FBiT)|\n|2023|CVPR|[DP-NeRF：带有物理场景先验的去模糊神经辐射场](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fpapers\u002FLee_DP-NeRF_Deblurred_Neural_Radiance_Field_With_Physical_Scene_Priors_CVPR_2023_paper.pdf)|[代码](https:\u002F\u002Fgithub.com\u002Fdogyoonlee\u002FDP-NeRF)|\n|2023|CVPR|[BAD-NeRF：经束调整的去模糊神经辐射场](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fpapers\u002FWang_BAD-NeRF_Bundle_Adjusted_Deblur_Neural_Radiance_Fields_CVPR_2023_paper.pdf)|[代码及数据集](https:\u002F\u002Fgithub.com\u002FWU-CVGL\u002FBAD-NeRF)|\n|2023|CVPR|[在未知曝光时间下进行联合视频多帧插值与去模糊](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FShang_Joint_Video_Multi-Frame_Interpolation_and_Deblurring_Under_Unknown_Exposure_Time_CVPR_2023_paper.html)|[代码](https:\u002F\u002Fgithub.com\u002Fshangwei5\u002FVIDUE)|\n|2023|CVPR|[用于高效视频去模糊的深度判别式时空网络](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fpapers\u002FPan_Deep_Discriminative_Spatial_and_Temporal_Network_for_Efficient_Video_Deblurring_CVPR_2023_paper.pdf)|[代码](https:\u002F\u002Fgithub.com\u002Fxuboming8\u002FDSTNet)|\n|2023|ICCV|[在深度视频去模糊中探索时间频率谱](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FZhu_Exploring_Temporal_Frequency_Spectrum_in_Deep_Video_Deblurring_ICCV_2023_paper.pdf)|||\n|2023|ICCV|[E2NeRF：从模糊图像重建事件增强型神经辐射场](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FQi_E2NeRF_Event_Enhanced_Neural_Radiance_Fields_from_Blurry_Images_ICCV_2023_paper.pdf)|[代码](https:\u002F\u002Fgithub.com\u002FiCVTEAM\u002FE2NeRF)|\n|2024|CVPR|[面向视频去
模糊的模糊感知时空稀疏Transformer](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FZhang_Blur-aware_Spatio-temporal_Sparse_Transformer_for_Video_Deblurring_CVPR_2024_paper.html)|[代码](https:\u002F\u002Fgithub.com\u002Fhuicongzhang\u002FBSSTNet)|\n|2024|CVPR|[利用事件和帧缓解神经辐射场中的运动模糊](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FCannici_Mitigating_Motion_Blur_in_Neural_Radiance_Fields_with_Events_and_CVPR_2024_paper.html)|[代码](https:\u002F\u002Fgithub.com\u002Fuzh-rpg\u002FEvDeblurNeRF)|\n|2024|CVPR|[DyBluRF：从模糊单目视频重建动态神经辐射场](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FSun_DyBluRF_Dynamic_Neural_Radiance_Fields_from_Blurry_Monocular_Video_CVPR_2024_paper.html)|||\n|2024|ECCV|[用小波感知的动态Transformer和扩散模型重新思考视频去模糊](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.13459)|[代码](https:\u002F\u002Fgithub.com\u002FChen-Rao\u002FVD-Diff)|\n|2024|NeurIPS|[学习截断因果历史模型用于视频修复](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.03936)|[代码](https:\u002F\u002Fgithub.com\u002FAscend-Research\u002FTurtle)\n\n## 运动去模糊挑战\n\n|年份|发表|论文|代码库|\n|:---:|:---:|:---:|:---:|\n|2019|CVPR_W|[NTIRE 2019 视频去模糊挑战：方法与结果](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPRW_2019\u002Fhtml\u002FNTIRE\u002FNah_NTIRE_2019_Challenge_on_Video_Deblurring_Methods_and_Results_CVPRW_2019_paper.html)||\n|2019|CVPR_W|[NTIRE 2019 视频去模糊与超分辨率挑战：数据集与研究](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPRW_2019\u002Fhtml\u002FNTIRE\u002FNah_NTIRE_2019_Challenge_on_Video_Deblurring_and_Super-Resolution_Dataset_and_CVPRW_2019_paper.html)||\n|2019|CVPR_W|[EDVR：基于增强形变卷积网络的视频修复](https:\u002F\u002Farxiv.org\u002Fabs\u002F1905.02716)|[PyTorch代码](https:\u002F\u002Fgithub.com\u002Fxinntao\u002FEDVR)|[项目页面](https:\u002F\u002Fxinntao.github.io\u002Fprojects\u002FEDVR)|\n|2020|CVPR_W|[NTIRE 2020 图像和视频去模糊挑战](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPRW_2020\u002Fpapers\u002Fw31\u002FNah_NTIRE_2020_Challenge_on_Image_and_Video_Deblurring_CVPRW_2020_paper.pdf)||\n|2020|CVPR_W|[跨移动设备部署图像去模糊：质量与延迟的视角](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPRW_2020\u002Fpapers\u002Fw31\u002FChiang_Deploying_Image_Deblurring_Across_Mobile_Devices_A_Perspective_of_Quality_CVPRW_2020_paper.pdf)||\n|2020|CVPR_W|[用于单张图像和视频去模糊的高分辨率双阶段多级特征聚合](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPRW_2020\u002Fpapers\u002Fw31\u002FBrehm_High-Resolution_Dual-Stage_Multi-Level_Feature_Aggregation_for_Single_Image_and_Video_CVPRW_2020_paper.pdf)||\n\n## 
深度感知运动去模糊\n|年份|发表|论文|代码库|\n|:---:|:---:|:---:|:---:|\n|2012|ICCP|[深度感知运动去模糊](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F6215220)||\n|2014|CVPR|[从单张模糊图像中联合估计深度与去除相机抖动](https:\u002F\u002Fwww.cv-foundation.org\u002Fopenaccess\u002Fcontent_cvpr_2014\u002Fhtml\u002FHu_Joint_Depth_Estimation_2014_CVPR_paper.html)|[代码](https:\u002F\u002Fgithub.com\u002Fchaehonglee\u002FJoint_Depth_Esimation_and_Deblur)|\n|2019|WACV|[利用深度图进行单张图像去模糊和相机运动估计](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F8658686)||\n|2019|CVPR|[DAVANet：基于视图聚合的立体去模糊](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fhtml\u002FZhou_DAVANet_Stereo_Deblurring_With_View_Aggregation_CVPR_2019_paper.html)|[代码](https:\u002F\u002Fgithub.com\u002Fsczhou\u002FDAVANet)|\n|2020|TIP|[基于深度引导模型的动态场景去模糊](https:\u002F\u002Ffaculty.ucmerced.edu\u002Fmhyang\u002Fpapers\u002Ftip2020_dynamic_scene_deblurring.pdf)|[项目页面](https:\u002F\u002Fsites.google.com\u002Fview\u002Flerenhanli\u002Fhomepage\u002Fdepth_deblurring)|\n|2020|TCSVT|[使用环状信念传播的深度感知运动去模糊](https:\u002F\u002Fwww4.comp.polyu.edu.hk\u002F~pinli\u002FCoRR\u002FTCSVT\u002FTCSVT2020_2.pdf)||\n|2022|ICME|[Dast-Net：用于视频去模糊的深度感知时空网络](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9858929)||\n|2023|SCIA|[用于视差相机运动模糊的深度感知图像合成模型](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.09334)|[代码及项目页面](https:\u002F\u002Fgermanftv.github.io\u002FParallaxICB.github.io\u002F)|\n|2023|WACV|[快速且准确：利用稀疏深度进行视频增强](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FWACV2023\u002Fhtml\u002FFeng_Fast_and_Accurate_Video_Enhancement_Using_Sparse_Depth_WACV_2023_paper.html)||\n|2024|ECCV_W|[DAVIDE：深度感知视频去模糊](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.01274)|[代码及项目页面](https:\u002F\u002Fgermanftv.github.io\u002FDAVIDE.github.io\u002F)|\n|2024||[基于深度激光雷达引导的图像去模糊](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.07262v1)|[代码](https:\u002F\u002Fgithub.com\u002Fdiegovalsesia\u002Flidardeblurring)|\n\n## 其他密切相关的工作\n|年份|发表|论文|代码库|\n|:---:|:---:|:---:|:---:|\n|2000||[用于图像合成与恢复的多帧复原方法，约瑟夫·J·格林，亚利桑那大学博士论文](https:\u002F\u002Frepository.arizona.edu\u002Fhandle\u002F10150\u002F284110)|[代码](https:\u002F\u002Fgithub.com\u002Fnasa-jpl\u002Fpmapper)|\n|2013|TOG|[一种用于评估运动模糊去除质量的无参考指标](https:\u002F\u002Fgfx.cs.princeton.edu\u002Fpubs\u002FLiu_2013_ANM\u002Fsa13.pdf)|[代码及项目页面](https:\u002F\u002Fgfx.cs.princeton.edu\u002Fpubs\u002FLiu_2013_ANM\u002Findex.php)|\n|2018|CVPR|[从单张运动模糊图像中学习提取视频序列](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fhtml\u002FJin_Learning_to_Extract_CVPR_2018_paper.html)||[代码](https:\u002F\u002Fgithub.com\u002FMeiguangJin\u002FLearning-to-Extract-a-Video-Sequence-from-a-Single-Motion-Blurred-Image)|\n|2019|CVPR|[利用事件相机以高帧率使模糊帧“复活”](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fhtml\u002FPan_Bringing_a_Blurry_Frame_Alive_at_High_Frame-Rate_With_an_CVPR_2019_paper.html)|[代码](https:\u002F\u002Fgithub.com\u002Fpanpanfei\u002FBringing-a-Blurry-Frame-Alive-at-High-Frame-Rate-with-an-Event-Camera)|\n|2019|CVPR|[从模糊视频中学习提取无瑕疵的慢动作](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fhtml\u002FJin_Learning_to_Extract_Flawless_Slow_Motion_From_Blurry_Videos_CVPR_2019_paper.html)|[代码](https:\u002F\u002Fgithub.com\u002FMeiguangJin\u002Fslow-motion)|\n|2019|CVPR|[学习合成运动模糊](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fhtml\u002FBrooks_Learning_to_Synthesize_Motion_Blur_CVPR_2019_paper.html)|[代码](https:\u002F\u002Fgithub.com\u002Fgoogle-research\u002Fgoogle-research\u002Ftree\u002Fmaster\u002Fmotion_blur), 
[项目页面](http:\u002F\u002Ftimothybrooks.com\u002Ftech\u002Fmotion-blur\u002F)|\n|2019|CVPR|[从模糊中重建世界](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FQiu_World_From_Blur_CVPR_2019_paper.pdf)|||\n|2019|ICCV|[FAB：一种鲁棒的运动模糊视频人脸关键点检测框架](https:\u002F\u002Farxiv.org\u002Fabs\u002F1910.12100)|[代码](https:\u002F\u002Fgithub.com\u002FKeqiangSun\u002FFAB)|\n|2019|ICCV|[视觉去投影：坍缩维度的概率恢复](https:\u002F\u002Farxiv.org\u002Fabs\u002F1909.00475)|||\n|2020|CVPR-W|[利用短曝光和长曝光对运动模糊进行光序化](https:\u002F\u002Farxiv.org\u002Fabs\u002F1912.06102)|[项目页面](https:\u002F\u002Fapvijay.github.io\u002Fphotoseq_blur.html)|\n|2020|ACM-MM|[每个时刻都重要：细节感知网络让模糊图像“复活”](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3394171.3413929)|||\n|2020|NIPS|[小心！运动正在模糊你的深度神经网络的视觉](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2020\u002Ffile\u002F0a73de68f10e15626eb98701ecf03adb-Paper.pdf)|[代码](https:\u002F\u002Fgithub.com\u002Ftsingqguo\u002FABBA)|\n|2021|Arxiv|[运动模糊下的几何矩不变量](https:\u002F\u002Farxiv.org\u002Fabs\u002F2101.08647v2)|||\n|2021|AAAI|[从单张运动模糊图像中估计光流](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2103.02996.pdf)|||\n|2021|CVPR|[面向动态场景的滚动快门校正与去模糊](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.01601)|[代码](https:\u002F\u002Fgithub.com\u002Fzzh-tech\u002FRSCD)|||\n|2021|CVPR|[在线目标检测中运动模糊处理的改进](https:\u002F\u002Farxiv.org\u002Fabs\u002F2011.14448)|||\n|2021|CVPR|[鲁棒的抗模糊、抗噪声和抗压缩生成对抗网络](https:\u002F\u002Farxiv.org\u002Fabs\u002F2003.07849)|||\n|2021|ICCV|[基于真实事件的运动去模糊](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fhtml\u002FXu_Motion_Deblurring_With_Real_Events_ICCV_2021_paper.html)|[代码](https:\u002F\u002Fgithub.com\u002Fxufangchn\u002FMotion-Deblurring-with-Real-Events)|\n|2021|ICCV|[将事件引入非连续模糊帧的视频去模糊](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fhtml\u002FShang_Bringing_Events_Into_Video_Deblurring_With_Non-Consecutively_Blurry_Frames_ICCV_2021_paper.html)|[代码](https:\u002F\u002Fgithub.com\u002Fshangwei5\u002FD2Net)|\n|2021|IEEEAccess|[利用陀螺仪传感器进行鲁棒的单幅图像去模糊](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9444479)|||\n|2022|ECCV|[从模糊中生成动画：带运动引导的多模态模糊分解](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.10123)|[代码](https:\u002F\u002Fgithub.com\u002Fzzh-tech\u002FAnimation-from-Blur)|\n|2022|ECCV|[用于图像去模糊学习的真实模糊合成](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.08771)|[代码和数据集](https:\u002F\u002Fgithub.com\u002Frimchang\u002FRSBlur)|||\n|2022|ECCV|[未知曝光时间视频的事件引导去模糊](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2022\u002Fpapers_ECCV\u002Fhtml\u002F3601_ECCV_2022_paper.php)|||\n|2023|CVPR|[用于从模糊中恢复真实世界运动的模糊插值变换器](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.11423)|[代码](https:\u002F\u002Fgithub.com\u002Fzzh-tech\u002FBiT)|\n|2023|CVPR|[使用以类别为中心的数据增强提升语义分割对运动模糊的鲁棒性](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FAakanksha_Improving_Robustness_of_Semantic_Segmentation_to_Motion-Blur_Using_Class-Centric_Augmentation_CVPR_2023_paper.html)|[代码](https:\u002F\u002Fgithub.com\u002Faka-discover\u002FCCMBA_CVPR23)|\n|[2023]|CVPR|[从单张模糊图像中恢复三维手部网格序列：一个新的数据集和时序展开](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FOh_Recovering_3D_Hand_Mesh_Sequence_From_a_Single_Blurry_Image_CVPR_2023_paper.html)|[代码](https:\u002F\u002Fgithub.com\u002FJaehaKim97\u002FBlurHand_RELEASE)|\n|2023|CVPR|[用于从模糊中恢复真实世界运动的模糊插值变换器](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fpapers\u002FZhong_Blur_Interpolation_Transformer_for_Real-World_Motion_From_Blur_CVPR_2023_pape
r.pdf)|[代码](https:\u002F\u002Fgithub.com\u002Fzzh-tech\u002FBiT?tab=readme-ov-file)|\n|2023|CVPR|[基于事件的帧插值与临时去模糊](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FSun_Event-Based_Frame_Interpolation_With_Ad-Hoc_Deblurring_CVPR_2023_paper.html)|[代码](https:\u002F\u002Fgithub.com\u002FAHupuJR\u002FREFID)|\n|2023|CVPR|[DartBlur：通过抑制检测伪影实现隐私保护](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FJiang_DartBlur_Privacy_Preservation_With_Detection_Artifact_Suppression_CVPR_2023_paper.html)|[代码](https:\u002F\u002Fgithub.com\u002FJaNg2333\u002FDartBlur.)|\n|2023|CVPR|[HyperCUT：利用无监督排序从单张模糊图像中生成视频序列](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FPham_HyperCUT_Video_Sequence_From_a_Single_Blurry_Image_Using_Unsupervised_CVPR_2023_paper.html)|[代码](https:\u002F\u002Fgithub.com\u002FVinAIResearch\u002FHyperCUT.git)|\n|2023|CVPR|[用于具有运动模糊的大规模场景的混合神经渲染](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FDai_Hybrid_Neural_Rendering_for_Large-Scale_Scenes_With_Motion_Blur_CVPR_2023_paper.html)|[代码](https:\u002F\u002Fdaipengwa.github.io\u002FHybrid-Rendering-ProjectPage)|\n|2023|CVPR|[盲曝光条件下的基于事件的模糊帧插值](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FWeng_Event-Based_Blurry_Frame_Interpolation_Under_Blind_Exposure_CVPR_2023_paper.html)|[代码](https:\u002F\u002Fgithub.com\u002FWarranWeng\u002FEBFI-BE)|\n|2023|ICCV|[非同轴事件引导的空间对齐运动去模糊](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FCho_Non-Coaxial_Event-Guided_Motion_Deblurring_with_Spatial_Alignment_ICCV_2023_paper.pdf)|||\n|2023|ICCV|[在现实场景中推广基于事件的运动去模糊](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FZhang_Generalizing_Event-Based_Motion_Deblurring_in_Real-World_Scenarios_ICCV_2023_paper.pdf)|[代码](https:\u002F\u002Fgithub.com\u002FXiangZ-0\u002FGEM)|\n|2024|WACV|[利用去噪扩散概率模型进行单幅图像去模糊、快速移动物体的轨迹与形状恢复](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FWACV2024\u002Fpapers\u002FSpetlik_Single-Image_Deblurring_Trajectory_and_Shape_Recovery_of_Fast_Moving_Objects_WACV_2024_paper.pdf)|[代码](https:\u002F\u002Fgithub.com\u002Fradimspetlik\u002FSI-DDPM-FMO)|\n|2024|CVPR|[具有未知模态时空对齐的尖峰引导运动去模糊](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FZhang_Spike-guided_Motion_Deblurring_with_Unknown_Modal_Spatiotemporal_Alignment_CVPR_2024_paper.html)|[代码](https:\u002F\u002Fgithub.com\u002FLeozhangjiyuan\u002FUaSDN)|\n|2024|CVPR|[针对事件引导去模糊和帧插值的延迟校正](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FYang_Latency_Correction_for_Event-guided_Deblurring_and_Frame_Interpolation_CVPR_2024_paper.html)|||\n|2024|CVPR|[面向真实世界运动模糊的频率感知事件驱动视频去模糊](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FKim_Frequency-aware_Event-based_Video_Deblurring_for_Real-World_Motion_Blur_CVPR_2024_paper.html)|||\n|2024|CVPR|[通过传感器逆向建模辅助EVS联合去模糊、滚动快门校正和视频帧插值](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FJiang_EVS-assisted_Joint_Deblurring_Rolling-Shutter_Correction_and_Video_Frame_Interpolation_through_CVPR_2024_paper.html)|||\n|2024|CVPR|[带有跨快门引导的运动模糊分解](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FJi_Motion_Blur_Decomposition_with_Cross-shutter_Guidance_CVPR_2024_paper.html)|[代码](https:\u002F\u002Fgithub.com\u002Fjixiang2016\u002FdualBR)\n\n## 
焦外模糊去模糊及潜在数据集\n|年份|会议|论文|代码库|\n|:---:|:---:|:---:|:---:|\n|2009|ICCP|[用于焦外模糊去模糊的好光圈是什么？](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F5559018)|||\n|2009|ICIP|[利用局部对比度先验进行单张图像的散焦图估计](https:\u002F\u002Fwww.eecs.yorku.ca\u002F~mbrown\u002Fpdf\u002Ficip09_defocus.pdf)|||\n|2011|PR|[从单张图像中估计散焦图](https:\u002F\u002Fwww.comp.nus.edu.sg\u002F~tsim\u002Fdocuments\u002FdefocusEstimation-published.pdf)|||\n|2012|ICASSP|[基于L1-2优化和引导模糊图的空间变焦外模糊图像去模糊](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F6288071)|||\n|2013|ICASSP|[从相似图像对中去除焦外模糊](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F6637925)|||\n|2014|CVPR|[判别式模糊检测特征](http:\u002F\u002Fwww.shijianping.me\u002Fblur_cvpr14.pdf)|[项目页面](http:\u002F\u002Fwww.cse.cuhk.edu.hk\u002F~leojia\u002Fprojects\u002Fdblurdetect\u002Findex.html)|\n|2015|CVPR|[可察觉的散焦模糊检测与估计](http:\u002F\u002Fshijianping.me\u002Fjnb\u002Fpapers\u002Fjnbdetection_final.pdf)|[项目页面](http:\u002F\u002Fshijianping.me\u002Fjnb\u002Findex.html)|\n|2016||[基于单张图像的空间变散焦模糊图估计与去模糊](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS1047320316000031)|[代码](https:\u002F\u002Fgithub.com\u002FZHANGXinxinPKU\u002Fdefocus-deblurring)|\n|2017|BMVC|[从一张焦外模糊图像中进行深度估计与模糊去除](https:\u002F\u002Fsaeed-anwar.github.io\u002Fpapers\u002FBMVC17-depth.pdf)|||\n|2017|CVPR|[基于梯度幅值多尺度融合排序变换系数的空间变模糊检测](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fhtml\u002FGolestaneh_Spatially-Varying_Blur_Detection_CVPR_2017_paper.html)|[代码](https:\u002F\u002Fgithub.com\u002Fisalirezag\u002FHiFST)|\n|2017|CVPR|[一种统一的多尺度深度与手工特征方法用于散焦估计](https:\u002F\u002Farxiv.org\u002Fabs\u002F1704.08992)|[代码](https:\u002F\u002Fgithub.com\u002Fzzangjinsun\u002FDHDE_CVPR17)|\n|2017|ICCV|[学习从单张图像合成四维RGBD光场](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_iccv_2017\u002Fhtml\u002FSrinivasan_Learning_to_Synthesize_ICCV_2017_paper.html)|[数据集及项目页面](https:\u002F\u002Fgithub.com\u002Fpratulsrinivasan\u002FLocal_Light_Field_Synthesis)|\n|2018|ECCV|[RefocusGAN：使用单张图像进行场景重新聚焦](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fpapers\u002FParikshit_Sakurikar_Single_Image_Scene_ECCV_2018_paper.pdf)|||\n|2018|ECCV_W|[基于散焦的深度学习：散焦模糊如何通过密集神经网络提升三维估计？](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_eccv_2018_workshops\u002Fw3\u002Fhtml\u002FCarvalho_Deep_Depth_from_Defocus_how_can_defocus_blur_improve_3D_ECCVW_2018_paper.html)|[代码与数据集](https:\u002F\u002Fgithub.com\u002Fmarcelampc\u002Fd3net_depth_estimation)|\n|2018|PG|[基于深度上下文特征的散焦与运动模糊检测](http:\u002F\u002Fcg.postech.ac.kr\u002Fpapers\u002FKim2018Defocus.pdf)|[代码与数据集](https:\u002F\u002Fgithub.com\u002FHyeongseokSon1\u002Fdeep_blur_detection_and_classification)|\n|2018|TIP|[基于边缘的自适应尺度选择散焦模糊估计](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F8101511)|[代码](https:\u002F\u002Fgithub.com\u002Falikaraali\u002FTIP2018-Edge-Based-Defocus-Blur-Estimation-With-Adaptive-Scale-Selection)|\n|2019|CVPR|[利用领域自适应进行深度散焦图估计](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fhtml\u002FLee_Deep_Defocus_Map_Estimation_Using_Domain_Adaptation_CVPR_2019_paper.html)|[代码与数据集](https:\u002F\u002Fgithub.com\u002Fcodeslake\u002FDMENet)|\n|2019|CVPR|[DeFusionNET：通过递归融合与精炼多尺度深度特征进行散焦模糊检测](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FTang_DeFusionNET_Defocus_Blur_Detection_via_Recurrently_Fusing_and_Refining_Multi-Scale_CVPR_2019_paper.pdf)|||\n|2020|ECCV|[利用双像素数据进行焦外模糊去模糊](https:\u002F\u002Farxiv.org\u002Fabs\u002F2005.00305)|[代码与数据集](https:\u002F\u002Fgithub.com\u002FAbdullah-Abuolaim\
u002Fdefocus-deblurring-dual-pixel)|\n|2020|ECCV|[重新思考散焦模糊检测问题及实时深度DBD模型](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2020\u002Fpapers_ECCV\u002Fhtml\u002F1182_ECCV_2020_paper.php)|||\n|2020|ECCV|[通过深度蒸馏进行散焦模糊检测](https:\u002F\u002Farxiv.org\u002Fabs\u002F2007.08113)|[代码](https:\u002F\u002Fgithub.com\u002Fvinthony\u002Fdepth-distillation)|\n|2020|TCI|[AIFNet：基于光场数据集的全聚焦图像恢复网络](https:\u002F\u002Fsweb.cityu.edu.hk\u002Fmiullam\u002FAIFNET\u002F)|[代码](https:\u002F\u002Fgithub.com\u002Fbinorchen\u002FAIFNET)，[数据集](https:\u002F\u002Fsweb.cityu.edu.hk\u002Fmiullam\u002FAIFNET\u002Fdataset\u002FLFDOF.zip)|\n|2020|Arxiv|[用于反卷积显微镜的带模糊核的CycleGAN：最优传输几何](https:\u002F\u002Farxiv.org\u002Fabs\u002F1908.09414)|||\n|2020|Arxiv|[用于散焦模糊估计的深度多尺度特征学习](https:\u002F\u002Farxiv.org\u002Fabs\u002F2009.11939)|||\n|2020|TCSVT|[为焦外模糊图像去模糊估计广义高斯模糊核](http:\u002F\u002Fivlab.org\u002Fpublications\u002FTCSVT2021-GGdeblurring.pdf)|||\n|2021|Arxiv|[通过显著区域检测先验进行散焦模糊检测](https:\u002F\u002Farxiv.org\u002Fabs\u002F2011.09677)|||\n|2021|Arxiv|[学习利用非对称编码孔径估计散焦模糊的核尺度与方向](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.05843)|||\n|2021|CVPR|[用于单张图像焦外模糊去模糊的迭代滤波自适应网络](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fpapers\u002FLee_Iterative_Filter_Adaptive_Network_for_Single_Image_Defocus_Deblurring_CVPR_2021_paper.pdf)|[代码与数据集](https:\u002F\u002Fgithub.com\u002Fcodeslake\u002FIFAN)|\n|2021|CVPR|[通过双重对抗判别器进行自生成散焦模糊检测](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fhtml\u002FZhao_Self-Generated_Defocus_Blur_Detection_via_Dual_Adversarial_Discriminators_CVPR_2021_paper.html)|[代码](https:\u002F\u002Fgithub.com\u002Fshangcai1\u002FSG)|\n|2021|CVPR|[双像素探索：同时进行深度估计与图像恢复](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fpapers\u002FPan_Dual_Pixel_Exploration_Simultaneous_Depth_Estimation_and_Image_Restoration_CVPR_2021_paper.pdf)|[代码](https:\u002F\u002Fgithub.com\u002Fpanpanfei\u002FDual-Pixel-Exploration-Simultaneous-Depth-Estimation-and-Image-Restoration)|\n|2021|CVPRW|[NTIRE 
2021双像素图像焦外模糊去模糊挑战赛：方法与结果](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021W\u002FNTIRE\u002Fpapers\u002FAbuolaim_NTIRE_2021_Challenge_for_Defocus_Deblurring_Using_Dual-Pixel_Images_Methods_CVPRW_2021_paper.pdf)|||\n|2021|CVPRW|[注意！保持专注！](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.07925)|[代码](https:\u002F\u002Fgithub.com\u002Ftuvovan\u002FATTSF)|\n|2021|ICCV|[利用内核共享并行空洞卷积进行单张图像焦外模糊去模糊](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2108.09108.pdf)|[代码](https:\u002F\u002Fgithub.com\u002FHyeongseokSon1\u002FKPAC)|\n|2021|ICCV|[通过真实建模双像素数据来学习减少散焦模糊](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fhtml\u002FAbuolaim_Learning_To_Reduce_Defocus_Blur_by_Realistically_Modeling_Dual-Pixel_Data_ICCV_2021_paper.html)|[代码](https:\u002F\u002Fgithub.com\u002FAbdullah-Abuolaim\u002Frecurrent-defocus-deblurring-synth-dual-pixel)|\n|2022|WACV|[改进单张图像焦外模糊去模糊：双像素图像如何通过多任务学习提供帮助](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FWACV2022\u002Fhtml\u002FAbuolaim_Improving_Single-Image_Defocus_Deblurring_How_Dual-Pixel_Images_Help_Through_Multi-Task_WACV_2022_paper.html)|[代码](https:\u002F\u002Fgithub.com\u002FAbdullah-Abuolaim\u002Fmulti-task-defocus-deblurring-dual-pixel-nimat)|\n|2022|CVPR|[利用光场生成与真实散焦图像进行去模糊学习](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fhtml\u002FRuan_Learning_to_Deblur_Using_Light_Field_Generated_and_Real_Defocus_CVPR_2022_paper.html)|[代码](https:\u002F\u002Fgithub.com\u002Flingyanruan\u002FDRBNet)|\n|2022|CVPR|[AR-NeRF：无监督学习自然图像中的深度与散焦效果，结合孔径渲染神经辐射场](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fhtml\u002FKaneko_AR-NeRF_Unsupervised_Learning_of_Depth_and_Defocus_Effects_From_Natural_CVPR_2022_paper.html)|||\n|2022|ECCV|[通过对抗促进学习实现散焦模糊检测与去模糊的联合](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2022\u002Fpapers_ECCV\u002Fhtml\u002F3308_ECCV_2022_paper.php)|[代码](https:\u002F\u002Fgithub.com\u002Fwdzhao123\u002FAPL)|\n|2023|AAAI|[利用错位训练样本学习单张图像焦外模糊去模糊](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.14502)|[代码](https:\u002F\u002Fgithub.com\u002Fliyucs\u002FJDRL)|\n|2023|CVPR|[K3DN：面向双像素焦外模糊去模糊的视差感知核估计](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FYang_K3DN_Disparity-Aware_Kernel_Estimation_for_Dual-Pixel_Defocus_Deblurring_CVPR_2023_paper.html)|||\n|2023|CVPR|[更好的“CMOS”带来更清晰的图像：盲态图像超分辨率中的空间变模糊估计学习](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FChen_Better_CMOS_Produces_Clearer_Images_Learning_Space-Variant_Blur_Estimation_for_CVPR_2023_paper.html)|[代码](https:\u002F\u002Fgithub.com\u002FByChelsea\u002FCMOS)|\n|2023|CVPR|[具有递归核的诺伊曼网络用于单张图像焦外模糊去模糊](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FQuan_Neumann_Network_With_Recursive_Kernels_for_Single_Image_Defocus_Deblurring_CVPR_2023_paper.html)|[代码](https:\u002F\u002Fgithub.com\u002FcsZcWu\u002FNRKNet)|\n|2023|CVPR|[DP-NeRF：带有物理场景先验的去模糊神经辐射场](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FLee_DP-NeRF_Deblurred_Neural_Radiance_Field_With_Physical_Scene_Priors_CVPR_2023_paper.html)|[代码](https:\u002F\u002Fdogyoonlee.github.io\u002Fdpnerf\u002F)|\n|2023|ICCV|[通过隐式神经逆核进行单张图像焦外模糊去模糊](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FQuan_Single_Image_Defocus_Deblurring_via_Implicit_Neural_Inverse_Kernels_ICCV_2023_paper.pdf)|||\n|2023|IJCV|[针对现实世界盲态超分辨率的端到端交替优化](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.08816)|[代码](https:\u002F\u002Fgithub.com\u002Fgreatlog\u002FRealDAN.
git)|\n|2023|Arxiv|[LaKDNet：以高效卷积网络重访图像去模糊](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2302.02234.pdf)|[代码](https:\u002F\u002Fgithub.com\u002Flingyanruan\u002FLaKDNet)|\n|2024|WACV|[与相机无关的基于散焦模糊的单张图像深度估计](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FWACV2024\u002Fpapers\u002FWijayasingha_Camera-Independent_Single_Image_Depth_Estimation_From_Defocus_Blur_WACV_2024_paper.pdf)|||\n|2024|CVPR|[基于多金字塔Transformer和对比学习的显微镜焦外模糊去模糊统一框架](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FZhang_A_Unified_Framework_for_Microscopy_Defocus_Deblur_with_Multi-Pyramid_Transformer_CVPR_2024_paper.html)|[代码](https:\u002F\u002Fgithub.com\u002FPieceZhang\u002FMPT-CataBlur)|\n|2024|CVPR|[LDP：语言驱动的双像素图像焦外模糊去模糊网络](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FYang_LDP_Language-driven_Dual-Pixel_Image_Defocus_Deblurring_Network_CVPR_2024_paper.html)|[代码](https:\u002F\u002Fgithub.com\u002Fnoxsine\u002FLDP)|\n\n## 运动模糊基准数据集\n|年份|会议|论文|代码库|\n|:---:|:---:|:---:|:---:|\n|2009|CVPR|[理解和评估盲去卷积算法](http:\u002F\u002Fwebee.technion.ac.il\u002Fpeople\u002Fanat.levin\u002Fpapers\u002FdeconvLevinEtalCVPR09.pdf)|[数据集](http:\u002F\u002Fwebee.technion.ac.il\u002Fpeople\u002Fanat.levin\u002Fpapers\u002FLevinEtalCVPR09Data.rar)|\n|2012|ECCV|[相机抖动的录制与回放：基于真实世界数据库的盲去卷积基准测试](http:\u002F\u002Fwebdav.is.mpg.de\u002Fpixel\u002Fbenchmark4camerashake\u002Fsrc_files\u002FPdf\u002FKoehler_ECCV2012_Benchmark.pdf)|[数据集](http:\u002F\u002Fwebdav.is.mpg.de\u002Fpixel\u002Fbenchmark4camerashake\u002F)|\n|2013|ICCP|[基于边缘和块先验的模糊核估计](http:\u002F\u002Fcs.brown.edu\u002F~lbsun\u002Fdeblur2013\u002Fpatchdeblur_iccp2013.pdf)|[数据集](http:\u002F\u002Fcs.brown.edu\u002F~lbsun\u002Fdeblur2013\u002Fdeblur2013iccp.html)|\n|2016|CVPR|[单张图像盲去模糊的比较研究](http:\u002F\u002Fvllab.ucmerced.edu\u002Fwlai24\u002Fcvpr16_deblur_study\u002Fpaper\u002Fcvpr16_deblur_study.pdf)|[数据集](http:\u002F\u002Fvllab.ucmerced.edu\u002Fwlai24\u002Fcvpr16_deblur_study\u002F)|\n|2017|CVPR (GOPRO)|[用于动态场景去模糊的深度多尺度卷积神经网络](http:\u002F\u002Fzpascal.net\u002Fcvpr2017\u002FNah_Deep_Multi-Scale_Convolutional_CVPR_2017_paper.pdf)|[数据集](https:\u002F\u002Fgithub.com\u002FSeungjunNah\u002FDeepDeblur_release)|\n|2017|CVPR (DVD)|[手持摄像机的深度视频去模糊](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fhtml\u002FSu_Deep_Video_Deblurring_CVPR_2017_paper.html)|[数据集](http:\u002F\u002Fwww.cs.ubc.ca\u002Flabs\u002Fimager\u002Ftr\u002F2017\u002FDeepVideoDeblurring\u002F)|\n|2017|GCPR|[野外运动去模糊](https:\u002F\u002Farxiv.org\u002Fabs\u002F1701.01486)||\n|2019|CVPR (立体模糊数据集)|[视图聚合的立体去模糊](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fhtml\u002FZhou_DAVANet_Stereo_Deblurring_With_View_Aggregation_CVPR_2019_paper.html)|[数据集](https:\u002F\u002Fstereoblur.shangchenzhou.com\u002F)|\n|2019|CVPR_W (REDS)|[NTIRE 2019 视频去模糊与超分辨率挑战：数据集与研究](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPRW_2019\u002Fhtml\u002FNTIRE\u002FNah_NTIRE_2019_Challenge_on_Video_Deblurring_and_Super-Resolution_Dataset_and_CVPRW_2019_paper.html)|[数据集](https:\u002F\u002Fseungjunnah.github.io\u002FDatasets\u002Freds)|\n|2019|ICCV 
(HIDE)|[人感知的运动去模糊](https:\u002F\u002Fpdfs.semanticscholar.org\u002F20a4\u002Fb3353579525f0b76ec42e17a2284b4453f9a.pdf)|[数据集](https:\u002F\u002Fgithub.com\u002Fjoanshen0508\u002FHA_deblur)|\n|2020|CVPR|[通过真实模糊进行去模糊](https:\u002F\u002Farxiv.org\u002Fabs\u002F2004.01860)|[数据集](https:\u002F\u002Fgithub.com\u002FHDCVLab\u002FDeblurring-by-Realistic-Blurring)|\n|2020|CVPR|[基于事件的运动去模糊学习](https:\u002F\u002Farxiv.org\u002Fabs\u002F2004.05794)||\n|2020|ECCV (BSD)|[用于视频去模糊的高效时空循环神经网络](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2020\u002Fpapers_ECCV\u002Fpapers\u002F123510188.pdf)|[数据集](https:\u002F\u002Fgithub.com\u002Fzzh-tech\u002FESTRNN)|\n|2020|ECCV|[用于学习和基准测试去模糊算法的真实世界模糊数据集](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2020\u002Fpapers_ECCV\u002Fpapers\u002F123700188.pdf)|[代码与数据集](http:\u002F\u002Fcg.postech.ac.kr\u002Fresearch\u002Frealblur\u002F)|\n|2021|CVPR (BS-RSCD)|[迈向动态场景中的滚动快门校正和去模糊](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.01601)|[数据集](https:\u002F\u002Fgithub.com\u002Fzzh-tech\u002FRSCD)||\n|2021|Arxiv|[MC-Blur：一个全面的图像去模糊基准](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2112.00234.pdf)|[数据集](https:\u002F\u002Fgithub.com\u002FHDCVLab\u002FMC-Blur-Dataset)||\n|2022|ECCV|[用于学习图像去模糊的真实模糊合成](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.08771)|[代码与数据集](https:\u002F\u002Fgithub.com\u002Frimchang\u002FRSBlur)||\n|2022|IJCV (BSD)|[真实世界视频去模糊：一个基准数据集和高效的循环神经网络](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.16028)|[数据集](https:\u002F\u002Fgithub.com\u002Fzzh-tech\u002FESTRNN)|\n|2023|CVPR (RBI)|[用于从模糊中恢复真实世界运动的模糊插值Transformer](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.11423)|[代码与数据集](https:\u002F\u002Fgithub.com\u002Fzzh-tech\u002FBiT)|\n|2023|AAAI|[真实世界的深度局部运动去模糊](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.08179)|[代码与数据集](https:\u002F\u002Fgithub.com\u002FLeiaLi\u002FReLoBlur)|\n|2024|ECCV_W|[DAVIDE：深度感知的视频去模糊](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.01274)|[代码与数据集](https:\u002F\u002Fgermanftv.github.io\u002FDAVIDE.github.io\u002F)|\n\n缩写：\n\n+ DL -> 深度学习\n+ non-DL -> 非深度学习\n\n## AI照片增强应用\n+ [HitPaw 照片增强器](https:\u002F\u002Fwww.hitpaw.com\u002Fphoto-enhancer.html)","# Awesome-Deblurring 快速上手指南\n\n**Awesome-Deblurring** 并非一个单一的独立软件工具，而是一个精选的**图像与视频去模糊资源列表**。它汇集了从传统非深度学习算法到最新深度学习（DL）模型的论文、代码库和数据集。\n\n由于该仓库包含数十个不同的项目，以下指南将指导你如何浏览该列表，并选取一个主流的深度学习去模糊模型进行快速部署和测试。\n\n## 环境准备\n\n在运行具体的去模糊算法（特别是基于深度学习的模型）之前，请确保你的开发环境满足以下要求：\n\n*   **操作系统**: Linux (推荐 Ubuntu 18.04\u002F20.04) 或 macOS。Windows 用户建议使用 WSL2 或 Docker。\n*   **硬件要求**: \n    *   强烈建议配备 NVIDIA GPU (显存 >= 4GB) 以加速推理。\n    *   若仅使用传统非深度学习算法（non-DL），CPU 即可运行。\n*   **核心依赖**:\n    *   Python 3.7+\n    *   PyTorch 1.7+ (大多数现代 DL 模型依赖此框架)\n    *   CUDA Toolkit (需与 PyTorch 版本匹配)\n    *   OpenCV (`opencv-python`)\n    *   NumPy, SciPy\n\n**前置依赖安装命令：**\n\n```bash\n# 推荐使用国内镜像源加速安装\npip install torch torchvision torchaudio --index-url https:\u002F\u002Fdownload.pytorch.org\u002Fwhl\u002Fcu118\npip install opencv-python numpy scipy matplotlib -i https:\u002F\u002Fpypi.tuna.tsinghua.edu.cn\u002Fsimple\n```\n\n## 安装步骤\n\n由于 Awesome-Deblurring 是资源索引，你需要从中选择一个具体的项目进行安装。以下以列表中热门的 **SRN-DeblurNet** (Single Image Deblurring using Multi-Scale Convolutional Neural Networks) 或类似的通用 DL 模型为例演示流程。\n\n1.  **访问资源列表**：\n    前往 [Awesome-Deblurring GitHub 页面](https:\u002F\u002Fgithub.com\u002Fsubeeshvasu\u002FAwesome-Deblurring)，在 \"Single-Image-Blind-Motion-Deblurring (DL)\" 章节查找带有 `[Code]` 或 `[Repo]` 链接的项目。\n\n2.  
**克隆选定项目的代码库**：\n    假设我们选择了一个典型的开源实现（请以实际选中的项目 Repo 地址为准）：\n\n    ```bash\n    # 示例：克隆一个典型的去模糊项目 (请替换为实际选中的 repo 地址)\n    git clone https:\u002F\u002Fgithub.com\u002Fselected-author\u002Fdeblur-project.git\n    cd deblur-project\n    ```\n\n3.  **安装项目特定依赖**：\n    大多数项目会在根目录提供 `requirements.txt`。\n\n    ```bash\n    # 使用清华源加速安装项目依赖\n    pip install -r requirements.txt -i https:\u002F\u002Fpypi.tuna.tsinghua.edu.cn\u002Fsimple\n    ```\n\n4.  **下载预训练模型**：\n    查看项目 README 中的 \"Pretrained Models\" 部分，下载对应的 `.pth` 或 `.h5` 权重文件，并放入项目指定的文件夹（通常为 `checkpoints\u002F` 或 `models\u002F`）。\n    *提示：如果官方链接下载缓慢，可尝试在 Gitee 或国内 AI 社区搜索是否有镜像资源。*\n\n## 基本使用\n\n大多数去模糊工具的使用逻辑相似：加载预训练模型 -> 输入模糊图像 -> 输出去模糊图像。\n\n以下是一个基于 Python 脚本的最简使用示例（伪代码逻辑，具体参数请参考所选项目的文档）：\n\n1.  **准备测试图片**：\n    将一张模糊的图片命名为 `input_blur.jpg` 并放入当前目录。\n\n2.  **运行推理命令**：\n\n    ```bash\n    # 典型运行命令示例\n    python test.py --model_path checkpoints\u002Fpretrained_model.pth --input_path input_blur.jpg --output_path result_deblur.jpg\n    ```\n\n    或者，如果项目提供了交互式 Python API：\n\n    ```python\n    import torch\n    from model import DeblurNet  # 导入具体项目的模型类\n    import cv2\n\n    # 1. 加载模型\n    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n    model = DeblurNet().to(device)\n    model.load_state_dict(torch.load('checkpoints\u002Fpretrained_model.pth'))\n    model.eval()\n\n    # 2. 读取并预处理图像\n    img = cv2.imread('input_blur.jpg')\n    img_tensor = torch.from_numpy(img).float().permute(2, 0, 1).unsqueeze(0).to(device) \u002F 255.0\n\n    # 3. 执行去模糊\n    with torch.no_grad():\n        output = model(img_tensor)\n\n    # 4. 保存结果\n    result = (output.squeeze().permute(1, 2, 0).cpu() * 255).numpy().astype('uint8')\n    cv2.imwrite('result_deblur.jpg', result)\n    print(\"去模糊完成，结果已保存为 result_deblur.jpg\")\n    ```\n\n**下一步建议**：\n回到 **Awesome-Deblurring** 列表，根据你的具体场景（如：人脸去模糊、视频去模糊、大运动模糊、离焦模糊等）选择更专用的算法进行尝试。","某新闻机构的摄影记者在突发事故现场拍摄了一组关键照片，但因手持拍摄时发生剧烈抖动，导致所有图像出现严重的运动模糊，急需在截稿前恢复画面细节以用于报道。\n\n### 没有 Awesome-Deblurring 时\n- **算法选型如大海捞针**：面对海量的去模糊论文和代码库，团队难以快速区分哪些是传统的非深度学习方案，哪些是最新的深度学习模型，浪费大量时间筛选。\n- **场景匹配度低**：盲目尝试通用修复工具，无法针对“单张盲运动模糊”或“视频多帧去模糊”等具体损坏类型找到最优解，修复效果往往伴随严重伪影。\n- **缺乏基准测试支持**：没有权威的基准数据集参考，难以量化评估不同算法在当前模糊程度下的实际表现，只能凭肉眼反复试错。\n- **深度信息缺失**：对于包含复杂景深的现场照片，普通工具无法利用深度感知技术进行处理，导致背景或前景依然模糊不清。\n\n### 使用 Awesome-Deblurring 后\n- **资源精准定位**：通过分类清晰的目录（如单图盲去模糊、视频去模糊、离焦去模糊），团队迅速锁定了适合突发新闻场景的 SOTA（最先进）深度学习模型。\n- **定制化解决方案**：直接调用列表中针对“相机抖动”优化的特定算法代码，有效去除了运动拖影，同时保留了记者面部和事故细节的真实纹理。\n- **高效验证流程**：利用列表提供的标准基准数据集快速验证模型效果，确认无误后立即投入批量处理，将修图时间从数小时压缩至几分钟。\n- **高级特性应用**：针对现场复杂的远近景别，采用了列表中推荐的“深度感知运动去模糊”技术，实现了全画面清晰锐利的高质量还原。\n\nAwesome-Deblurring 通过将分散的去模糊资源系统化整理，让技术人员能从繁琐的文献调研中解脱出来，专注于利用最佳算法解决实际的图像质量危机。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fsubeeshvasu_Awesome-Deblurring_f90506b3.png","subeeshvasu","Subeesh Vasu","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002Fsubeeshvasu_6c402b1f.jpg","AI Specialist, KLA","KLA","India","subeeshvasu@gmail.com",null,"https:\u002F\u002Fsubeeshvasu.github.io\u002F","https:\u002F\u002Fgithub.com\u002Fsubeeshvasu",2852,375,"2026-04-07T02:45:16",5,"","未说明",{"notes":89,"python":87,"dependencies":90},"该仓库是一个去模糊（Deblurring）相关资源的精选列表（Awesome List），主要收录了从 2006 年至今的学术论文、项目页面和代码库链接。它本身不是一个可直接运行的单一软件工具，因此 README 中未包含具体的操作系统、GPU、内存、Python 版本或依赖库的安装需求。用户需根据列表中具体选择的某篇论文或其对应的代码仓库（Repo 列）去查阅特定的运行环境要求。早期（2015 年前）多为非深度学习算法，可能仅需 MATLAB 或 C++；后期深度学习算法通常需 PyTorch\u002FTensorFlow 及 GPU 
支持。",[],[14,15,92],"视频",[94,95,96,97,98,99,100,101,102,103,104,105,106,107],"image-deblurring","video-deblurring","image-deconvolution","stereo-deblurring","burst-deblurring","motion-blur","kernel-estimation","camera-shake","deep-learning","restoration","deblurring","defocus-deblurring","motion-deblurring","defocus-blur","2026-03-27T02:49:30.150509","2026-04-11T22:00:06.896411",[111,116,121,126,131,136,141],{"id":112,"question_zh":113,"answer_zh":114,"source_url":115},29897,"这个去模糊论文列表会持续更新吗？","是的，维护者会根据时间安排进行更新（例如已更新了 2023 年的论文）。同时，维护者非常欢迎社区贡献，如果您发现最新的论文，建议直接提交 Pull Request (PR) 来帮助更新列表，这样效率更高。","https:\u002F\u002Fgithub.com\u002Fsubeeshvasu\u002FAwesome-Deblurring\u002Fissues\u002F36",{"id":117,"question_zh":118,"answer_zh":119,"source_url":120},29895,"如果没有配对的清晰 - 模糊图像数据集，如何评估去模糊结果？","可以使用无参考图像质量评估（No-Reference Image Quality Assessment, NR-IQA）指标。这类指标专门设计用于在没有参考图像（即没有配对的清晰图像）的情况下评估图像质量。推荐查看 Awesome-Image-Quality-Assessment 仓库获取相关工具和指标：https:\u002F\u002Fgithub.com\u002Fchaofengc\u002FAwesome-Image-Quality-Assessment","https:\u002F\u002Fgithub.com\u002Fsubeeshvasu\u002FAwesome-Deblurring\u002Fissues\u002F30",{"id":122,"question_zh":123,"answer_zh":124,"source_url":125},29896,"列表中的 \"DL\" 和 \"non-DL\" 分别代表什么意思？","\"DL\" 代表深度学习（Deep Learning），指基于深度学习的去模糊方法；\"non-DL\" 代表非深度学习，指传统的、不基于深度学习的去模糊方法。由于单图像运动去模糊领域这两类论文数量都很多，因此列表将它们分为了两类。","https:\u002F\u002Fgithub.com\u002Fsubeeshvasu\u002FAwesome-Deblurring\u002Fissues\u002F6",{"id":127,"question_zh":128,"answer_zh":129,"source_url":130},29898,"有关于 RAW 格式图像或视频去模糊的论文吗？","目前列表中收录较少，但确实存在相关研究，例如 \"Raw Image Deblurring\"。由于该方向相对较新或特定，可能还有更多论文未被广泛收录。建议重点关注针对 RAW 域处理的特定文献。","https:\u002F\u002Fgithub.com\u002Fsubeeshvasu\u002FAwesome-Deblurring\u002Fissues\u002F24",{"id":132,"question_zh":133,"answer_zh":134,"source_url":135},29899,"我想向列表推荐一篇新论文，最好的方式是什么？","虽然可以通过 Issue 留言推荐，但维护者强烈建议直接提交 Pull Request (PR)。通过 PR 添加新论文可以更快地被合并到列表中，也能减轻维护者的工作负担。请确保在 PR 中提供论文链接、代码链接以及正确的分类。","https:\u002F\u002Fgithub.com\u002Fsubeeshvasu\u002FAwesome-Deblurring\u002Fissues\u002F37",{"id":137,"question_zh":138,"answer_zh":139,"source_url":140},29900,"我发现列表中某篇论文的链接错误或代码缺失，该如何反馈？","您可以直接在 Issue 中指出错误的链接并提供正确的论文地址或代码仓库地址（例如 GitHub 链接或官方项目页）。维护者在收到确认后通常会立即修正。例如，曾有用户提供了 ECCV 2020 论文的正确链接和 ESTRNN 的代码库，随后得到了及时更新。","https:\u002F\u002Fgithub.com\u002Fsubeeshvasu\u002FAwesome-Deblurring\u002Fissues\u002F5",{"id":142,"question_zh":143,"answer_zh":144,"source_url":145},29901,"列表中包含非盲去模糊（non-blind）或半盲去模糊的传统方法吗？","包含。列表不仅涵盖深度学习的方法，也收录了非深度学习（non-DL）的方法，包括严格意义上的非盲去模糊和半盲去模糊算法。例如，已有收录关于使用清洁和噪声滤波器进行反向图像滤波以及黑盒去模糊的相关论文及其代码实现。","https:\u002F\u002Fgithub.com\u002Fsubeeshvasu\u002FAwesome-Deblurring\u002Fissues\u002F32",[]]