[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-zhenyingfang--Awesome-Temporal-Action-Detection-Temporal-Action-Proposal-Generation":3,"tool-zhenyingfang--Awesome-Temporal-Action-Detection-Temporal-Action-Proposal-Generation":65},[4,18,32,41,49,57],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":17},4292,"Deep-Live-Cam","hacksider\u002FDeep-Live-Cam","Deep-Live-Cam 是一款专注于实时换脸与视频生成的开源工具，用户仅需一张静态照片，即可通过“一键操作”实现摄像头画面的即时变脸或制作深度伪造视频。它有效解决了传统换脸技术流程繁琐、对硬件配置要求极高以及难以实时预览的痛点，让高质量的数字内容创作变得触手可及。\n\n这款工具不仅适合开发者和技术研究人员探索算法边界，更因其极简的操作逻辑（仅需三步：选脸、选摄像头、启动），广泛适用于普通用户、内容创作者、设计师及直播主播。无论是为了动画角色定制、服装展示模特替换，还是制作趣味短视频和直播互动，Deep-Live-Cam 都能提供流畅的支持。\n\n其核心技术亮点在于强大的实时处理能力，支持口型遮罩（Mouth Mask）以保留使用者原始的嘴部动作，确保表情自然精准；同时具备“人脸映射”功能，可同时对画面中的多个主体应用不同面孔。此外，项目内置了严格的内容安全过滤机制，自动拦截涉及裸露、暴力等不当素材，并倡导用户在获得授权及明确标注的前提下合规使用，体现了技术发展与伦理责任的平衡。",88924,3,"2026-04-06T03:28:53",[13,14,15,16],"开发框架","图像","Agent","视频","ready",{"id":19,"name":20,"github_repo":21,"description_zh":22,"stars":23,"difficulty_score":24,"last_commit_at":25,"category_tags":26,"status":17},2268,"ML-For-Beginners","microsoft\u002FML-For-Beginners","ML-For-Beginners 是由微软推出的一套系统化机器学习入门课程，旨在帮助零基础用户轻松掌握经典机器学习知识。这套课程将学习路径规划为 12 周，包含 26 节精炼课程和 52 道配套测验，内容涵盖从基础概念到实际应用的完整流程，有效解决了初学者面对庞大知识体系时无从下手、缺乏结构化指导的痛点。\n\n无论是希望转型的开发者、需要补充算法背景的研究人员，还是对人工智能充满好奇的普通爱好者，都能从中受益。课程不仅提供了清晰的理论讲解，还强调动手实践，让用户在循序渐进中建立扎实的技能基础。其独特的亮点在于强大的多语言支持，通过自动化机制提供了包括简体中文在内的 50 多种语言版本，极大地降低了全球不同背景用户的学习门槛。此外，项目采用开源协作模式，社区活跃且内容持续更新，确保学习者能获取前沿且准确的技术资讯。如果你正寻找一条清晰、友好且专业的机器学习入门之路，ML-For-Beginners 将是理想的起点。",85092,2,"2026-04-10T11:13:16",[14,27,16,28,15,29,30,13,31],"数据工具","插件","其他","语言模型","音频",{"id":33,"name":34,"github_repo":35,"description_zh":36,"stars":37,"difficulty_score":38,"last_commit_at":39,"category_tags":40,"status":17},5784,"funNLP","fighting41love\u002FfunNLP","funNLP 是一个专为中文自然语言处理（NLP）打造的超级资源库，被誉为\"NLP 民工的乐园”。它并非单一的软件工具，而是一个汇集了海量开源项目、数据集、预训练模型和实用代码的综合性平台。\n\n面对中文 NLP 领域资源分散、入门门槛高以及特定场景数据匮乏的痛点，funNLP 提供了“一站式”解决方案。这里不仅涵盖了分词、命名实体识别、情感分析、文本摘要等基础任务的标准工具，还独特地收录了丰富的垂直领域资源，如法律、医疗、金融行业的专用词库与数据集，甚至包含古诗词生成、歌词创作等趣味应用。其核心亮点在于极高的全面性与实用性，从基础的字典词典到前沿的 BERT、GPT-2 模型代码，再到高质量的标注数据和竞赛方案，应有尽有。\n\n无论是刚刚踏入 NLP 领域的学生、需要快速验证想法的算法工程师，还是从事人工智能研究的学者，都能在这里找到急需的“武器弹药”。对于开发者而言，它能大幅减少寻找数据和复现模型的时间；对于研究者，它提供了丰富的基准测试资源和前沿技术参考。funNLP 以开放共享的精神，极大地降低了中文自然语言处理的开发与研究成本，是中文 AI 社区不可或缺的宝藏仓库。",79857,1,"2026-04-08T20:11:31",[30,27,29],{"id":42,"name":43,"github_repo":44,"description_zh":45,"stars":46,"difficulty_score":38,"last_commit_at":47,"category_tags":48,"status":17},5773,"cs-video-courses","Developer-Y\u002Fcs-video-courses","cs-video-courses 是一个精心整理的计算机科学视频课程清单，旨在为自学者提供系统化的学习路径。它汇集了全球知名高校（如加州大学伯克利分校、新南威尔士大学等）的完整课程录像，涵盖从编程基础、数据结构与算法，到操作系统、分布式系统、数据库等核心领域，并深入延伸至人工智能、机器学习、量子计算及区块链等前沿方向。\n\n面对网络上零散且质量参差不齐的教学资源，cs-video-courses 解决了学习者难以找到成体系、高难度大学级别课程的痛点。该项目严格筛选内容，仅收录真正的大学层级课程，排除了碎片化的简短教程或商业广告，确保用户能接触到严谨的学术内容。\n\n这份清单特别适合希望夯实计算机基础的开发者、需要补充特定领域知识的研究人员，以及渴望像在校生一样系统学习计算机科学的自学者。其独特的技术亮点在于分类极其详尽，不仅包含传统的软件工程与网络安全，还细分了生成式 AI、大语言模型、计算生物学等新兴学科，并直接链接至官方视频播放列表，让用户能一站式获取高质量的教育资源，免费享受世界顶尖大学的课堂体验。",79792,"2026-04-08T22:03:59",[29,14,27,13],{"id":50,"name":51,"github_repo":52,"description_zh":53,"stars":54,"difficulty_score":10,"last_commit_at":55,"category_tags":56,"status":17},3128,"ragflow","infiniflow\u002Fragflow","RAGFlow 是一款领先的开源检索增强生成（RAG）引擎，旨在为大语言模型构建更精准、可靠的上下文层。它巧妙地将前沿的 RAG 技术与智能体（Agent）能力相结合，不仅支持从各类文档中高效提取知识，还能让模型基于这些知识进行逻辑推理和任务执行。\n\n在大模型应用中，幻觉问题和知识滞后是常见痛点。RAGFlow 通过深度解析复杂文档结构（如表格、图表及混合排版），显著提升了信息检索的准确度，从而有效减少模型“胡编乱造”的现象，确保回答既有据可依又具备时效性。其内置的智能体机制更进一步，使系统不仅能回答问题，还能自主规划步骤解决复杂问题。\n\n这款工具特别适合开发者、企业技术团队以及 AI 研究人员使用。无论是希望快速搭建私有知识库问答系统，还是致力于探索大模型在垂直领域落地的创新者，都能从中受益。RAGFlow 提供了可视化的工作流编排界面和灵活的 API 接口，既降低了非算法背景用户的上手门槛，也满足了专业开发者对系统深度定制的需求。作为基于 Apache 2.0 协议开源的项目，它正成为连接通用大模型与行业专有知识之间的重要桥梁。",77062,"2026-04-04T04:44:48",[15,14,13,30,29],{"id":58,"name":59,"github_repo":60,"description_zh":61,"stars":62,"difficulty_score":10,"last_commit_at":63,"category_tags":64,"status":17},519,"PaddleOCR","PaddlePaddle\u002FPaddleOCR","PaddleOCR 是一款基于百度飞桨框架开发的高性能开源光学字符识别工具包。它的核心能力是将图片、PDF 等文档中的文字提取出来，转换成计算机可读取的结构化数据，让机器真正“看懂”图文内容。\n\n面对海量纸质或电子文档，PaddleOCR 解决了人工录入效率低、数字化成本高的问题。尤其在人工智能领域，它扮演着连接图像与大型语言模型（LLM）的桥梁角色，能将视觉信息直接转化为文本输入，助力智能问答、文档分析等应用场景落地。\n\nPaddleOCR 适合开发者、算法研究人员以及有文档自动化需求的普通用户。其技术优势十分明显：不仅支持全球 100 多种语言的识别，还能在 Windows、Linux、macOS 等多个系统上运行，并灵活适配 CPU、GPU、NPU 等各类硬件。作为一个轻量级且社区活跃的开源项目，PaddleOCR 既能满足快速集成的需求，也能支撑前沿的视觉语言研究，是处理文字识别任务的理想选择。",75388,"2026-04-11T21:46:15",[30,14,13,29],{"id":66,"github_repo":67,"name":68,"description_en":69,"description_zh":70,"ai_summary_zh":71,"readme_en":72,"readme_zh":73,"quickstart_zh":74,"use_case_zh":75,"hero_image_url":76,"owner_login":77,"owner_name":78,"owner_avatar_url":79,"owner_bio":80,"owner_company":81,"owner_location":82,"owner_email":83,"owner_twitter":81,"owner_website":84,"owner_url":85,"languages":81,"stars":86,"forks":87,"last_commit_at":88,"license":81,"difficulty_score":89,"env_os":90,"env_gpu":91,"env_ram":91,"env_deps":92,"category_tags":95,"github_topics":81,"view_count":24,"oss_zip_url":81,"oss_zip_packed_at":81,"status":17,"created_at":96,"updated_at":97,"faqs":98,"releases":114},6742,"zhenyingfang\u002FAwesome-Temporal-Action-Detection-Temporal-Action-Proposal-Generation","Awesome-Temporal-Action-Detection-Temporal-Action-Proposal-Generation","Temporal Action Detection & Weakly Supervised Temporal Action Detection & Temporal Action Proposal Generation","Awesome-Temporal-Action-Detection-Temporal-Action-Proposal-Generation 是一个专注于视频理解领域的开源资源合集，旨在系统性地整理与时序动作检测、弱监督\u002F半监督检测以及时序动作提案生成相关的顶尖论文、代码实现及预训练模型。\n\n在长视频中精准定位动作发生的起止时间是一项极具挑战的任务，传统方法往往依赖昂贵且繁琐的逐帧标注数据。该资源库通过汇聚前沿研究成果，有效解决了如何在不同监督条件下（包括无标签或仅视频级标签）高效识别动作片段的技术难题。其内容覆盖从基础的提案生成到最新的开放词汇检测，并特别收录了如边界敏感预训练（BSP）、时序敏感预训练（TSP）等独特技术亮点，帮助模型更好地捕捉视频中的时间动态特征。\n\n这份资料非常适合计算机视觉领域的研究人员、算法工程师及高校学生使用。对于希望深入探索视频分析技术、复现经典算法或寻找最新研究灵感的开发者而言，Awesome-Temporal-Action-Detection-Temporal-Action-Proposal-Generation 提供了一条清晰的技术演进路径，是进入该","Awesome-Temporal-Action-Detection-Temporal-Action-Proposal-Generation 是一个专注于视频理解领域的开源资源合集，旨在系统性地整理与时序动作检测、弱监督\u002F半监督检测以及时序动作提案生成相关的顶尖论文、代码实现及预训练模型。\n\n在长视频中精准定位动作发生的起止时间是一项极具挑战的任务，传统方法往往依赖昂贵且繁琐的逐帧标注数据。该资源库通过汇聚前沿研究成果，有效解决了如何在不同监督条件下（包括无标签或仅视频级标签）高效识别动作片段的技术难题。其内容覆盖从基础的提案生成到最新的开放词汇检测，并特别收录了如边界敏感预训练（BSP）、时序敏感预训练（TSP）等独特技术亮点，帮助模型更好地捕捉视频中的时间动态特征。\n\n这份资料非常适合计算机视觉领域的研究人员、算法工程师及高校学生使用。对于希望深入探索视频分析技术、复现经典算法或寻找最新研究灵感的开发者而言，Awesome-Temporal-Action-Detection-Temporal-Action-Proposal-Generation 提供了一条清晰的技术演进路径，是进入该细分领域不可或缺的参考指南。","\u003C!--\n * @Author: fzy\n * @Date: 2020-03-09 21:53:10\n * @LastEditors: Zhenying\n * @LastEditTime: 2020-12-03 18:58:12\n * @Description: \n -->\n# Awesome-Temporal-Action-Detection-Temporal-Action-Proposal-Generation [![Awesome](https:\u002F\u002Fcdn.rawgit.com\u002Fsindresorhus\u002Fawesome\u002Fd7305f38d29fed78fa85652e3a63e154dd8e8829\u002Fmedia\u002Fbadge.svg)](https:\u002F\u002Fgithub.com\u002Fzhenyingfang\u002FAwesome-Temporal-Action-Detection-Temporal-Action-Proposal-Generation)\nTemporal Action Detection &amp; Weakly Supervised & Semi Supervised Temporal Action Detection &amp; Temporal Action Proposal Generation &amp; Open-Vocabulary Temporal Action Detection\n\n\n-----\n**Contents**\n\u003C!-- TOC -->\n- [Awesome-Temporal-Action-Detection-Temporal-Action-Proposal-Generation](#awesome-temporal-action-detection-temporal-action-proposal-generation)\n- [**about pretrained model**](#about-pretrained-model)\n- [**ActivityNet Challenge**](#activitynet-challenge)\n- [**Temporal Action Proposal Generation**](#papers-temporal-action-proposal-generation)\n  - [2023](#2023) - [2022](#2022) - [2021](#2021) - [2020](#2020) - [2019](#2019) - [2018](#2018) - [2017](#2017) - [before](#before)\n- [**Temporal Action Detection**](#papers-temporal-action-detection)\n  - [2026](#2026) - [2025](#2025) - [2024](#2024) - [2023](#2023-1) - [2022](#2022-1) - [2021](#2021-1) - [2020](#2020-1) - [2019](#2019-1) - [2018](#2018-1) - [2017](#2017-1) - [before](#before-1)\n- [**Weakly Supervised Temporal Action Detection**](#papers-weakly-supervised-temporal-action-detection)\n  - [2026](#2026-1) - [2025](#2025-1) - [2024](#2024-1) - [2023](#2023-2) - [2022](#2022-2) - [2021](#2021-2) - [2020](#2020-2) - [2019](#2019-2) - [2018](#2018-2) - [2017](#2017-2)\n- [**Online Action Detection**](#papers-online-action-detection)\n  - [2026](#2026-2) - [2025](#2025-2) - [2024](#2024-2) - [2023](#2023-3) - [2022](#2022-3) - [2021](#2021-3)\n- [**Semi Supervised Temporal Action Detection**](#semi-supervised)\n  - [2024](#2024-3) - [2023](#2023-4) - [2022](#2022-4) - [2021](#2021-4) - [2019](#2019-3)\n- [**Open-Vocabulary Temporal Action Detection**](#open-vocabulary-temporal-action-detection)\n  - [2026](#2026-3) - [2025](#2025-3) - [2024](#2024-4) - [2023](#2023-5) - [2022](#2022-5)\n\n\n-----\n# **about pretrained model**\n1. (BSP) [Boundary-sensitive Pre-training for Temporal Localization in Videos](https:\u002F\u002Farxiv.org\u002Fabs\u002F2011.10830) (ICCV 2021)\n2. (TSP) [TSP: Temporally-Sensitive Pretraining of Video Encoders for Localization Tasks](https:\u002F\u002Farxiv.org\u002Fabs\u002F2011.11479) (ICCVW 2021)\n3. (UP-TAL) [Unsupervised Pre-training for Temporal Action Localization Tasks](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.13609) (CVPR 2022) [code](https:\u002F\u002Fgithub.com\u002Fzhang-can\u002FUP-TAL)\n4. [Contrastive Language-Action Pre-training for Temporal Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.12293) (arxiv 2022)\n5. [Low-Fidelity End-to-End Video Encoder Pre-training for Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.15233) (NeurIPS 2021)\n\n# **ActivityNet Challenge and talks**\n1. (2021) [AcitvityNet 2021](http:\u002F\u002Factivity-net.org\u002Fchallenges\u002F2021\u002Fchallenge.html)\n2. (2021) [Transformer在时序行为检测中的应用 & 基于自监督学习的半监督时序行为检测](https:\u002F\u002Fwww.techbeat.net\u002Ftalk-info?id=545) (DAMO Academy, Alibaba Group)\n\n# **Papers: Temporal Action Proposal Generation**\n\n## 2023\n1. (MIFNet) [MIFNet: Multiple Instances Focused Temporal Action Proposal Generation](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fabs\u002Fpii\u002FS0925231223000553) (Neurocomputing 2023)\n2. (SMBG) [Faster Learning of Temporal Action Proposal via Sparse Multilevel Boundary Generator](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.03166) (arxiv 2023) [code](https:\u002F\u002Fgithub.com\u002Fzhouyang-001\u002FSMBG-for-temporal-action-proposal)\n3. (MCBD) [Multi-Level Content-Aware Boundary Detection for Temporal Action Proposal Generation](Tip 2023) [code](https:\u002F\u002Fmic.tongji.edu.cn\u002Fff\u002F32\u002Fc9778a327474\u002Fpage.htm)\n\n## 2022\n1. (BCNet) [Temporal Action Proposal Generation with Background Constraint](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.07984) (AAAI 2022)\n2. (PRSA-Net) [Pyramid Region-based Slot Attention Network for Temporal Action Proposal Generation](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.10095) (BMVC 2022) [code](https:\u002F\u002Fgithub.com\u002Fhandhand123\u002FPRSA-Net)\n3. (TDN) [Modeling long-term video semantic distribution for temporal action proposal generation](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fabs\u002Fpii\u002FS0925231221017616) (Neurocomputing 2022)\n4. (AOE-Net) [AOE-Net: Entities Interactions Modeling with Adaptive Attention Mechanism for Temporal Action Proposals Generation](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.02578) (IJCV 2022)\n\n## 2021\n1. (BSN++) [BSN++: Complementary Boundary Regressor with Scale-Balanced RelationModeling for Temporal Action Proposal Generation](https:\u002F\u002Farxiv.org\u002Fabs\u002F2009.07641) (AAAI 2021) [Author's Zhihu](https:\u002F\u002Fzhuanlan.zhihu.com\u002Fp\u002F344065976)\n2. (RTD-Net) [Relaxed Transformer Decoders for Direct Action Proposal Generation](https:\u002F\u002Farxiv.org\u002Fabs\u002F2102.01894) (ICCV 2021) [code](https:\u002F\u002Fgithub.com\u002FMCG-NJU\u002FRTD-Action) [Zhihu](https:\u002F\u002Fzhuanlan.zhihu.com\u002Fp\u002F363133304)\n3. (TCANet) [Temporal Context Aggregation Network for Temporal Action Proposal Refinement](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.13141) (CVPR 2021) [Zhihu](https:\u002F\u002Fzhuanlan.zhihu.com\u002Fp\u002F358754602)\n4. [Augmented Transformer with Adaptive Graph for Temporal Action Proposal Generation](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.16024) (arxiv 2021)\n5. (TAPG) [Temporal Action Proposal Generation with Transformers](https:\u002F\u002Farxiv.org\u002Fabs\u002F2105.12043) (arxiv 2021)\n6. (AEN) [Agent-Environment Network for Temporal Action Proposal Generation](https:\u002F\u002Farxiv.org\u002Fabs\u002F2107.08323) (ICASSP 2021)\n7. (AEI) [AEI: Actors-Environment Interaction with Adaptive Attention for Temporal Action Proposals Generation](https:\u002F\u002Farxiv.org\u002Fabs\u002F2110.11474) (BMVC 2021) [code](https:\u002F\u002Fgithub.com\u002Fvhvkhoa\u002FTAPG-AgentEnvInteration)\n\n## 2020\n\n1. **VALSE talk by Tianwei Lin** (2020.03.18) [link](https:\u002F\u002Fpan.baidu.com\u002Fs\u002F18uPJX3l69qJHaYOdeJ0IQw?errmsg=Auth+Login+Sucess&errno=0&ssnerror=0&) (7y8g)\n2. (RapNet) **Accurate Temporal Action Proposal Generation with Relation-Aware Pyramid Network** (AAAI 2020) [pre-paper 2019 ActivityNet task-1 2nd](https:\u002F\u002Farxiv.org\u002Fabs\u002F1908.03448)\n3. (DBG) **Fast Learning of Temporal Action Proposal via Dense Boundary Generator** (AAAI 2020) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1911.04127) [code.TensorFlow](https:\u002F\u002Fgithub.com\u002FTencentYoutuResearch\u002FActionDetection-DBG)\n4. (BC-GNN) **Boundary Content Graph Neural Network for Temporal Action Proposal Generation** (ECCV 2020) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2008.01432v1)\n5. [Bottom-Up Temporal Action Localization with Mutual Regularization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2002.07358) (ECCV 2020) [code.TensorFlow](https:\u002F\u002Fgithub.com\u002FPeisenZhao\u002FBottom-Up-TAL-with-MR)\n6. (TSI) [TSI: Temporal Scale Invariant Network for Action Proposal Generation](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FACCV2020\u002Fhtml\u002FLiu_TSI_Temporal_Scale_Invariant_Network_for_Action_Proposal_Generation_ACCV_2020_paper.html) (ACCV 2020)\n\n## 2019\n\n1. (SRG) **SRG: Snippet Relatedness-based Temporal Action Proposal Generator** (IEEE Trans 2019) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1911.11306)\n2. (DPP) **Deep Point-wise Prediction for Action Temporal Proposal** (ICONIP 2019) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1909.07725) [code.PyTorch](https:\u002F\u002Fgithub.com\u002Fliluxuan1997\u002FDPP)\n3. (BMN) **BMN: Boundary-Matching Network for Temporal Action Proposal Generation** (ICCV 2019) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1907.09702) [code.PaddlePaddle](https:\u002F\u002Fgithub.com\u002FPaddlePaddle\u002Fmodels\u002Ftree\u002Fdevelop\u002FPaddleCV\u002Fvideo) [code.PyTorch_unofficial](https:\u002F\u002Fgithub.com\u002FJJBOY\u002FBMN-Boundary-Matching-Network)\n4. (MGG) **Multi-granularity Generator for Temporal Action Proposal** (CVPR 2019) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1811.11524)\n5. **Investigation on Combining 3D Convolution of Image Data and Optical Flow to Generate Temporal Action Proposals** (2019 CVPR Workshop) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1903.04176)\n6. (CMSN) **CMSN: Continuous Multi-stage Network and Variable Margin Cosine Loss for Temporal Action Proposal Generation** (arxiv 2019) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1911.06080)\n7. **A high performance computing method for accelerating temporal action proposal generation** (arxiv 2019) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1906.06496)\n8. **Multi-Granularity Fusion Network for Proposal and Activity Localization: Submission to ActivityNet Challenge 2019 Task 1 and Task 2** (ActvityNet challenge 2019) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1907.12223)\n9. [Joint Learning of Local and Global Context for Temporal Action Proposal Generation](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F8941024) (TCSVT 2019)\n\n## 2018\n\n1. (CTAP) **CTAP: Complementary Temporal Action Proposal Generation** (ECCV 2018) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1807.04821) [code.TensorFlow](https:\u002F\u002Fgithub.com\u002Fjiyanggao\u002FCTAP)\n2. (BSN) **BSN: Boundary Sensitive Network for Temporal Action Proposal Generation** (ECCV 2018) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1806.02964) [code.TensorFlow](https:\u002F\u002Fgithub.com\u002Fwzmsltw\u002FBSN-boundary-sensitive-network) [code.PyTorch](https:\u002F\u002Fgithub.com\u002Fwzmsltw\u002FBSN-boundary-sensitive-network.pytorch)\n3. (SAP) **SAP: Self-Adaptive Proposal Model for Temporal Action Detection based on Reinforcement Learning** (AAAI 2018) [paper](https:\u002F\u002Fgithub.com\u002Fhjjpku\u002FAction_Detection_DQN\u002Fblob\u002Fmaster\u002Fcamera%20ready.pdf) [code.Torch](https:\u002F\u002Fgithub.com\u002Fhjjpku\u002FAction_Detection_DQN)\n\n## 2017\n\n1. (TURN TAP) **TURN TAP: Temporal Unit Regression Network for Temporal Action Proposals** (ICCV 2017) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1703.06189) [code.TensorFlow](https:\u002F\u002Fgithub.com\u002Fjiyanggao\u002FTURN-TAP)\n2. (SST) **SST: Single-Stream Temporal Action Proposals** (CVPR 2017) [paper](http:\u002F\u002Fvision.stanford.edu\u002Fpdf\u002Fbuch2017cvpr.pdf) [code.theano](https:\u002F\u002Fgithub.com\u002Fshyamal-b\u002Fsst\u002F) [code.TensorFlow](https:\u002F\u002Fgithub.com\u002FJaywongWang\u002FSST-Tensorflow)\n3. **YoTube: Searching Action Proposal via Recurrent and Static Regression Networks** (IEEE Trans 2017) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1706.08218)\n4. **A Pursuit of Temporal Accuracy in General Activity Detection** (arxiv 2017) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1703.02716) [code.PyTorch](https:\u002F\u002Fgithub.com\u002Fyjxiong\u002Faction-detection)\n\n## before\n\n1. (DAPs) **DAPs: Deep Action Proposals for Action Understanding** (ECCV 2016) [paper](https:\u002F\u002Fdrive.google.com\u002Ffile\u002Fd\u002F0B0ZXjo_p8lHBcjh1WDlmYVN3R2M\u002Fview) [code](https:\u002F\u002Fgithub.com\u002Fescorciav\u002Fdeep-action-proposals)\n\n----\n# **Papers: Temporal Action Detection**\n\n## 2026\n1. (ActionVLM) [Towards Mitigating Modality Bias in Vision-Language Models for Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2601.21078) (arXiv 2026)\n2. [Light but Sharp: SlimSTAD for Real-Time Action Detection from Sensor Data](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F36975) (AAAI 2026)\n3. [Scene-Aware Spatiotemporal Generalization: Towards Robust Temporal Action Detection Across Domains](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F37392) (AAAI 2026)\n\n## 2025\n\n1. (MS-Temba) [MS-Temba : Multi-Scale Temporal Mamba for Efficient Temporal Action Detection](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.06138) (arXiv 2025) [code](https:\u002F\u002Fgithub.com\u002Fthearkaprava\u002FMS-Temba)\n2. (zero-shot) [Training-Free Zero-Shot Temporal Action Detection with Vision-Language Models](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.13795) (arXiv 2025)\n3. (domain adaptation) [Dynamic Switching Teacher: How to Generalize Temporal Action Detection Models](https:\u002F\u002Fopenreview.net\u002Fforum?id=o8SPZJaJyj) (arXiv 2025)\n4. (LoSA) [LoSA: Long-Short-range Adapter for Scaling End-to-End Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.01282) (WACV 2025)\n5. (TimeLoc) [TimeLoc: A Unified End-to-End Framework for Precise Timestamp Localization in Long Videos](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.06526) (arXiv 2025) [code](https:\u002F\u002Fgithub.com\u002Fsming256\u002FTimeLoc)\n6. [Temporal Action Detection Model Compression by Progressive Block Drop](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.16916) (CVPR 2025)\n7. (DiGIT) [DiGIT: Multi-Dilated Gated Encoder and Central-Adjacent Region Integrated Decoder for Temporal Action Detection Transformer](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.05711) (CVPR 2025) [code](https:\u002F\u002Fgithub.com\u002FDotori-HJ\u002FDiGIT)\n8. (FDDet) [FDDet: Frequency-Decoupling for Boundary Refinement in Temporal Action Detection](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.00647) (arXiv 2025)\n9. [Chain-of-Thought Textual Reasoning for Few-shot Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.13460) (arXiv 2025)\n10. [ProTAL: A Drag-and-Link Video Programming Framework for Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.17555) (CHI 2025)\n11. [CLIP-AE: CLIP-assisted Cross-view Audio-Visual Enhancement for Unsupervised Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.23524) (arXiv 2025)\n12. [LLM-powered Query Expansion for Enhancing Boundary Prediction in Language-driven Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.24282)\n13. (BRTAL) [BRTAL: Boundary Refinement Temporal Action Localization via Offset-Driven Diffusion Models](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F10912675) (TCSVT 2025)\n14. (EDMP) [Energy vs. Noise: Towards Robust Temporal Action Localization in Open-World](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F32659) (AAAI 2025) [code](https:\u002F\u002Fgithub.com\u002FXD-mu\u002FEDMP)\n15. (AdaTAD++) [Scaling Action Detection: AdaTAD++ with Transformer-Enhanced Temporal-Spatial Adaptation](https:\u002F\u002Ficcv.thecvf.com\u002Fvirtual\u002F2025\u002Fposter\u002F1355) (ICCV 2025)\n16. (WiFiTAD) [WiFi Temporal Activity Detection via Dual Pyramid Network](https:\u002F\u002Fgithub.com\u002FAVC2-UESTC\u002FWiFiTAD\u002Fblob\u002Fmain\u002FmainPaper.pdf) (AAAI 2025) [code](https:\u002F\u002Fgithub.com\u002FAVC2-UESTC\u002FWiFiTAD)\n17. (RDFA-S6) [Enhancing Temporal Action Localization: Advanced S6 Modeling with Recurrent Mechanism](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.13078) (arXiv 2025) [code](https:\u002F\u002Fgithub.com\u002Flsy0882\u002FRDFA-S6)\n18. (MambaTAD) [MambaTAD: When State-Space Models Meet Long-Range Temporal Action Detection](https:\u002F\u002Farxiv.org\u002Fabs\u002F2511.17929) (TMM 2025)\n19. (FreETAD) (multi label) [Ex Pede Herculem, Predicting Global Actionness Curve from Local Clips](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3746027.3754712) (ACM MM 2025)\n20. (TBT-Former) [TBT-Former: Learning Temporal Boundary Distributions for Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2512.01298) (arXiv 2025)\n21. [Multi-task Learning with Extended Temporal Shift Module for Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2512.11189) (ICCV 2025 BinEgo-360 Challenge)\n\n## 2024\n\n1. (DenoiseLoc) [Boundary Denoising for Video Activity Localization](https:\u002F\u002Fopenreview.net\u002Fforum?id=bLpUtGyf9g) (ICLR 2024) [code](https:\u002F\u002Fgithub.com\u002Ffrostinassiky\u002Fdenoiseloc)\n2. (LITA) [LITA: Language Instructed Temporal-Localization Assistant](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.19046) (arXiv 2024) [code](https:\u002F\u002Fgithub.com\u002FNVlabs\u002FLITA)\n3. (PLOT-TAL) (few-shot) [PLOT-TAL -- Prompt Learning with Optimal Transport for Few-Shot Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.18915) (Arxiv 2024)\n4. [Benchmarking the Robustness of Temporal Action Detection Models Against Temporal Corruptions](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.20254) (CVPR 2024) [code](https:\u002F\u002Fgithub.com\u002FAlvin-Zeng\u002Ftemporal-robustness-benchmark)\n5. (zero-shot) (T3AL) [Test-Time Zero-Shot Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.05426) (CVPR 2024) [code](https:\u002F\u002Fgithub.com\u002Fbenedettaliberatori\u002FT3AL)\n6. (UniMD) [UniMD: Towards Unifying Moment Retrieval and Temporal Action Detection](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.04933) (ECCV 2024) [code](https:\u002F\u002Fgithub.com\u002Fyingsen1\u002FUniMD)\n7. [Adapting Short-Term Transformers for Action Detection in Untrimmed Videos](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.01897) (CVPR 2024)\n8. (AdaTAD) [End-to-End Temporal Action Detection with 1B Parameters Across 1000 Frames](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.17241) (CVPR 2024) [code](https:\u002F\u002Fgithub.com\u002Fsming256\u002FOpenTAD\u002Ftree\u002Fmain\u002Fconfigs\u002Fadatad)\n9. [Video Mamba Suite: State Space Model as a Versatile Alternative for Video Understanding](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.09626) (ECCV 2024) [code](https:\u002F\u002Fgithub.com\u002FOpenGVLab\u002Fvideo-mamba-suite)\n10. (TE-TAD) [TE-TAD: Towards Full End-to-End Temporal Action Detection via Time-Aligned Coordinate Expression](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.02405) (CVPR 2024) [code](https:\u002F\u002Fgithub.com\u002FDotori-HJ\u002FTE-TAD)\n11. (ADI-Diff) [Action Detection via an Image Diffusion Process](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.01051) (CVPR 2024)\n12. (DualDETR) [Dual DETRs for Multi-Label Temporal Action Detection](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.00653) (CVPR 2024) [code](https:\u002F\u002Fgithub.com\u002FMCG-NJU\u002FDualDETR)\n13. [An Effective-Efficient Approach for Dense Multi-Label Action Detection](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.06187) (arXiv 2024)\n14. (Spatio-Temporal) [End-to-End Spatio-Temporal Action Localisation with Video Transformers](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FGritsenko_End-to-End_Spatio-Temporal_Action_Localisation_with_Video_Transformers_CVPR_2024_paper.html) (CVPR 2024)\n15. (DyFADet) [DyFADet: Dynamic Feature Aggregation for Temporal Action Detection](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.03197) (ECCV 2024) [code](https:\u002F\u002Fgithub.com\u002Fyangle15\u002FDyFADet-pytorch)\n16. (causaltad) [Harnessing Temporal Causality for Advanced Temporal Action Detection](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.17792) (arxiv 2024) [code](https:\u002F\u002Fgithub.com\u002Fsming256\u002FOpenTAD\u002Fcausaltad)\n17. (LTP) [Long-Term Pre-training for Temporal Action Detection with Transformers](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.13152) (arxiv 2024)\n18. (Pred-DETR) [Prediction-Feedback DETR for Temporal Action Detection](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.16729) (arxiv 2024)\n19. [Introducing Gating and Context into Temporal Action Detection](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.04205) (ECCV W 2024)\n20. (ContextDet) [ContextDet: Temporal Action Detection with Adaptive Context Aggregation](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.15279) (arXiv 2024)\n21. (LMM: TimeMarker) [TimeMarker: A Versatile Video-LLM for Long and Short Video Understanding with Superior Temporal Localization Ability](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.18211) [code](https:\u002F\u002Fgithub.com\u002FTimeMarker-LLM\u002FTimeMarker\u002F)\n\n## 2023\n1. (AMNet) [Action-aware Masking Network with Group-based Attention for Temporal Action Localization](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FWACV2023\u002Fpapers\u002FKang_Action-Aware_Masking_Network_With_Group-Based_Attention_for_Temporal_Action_Localization_WACV_2023_paper.pdf) (WACV 2023)\n2. (ContextLoc++) [ContextLoc++: A Unified Context Model for Temporal Action Localization](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10018461) (TPAMI 2023)\n3. [Temporal action detection with dynamic weights based on curriculum learning](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fabs\u002Fpii\u002FS0925231222015557) (Neurocomputing 2023)\n4. (GAP) [Post-Processing Temporal Action Detection](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.14924) (CVPR 2023) [code](https:\u002F\u002Fgithub.com\u002Fsauradip\u002FGAP)\n5. (TriDet) [TriDet: Temporal Action Detection with Relative Boundary Modeling](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.07347) (CVPR 2023) [code](https:\u002F\u002Fgithub.com\u002Fsssste\u002FTriDet)\n   - [Temporal Action Localization with Enhanced Instant Discriminability](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.05590) (extend version)\n6. (TemporalMaxer) [TemporalMaxer: Maximize Temporal Context with only Max Pooling for Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2303.09055.pdf) (ArXiv 2023) [code](https:\u002F\u002Fgithub.com\u002Ftuantng\u002Ftemporalmaxer)\n7. (DiffTAD) [DiffTAD: Temporal Action Detection with Proposal Denoising Diffusion](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.14863) (ICCV 2023) [code](https:\u002F\u002Fgithub.com\u002Fsauradip\u002FDiffusionTAD)\n8. [Decomposed Cross-modal Distillation for RGB-based Temporal Action Detection](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.17285) (CVPR 2023)\n9. [Boundary-Denoising for Video Activity Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.02934) (Arxiv 2023)\n10. (ASL) [Action Sensitivity Learning for Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.15701) (ICCV 2023)\n11. (MMNet) [A Multi-Modal Transformer Network for Action Detection](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0031320323004119) (Pattern Recognition 2023)\n12. [Truncated attention-aware proposal networks with multi-scale dilation for temporal action detection](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0031320323003825) (Pattern Recognition 2023)\n13. (MSST) [A Multitemporal Scale and Spatial–Temporal Transformer Network for Temporal Action Localization](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10120600) (IEEE Transactions on Human-Machine Systems 2023)\n14. [Exploring Action Centers for Temporal Action Localization](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10058582) (TMM 2023)\n15. (ETAD) [ETAD: Training Action Detection End to End on a Laptop](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023W\u002FECV\u002Fhtml\u002FLiu_ETAD_Training_Action_Detection_End_to_End_on_a_Laptop_CVPRW_2023_paper.html) (CVPRW 2023) [code](https:\u002F\u002Fgithub.com\u002Fsming256\u002FETAD)\n16. (BasicTAD) [BasicTAD: an Astounding RGB-Only Baseline for Temporal Action Detection](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.02717) (CVIU 2023) [code](https:\u002F\u002Fgithub.com\u002FMCG-NJU\u002FBasicTAD)\n17. (Re2TAL) [Re2TAL: Rewiring Pretrained Video Backbones for Reversible Temporal Action Localization](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fpapers\u002FZhao_Re2TAL_Rewiring_Pretrained_Video_Backbones_for_Reversible_Temporal_Action_Localization_CVPR_2023_paper.pdf) (CVPR 2023) [code](https:\u002F\u002Fgithub.com\u002Fcoolbay\u002FRe2TAL)\n18. (SoLa) [Soft-Landing Strategy for Alleviating the Task Discrepancy Problem in Temporal Action Localization Tasks](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fpapers\u002FKang_Soft-Landing_Strategy_for_Alleviating_the_Task_Discrepancy_Problem_in_Temporal_CVPR_2023_paper.pdf) (CVPR 2023)\n19. (APN) [Progression-Guided Temporal Action Detection in Videos](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.09268) (Arxiv 2023) [code](https:\u002F\u002Fgithub.com\u002Fmakecent\u002FAPN)\n20. (Self-DETR) [Self-Feedback DETR for Temporal Action Detection](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fhtml\u002FKim_Self-Feedback_DETR_for_Temporal_Action_Detection_ICCV_2023_paper.html) (ICCV 2023)\n21. (UnLoc) [UnLoc: A Unified Framework for Video Localization Tasks](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.11062) (ICCV 2023) [code](https:\u002F\u002Fgithub.com\u002Fgoogle-research\u002Fscenic)\n22. [Benchmarking Data Efficiency and Computational Efficiency of Temporal Action Localization Models](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.13082) (ICCV 2023 Workshop)\n23. (BAPG) [Boundary-Aware Proposal Generation Method for Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.13810) (Arxiv 2023)\n24. (MENet) [Movement Enhancement toward Multi-Scale Video Feature Representation for Temporal Action Detection](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fhtml\u002FZhao_Movement_Enhancement_toward_Multi-Scale_Video_Feature_Representation_for_Temporal_Action_ICCV_2023_paper.html) (ICCV 2023)\n25. (MRAV-FF) [Multi-Resolution Audio-Visual Feature Fusion for Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.03456) (Arxiv 2023)\n26. (BDRC-Net) [Boundary Discretization and Reliable Classification Network for Temporal Action Detection](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.06403) (Arxiv 2023) [code](https:\u002F\u002Fgithub.com\u002Fzhenyingfang\u002FBDRC-Net)\n27. (STAN) [STAN: Spatial-Temporal Awareness Network for Temporal Action Detection](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3606038.3616169) (ACM MM W 2023)\n28. (RefineTAD) [RefineTAD: Learning Proposal-free Refinement for Temporal Action Detection](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3581783.3611872) (ACM MM 2023)\n29. [SADA: Semantic adversarial unsupervised domain adaptation for Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.13377) (arXiv 2023) [code](https:\u002F\u002Fgithub.com\u002Fdavidpujol\u002FSADA)\n\n## 2022\n1. (DCAN) [DCAN: Improving Temporal Action Detection via Dual Context Aggregation](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.03612) (AAAI 2022)\n2. (TVNet) [TVNet: Temporal Voting Network for Action Localization](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2201.00434.pdf) (arxiv 2022) [code](https:\u002F\u002Fgithub.com\u002Fhanielwang\u002FTVNet)\n3. (ActionFormer) [ActionFormer: Localizing Moments of Actions with Transformers](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.07925) (ECCV 2022) [code](https:\u002F\u002Fgithub.com\u002Fhappyharrycn\u002Factionformer_release)\n4. (SegTAD）[SegTAD: Precise Temporal Action Detection via Semantic Segmentation](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.01542) (arxiv 2022)\n5. (OpenTAL) [OpenTAL: Towards Open Set Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2203.05114.pdf) (CVPR 2022) [code](https:\u002F\u002Fwww.rit.edu\u002Factionlab\u002Fopental)\n6. (TALLFormer) [TALLFormer: Temporal Action Localization with Long-memory Transformer](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.01680) (CVPR 2022)\n7. [An Empirical Study of End-to-End Temporal Action Detection](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.02932) (CVPR 2022) [code](https:\u002F\u002Fgithub.com\u002Fxlliu7\u002FE2E-TAD)\n8. (BREM) [Estimation of Reliable Proposal Quality for Temporal Action Detection](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.11695) (ACM MM 2022)\n9. [Structured Attention Composition for Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.09956) (Tip 2022) [code](https:\u002F\u002Fgithub.com\u002FVividLe\u002FStructured-Attention-Composition)\n10. (RCL) [RCL: Recurrent Continuous Localization for Temporal Action Detection](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FWang_RCL_Recurrent_Continuous_Localization_for_Temporal_Action_Detection_CVPR_2022_paper.pdf) (CVPR 2022)\n11. (RefactorNet) [Learning to Refactor Action and Co-occurrence Features for Temporal Action Localization](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FXia_Learning_To_Refactor_Action_and_Co-Occurrence_Features_for_Temporal_Action_CVPR_2022_paper.pdf) (CVPR 2022)\n12. (MS-TCT) [MS-TCT: Multi-Scale Temporal ConvTransformer for Action Detection](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FDai_MS-TCT_Multi-Scale_Temporal_ConvTransformer_for_Action_Detection_CVPR_2022_paper.pdf) (CVPR 2022) [code](https:\u002F\u002Fgithub.com\u002Fdairui01\u002FMS-TCT)\n13. (OATD) [One-stage Action Detection Transformer](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.10080) (EPICKITCHENS-100 2022 V. 26.35 N. 25.83)\n14. [Context-aware Proposal Network for Temporal Action Detection](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.09082) (CVPR-2022 ActivityNet Challenge winning solution)\n15. [Dual relation network for temporal action localization](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fabs\u002Fpii\u002FS0031320322002060) (Pattern Recognition 2022)\n16. [Learning Disentangled Classification and Localization Representations for Temporal Action Localization](https:\u002F\u002Fwww.aaai.org\u002FAAAI22Papers\u002FAAAI-926.ZhuZ.pdf) (AAAI 2022)\n17. (DDM) [Progressive Attention on Multi-Level Dense Difference Maps for Generic Event Boundary Detection](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FTang_Progressive_Attention_on_Multi-Level_Dense_Difference_Maps_for_Generic_Event_CVPR_2022_paper.pdf) (CVPR 2022) [code](https:\u002F\u002Fgithub.com\u002FMCG-NJU\u002FDDM)\n18. [Submission to Generic Event Boundary Detection Challenge@CVPR 2022: Local Context Modeling and Global Boundary Decoding Approach](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2206.15268.pdf) (CVPR 2022 Challenge)\n19. (HTNet) [HTNet: Anchor-free Temporal Action Localization with Hierarchical Transformers](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.09662) (arxiv 2022)\n20. (STPT) [An Efficient Spatio-Temporal Pyramid Transformer for Action Detection](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.10448) (ECCV 2022)\n21. (TAGS) [Proposal-Free Temporal Action Detection with Global Segmentation Mask Learning](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.06580) (ECCV 2022) [code](https:\u002F\u002Fgithub.com\u002Fsauradip\u002FTAGS)\n22. [Prompting Visual-Language Models for Efficient Video Understanding](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.04478) (ECCV 2022) [code](https:\u002F\u002Fgithub.com\u002Fju-chen\u002FEfficient-Prompt)\n23. (ReAct) [ReAct: Temporal Action Detection with Relational Queries](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.07097) (ECCV 2022) [code](https:\u002F\u002Fgithub.com\u002Fsssste\u002FReact)\n24. (TadTR) [End-to-end Temporal Action Detection with Transformer](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.10271) (TIP 2022) [code](https:\u002F\u002Fgithub.com\u002Fxlliu7\u002FTadTR)\n25. (TAL-MTS) [Temporal Action Localization with Multi-temporal Scales](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.07493) (arxiv 2022)\n26. (AdaPerFormer) [Adaptive Perception Transformer for Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.11908) (arxiv 2022) [code](https:\u002F\u002Fgithub.com\u002FSouperO\u002FAdaPerFormer)\n27. (PointTAD) [PointTAD: Multi-Label Temporal Action Detection with Learnable Query Points](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.11035) (NeurIPS 2022) [code](https:\u002F\u002Fgithub.com\u002FMCG-NJU\u002FPointTAD) (multi action detection, eg: multiTHUMOS, charades)\n28. (SoLa) [Soft-Landing Strategy for Alleviating the Task Discrepancy Problem in Temporal Action Localization Tasks](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.06023) (arxiv 2022)\n29. (Re2TAL) [Re2TAL: Rewiring Pretrained Video Backbones for Reversible Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2211.14053.pdf) (arxiv 2022)\n30. (MUPPET) [Multi-Modal Few-Shot Temporal Action Detection](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.14905) (arxiv 2022) [code](https:\u002F\u002Fgithub.com\u002Fsauradip\u002FMUPPET)\n31. [Deep Learning-Based Action Detection in Untrimmed Videos: A Survey](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9839464) (TPAMI 2022)\n\n## 2021\n1. (activity graph transformer) [Activity Graph Transformer for Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2101.08540) (arxiv 2021) [project](https:\u002F\u002Fwww.sfu.ca\u002F~mnawhal\u002Fprojects\u002Fagt.html) [code](https:\u002F\u002Fgithub.com\u002FNmegha2601\u002Factivitygraph_transformer)\n2. [Coarse-Fine Networks for Temporal Activity Detection in Videos](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.01302) (CVPR 2021) [code](https:\u002F\u002Fgithub.com\u002Fkkahatapitiya\u002FCoarse-Fine-Networks)\n3. (MLAD) [Modeling Multi-Label Action Dependencies for Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.03027) (CVPR 2021)\n4. (PcmNet) [PcmNet: Position-Sensitive Context Modeling Network for Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.05270) (Tip 2021)\n5. (AFSD) [Learning Salient Boundary Feature for Anchor-free Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.13137) (CVPR 2021) [code](https:\u002F\u002Fgithub.com\u002FTencentYoutuResearch\u002FActionDetection-AFSD?utm_source=catalyzex.com)\n6. [Low-Fidelity End-to-End Video Encoder Pre-training for Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.15233) (arxiv 2021)\n7. [Read and Attend: Temporal Localisation in Sign Language Videos](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.16481) (CVPR 2021) (Sign Language Videos)\n8. [Low Pass Filter for Anti-aliasing in Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.11403) (arxiv 2021)\n9. [FineAction: A Fined Video Dataset for Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2105.11107) (One track of DeeperAction Workshop@ICCV2021) [Homepage](https:\u002F\u002Fdeeperaction.github.io\u002Ffineaction\u002F)\n10. [Three Birds with One Stone: Multi-Task Temporal Action Detection via Recycling Temporal Annotations](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fhtml\u002FLi_Three_Birds_with_One_Stone_Multi-Task_Temporal_Action_Detection_via_CVPR_2021_paper.html) (CVPR 2021)\n11. [Proposal Relation Network for Temporal Action Detection](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.11812) (CVPRW 2021)\n12. [Exploring Stronger Feature for Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.13014) (CVPRW 2021)\n13. (SRF-Net) [SRF-Net: Selective Receptive Field Network for Anchor-Free Temporal Action Detection](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.15258) (ICASSP 2021)\n14. [RGB Stream Is Enough for Temporal Action Detection](https:\u002F\u002Farxiv.org\u002Fabs\u002F2107.04362) (arxiv 2021) [code](https:\u002F\u002Fgithub.com\u002FMedia-Smart\u002Fvedatad?utm_source=catalyzex.com)\n15. (AVFusion) [Hear Me Out: Fusional Approaches for Audio Augmented Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2106.14118v1.pdf) (arxiv 2021) [Code](https:\u002F\u002Fgithub.com\u002Fskelemoa\u002Ftal-hmo)\n16. [Transferable Knowledge-Based Multi-Granularity Aggregation Network for Temporal Action Localization: Submission to ActivityNet Challenge 2021](https:\u002F\u002Farxiv.org\u002Fabs\u002F2107.12618) (HACS challenge 2021)\n17. [Enriching Local and Global Contexts for Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2107.12960) (ICCV 2021)\n18. (CSA) [Class Semantics-based Attention for Action Detection](https:\u002F\u002Farxiv.org\u002Fabs\u002F2109.02613) (ICCV 2021)\n19. (SP-TAD) [Towards High-Quality Temporal Action Detection with Sparse Proposals](https:\u002F\u002Farxiv.org\u002Fabs\u002F2109.08847) (arxiv 2021) [Code](https:\u002F\u002Fgithub.com\u002Fwjn922\u002FSP-TAD)\n20. [Few-Shot Temporal Action Localization with Query Adaptive Transformer](https:\u002F\u002Farxiv.org\u002Fabs\u002F2110.10552) (BMVC 2021) [code](https:\u002F\u002Fgithub.com\u002Fsauradip\u002FfewshotQAT) (Few-Shot)\n21. [Graph Convolutional Module for Temporal Action Localization in Videos](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.00302) (TPAMI 2021)\n22. [MS-TCT: Multi-Scale Temporal ConvTransformer for Action Detection](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.03902) (arxiv 2021)\n23. (VSGN) [Video Self-Stitching Graph Network for Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2011.14598) (ICCV 2021) [code](https:\u002F\u002Fgithub.com\u002Fcoolbay\u002FVSGN)\n24. (MUSES) [Multi-shot Temporal Event Localization: a Benchmark](https:\u002F\u002Farxiv.org\u002Fabs\u002F2012.09434) (CVPR 2021) [project](https:\u002F\u002Fsongbai.site\u002Fmuses\u002F) [code](https:\u002F\u002Fgithub.com\u002Fxlliu7\u002FMUSES) [dataset](https:\u002F\u002Fsongbai.site\u002Fmuses\u002F)\n\n## 2020\n\n1. (G-TAD) **G-TAD: Sub-Graph Localization for Temporal Action Detection** (CVPR 2020) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1911.11462) [code.PyTorch](https:\u002F\u002Fgithub.com\u002Ffrostinassiky\u002Fgtad) [video](https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=BlPxnDcykUo)\n2. (AGCN-P-3DCNNs) **Graph Attention based Proposal 3D ConvNets for Action Detection** (AAAI 2020) [paper](https:\u002F\u002Fwww.aaai.org\u002FPapers\u002FAAAI\u002F2020GB\u002FAAAI-LiJ.1424.pdf)\n3. (PBRNet) **Progressive Boundary Refinement Network for Temporal Action Detection** (AAAI 2020) [paper](https:\u002F\u002Fwww.aaai.org\u002FPapers\u002FAAAI\u002F2020GB\u002FAAAI-LiuQ.4870.pdf)\n4. (TsaNet) **Scale Matters: Temporal Scale Aggregation Network for Precise Action Localization in Untrimmed Videos** (ICME 2020) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1908.00707)\n5. **Constraining Temporal Relationship for Action Localization** (arxiv 2020) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2002.07358)\n6. (CBR-Net) **CBR-Net: Cascade Boundary Refinement Network for Action Detection: Submission to ActivityNet Challenge 2020 (Task 1)** (ActivityNet Challenge 2020) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2006.07526v2)\n7. [Temporal Action Localization with Variance-Aware Networks](https:\u002F\u002Farxiv.org\u002Fabs\u002F2008.11254) (arxiv 2020)\n8. [Boundary Uncertainty in a Single-Stage Temporal Action Localization Network](https:\u002F\u002Farxiv.org\u002Fabs\u002F2008.11170) (arxiv 2020, Tech report)\n9. [Revisiting Anchor Mechanisms for Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2008.09837) (Tip 2020) [code.PyTorch](https:\u002F\u002Fgithub.com\u002FVividLe\u002FA2Net?utm_source=catalyzex.com)\n10. (C-TCN) [Deep Concept-wise Temporal Convolutional Networks for Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F1908.09442) (ACM MM 2020) [code.PaddlePaddle](https:\u002F\u002Fgithub.com\u002FPaddlePaddle\u002Fmodels\u002Ftree\u002Fdevelop\u002FPaddleCV\u002Fvideo)\n11. (MLTPN) [Multi-Level Temporal Pyramid Network for Action Detection](https:\u002F\u002Farxiv.org\u002Fabs\u002F2008.03270) (PRCV 2020)\n12. (SALAD) [SALAD: Self-Assessment Learning for Action Detection](https:\u002F\u002Farxiv.org\u002Fabs\u002F2011.06958) (arxiv 2020)\n\n## 2019\n\n1. (CMS-RC3D) **Contextual Multi-Scale Region Convolutional 3D Network for Activity Detection** (ICCVBIC 2019) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1801.09184)\n2. (TGM) **Temporal Gaussian Mixture Layer for Videos** (ICML 2019) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1803.06316) [code.PyTorch](https:\u002F\u002Fgithub.com\u002Fpiergiaj\u002Ftgm-icml19)\n3. (Decouple-SSAD) **Decoupling Localization and Classification in Single Shot Temporal Action Detection** (ICME 2019) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1904.07442) [code.TensorFlow](https:\u002F\u002Fgithub.com\u002FHYPJUDY\u002FDecouple-SSAD)\n4. **Exploring Feature Representation and Training strategies in Temporal Action Localization** (ICIP 2019) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1905.10608)\n5. (PGCN) **Graph Convolutional Networks for Temporal Action Localization** (ICCV 2019) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1909.03252) [code.PyTorch](https:\u002F\u002Fgithub.com\u002FAlvin-Zeng\u002FPGCN)\n6. (S-2D-TAN) **Learning Sparse 2D Temporal Adjacent Networks for Temporal Action Localization** (ICCV 2019) (*winner solution for the HACS Temporal Action Localization Challenge at ICCV 2019*) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1912.03612) \n   - (2D-TAN) **Learning 2D Temporal Adjacent Networks for Moment Localization with Natural Language** (AAAI 2020) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1912.03590) [code.PyTorch](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002F2D-TAN)\n7. (LCDC) **Learning Motion in Feature Space: Locally-Consistent Deformable Convolution Networks for Fine-Grained Action Detection** (ICCV 2019) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1811.08815) [slide](https:\u002F\u002Fknmac.github.io\u002Fprojects\u002Flcdc\u002FLCDC_slides_extended.pdf) [code.TensorFlow](https:\u002F\u002Fgithub.com\u002Fknmac\u002FLCDC_release)\n8. (BLP) **BLP -- Boundary Likelihood Pinpointing Networks for Accurate Temporal Action Localization** (ICASSP 2019) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1811.02189)\n9. (GTAN) **Gaussian Temporal Awareness Networks for Action Localization** (CVPR 2019) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1909.03877)\n10. **Temporal Action Localization using Long Short-Term Dependency** (arxiv 2019) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1911.01060)\n11. **Relation Attention for Temporal Action Localization** (IEEE Trans TMM 2019) [paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F8933113\u002Fversions)\n12. (AFO-TAD) **AFO-TAD: Anchor-free One-Stage Detector for Temporal Action Detection** (arxiv 2019) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1910.08250)\n13. (DBS) **Video Imprint Segmentation for Temporal Action Detection in Untrimmed Videos** (AAAI 2019) [paper](https:\u002F\u002Fwww.aaai.org\u002Fojs\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F4846)\n\n## 2018\n\n1. **Diagnosing Error in Temporal Action Detectors** (ECCV 2018) [paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fpapers\u002FHumam_Alwassel_Diagnosing_Error_in_ECCV_2018_paper.pdf)\n2. (ETP) **Precise Temporal Action Localization by Evolving Temporal Proposals** (ICMR 2018) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1804.04803)\n3. (Action Search) **Action Search: Spotting Actions in Videos and Its Application to Temporal Action Localization** (ECCV 2018) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1706.04269) [code.TensorFlow](https:\u002F\u002Fgithub.com\u002FHumamAlwassel\u002Faction-search)\n4. (TAL-Net) **Rethinking the Faster R-CNN Architecture for Temporal Action Localization** (CVPR 2018) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1804.07667)\n5. **One-shot Action Localization by Learning Sequence Matching Network** (CVPR 2018) [paper](http:\u002F\u002Fwww.porikli.com\u002Fmysite\u002Fpdfs\u002Fporikli%202018%20-%20One-shot%20action%20localization%20by%20learning%20sequence%20matching%20network.pdf)\n6. **Temporal Action Detection by Joint Identification-Verification** (arxiv 2018) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1810.08375)\n7. (TPC) **Exploring Temporal Preservation Networks for Precise Temporal Action Localization** (AAAI 2018) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1708.03280)\n8. (SAP) **A Self-Adaptive Proposal Model for Temporal Action Detection based on Reinforcement Learning** (AAAI 2018) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1706.07251) [code.Torch](https:\u002F\u002Fgithub.com\u002Fhjjpku\u002FAction_Detection_DQN)\n\n## 2017\n\n1. (TCN) **Temporal Context Network for Activity Localization in Videos** (ICCV 2017) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1708.02349) [code.caffe](https:\u002F\u002Fgithub.com\u002Fvdavid70619\u002FTCN)\n2. (SSN) **Temporal Action Detection with Structured Segment Networks** (ICCV 2017) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1704.06228) [code.PyTorch](https:\u002F\u002Fgithub.com\u002Fyjxiong\u002Faction-detection)\n3. (R-C3D) **R-C3D: Region Convolutional 3D Network for Temporal Activity Detection** (ICCV 2017) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1703.07814) [code.caffe](https:\u002F\u002Fgithub.com\u002FVisionLearningGroup\u002FR-C3D) [code.PyTorch](https:\u002F\u002Fgithub.com\u002Fsunnyxiaohu\u002FR-C3D.pytorch)\n4. (TCNs) **Temporal Convolutional Networks for Action Segmentation and Detection** (CVPR 2017) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1611.05267) [code.TensorFlow](https:\u002F\u002Fgithub.com\u002Fcolincsl\u002FTemporalConvolutionalNetworks)\n5. (SMS) **Temporal Action Localization by Structured Maximal Sums** (CVPR 2017) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1704.04671) [code](https:\u002F\u002Fgithub.com\u002Fshallowyuan\u002Fstruct-max-sum)\n6. (SCC) **SCC: Semantic Context Cascade for Efficient Action Detection** (CVPR 2017) [paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fpapers\u002FHeilbron_SCC_Semantic_Context_CVPR_2017_paper.pdf)\n7. (CDC) **CDC: Convolutional-De-Convolutional Networks for Precise Temporal Action Localization in Untrimmed Videos** (CVPR 2017) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1703.01515) [code](https:\u002F\u002Fbitbucket.org\u002Fcolumbiadvmm\u002Fcdc\u002Fsrc\u002Fmaster\u002F) [project](http:\u002F\u002Fwww.ee.columbia.edu\u002Fln\u002Fdvmm\u002FresearchProjects\u002Fcdc\u002Fcdc.html)\n8. (SS-TAD) **End-to-End, Single-Stream Temporal ActionDetection in Untrimmed Videos** (BMVC 2017) [paper](http:\u002F\u002Fvision.stanford.edu\u002Fpdf\u002Fbuch2017bmvc.pdf) [code.PyTorch](https:\u002F\u002Fgithub.com\u002Fshyamal-b\u002Fss-tad\u002F)\n9. (CBR) **Cascaded Boundary Regression for Temporal Action Detection** (BMVC 2017) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1705.01180) [code.TensorFlow](https:\u002F\u002Fgithub.com\u002Fjiyanggao\u002FCBR)\n10. (SSAD) **Single Shot Temporal Action Detection** (ACM MM 2017) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1710.06236)\n\n## before\n\n1. (PSDF) **Temporal Action Localization with Pyramid of Score Distribution Features** (CVPR 2016) [paper](https:\u002F\u002Fwww.zpascal.net\u002Fcvpr2016\u002FYuan_Temporal_Action_Localization_CVPR_2016_paper.pdf)\n2. **Temporal Action Detection using a Statistical Language Model** (CVPR 2016) [paper](https:\u002F\u002Fwww.zpascal.net\u002Fcvpr2016\u002FRichard_Temporal_Action_Detection_CVPR_2016_paper.pdf) [code](https:\u002F\u002Fgithub.com\u002Falexanderrichard\u002Fsquirrel)\n3. (S-CNN) **Temporal Action Localization in Untrimmed Videos via Multi-stage CNNs** (CVPR 2016) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1601.02129) [code](https:\u002F\u002Fgithub.com\u002Fzhengshou\u002Fscnn\u002F) [project](http:\u002F\u002Fwww.ee.columbia.edu\u002Fln\u002Fdvmm\u002FresearchProjects\u002Fcdc\u002Fscnn.html)\n4. **End-to-end Learning of Action Detection from Frame Glimpses in Videos** (CVPR 2016) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1511.06984) [code](https:\u002F\u002Fgithub.com\u002Fsyyeung\u002Fframeglimpses)\n\n----\n# **Papers: Weakly Supervised Temporal Action Detection**\n\n## 2026\n1. (VLPO) [Vision-Language Preference Optimization for Weakly Supervised Temporal Action Localization](https:\u002F\u002Fopenreview.net\u002Fforum?id=ENwxBjOlAR) (to ICLR 2026)\n2. [Boosting Point-supervised Temporal Action Localization via Text Refinement and Alignment](https:\u002F\u002Farxiv.org\u002Fabs\u002F2602.01257) (arXiv 2026)\n3. [Exploring the Temporal Consistency for Point-Level Weakly-Supervised Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2602.05718) (arXiv 2026)\n4. [MSLU] [Modeling Semantic and Localization Uncertainty for Weakly Supervised Temporal Action Localization](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F11370967) (TCSVT 2026)\n\n## 2025\n\n1. (AAPL) [Action-Agnostic Point-Level Supervision for Temporal Action Detection](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.21205) (AAAI 2025) [code](https:\u002F\u002Fgithub.com\u002Fsmy-nec\u002FAAPL)\n2. (NoCo) [Rethinking Pseudo-Label Guided Learning for Weakly Supervised Temporal Action Localization from the Perspective of Noise Correction](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.11124) (AAAI 2025)\n3. (SAL) [Multilevel semantic and adaptive actionness learning for weakly supervised temporal action localization](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0893608024008347) (NN 2025) [code](https:\u002F\u002Fgithub.com\u002Flizhilin-ustc\u002FSAL)\n4. (SDANet) [Snippet-inter Difference Attention Network for Weakly-supervised Temporal Action Localization](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10856540) (TMM 2025)\n5. (grounding) [Collaborative Temporal Consistency Learning for Point-supervised Natural Language Video Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.17651) (arXiv 2025)\n6. [Bridge the Gap: From Weak to Full Supervision for Temporal Action Localization with PseudoFormer](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.14860) (CVPR 2025)\n7. (MLLM4WTAL) [Weakly Supervised Temporal Action Localization via Dual-Prior Collaborative Learning Guided by Multimodal Large Language Models](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fhtml\u002FZhang_Weakly_Supervised_Temporal_Action_Localization_via_Dual-Prior_Collaborative_Learning_Guided_CVPR_2025_paper.html) (CVPR 2025)\n8. (QROT) [Boosting Point-Supervised Temporal Action Localization through Integrating Query Reformation and Optimal Transport](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fhtml\u002FLiu_Boosting_Point-Supervised_Temporal_Action_Localization_through_Integrating_Query_Reformation_and_CVPR_2025_paper.html) (CVPR 2025)\n9. (ActionDiff) [Action-to-Action Diffusion Network for Weakly Supervised Temporal Action Localization](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F11197057) (TMM 2025)\n10. [CL-WTAL:Weakly-supervised temporal complex action localization based on multi-scale contrast learning](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F11272450) (TCSVT 2025)\n\n## 2024\n1. (ISSF) [Weakly-Supervised Temporal Action Localization by Inferring Snippet-Feature Affinity](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.12332) (AAAI 2024)\n2. (HR-Pro) [HR-Pro: Point-supervised Temporal Action Localization via Hierarchical Reliability Propagation](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.12608) (AAAI 2024) [code](https:\u002F\u002Fgithub.com\u002Fpipixin321\u002FHR-Pro)\n3. [STAT: Towards Generalizable Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.13311) (Arxiv 2024)\n4. (TSPNet) [Realigning Confidence with Temporal Saliency Information for Point-level Weakly-Supervised Temporal Action Localization](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FXia_Realigning_Confidence_with_Temporal_Saliency_Information_for_Point-Level_Weakly-Supervised_Temporal_CVPR_2024_paper.html) (CVPR 2024) [code](https:\u002F\u002Fgithub.com\u002Fzyxia1009\u002FCVPR2024-TSPNet)\n5. (M2PT) [Weakly-Supervised Temporal Action Localization with Multi-Modal Plateau Transformers](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024W\u002FL3D-IVU\u002Fhtml\u002FHu_Weakly-Supervised_Temporal_Action_Localization_with_Multi-Modal_Plateau_Transformers_CVPRW_2024_paper.html) (CVPR Workshop 2024)\n6. (EPNet) [Ensemble Prototype Network For Weakly Supervised Temporal Action Localization](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10479157) (TNNLS 2024)\n7. (FuSTAL) [Full-Stage Pseudo Label Quality Enhancement for Weakly-supervised Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.08971) (arXiv 2024) [code](https:\u002F\u002Fgithub.com\u002Ffqhank\u002FFuSTAL)\n8. (PVLR) [Probabilistic Vision-Language Representation for Weakly Supervised Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.05955) (ACM MM 2024) [code](https:\u002F\u002Fgithub.com\u002Fsejong-rcv\u002FPVLR)\n9. (zero-shot) [Towards Completeness: A Generalizable Action Proposal Generator for Zero-Shot Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.13777) (ICPR 2024) [code](https:\u002F\u002Fgithub.com\u002FRun542968\u002FGAP)\n10. (SMBD) [Stepwise Multi-grained Boundary Detector for Point-supervised Temporal Action Localization](https:\u002F\u002Feccv.ecva.net\u002Fvirtual\u002F2024\u002Fposter\u002F390) (ECCV 2024)\n11. [Zero-shot Action Localization via the Confidence of Large Vision-Language Models](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.14340) (arXiv 2024)\n12. [Can MLLMs Guide Weakly-Supervised Temporal Action Localization Tasks?](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.08466) (arXiv 2024)\n13. [Generalized Uncertainty-Based Evidential Fusion with Hybrid Multi-Head Attention for Weak-Supervised Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.19418) (arXiv 2024) [code](https:\u002F\u002Fgithub.com\u002Fheyuanpengpku\u002FGUEF\u002Ftree\u002Fmain)\n14. (SQL-Net) (point) [SQL-Net: Semantic Query Learning for Point-Supervised Temporal Action Localization](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10814700) (TMM 2024)\n15. (AFPS) [Weakly supervised temporal action localization with actionness-guided false positive suppression](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0893608024002314?via%3Dihub) (NN 2024) [code](https:\u002F\u002Fgithub.com\u002Flizhilin-ustc\u002FAFPS)\n16. (point) [Neighbor-Guided Pseudo-Label Generation and Refinement for Single-Frame Supervised Temporal Action Localization](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F10478311) (TIP 2024)\n\n## 2023\n1. (ASCN) [A Novel Action Saliency and Context-Aware Network for Weakly-Supervised Temporal Action Localization](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10007033) (TMM 2023)\n2. (TFE-DCN) [Temporal Feature Enhancement Dilated Convolution Network for Weakly-Supervised Temporal Action Localization](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FWACV2023\u002Fhtml\u002FZhou_Temporal_Feature_Enhancement_Dilated_Convolution_Network_for_Weakly-Supervised_Temporal_Action_WACV_2023_paper.html) (WACV 2023)\n3. (JCDNet) [JCDNet: Joint of Common and Definite phases Network for Weakly Supervised Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.17294) (Arxiv 2023)\n4. (P-MIL) [Proposal-Based Multiple Instance Learning for Weakly-Supervised Temporal Action Localization](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FRen_Proposal-Based_Multiple_Instance_Learning_for_Weakly-Supervised_Temporal_Action_Localization_CVPR_2023_paper.html) (CVPR 2023) [code](https:\u002F\u002Fgithub.com\u002FRenHuan1999\u002FCVPR2023_P-MIL)\n5. [Two-Stream Networks for Weakly-Supervised Temporal Action Localization With Semantic-Aware Mechanisms](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FWang_Two-Stream_Networks_for_Weakly-Supervised_Temporal_Action_Localization_With_Semantic-Aware_Mechanisms_CVPR_2023_paper.html) (CVPR 2023)\n6. [Boosting Weakly-Supervised Temporal Action Localization with Text Information](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.00607) (CVPR 2023) [code](https:\u002F\u002Fgithub.com\u002FlgzlIlIlI\u002FBoosting-WTAL)\n7. (PivoTAL) [PivoTAL: Prior-Driven Supervision for Weakly-Supervised Temporal Action Localization](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FRizve_PivoTAL_Prior-Driven_Supervision_for_Weakly-Supervised_Temporal_Action_Localization_CVPR_2023_paper.html) (CVPR 2023)\n8. [Improving Weakly Supervised Temporal Action Localization by Bridging Train-Test Gap in Pseudo Labels](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fpapers\u002FZhou_Improving_Weakly_Supervised_Temporal_Action_Localization_by_Bridging_Train-Test_Gap_CVPR_2023_paper.pdf) (CVPR 2023) [code](https:\u002F\u002Fgithub.com\u002Fzhou745\u002FGauFuse_WSTAL)\n9. (MTP) [Multiple Temporal Pooling Mechanisms for Weakly Supervised Temporal Action Localization](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3567828) (TOMM 2023)\n10. (VQK-Net) [Video-Specific Query-Key Attention Modeling for Weakly-Supervised Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.04186)\n11. (DFE) [Dual-Feature Enhancement for Weakly Supervised Temporal Action Localization](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10096383) (ICASSP 2023)\n12. (FBA-Net) [Collaborative Foreground, Background, and Action Modeling Network for Weakly Supervised Temporal Action Localization](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10115434) (TCSVT 2023)\n13. (Bi-SCC) [Weakly Supervised Temporal Action Localization With Bidirectional Semantic Consistency Constraint](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10115234) (TNNLS 2023)\n14. (F3-Net) [Feature Weakening, Contextualization, and Discrimination for Weakly Supervised Temporal Action Localization](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10091234) (TMM 2023) [code](https:\u002F\u002Fmoniruzzamanmd.github.io\u002FF3-Net\u002F)\n15. (LPR) [Learning Proposal-aware Re-ranking for Weakly-supervised Temporal Action Localization](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10144792) (TCSVT 2023)\n16. (STCL-Net) [Semantic and Temporal Contextual Correlation Learning for Weakly-Supervised Temporal Action Localization](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10155179) (TPAMI 2023)\n17. [Distilling Vision-Language Pre-training to Collaborate with Weakly-Supervised Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.09335) (CVPR 2023)\n18. [Weakly-Supervised Action Localization by Hierarchically-structured Latent Attention Modeling](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.09946) (ICCV 2023)\n19. [Cross-Video Contextual Knowledge Exploration and Exploitation for Ambiguity Reduction in Weakly Supervised Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.12609) (TCSVT 2023)\n20. (SPL-Loc) [Sub-action Prototype Learning for Point-level Weakly-supervised Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.09060) (arXiv 2023)\n21. (DDG-Net) [DDG-Net: Discriminability-Driven Graph Network for Weakly-supervised Temporal Action Localization](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fhtml\u002FTang_DDG-Net_Discriminability-Driven_Graph_Network_for_Weakly-supervised_Temporal_Action_Localization_ICCV_2023_paper.html) (ICCV 2023) [code](https:\u002F\u002Fgithub.com\u002FXiaojunTang22\u002FICCV2023-DDGNet)\n22. [Proposal-based Temporal Action Localization with Point-level Supervision](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.05511) (BMVC 2023)\n23. (LPR) [LPR: learning point-level temporal action localization through re-training](https:\u002F\u002Flink.springer.com\u002Farticle\u002F10.1007\u002Fs00530-023-01128-4) (MMSJ 2023)\n24. (POTLoc) [POTLoc: Pseudo-Label Oriented Transformer for Point-Supervised Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.13585) (arXiv 2023)\n25. (ADM-Loc) [ADM-Loc: Actionness Distribution Modeling for Point-supervised Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.15916) (arXiv 2023)\n26. [Revisiting Foreground and Background Separation in Weakly-supervised Temporal Action Localization: A Clustering-based Approach](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fhtml\u002FLiu_Revisiting_Foreground_and_Background_Separation_in_Weakly-supervised_Temporal_Action_Localization_ICCV_2023_paper.html) (ICCV 2023) [code](https:\u002F\u002Fgithub.com\u002FQinying-Liu\u002FCASE)\n27. [Sub-action Prototype Learning for Point-level Weakly-supervised Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.09060) (arXiv 2023)\n28. (AICL) [Actionness Inconsistency-Guided Contrastive Learning for Weakly-Supervised Temporal Action Localizatio](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F25237) (AAAI 2023) [code](https:\u002F\u002Fgithub.com\u002Flizhilin-ustc\u002FAAAI2023-AICL)\n\n## 2022\n1. (ACGNet) [ACGNet: Action Complement Graph Network for Weakly-supervised Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2112.10977.pdf) (AAAI 2022)\n2. (RSKP) [Weakly Supervised Temporal Action Localization via Representative Snippet Knowledge Propagation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2203.02925.pdf) (CVPR 2022) [code](https:\u002F\u002Fgithub.com\u002FLeonHLJ\u002FRSKP)\n3. (ASM-Loc) [ASM-Loc: Action-aware Segment Modeling for Weakly-Supervised Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.15187) (CVPR 2022) [code](https:\u002F\u002Fgithub.com\u002Fboheumd\u002FASM-Loc)\n4. (FTCL) [Fine-grained Temporal Contrastive Learning for Weakly-supervised Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.16800) (CVPR 2022) [code](https:\u002F\u002Fgithub.com\u002FMengyuanChen21\u002FCVPR2022-FTCL)\n5. (C3BN) [Convex Combination Consistency between Neighbors for Weakly-supervised Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.00400) (arxiv 2022)\n6. (DCC) [Exploring Denoised Cross-video Contrast for Weakly-supervised Temporal Action Localization](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FLi_Exploring_Denoised_Cross-Video_Contrast_for_Weakly-Supervised_Temporal_Action_Localization_CVPR_2022_paper.pdf) (CVPR 2022)\n7. (HAAN) [Weakly-Supervised Temporal Action Detection for Fine-Grained Videos with Hierarchical Atomic Actions](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.11805) (ECCV 2022) [code](https:\u002F\u002Fgithub.com\u002Flizhi1104\u002FHAAN)\n8. (STALE) (**Zero-Shot**) [Zero-Shot Temporal Action Detection via Vision-Language Prompting](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.08184) (ECCV 2022) [code](https:\u002F\u002Fgithub.com\u002Fsauradip\u002Fstale)\n9. (SMEN) [Slow Motion Matters: A Slow Motion Enhanced Network for Weakly Supervised Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.11324) (TCSVT 2022)\n10. [Dilation-Erosion for Single-Frame Supervised Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.06348) (arxiv 2022)\n11. (AMS) [Adaptive Mutual Supervision for Weakly-Supervised Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.02357) (TMM 2022)\n12. (DELU) [Dual-Evidential Learning for Weakly-supervised Temporal Action Localization](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-031-19772-7_12) (ECCV 2022) [code](https:\u002F\u002Fgithub.com\u002FMengyuanChen21\u002FECCV2022-DELU)\n\n## 2021\n1. (HAM-Net) [A Hybrid Attention Mechanism for Weakly-Supervised Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2101.00545). (AAAI 2021)\n2. [Cross-Attentional Audio-Visual Fusion for Weakly-Supervised Action Localization](https:\u002F\u002Fopenreview.net\u002Fforum?id=hWr3e3r-oH5) (ICLR 2021)\n3. [Weakly-supervised Temporal Action Localization by Uncertainty Modeling](https:\u002F\u002Farxiv.org\u002Fabs\u002F2006.07006) (AAAI 2021) [code](https:\u002F\u002Fgithub.com\u002FPilhyeon\u002FWTAL-Uncertainty-Modeling)\n4. (TS-PCA) [The Blessings of Unlabeled Background in Untrimmed Videos](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.13183) (CVPR 2021) [code](https:\u002F\u002Fgithub.com\u002Faliyun\u002FThe-Blessings-of-Unlabeled-Background-in-Untrimmed-Videos)\n5. (ACSNet) [ACSNet: Action-Context Separation Network for Weakly Supervised Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.15088) (AAAI 2021)\n6. (CoLA) [CoLA: Weakly-Supervised Temporal Action Localization with Snippet Contrastive Learning](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.16392) (CVPR 2021)\n7. [Weakly Supervised Temporal Action Localization Through Learning Explicit Subspaces for Action and Context](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.16155) (AAAI 2021)\n8. [ACM-Net: Action Context Modeling Network for Weakly-Supervised Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.02967) (arxiv 2021, submitted to Tip) [code](https:\u002F\u002Fgithub.com\u002Fispc-lab\u002FACM-Net)\n9. (AUMN) [Action Unit Memory Network for Weakly Supervised Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.14135) (CVPR 2021)\n10. (ASL) [Weakly Supervised Action Selection Learning in Video](https:\u002F\u002Farxiv.org\u002Fabs\u002F2105.02439) (CVPR 2021)\n11. (ActShufNet) [Action Shuffling for Weakly Supervised Temporal Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2105.04208) (arxiv 2021)\n12. [Few-Shot Action Localization without Knowing Boundaries](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.04150) (arxiv 2021)\n13. [Uncertainty Guided Collaborative Training for Weakly Supervised Temporal Action Detection](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fhtml\u002FYang_Uncertainty_Guided_Collaborative_Training_for_Weakly_Supervised_Temporal_Action_Detection_CVPR_2021_paper.html) (CVPR 2021)\n14. [Two-Stream Consensus Network: Submission to HACS Challenge 2021Weakly-Supervised Learning Track](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.10829) (CVPRW 2021)\n15. [Weakly-Supervised Temporal Action Localization Through Local-Global Background Modeling](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.11811) (CVPRW 2021)\n16. [Cross-modal Consensus Network for Weakly Supervised Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2107.12589) (ACM MM 2021) [code](https:\u002F\u002Fgithub.com\u002Fharlanhong\u002FMM2021-CO2-Net)\n17. [Learning Action Completeness from Points for Weakly-supervised Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2108.05029) (ICCV 2021) [code](https:\u002F\u002Fgithub.com\u002FPilhyeon\u002FLearning-Action-Completeness-from-Points)\n18. [Deep Motion Prior for Weakly-Supervised Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2108.05607) (submit to Tip 2021) [project](https:\u002F\u002Fsites.google.com\u002Fview\u002Fmengcao\u002Fpublication\u002Fdmp-net?authuser=0)\n19. [Foreground-Action Consistency Network for Weakly Supervised Temporal Action Localization](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FHuang_Foreground-Action_Consistency_Network_for_Weakly_Supervised_Temporal_Action_Localization_ICCV_2021_paper.pdf) (ICCV 2021)\n20. (BackTAL) [Background-Click Supervision for Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.12449) (TPAMI 2021) [code](https:\u002F\u002Fgithub.com\u002FVividLe\u002FBackTAL)\n21. (ACN) [Action Coherence Network for Weakly-Supervised Temporal Action Localization](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F9404867) (TMM 2021)\n22. [Divide and Conquer for Single-frame Temporal Action Localization](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FJu_Divide_and_Conquer_for_Single-Frame_Temporal_Action_Localization_ICCV_2021_paper.pdf) (ICCV 2021)\n\n## 2020\n\n1. (WSGN) **Weakly Supervised Gaussian Networks for Action Detection** (WACV 2020) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1904.07774)\n2. **Weakly Supervised Temporal Action Localization Using Deep Metric Learning** (WACV 2020) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2001.07793)\n3. **Action Graphs: Weakly-supervised Action Localization with Graph Convolution Networks** (WACV 2020) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2002.01449)\n4. (DGAM) **Weakly-Supervised Action Localization by Generative Attention Modeling** (CVPR 2020) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2003.12424) [code.PyTorch](https:\u002F\u002Fgithub.com\u002Fbfshi\u002FDGAM-Weakly-Supervised-Action-Localization)\n5. (EM-MIL) **Weakly-Supervised Action Localization with Expectation-Maximization Multi-Instance Learning** (ECCV 2020) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2004.00163)\n6. **Relational Prototypical Network for Weakly Supervised Temporal ActionLocalization** (AAAI 2020) [paper](https:\u002F\u002Faaai.org\u002FPapers\u002FAAAI\u002F2020GB\u002FAAAI-HuangL.1235.pdf)\n7. (BaS-Net) **Background Suppression Networkfor Weakly-supervised Temporal Action Localization** (AAAI 2020) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1911.09963) [code.PyTorch](https:\u002F\u002Fgithub.com\u002FPilhyeon\u002FBaSNet-pytorch)\n8. **Background Modeling via Uncertainty Estimation for Weakly-supervised Action Localization** (arxiv 2020) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2006.07006) [code.PyTorch](https:\u002F\u002Fgithub.com\u002FPilhyeon\u002FBackground-Modeling-via-Uncertainty-Estimation)\n9. (A2CL-PT) **Adversarial Background-Aware Loss for Weakly-supervised Temporal Activity Localization** (ECCV 2020) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2007.06643) [code.PyTorch](https:\u002F\u002Fgithub.com\u002FMichiganCOG\u002FA2CL-PT)\n10. **Weakly Supervised Temporal Action Localization with Segment-Level Labels** (arxiv 2020)\n11. (ECM) **Equivalent Classification Mapping for Weakly Supervised Temporal Action Localization** (arxiv 2020 -> TPAMI 2022) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2008.07728v1)\n12. [Two-Stream Consensus Network for Weakly-Supervised Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2010.11594v1) (ECCV 2020 spotlight)\n13. [Learning Temporal Co-Attention Models for Unsupervised Video Action Localization](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2020\u002Fhtml\u002FGong_Learning_Temporal_Co-Attention_Models_for_Unsupervised_Video_Action_Localization_CVPR_2020_paper.html) (CVPR 2020)\n14. [Action Completeness Modeling with Background Aware Networks for Weakly-Supervised Temporal Action Localization](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3394171.3413687) (ACM MM 2020)\n15. (D2-Net) [D2-Net: Weakly-Supervised Action Localization via Discriminative Embeddingsand Denoised Activations](https:\u002F\u002Farxiv.org\u002Fabs\u002F2012.06440) (arxiv 2020) (THUMOS'14 mAP@0.5: 35.9)\n16. (SF-Net) [SF-Net: Single-Frame Supervision for Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2003.06845) (ECCV 2020) [code.PyTorch](https:\u002F\u002Fgithub.com\u002FFlowerfan\u002FSF-Net)\n17. [Point-Level Temporal Action Localization: Bridging Fully-supervised Proposals to Weakly-supervised Losses](https:\u002F\u002Farxiv.org\u002Fabs\u002F2012.08236) (arxiv 2020)\n18. [Transferable Knowledge-Based Multi-Granularity Fusion Network for Weakly Supervised Temporal Action Detection](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F9105103\u002Fkeywords#keywords) (TMM 2020)\n19. [ActionBytes: Learning From Trimmed Videos to Localize Actions](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2020\u002Fhtml\u002FJain_ActionBytes_Learning_From_Trimmed_Videos_to_Localize_Actions_CVPR_2020_paper.html) (CVPR 2020)\n\n## 2019\n\n1. (AdapNet) **AdapNet: Adaptability Decomposing Encoder-Decoder Network for Weakly Supervised Action Recognition and Localization** (IEEE Transactions on Neural Networks and Learning Systems) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1911.11961)\n2. **Breaking Winner-Takes-All: Iterative-Winners-Out Networks for Weakly Supervised Temporal Action Localization** (IEEE Transactions on Image Processing) [paper](https:\u002F\u002Ftanmingkui.github.io\u002Ffiles\u002Fpublications\u002FBreaking.pdf)\n3. **Weakly-Supervised Temporal Localization via Occurrence Count Learning** (ICML 2019) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1905.07293) [code.TensorFlow](https:\u002F\u002Fgithub.com\u002FSchroeterJulien\u002FICML-2019-Weakly-Supervised-Temporal-Localization-via-Occurrence-Count-Learning)\n4. (MAAN) **Marginalized Average Attentional Network for Weakly-Supervised Learning** (ICLR 2019) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1905.08586) [code.PyTorch](https:\u002F\u002Fgithub.com\u002Fyyuanad\u002FMAAN)\n5. **Weakly-supervised Action Localization with Background Modeling** (ICCV 2019) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1908.06552)\n6. (TSM) **Temporal Structure Mining for Weakly Supervised Action Detection** (ICCV 2019) [paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fpapers\u002FYu_Temporal_Structure_Mining_for_Weakly_Supervised_Action_Detection_ICCV_2019_paper.pdf)\n7. (CleanNet) **Weakly Supervised Temporal Action Localization through Contrast basedEvaluation Networks** (ICCV 2019) [paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fhtml\u002FLiu_Weakly_Supervised_Temporal_Action_Localization_Through_Contrast_Based_Evaluation_Networks_ICCV_2019_paper.html)\n8. (3C-Net) **3C-Net: Category Count and Center Loss for Weakly-Supervised Action Localization** (ICCV 2019) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1908.08216) [code.PyTorch](https:\u002F\u002Fgithub.com\u002Fnaraysa\u002F3c-net)\n9. (CMCS) **Completeness Modeling and Context Separation for Weakly SupervisedTemporal Action Localization** (CVPR 2019) [paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FLiu_Completeness_Modeling_and_Context_Separation_for_Weakly_Supervised_Temporal_Action_CVPR_2019_paper.pdf) [code.PyTorch](https:\u002F\u002Fgithub.com\u002FFinspire13\u002FCMCS-Temporal-Action-Localization)\n10. (RefineLoc) **RefineLoc: Iterative Refinement for Weakly-Supervised Action Localization** (arxiv 2019) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1904.00227) [homepage](http:\u002F\u002Fhumamalwassel.com\u002Fpublication\u002Frefineloc\u002F)\n11. (ASSG) **Adversarial Seeded Sequence Growing for Weakly-Supervised Temporal Action Localization** (ACM MM 2019) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1908.02422)\n12. (TSRNet) **Learning Transferable Self-attentive Representations for Action Recognition in Untrimmed Videos with Weak Supervision** (AAAI 2019) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1902.07370)\n13. (STAR) **Segregated Temporal Assembly Recurrent Networks for Weakly Supervised Multiple Action Detection** (AAAI 2019) [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1811.07460)\n\n## 2018\n\n1. [Weakly Supervised Temporal Action Detection with Shot-Based Temporal Pooling Network](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-030-04212-7_37) (ICONIP 2018)\n2. (W-TALC) [W-TALC: Weakly-supervised Temporal Activity Localization and Classification](https:\u002F\u002Farxiv.org\u002Fabs\u002F1807.10418) (ECCV 2018) [code.PyTorch](https:\u002F\u002Fgithub.com\u002Fsujoyp\u002Fwtalc-pytorch?utm_source=catalyzex.com)\n3. (AutoLoc) [AutoLoc: Weakly-supervised Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F1807.08333) (ECCV 2018) [code](https:\u002F\u002Fgithub.com\u002Fzhengshou\u002FAutoLoc?utm_source=catalyzex.com)\n4. (STPN) [Weakly Supervised Action Localization by Sparse Temporal Pooling Network](https:\u002F\u002Farxiv.org\u002Fabs\u002F1712.05080) (CVPR 2018) [code](https:\u002F\u002Fgithub.com\u002Fdemianzhang\u002Fweakly-action-localization?utm_source=catalyzex.com)\n5. [Step-by-step Erasion, One-by-one Collection: A Weakly Supervised Temporal Action Detector](https:\u002F\u002Farxiv.org\u002Fabs\u002F1807.02929) (ACM MM 2018)\n6. (CPMN) [Cascaded Pyramid Mining Network for Weakly Supervised Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F1810.11794) (accv 2018)\n\n## 2017\n\n1. (Hide-and-Seek) [Hide-and-Seek: Forcing a Network to be Meticulous for\nWeakly-supervised Object and Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F1704.04232) (ICCV 2017)\n2. (UntrimmedNets) [UntrimmedNets for Weakly Supervised Action Recognition and Detection](https:\u002F\u002Farxiv.org\u002Fabs\u002F1703.03329) (CVPR 2017) [code](https:\u002F\u002Fgithub.com\u002Fwanglimin\u002FUntrimmedNet)\n\n----\n# **Papers: Online Action Detection**\n\n## 2026\n1. (COAD) [Continuous Online Action Detection from Egocentric Videos](https:\u002F\u002Fopenreview.net\u002Fforum?id=dwgtYTuSaS) (to ICLR 2026)\n2. (MOAD) [Backtrace Mamba: Reviving Critical Temporal Contexts via Hierarchical Memory Compression for Online Action Detection](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F38139) (AAAI 2026)\n\n## 2025\n1. (TOAD) [Text-driven Online Action Detection](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.13518) (Integrated Computer-Aided Engineering 2025) [code](https:\u002F\u002Fgithub.com\u002F3dperceptionlab\u002Ftoad)\n2. (CMeRT) [Context-Enhanced Memory-Refined Transformer for Online Action Detection](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.18359) (CVPR 2025) [code](https:\u002F\u002Fgithub.com\u002Fpangzhan27\u002FCMeRT)\n3. [Vision and Intention Boost Large Language Model in Long-Term Action Anticipation](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.01713) (arXiv 2025)\n4. (PTMA) [Probabilistic Temporal Masked Attention for Cross-view Online Action Detection](https:\u002F\u002Farxiv.org\u002Fabs\u002F2508.17025) (TMM 2025)\n5. (CDM-Tr) [Long and Short-Term Collaborative Decision-Making Transformer for Online Action Detection and Anticipation](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0031320325004339) (PR 2025)\n6. (TPT) [Throughout Procedural Transformer for Online Action Detection and Anticipation](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10963729) (TCSVT 2025)\n7. (BiOMamba) [BiOMamba: Mamba-based Forward-Then-Backward Temporal Modeling for Online Action Detection and Anticipation](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3746027.3755847) (ACM MM 2025)\n\n## 2024\n1. (JOADAA) [JOADAA: joint online action detection and action anticipation](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.06130) (WACV 2024)\n2. [Object Aware Egocentric Online Action Detection](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.01079) (CVPRW 2024)\n3. [ActionSwitch: Class-agnostic Detection of Simultaneous Actions in Streaming Videos](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.12987) (ECCV 2024) [code](https:\u002F\u002Fgithub.com\u002FmusicalOffering\u002FActionSwitch-release)\n4. (MATR) [Online Temporal Action Localization with Memory-Augmented Transformer](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.02957) (ECCV 2024) [code](https:\u002F\u002Fskhcjh231.github.io\u002FMATR_project\u002F)\n5. (HAT) [HAT: History-Augmented Anchor Transformer for Online Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.06437) (ECCV 2024) [code](https:\u002F\u002Fgithub.com\u002Fsakibreza\u002FECCV24-HAT\u002F)\n6. [Progressive Knowledge Distillation from Different Levels of Teachers for Online Action Detection](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10814662) (TMM 2024)\n7. [OnlineTAS: An Online Baseline for Temporal Action Segmentation](https:\u002F\u002Fopenreview.net\u002Fforum?id=bkLetzd97M&referrer=%5Bthe%20profile%20of%20Angela%20Yao%5D(%2Fprofile%3Fid%3D~Angela_Yao1)) (NeurIPS 2024)\n8. (OV-OAD) [Does Video-Text Pretraining Help Open-Vocabulary Online Action Detection?](https:\u002F\u002Fnips.cc\u002Fvirtual\u002F2024\u002Fposter\u002F95303) (NeurIPS 2024) [code](https:\u002F\u002Fgithub.com\u002FZQSIAT\u002FOV-OAD)\n9. [Bayesian Evidential Deep Learning for Online Action Detection](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-031-72640-8_16) (ECCV 2024)\n\n## 2023\n1. (recognation) (GliTr) [GliTr: Glimpse Transformers with Spatiotemporal Consistency for Online Action Prediction](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.13605) (WACV 2023)\n2. (E2E-LOAD) [E2E-LOAD: End-to-End Long-form Online Action Detection](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fhtml\u002FCao_E2E-LOAD_End-to-End_Long-form_Online_Action_Detection_ICCV_2023_paper.html) (ICCV 2023) [code](https:\u002F\u002Fgithub.com\u002Fsqiangcao99\u002FE2E-LOAD)\n3. (MiniROAD) [MiniROAD: Minimal RNN Framework for Online Action Detection](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fhtml\u002FAn_MiniROAD_Minimal_RNN_Framework_for_Online_Action_Detection_ICCV_2023_paper.html) (ICCV 2023) [code](https:\u002F\u002Fgithub.com\u002Fjbistanbul\u002FMiniROAD)\n4. (MAT) [Memory-and-Anticipation Transformer for Online Action Understanding](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fhtml\u002FWang_Memory-and-Anticipation_Transformer_for_Online_Action_Understanding_ICCV_2023_paper.html) (ICCV 2023) [code](https:\u002F\u002Fgithub.com\u002FEcho0125\u002FMemory-and-Anticipation-Transformer)\n5. [Online Action Detection with Learning Future Representations by Contrastive Learning](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10220027) (ICME 2023)\n6. (HCM) [HCM: Online Action Detection With Hard Video Clip Mining](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10246422) (TMM 2023)\n7. (DFAformer) [DFAformer: A Dual Filtering Auxiliary Transformer for Efficient Online Action Detection in Streaming Videos](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-981-99-8537-1_11) (PRCV 2023)\n\n## 2022\n1. (Colar) [Colar: Effective and Efficient Online Action Detection by Consulting Exemplars](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.01057) (CVPR 2022) [code](https:\u002F\u002Fgithub.com\u002FVividLe\u002FOnline-Action-Detection)\n2. (GateHUB) [GateHUB: Gated History Unit with Background Suppression for Online Action Detection](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.04668) (CVPR 2022)\n3. [A Circular Window-based Cascade Transformer for Online Action Detection](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2208.14209.pdf) (TPAMI 2022)\n4. (TeSTra) [Real-time Online Video Detection with Temporal Smoothing Transformers](https:\u002F\u002Farxiv.org\u002Fabs\u002F2209.09236) (ECCV 2022) [code](https:\u002F\u002Fgithub.com\u002Fzhaoyue-zephyrus\u002FTeSTra)\n5. (OAT) [A Sliding Window Scheme for Online Temporal Action Localization](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-031-19830-4_37) [code](https:\u002F\u002Fgithub.com\u002FYHKimGithub\u002FOAT-OSN)\n6. (SimOn) [SimOn: A Simple Framework for Online Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2211.04905.pdf) (arxiv 2022) [code](https:\u002F\u002Fgithub.com\u002FTuanTNG\u002FSimOn)\n7. (survey) [Online human action detection and anticipation in videos: A survey](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fabs\u002Fpii\u002FS0925231222003617?via%3Dihub)\n8. [Uncertainty-Based Spatial-Temporal Attention for Online Action Detection](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2022\u002Fpapers_ECCV\u002Fpapers\u002F136640068.pdf) (ECCV 2022)\n9. (PPKD) [Privileged Knowledge Distillation for Online Action Detection](https:\u002F\u002Farxiv.org\u002Fabs\u002F2011.09158) (PR 2022)\n10. [Information Elevation Network for Online Action Detection and Anticipation](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9857490) (CVPR W 2022)\n11. (2PESNet) [2PESNet: Towards online processing of temporal action localization](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0031320322003521) (PR 2022)\n\n## 2021\n\n1. (WOAD) [WOAD: Weakly Supervised Online Action Detection in Untrimmed Videos](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fhtml\u002FGao_WOAD_Weakly_Supervised_Online_Action_Detection_in_Untrimmed_Videos_CVPR_2021_paper.html) (CVPR 2021)\n2. (OadTR) [OadTR: Online Action Detection with Transformers](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.11149) (ICCV 2021) [code](https:\u002F\u002Fgithub.com\u002Fwangxiang1230\u002FOadTR)\n3. (CAG-QIL) [CAG-QIL: Context-Aware Actionness Grouping via Q Imitation Learning for Online Temporal Action Localization](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FKang_CAG-QIL_Context-Aware_Actionness_Grouping_via_Q_Imitation_Learning_for_Online_ICCV_2021_paper.pdf) (ICCV 2021)\n4. (LSTR) [Long Short-Term Transformer for Online Action Detection](https:\u002F\u002Farxiv.org\u002Fabs\u002F2107.03377) (NeurIPS 2021) [code](https:\u002F\u002Fgithub.com\u002Famazon-science\u002Flong-short-term-transformer)\n5. (TRN) [Temporal Recurrent Networks for Online Action Detection](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9009797) (ICCV 2019) [code](https:\u002F\u002Fgithub.com\u002Fxumingze0308\u002FTRN.pytorch)\n6. [pre awesome](https:\u002F\u002Fgithub.com\u002Fwangxiang1230\u002FAwesome-Online-Action-Detection)\n\n----\n# **Semi-Supervised**\n\n## 2024\n1. (APL) [Towards Adaptive Pseudo-label Learning for Semi-Supervised Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.07673) (ECCV 2024)\n\n## 2023\n1. (NPL) [Learning from Noisy Pseudo Labels for Semi-Supervised Temporal Action Localization](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fhtml\u002FXia_Learning_from_Noisy_Pseudo_Labels_for_Semi-Supervised_Temporal_Action_Localization_ICCV_2023_paper.html) (ICCV 2023) [code](https:\u002F\u002Fgithub.com\u002Fkunnxia\u002FNPL)\n\n## 2022\n1. (AL-STAL) [Active Learning with Effective Scoring Functions for Semi-Supervised Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.14856) (Displays 2022)\n2. (SPOT) [Semi-Supervised Temporal Action Detection with Proposal-Free Masking](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.07059) (ECCV 2022) [code](https:\u002F\u002Fgithub.com\u002Fsauradip\u002FSPOT)\n\n## 2021\n1. (SSTAP) [Self-Supervised Learning for Semi-Supervised Temporal Action Proposal](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.03214) (CVPR 2021) [code](https:\u002F\u002Fgithub.com\u002Fwangxiang1230\u002FSSTAP)\n2. [Temporal Action Detection with Multi-level Supervision](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FShi_Temporal_Action_Detection_With_Multi-Level_Supervision_ICCV_2021_paper.pdf) (ICCV 2021) [code](https:\u002F\u002Fgithub.com\u002Fbfshi\u002FSSAD_OSAD)\n3. (KFC) [KFC: An Efficient Framework for Semi-Supervised Temporal Action Localization](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F9500051) （Tip 2021）\n\n## 2019\n1. [Learning Temporal Action Proposals With Fewer Labels](https:\u002F\u002Farxiv.org\u002Fabs\u002F1910.01286) (ICCV 2019)\n2. (TTC-Loc) [Towards Train-Test Consistency for Semi-supervised Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F1910.11285v3) (arxiv 2019)\n\n----\n# **Open-Vocabulary Temporal Action Detection**\n\n## 2026\n1. (PSFTR) [Progressive Semantic Fusion Transformer for Zero-Shot Temporal Action Localization](https:\u002F\u002Fopenreview.net\u002Fforum?id=at3UEJzCRc) (to ICLR 2026)\n2. (zero-shot) [TF-CADE: Foreground-Concentrated Text-Video Alignment for Zero-Shot Temporal Action Detection]() (CVPR 2026)\n3. (zero-shot) [Bidirectional Temporal-Sensitive Adaptation for Generalized Zero-Shot Temporal Action Localization](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F11396058) (TCSVT 2026)\n4. [Decompose and Transfer: CoT-Prompting Enhanced Alignment for Open-Vocabulary Temporal Action Detection](https:\u002F\u002Farxiv.org\u002Fabs\u002F2603.24030) (CVPR 2026)\n\n## 2025\n1. (zero-shot, FreeZAD) [Training-Free Zero-Shot Temporal Action Detection with Vision-Language Models](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.13795) (arXiv 2025)\n2. (STOV-TAL) [Exploring Scalability of Self-Training for Open-Vocabulary Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.07024) (WACV 2025) [code](https:\u002F\u002Fgithub.com\u002FHYUNJS\u002FSTOV-TAL)\n3. [Zero-Shot Temporal Interaction Localization for Egocentric Videos](https:\u002F\u002Farxiv.org\u002Fabs\u002F2506.03662) (arXiv 2025) [code](https:\u002F\u002Fgithub.com\u002FIRMVLab\u002FEgoLoc)\n\n## 2024\n1. [One-Stage Open-Vocabulary Temporal Action Detection Leveraging Temporal Multi-scale and Action Label Features](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.19542) (FG 2024)\n2. (OVFormer) [Open-Vocabulary Temporal Action Localization using Multimodal Guidance](https:\u002F\u002Fbmva-archive.org.uk\u002Fbmvc\u002F2024\u002Fpapers\u002FPaper_1013\u002Fpaper.pdf) (BMVC 2024)\n3. (OV-OAD) [Does Video-Text Pretraining Help Open-Vocabulary Online Action Detection?](https:\u002F\u002Fnips.cc\u002Fvirtual\u002F2024\u002Fposter\u002F95303) (NeurIPS 2024)\n4. (DeTAL) [DeTAL: Open-Vocabulary Temporal Action Localization With Decoupled Networks](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10517407) (TPAMI 2024) [code](https:\u002F\u002Fgithub.com\u002Fvsislab\u002FDeTAL)\n5. [Open-Vocabulary Action Localization with Iterative Visual Prompting](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.17422) (arXiv 2024)\n6. (T3AL) [Test-Time Zero-Shot Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.05426) (CVPR 2024) [code](https:\u002F\u002Fgithub.com\u002Fbenedettaliberatori\u002FT3AL)\n7. (ZEETAD) [ZEETAD: Adapting Pretrained Vision-Language Model for Zero-Shot End-to-End Temporal Action Detection](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FWACV2024\u002Fpapers\u002FPhan_ZEETAD_Adapting_Pretrained_Vision-Language_Model_for_Zero-Shot_End-to-End_Temporal_Action_WACV_2024_paper.pdf) (WACV 2024)\n8. [Zero-shot Action Localization via the Confidence of Large Vision-Language Models](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.14340) (arXiv 2024)\n9. (Ti-FAD) [Text-Infused Attention and Foreground-Aware Modeling for Zero-Shot Temporal Action Detection](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper_files\u002Fpaper\u002F2024\u002Fhash\u002F13250eb13871b3c2c0a0667b54bad165-Abstract-Conference.html) (NeurIPS 2024) [code](https:\u002F\u002Fgithub.com\u002FYearangLee\u002FTi-FAD)\n10. (mProTEA) [Zero-Shot Temporal Action Detection by Learning Multimodal Prompts and Text-Enhanced Actionness](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F10556637) (TCSVT 2024)\n11. (GRIZAL) [GRIZAL: Generative Prior-guided Zero-Shot Temporal Action Localization](https:\u002F\u002Faclanthology.org\u002F2024.emnlp-main.1061.pdf) (EMNLP 2024) [code](https:\u002F\u002Fgithub.com\u002FCandleLabAI\u002FGRIZAL-EMNLP2024)\n\n## 2023\n\n1. (CELL) [Cascade Evidential Learning for Open-World Weakly-Supervised Temporal Action Localization](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FChen_Cascade_Evidential_Learning_for_Open-World_Weakly-Supervised_Temporal_Action_Localization_CVPR_2023_paper.html) (CVPR 2023)\n2. (OW-TAL) [OW-TAL: Learning Unknown Human Activities for Open-World Temporal Action Localization](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0031320322005076) (PR 2023)\n3. (TN-ZSTAD) [TN-ZSTAD: Transferable Network for Zero-Shot Temporal Activity Detection](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9797852) (TAPMI 2023)\n4. [Multi-modal Prompting for Low-Shot Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.11732) (arXiv 2023)\n\n## 2022\n& before\n\n1. [Open-Vocabulary Temporal Action Detection with Off-the-Shelf ImageText Features](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2212.10596.pdf) (arxiv 2022)\n2. (OpenTAL) [OpenTAL: Towards Open Set Temporal Action Localization](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2203.05114.pdf) (CVPR 2022) [code](https:\u002F\u002Fwww.rit.edu\u002Factionlab\u002Fopental)\n3. [Prompting Visual-Language Models for Efficient Video Understanding](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.04478) (ECCV 2022) [code](https:\u002F\u002Fju-chen.github.io\u002Fefficient-prompt\u002F)\n4. (STALE) (**Zero-Shot**) [Zero-Shot Temporal Action Detection via Vision-Language Prompting](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.08184) (ECCV 2022) [code](https:\u002F\u002Fgithub.com\u002Fsauradip\u002Fstale)\n5. [Zero-shot Natural Language Video Localization](https:\u002F\u002Farxiv.org\u002Fabs\u002F2110.00428) (ICCV 2021)\n6. (ZSTAD) [ZSTAD: Zero-Shot Temporal Activity Detection](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2020\u002Fpapers\u002FZhang_ZSTAD_Zero-Shot_Temporal_Activity_Detection_CVPR_2020_paper.pdf) (CVPR 2020)\n\n","\u003C!--\n * @Author: fzy\n * @Date: 2020-03-09 21:53:10\n * @LastEditors: Zhenying\n * @LastEditTime: 2020-12-03 18:58:12\n * @Description: \n -->\n# 优秀的时间动作检测与时间动作提案生成 [![Awesome](https:\u002F\u002Fcdn.rawgit.com\u002Fsindresorhus\u002Fawesome\u002Fd7305f38d29fed78fa85652e3a63e154dd8e8829\u002Fmedia\u002Fbadge.svg)](https:\u002F\u002Fgithub.com\u002Fzhenyingfang\u002FAwesome-Temporal-Action-Detection-Temporal-Action-Proposal-Generation)\n时间动作检测 & 弱监督与半监督时间动作检测 & 时间动作提案生成 & 开放词汇时间动作检测\n\n\n-----\n**目录**\n\u003C!-- TOC -->\n- [优秀的时间动作检测与时间动作提案生成](#awesome-temporal-action-detection-temporal-action-proposal-generation)\n- [**关于预训练模型**](#about-pretrained-model)\n- [**ActivityNet 挑战赛**](#activitynet-challenge)\n- [**时间动作提案生成**](#papers-temporal-action-proposal-generation)\n  - [2023年](#2023) - [2022年](#2022) - [2021年](#2021) - [2020年](#2020) - [2019年](#2019) - [2018年](#2018) - [2017年](#2017) - [之前](#before)\n- [**时间动作检测**](#papers-temporal-action-detection)\n  - [2026年](#2026) - [2025年](#2025) - [2024年](#2024) - [2023年](#2023-1) - [2022年](#2022-1) - [2021年](#2021-1) - [2020年](#2020-1) - [2019年](#2019-1) - [2018年](#2018-1) - [2017年](#2017-1) - [之前](#before-1)\n- [**弱监督时间动作检测**](#papers-weakly-supervised-temporal-action-detection)\n  - [2026年](#2026-1) - [2025年](#2025-1) - [2024年](#2024-1) - [2023年](#2023-2) - [2022年](#2022-2) - [2021年](#2021-2) - [2020年](#2020-2) - [2019年](#2019-2) - [2018年](#2018-2) - [2017年](#2017-2)\n- [**在线动作检测**](#papers-online-action-detection)\n  - [2026年](#2026-2) - [2025年](#2025-2) - [2024年](#2024-2) - [2023年](#2023-3) - [2022年](#2022-3) - [2021年](#2021-3)\n- [**半监督时间动作检测**](#semi-supervised)\n  - [2024年](#2024-3) - [2023年](#2023-4) - [2022年](#2022-4) - [2021年](#2021-4) - [2019年](#2019-3)\n- [**开放词汇时间动作检测**](#open-vocabulary-temporal-action-detection)\n  - [2026年](#2026-3) - [2025年](#2025-3) - [2024年](#2024-4) - [2023年](#2023-5) - [2022年](#2022-5)\n\n\n-----\n# **关于预训练模型**\n1. (BSP) [针对视频中时间定位的边界敏感型预训练](https:\u002F\u002Farxiv.org\u002Fabs\u002F2011.10830) (ICCV 2021)\n2. (TSP) [TSP：用于定位任务的视频编码器时间敏感型预训练](https:\u002F\u002Farxiv.org\u002Fabs\u002F2011.11479) (ICCVW 2021)\n3. (UP-TAL) [用于时间动作定位任务的无监督预训练](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.13609) (CVPR 2022) [代码](https:\u002F\u002Fgithub.com\u002Fzhang-can\u002FUP-TAL)\n4. [用于时间定位的对比语言-动作预训练](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.12293) (arXiv 2022)\n5. [用于时间动作定位的低保真端到端视频编码器预训练](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.15233) (NeurIPS 2021)\n\n# **ActivityNet 挑战赛及讲座**\n1. (2021年) [AcitvityNet 2021](http:\u002F\u002Factivity-net.org\u002Fchallenges\u002F2021\u002Fchallenge.html)\n2. (2021年) [Transformer在时序行为检测中的应用 & 基于自监督学习的半监督时序行为检测](https:\u002F\u002Fwww.techbeat.net\u002Ftalk-info?id=545) (DAMO Academy, 阿里巴巴集团)\n\n# **论文：时间动作提案生成**\n\n## 2023年\n1. (MIFNet) [MIFNet：多实例聚焦的时间动作提案生成](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fabs\u002Fpii\u002FS0925231223000553) (Neurocomputing 2023)\n2. (SMBG) [通过稀疏多级边界生成器加速时间动作提案的学习](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.03166) (arXiv 2023) [代码](https:\u002F\u002Fgithub.com\u002Fzhouyang-001\u002FSMBG-for-temporal-action-proposal)\n3. (MCBD) [用于时间动作提案生成的多级内容感知边界检测](Tip 2023) [代码](https:\u002F\u002Fmic.tongji.edu.cn\u002Fff\u002F32\u002Fc9778a327474\u002Fpage.htm)\n\n## 2022年\n1. (BCNet) [带有背景约束的时间动作提案生成](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.07984) (AAAI 2022)\n2. (PRSA-Net) [基于金字塔区域的槽位注意力网络用于时间动作提案生成](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.10095) (BMVC 2022) [代码](https:\u002F\u002Fgithub.com\u002Fhandhand123\u002FPRSA-Net)\n3. (TDN) [为时间动作提案生成建模长期视频语义分布](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fabs\u002Fpii\u002FS0925231221017616) (Neurocomputing 2022)\n4. (AOE-Net) [AOE-Net：具有适应性注意力机制的实体交互建模用于时间动作提案生成](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.02578) (IJCV 2022)\n\n## 2021年\n1. (BSN++) [BSN++：具有尺度均衡关系建模的互补边界回归器用于时间动作提案生成](https:\u002F\u002Farxiv.org\u002Fabs\u002F2009.07641) (AAAI 2021) [作者知乎](https:\u002F\u002Fzhuanlan.zhihu.com\u002Fp\u002F344065976)\n2. (RTD-Net) [用于直接动作提案生成的松弛型 Transformer 解码器](https:\u002F\u002Farxiv.org\u002Fabs\u002F2102.01894) (ICCV 2021) [代码](https:\u002F\u002Fgithub.com\u002FMCG-NJU\u002FRTD-Action) [知乎](https:\u002F\u002Fzhuanlan.zhihu.com\u002Fp\u002F363133304)\n3. (TCANet) [用于时间动作提案细化的时间上下文聚合网络](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.13141) (CVPR 2021) [知乎](https:\u002F\u002Fzhuanlan.zhihu.com\u002Fp\u002F358754602)\n4. [带有自适应图的增强型 Transformer 用于时间动作提案生成](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.16024) (arXiv 2021)\n5. (TAPG) [使用 Transformer 进行时间动作提案生成](https:\u002F\u002Farxiv.org\u002Fabs\u002F2105.12043) (arXiv 2021)\n6. (AEN) [用于时间动作提案生成的代理-环境网络](https:\u002F\u002Farxiv.org\u002Fabs\u002F2107.08323) (ICASSP 2021)\n7. (AEI) [AEI：具有适应性注意力的动作-环境交互用于时间动作提案生成](https:\u002F\u002Farxiv.org\u002Fabs\u002F2110.11474) (BMVC 2021) [代码](https:\u002F\u002Fgithub.com\u002Fvhvkhoa\u002FTAPG-AgentEnvInteration)\n\n## 2020年\n\n1. **VALSE 林天伟的演讲** (2020年3月18日) [链接](https:\u002F\u002Fpan.baidu.com\u002Fs\u002F18uPJX3l69qJHaYOdeJ0IQw?errmsg=Auth+Login+Sucess&errno=0&ssnerror=0&) (7y8g)\n2. (RapNet) [使用关系感知金字塔网络进行精确的时间动作提案生成] (AAAI 2020) [前期论文是2019年 ActivityNet 任务1第2名](https:\u002F\u002Farxiv.org\u002Fabs\u002F1908.03448)\n3. (DBG) [通过密集边界生成器快速学习时间动作提案] (AAAI 2020) [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1911.04127) [TensorFlow 代码](https:\u002F\u002Fgithub.com\u002FTencentYoutuResearch\u002FActionDetection-DBG)\n4. (BC-GNN) [用于时间动作提案生成的边界内容图神经网络] (ECCV 2020) [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2008.01432v1)\n5. [采用互惠正则化的自下而上时间动作定位](https:\u002F\u002Farxiv.org\u002Fabs\u002F2002.07358) (ECCV 2020) [TensorFlow 代码](https:\u002F\u002Fgithub.com\u002FPeisenZhao\u002FBottom-Up-TAL-with-MR)\n6. (TSI) [TSI：用于动作提案生成的时间尺度不变网络] (ACCV 2020) [论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FACCV2020\u002Fhtml\u002FLiu_TSI_Temporal_Scale_Invariant_Network_for_Action_Proposal_Generation_ACCV_2020_paper.html)\n\n## 2019年\n\n1. (SRG) **SRG：基于片段相关性的时序动作提案生成器**（IEEE汇刊，2019）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1911.11306)\n2. (DPP) **用于动作时序提案的深度逐点预测**（ICONIP 2019）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1909.07725) [代码.PyTorch](https:\u002F\u002Fgithub.com\u002Fliluxuan1997\u002FDPP)\n3. (BMN) **BMN：用于时序动作提案生成的边界匹配网络**（ICCV 2019）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1907.09702) [代码.PaddlePaddle](https:\u002F\u002Fgithub.com\u002FPaddlePaddle\u002Fmodels\u002Ftree\u002Fdevelop\u002FPaddleCV\u002Fvideo) [代码.PyTorch_非官方](https:\u002F\u002Fgithub.com\u002FJJBOY\u002FBMN-Boundary-Matching-Network)\n4. (MGG) **用于时序动作提案的多粒度生成器**（CVPR 2019）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1811.11524)\n5. **结合图像数据的3D卷积与光流生成时序动作提案的研究**（2019 CVPR Workshop）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1903.04176)\n6. (CMSN) **CMSN：用于时序动作提案生成的连续多阶段网络及可变边距余弦损失**（arXiv，2019）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1911.06080)\n7. **一种用于加速时序动作提案生成的高性能计算方法**（arXiv，2019）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1906.06496)\n8. **用于提案和活动定位的多粒度融合网络：提交至ActivityNet Challenge 2019任务1和任务2**（ActivityNet挑战赛2019）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1907.12223)\n9. [时序动作提案生成中局部与全局上下文的联合学习](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F8941024)（TCSVT 2019）\n\n## 2018年\n\n1. (CTAP) **CTAP：互补性时序动作提案生成**（ECCV 2018）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1807.04821) [代码.TensorFlow](https:\u002F\u002Fgithub.com\u002Fjiyanggao\u002FCTAP)\n2. (BSN) **BSN：用于时序动作提案生成的边界敏感网络**（ECCV 2018）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1806.02964) [代码.TensorFlow](https:\u002F\u002Fgithub.com\u002Fwzmsltw\u002FBSN-boundary-sensitive-network) [代码.PyTorch](https:\u002F\u002Fgithub.com\u002Fwzmsltw\u002FBSN-boundary-sensitive-network.pytorch)\n3. (SAP) **SAP：基于强化学习的自适应时序动作检测提案模型**（AAAI 2018）[论文](https:\u002F\u002Fgithub.com\u002Fhjjpku\u002FAction_Detection_DQN\u002Fblob\u002Fmaster\u002Fcamera%20ready.pdf) [代码.Torch](https:\u002F\u002Fgithub.com\u002Fhjjpku\u002FAction_Detection_DQN)\n\n## 2017年\n\n1. (TURN TAP) **TURN TAP：用于时序动作提案的时序单元回归网络**（ICCV 2017）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1703.06189) [代码.TensorFlow](https:\u002F\u002Fgithub.com\u002Fjiyanggao\u002FTURN-TAP)\n2. (SST) **SST：单流时序动作提案**（CVPR 2017）[论文](http:\u002F\u002Fvision.stanford.edu\u002Fpdf\u002Fbuch2017cvpr.pdf) [代码.theano](https:\u002F\u002Fgithub.com\u002Fshyamal-b\u002Fsst\u002F) [代码.TensorFlow](https:\u002F\u002Fgithub.com\u002FJaywongWang\u002FSST-Tensorflow)\n3. **YoTube：通过循环与静态回归网络搜索动作提案**（IEEE汇刊，2017）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1706.08218)\n4. **通用活动检测中时序精度的探索**（arXiv，2017）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1703.02716) [代码.PyTorch](https:\u002F\u002Fgithub.com\u002Fyjxiong\u002Faction-detection)\n\n## 之前\n\n1. (DAPs) **DAPs：用于动作理解的深度动作提案**（ECCV 2016）[论文](https:\u002F\u002Fdrive.google.com\u002Ffile\u002Fd\u002F0B0ZXjo_p8lHBcjh1WDlmYVN3R2M\u002Fview) [代码](https:\u002F\u002Fgithub.com\u002Fescorciav\u002Fdeep-action-proposals)\n\n----\n# **论文：时序动作检测**\n\n## 2026年\n1. (ActionVLM) [迈向缓解视觉-语言模型在时序动作定位中的模态偏差]（arXiv，2026）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2601.21078)\n2. [轻量而精准：基于传感器数据的实时动作检测模型SlimSTAD]（AAAI 2026）[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F36975)\n3. [场景感知的时空泛化：迈向跨域鲁棒的时序动作检测]（AAAI 2026）[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F37392)\n\n## 2025\n\n1. (MS-Temba) [MS-Temba：用于高效时序动作检测的多尺度时间Mamba](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.06138)（arXiv 2025）[代码](https:\u002F\u002Fgithub.com\u002Fthearkaprava\u002FMS-Temba)\n2. (零样本) [基于视觉-语言模型的免训练零样本时序动作检测](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.13795)（arXiv 2025）\n3. (领域自适应) [动态切换教师：如何提升时序动作检测模型的泛化能力](https:\u002F\u002Fopenreview.net\u002Fforum?id=o8SPZJaJyj)（arXiv 2025）\n4. (LoSA) [LoSA：用于扩展端到端时序动作定位的长短距离适配器](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.01282)（WACV 2025）\n5. (TimeLoc) [TimeLoc：一种统一的端到端框架，用于在长视频中精确地进行时间戳定位](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.06526)（arXiv 2025）[代码](https:\u002F\u002Fgithub.com\u002Fsming256\u002FTimeLoc)\n6. [通过渐进式模块剪枝压缩时序动作检测模型](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.16916)（CVPR 2025）\n7. (DiGIT) [DiGIT：用于时序动作检测Transformer的多膨胀门控编码器与中心相邻区域集成解码器](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.05711)（CVPR 2025）[代码](https:\u002F\u002Fgithub.com\u002FDotori-HJ\u002FDiGIT)\n8. (FDDet) [FDDet：用于时序动作检测中边界精炼的频率解耦方法](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.00647)（arXiv 2025）\n9. [少样本时序动作定位中的思维链文本推理](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.13460)（arXiv 2025）\n10. [ProTAL：一种拖拽式视频编程框架，用于时序动作定位](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.17555)（CHI 2025）\n11. [CLIP-AE：CLIP辅助的跨视角音视频增强技术，用于无监督时序动作定位](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.23524)（arXiv 2025）\n12. [基于大语言模型的查询扩展，用于提升语言驱动的动作定位中的边界预测](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.24282)\n13. (BRTAL) [BRTAL：基于偏移驱动扩散模型的边界精炼时序动作定位](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F10912675)（TCSVT 2025）\n14. (EDMP) [能量 vs. 噪声：迈向开放世界中鲁棒的时序动作定位](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F32659)（AAAI 2025）[代码](https:\u002F\u002Fgithub.com\u002FXD-mu\u002FEDMP)\n15. (AdaTAD++) [动作检测的规模化：具有Transformer增强型时空自适应能力的AdaTAD++](https:\u002F\u002Ficcv.thecvf.com\u002Fvirtual\u002F2025\u002Fposter\u002F1355)（ICCV 2025）\n16. (WiFiTAD) [基于双金字塔网络的WiFi时序活动检测](https:\u002F\u002Fgithub.com\u002FAVC2-UESTC\u002FWiFiTAD\u002Fblob\u002Fmain\u002FmainPaper.pdf)（AAAI 2025）[代码](https:\u002F\u002Fgithub.com\u002FAVC2-UESTC\u002FWiFiTAD)\n17. (RDFA-S6) [提升时序动作定位：利用循环机制的高级S6建模](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.13078)（arXiv 2025）[代码](https:\u002F\u002Fgithub.com\u002Flsy0882\u002FRDFA-S6)\n18. (MambaTAD) [MambaTAD：状态空间模型遇上长时序动作检测](https:\u002F\u002Farxiv.org\u002Fabs\u002F2511.17929)（TMM 2025）\n19. (FreETAD) (多标签) [循序渐进，从局部片段预测全局动作性曲线](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3746027.3754712)（ACM MM 2025）\n20. (TBT-Former) [TBT-Former：学习动作定位的时间边界分布](https:\u002F\u002Farxiv.org\u002Fabs\u002F2512.01298)（arXiv 2025）\n21. [带有扩展时序移位模块的多任务学习，用于时序动作定位](https:\u002F\u002Farxiv.org\u002Fabs\u002F2512.11189)（ICCV 2025 BinEgo-360挑战赛）\n\n## 2024\n\n1. (DenoiseLoc) [视频活动定位中的边界去噪](https:\u002F\u002Fopenreview.net\u002Fforum?id=bLpUtGyf9g)（ICLR 2024）[代码](https:\u002F\u002Fgithub.com\u002Ffrostinassiky\u002Fdenoiseloc)\n2. (LITA) [LITA：语言指导的时序定位助手](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.19046)（arXiv 2024）[代码](https:\u002F\u002Fgithub.com\u002FNVlabs\u002FLITA)\n3. (PLOT-TAL) (少样本) [PLOT-TAL——基于最优传输的提示学习，用于少样本时序动作定位](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.18915)（arXiv 2024）\n4. [针对时序扰动的时序动作检测模型鲁棒性基准测试](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.20254)（CVPR 2024）[代码](https:\u002F\u002Fgithub.com\u002FAlvin-Zeng\u002Ftemporal-robustness-benchmark)\n5. (零样本) (T3AL) [测试时零样本时序动作定位](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.05426)（CVPR 2024）[代码](https:\u002F\u002Fgithub.com\u002Fbenedettaliberatori\u002FT3AL)\n6. (UniMD) [UniMD：迈向统一时刻检索与时序动作检测](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.04933)（ECCV 2024）[代码](https:\u002F\u002Fgithub.com\u002Fyingsen1\u002FUniMD)\n7. [将短时Transformer适配用于未修剪视频中的动作检测](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.01897)（CVPR 2024）\n8. (AdaTAD) [具有10亿参数、跨1000帧的端到端时序动作检测](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.17241)（CVPR 2024）[代码](https:\u002F\u002Fgithub.com\u002Fsming256\u002FOpenTAD\u002Ftree\u002Fmain\u002Fconfigs\u002Fadatad)\n9. [视频Mamba套件：状态空间模型作为视频理解的多功能替代方案](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.09626)（ECCV 2024）[代码](https:\u002F\u002Fgithub.com\u002FOpenGVLab\u002Fvideo-mamba-suite)\n10. (TE-TAD) [TE-TAD：通过时间对齐的坐标表达，迈向完全端到端的时序动作检测](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.02405)（CVPR 2024）[代码](https:\u002F\u002Fgithub.com\u002FDotori-HJ\u002FTE-TAD)\n11. (ADI-Diff) [通过图像扩散过程进行动作检测](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.01051)（CVPR 2024）\n12. (DualDETR) [用于多标签时序动作检测的双DETREs](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.00653)（CVPR 2024）[代码](https:\u002F\u002Fgithub.com\u002FMCG-NJU\u002FDualDETR)\n13. [一种高效密集型多标签动作检测方法](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.06187)（arXiv 2024）\n14. (时空) [使用视频Transformer进行端到端的时空动作定位](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FGritsenko_End-to-End_Spatio-Temporal_Action_Localisation_with_Video_Transformers_CVPR_2024_paper.html)（CVPR 2024）\n15. (DyFADet) [DyFADet：用于时序动作检测的动态特征聚合](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.03197)（ECCV 2024）[代码](https:\u002F\u002Fgithub.com\u002Fyangle15\u002FDyFADet-pytorch)\n16. (causaltad) [利用时序因果关系进行高级时序动作检测](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.17792)（arXiv 2024）[代码](https:\u002F\u002Fgithub.com\u002Fsming256\u002FOpenTAD\u002Fcausaltad)\n17. (LTP) [使用Transformer进行时序动作检测的长期预训练](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.13152)（arXiv 2024）\n18. (Pred-DETR) [用于时序动作检测的预测-反馈DETR](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.16729)（arXiv 2024）\n19. [在时序动作检测中引入门控机制和上下文信息](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.04205)（ECCV W 2024）\n20. (ContextDet) [ContextDet：具有自适应上下文聚合功能的时序动作检测](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.15279)（arXiv 2024）\n21. (LMM：TimeMarker) [TimeMarker：一款多功能视频LLM，适用于长短视频理解，并具备卓越的时序定位能力](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.18211) [代码](https:\u002F\u002Fgithub.com\u002FTimeMarker-LLM\u002FTimeMarker\u002F)\n\n## 2023年\n1. (AMNet) [基于分组注意力的动作感知掩码网络用于时序动作定位](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FWACV2023\u002Fpapers\u002FKang_Action-Aware_Masking_Network_With_Group-Based_Attention_for_Temporal_Action_Localization_WACV_2023_paper.pdf) (WACV 2023)\n2. (ContextLoc++) [ContextLoc++：一种用于时序动作定位的统一上下文模型](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10018461) (TPAMI 2023)\n3. [基于课程学习的动态权重时序动作检测](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fabs\u002Fpii\u002FS0925231222015557) (Neurocomputing 2023)\n4. (GAP) [时序动作检测后处理](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.14924) (CVPR 2023) [代码](https:\u002F\u002Fgithub.com\u002Fsauradip\u002FGAP)\n5. (TriDet) [TriDet：基于相对边界建模的时序动作检测](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.07347) (CVPR 2023) [代码](https:\u002F\u002Fgithub.com\u002Fsssste\u002FTriDet)\n   - [增强瞬间可辨性的时序动作定位](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.05590)（扩展版本）\n6. (TemporalMaxer) [TemporalMaxer：仅使用最大池化最大化时序上下文以进行时序动作定位](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2303.09055.pdf) (ArXiv 2023) [代码](https:\u002F\u002Fgithub.com\u002Ftuantng\u002Ftemporalmaxer)\n7. (DiffTAD) [DiffTAD：基于提案去噪扩散的时序动作检测](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.14863) (ICCV 2023) [代码](https:\u002F\u002Fgithub.com\u002Fsauradip\u002FDiffusionTAD)\n8. [基于RGB的时序动作检测中的分解式跨模态蒸馏](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.17285) (CVPR 2023)\n9. [视频活动定位中的边界去噪](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.02934) (ArXiv 2023)\n10. (ASL) [用于时序动作定位的动作敏感性学习](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.15701) (ICCV 2023)\n11. (MMNet) [用于动作检测的多模态Transformer网络](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0031320323004119) (Pattern Recognition 2023)\n12. [具有多尺度扩张的截断注意力感知提案网络用于时序动作检测](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0031320323003825) (Pattern Recognition 2023)\n13. (MSST) [用于时序动作定位的多时序尺度与时空Transformer网络](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10120600) (IEEE Transactions on Human-Machine Systems 2023)\n14. [探索动作中心以进行时序动作定位](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10058582) (TMM 2023)\n15. (ETAD) [ETAD：在笔记本电脑上端到端训练动作检测](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023W\u002FECV\u002Fhtml\u002FLiu_ETAD_Training_Action_Detection_End_to_End_on_a_Laptop_CVPRW_2023_paper.html) (CVPRW 2023) [代码](https:\u002F\u002Fgithub.com\u002Fsming256\u002FETAD)\n16. (BasicTAD) [BasicTAD：令人惊叹的仅基于RGB的时序动作检测基线](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.02717) (CVIU 2023) [代码](https:\u002F\u002Fgithub.com\u002FMCG-NJU\u002FBasicTAD)\n17. (Re2TAL) [Re2TAL：为可逆时序动作定位重布线预训练视频骨干网络](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fpapers\u002FZhao_Re2TAL_Rewiring_Pretrained_Video_Backbones_for_Reversible_Temporal_Action_Localization_CVPR_2023_paper.pdf) (CVPR 2023) [代码](https:\u002F\u002Fgithub.com\u002Fcoolbay\u002FRe2TAL)\n18. (SoLa) [软着陆策略用于缓解时序动作定位任务中的任务差异问题](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fpapers\u002FKang_Soft-Landing_Strategy_for_Alleviating_the_Task_Discrepancy_Problem_in_Temporal_CVPR_2023_paper.pdf) (CVPR 2023)\n19. (APN) [视频中基于进度引导的时序动作检测](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.09268) (ArXiv 2023) [代码](https:\u002F\u002Fgithub.com\u002Fmakecent\u002FAPN)\n20. (Self-DETR) [用于时序动作检测的自反馈DETR](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fhtml\u002FKim_Self-Feedback_DETR_for_Temporal_Action_Detection_ICCV_2023_paper.html) (ICCV 2023)\n21. (UnLoc) [UnLoc：一个用于视频定位任务的统一框架](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.11062) (ICCV 2023) [代码](https:\u002F\u002Fgithub.com\u002Fgoogle-research\u002Fscenic)\n22. [时序动作定位模型的数据效率与计算效率基准测试](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.13082) (ICCV 2023研讨会)\n23. (BAPG) [面向时序动作定位的边界感知提案生成方法](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.13810) (ArXiv 2023)\n24. (MENet) [面向多尺度视频特征表示的运动增强用于时序动作检测](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fhtml\u002FZhao_Movement_Enhancement_toward_Multi-Scale_Video_Feature_Representation_for_Temporal_Action_ICCV_2023_paper.html) (ICCV 2023)\n25. (MRAV-FF) [用于时序动作定位的多分辨率音视频特征融合](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.03456) (ArXiv 2023)\n26. (BDRC-Net) [用于时序动作检测的边界离散化与可靠分类网络](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.06403) (ArXiv 2023) [代码](https:\u002F\u002Fgithub.com\u002Fzhenyingfang\u002FBDRC-Net)\n27. (STAN) [STAN：用于时序动作检测的空间–时间感知网络](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3606038.3616169) (ACM MM W 2023)\n28. (RefineTAD) [RefineTAD：学习无提案细化以进行时序动作检测](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3581783.3611872) (ACM MM 2023)\n29. [SADA：语义对抗无监督域适应用于时序动作定位](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.13377) (ArXiv 2023) [代码](https:\u002F\u002Fgithub.com\u002Fdavidpujol\u002FSADA)\n\n## 2022年\n1. (DCAN) [DCAN：通过双上下文聚合提升时序动作检测](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.03612)（AAAI 2022）\n2. (TVNet) [TVNet：用于动作定位的时序投票网络](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2201.00434.pdf)（arXiv 2022）[代码](https:\u002F\u002Fgithub.com\u002Fhanielwang\u002FTVNet)\n3. (ActionFormer) [ActionFormer：基于Transformer的动作时刻定位](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.07925)（ECCV 2022）[代码](https:\u002F\u002Fgithub.com\u002Fhappyharrycn\u002Factionformer_release)\n4. (SegTAD）[SegTAD：基于语义分割的精确时序动作检测](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.01542)（arXiv 2022）\n5. (OpenTAL) [OpenTAL：面向开放集时序动作定位](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2203.05114.pdf)（CVPR 2022）[代码](https:\u002F\u002Fwww.rit.edu\u002Factionlab\u002Fopental)\n6. (TALLFormer) [TALLFormer：基于长记忆Transformer的时序动作定位](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.01680)（CVPR 2022）\n7. [端到端时序动作检测的实证研究](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.02932)（CVPR 2022）[代码](https:\u002F\u002Fgithub.com\u002Fxlliu7\u002FE2E-TAD)\n8. (BREM) [时序动作检测中可靠提案质量的估计](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.11695)（ACM MM 2022）\n9. [用于时序动作定位的结构化注意力组合](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.09956)（Tip 2022）[代码](https:\u002F\u002Fgithub.com\u002FVividLe\u002FStructured-Attention-Composition)\n10. (RCL) [RCL：用于时序动作检测的循环连续定位](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FWang_RCL_Recurrent_Continuous_Localization_for_Temporal_Action_Detection_CVPR_2022_paper.pdf)（CVPR 2022）\n11. (RefactorNet) [学习重构动作与共现特征以进行时序动作定位](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FXia_Learning_To_Refactor_Action_and_Co-Occurrence_Features_for_Temporal_Action_CVPR_2022_paper.pdf)（CVPR 2022）\n12. (MS-TCT) [MS-TCT：用于动作检测的多尺度时序卷积Transformer](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FDai_MS-TCT_Multi-Scale_Temporal_ConvTransformer_for_Action_Detection_CVPR_2022_paper.pdf)（CVPR 2022）[代码](https:\u002F\u002Fgithub.com\u002Fdairui01\u002FMS-TCT)\n13. (OATD) [单阶段动作检测Transformer](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.10080)（EPICKITCHENS-100 2022 V. 26.35 N. 25.83）\n14. [面向时序动作检测的上下文感知提案网络](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.09082)（CVPR-2022 ActivityNet挑战赛获奖方案）\n15. [用于时序动作定位的双关系网络](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fabs\u002Fpii\u002FS0031320322002060)（模式识别2022）\n16. [学习解耦分类与定位表示以进行时序动作定位](https:\u002F\u002Fwww.aaai.org\u002FAAAI22Papers\u002FAAAI-926.ZhuZ.pdf)（AAAI 2022）\n17. (DDM) [针对通用事件边界检测的多级密集差异图上的渐进式注意力](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FTang_Progressive_Attention_on_Multi-Level_Dense_Difference_Maps_for_Generic_Event_CVPR_2022_paper.pdf)（CVPR 2022）[代码](https:\u002F\u002Fgithub.com\u002FMCG-NJU\u002FDDM)\n18. [提交至CVPR 2022通用事件边界检测挑战赛：局部上下文建模与全局边界解码方法](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2206.15268.pdf)（CVPR 2022挑战赛）\n19. (HTNet) [HTNet：基于层次化Transformer的无锚点时序动作定位](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.09662)（arXiv 2022）\n20. (STPT) [用于动作检测的高效时空金字塔Transformer](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.10448)（ECCV 2022）\n21. (TAGS) [基于全局分割掩码学习的无提案时序动作检测](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.06580)（ECCV 2022）[代码](https:\u002F\u002Fgithub.com\u002Fsauradip\u002FTAGS)\n22. [提示视觉-语言模型以实现高效的视频理解](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.04478)（ECCV 2022）[代码](https:\u002F\u002Fgithub.com\u002Fju-chen\u002FEfficient-Prompt)\n23. (ReAct) [ReAct：基于关系查询的时序动作检测](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.07097)（ECCV 2022）[代码](https:\u002F\u002Fgithub.com\u002Fsssste\u002FReact)\n24. (TadTR) [基于Transformer的端到端时序动作检测](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.10271)（TIP 2022）[代码](https:\u002F\u002Fgithub.com\u002Fxlliu7\u002FTadTR)\n25. (TAL-MTS) [基于多时间尺度的时序动作定位](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.07493)（arXiv 2022）\n26. (AdaPerFormer) [用于时序动作定位的自适应感知Transformer](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.11908)（arXiv 2022）[代码](https:\u002F\u002Fgithub.com\u002FSouperO\u002FAdaPerFormer)\n27. (PointTAD) [PointTAD：基于可学习查询点的多标签时序动作检测](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.11035)（NeurIPS 2022）[代码](https:\u002F\u002Fgithub.com\u002FMCG-NJU\u002FPointTAD)（多动作检测，例如multiTHUMOS、charades）\n28. (SoLa) [软着陆策略：缓解时序动作定位任务中的任务不匹配问题](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.06023)（arXiv 2022）\n29. (Re2TAL) [Re2TAL：重布线预训练视频骨干网络以实现可逆时序动作定位](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2211.14053.pdf)（arXiv 2022）\n30. (MUPPET) [多模态少样本时序动作检测](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.14905)（arXiv 2022）[代码](https:\u002F\u002Fgithub.com\u002Fsauradip\u002FMUPPET)\n31. [基于深度学习的未修剪视频中的动作检测：综述](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9839464)（TPAMI 2022）\n\n## 2021年\n1. (活动图变换器) [用于时序动作定位的活动图变换器](https:\u002F\u002Farxiv.org\u002Fabs\u002F2101.08540) (arXiv 2021) [项目](https:\u002F\u002Fwww.sfu.ca\u002F~mnawhal\u002Fprojects\u002Fagt.html) [代码](https:\u002F\u002Fgithub.com\u002FNmegha2601\u002Factivitygraph_transformer)\n2. [用于视频中时序动作检测的粗细网络](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.01302) (CVPR 2021) [代码](https:\u002F\u002Fgithub.com\u002Fkkahatapitiya\u002FCoarse-Fine-Networks)\n3. (MLAD) [建模多标签动作依赖关系以进行时序动作定位](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.03027) (CVPR 2021)\n4. (PcmNet) [PcmNet：用于时序动作定位的位置敏感上下文建模网络](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.05270) (Tip 2021)\n5. (AFSD) [学习无锚框时序动作定位中的显著边界特征](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.13137) (CVPR 2021) [代码](https:\u002F\u002Fgithub.com\u002FTencentYoutuResearch\u002FActionDetection-AFSD?utm_source=catalyzex.com)\n6. [用于时序动作定位的低保真端到端视频编码器预训练](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.15233) (arXiv 2021)\n7. [读取与注意：手语视频中的时序定位](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.16481) (CVPR 2021) (手语视频)\n8. [用于时序动作定位中抗混叠的低通滤波器](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.11403) (arXiv 2021)\n9. [FineAction：用于时序动作定位的精细视频数据集](https:\u002F\u002Farxiv.org\u002Fabs\u002F2105.11107) (ICCV2021深度行动研讨会的一个赛道) [主页](https:\u002F\u002Fdeeperaction.github.io\u002Ffineaction\u002F)\n10. [一石三鸟：通过复用时序标注实现多任务时序动作检测](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fhtml\u002FLi_Three_Birds_with_One_Stone_Multi-Task_Temporal_Action_Detection_via_CVPR_2021_paper.html) (CVPR 2021)\n11. [用于时序动作检测的提案关系网络](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.11812) (CVPRW 2021)\n12. [探索更强的特征用于时序动作定位](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.13014) (CVPRW 2021)\n13. (SRF-Net) [SRF-Net：用于无锚框时序动作检测的选择性感受野网络](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.15258) (ICASSP 2021)\n14. [仅RGB流就足以进行时序动作检测](https:\u002F\u002Farxiv.org\u002Fabs\u002F2107.04362) (arXiv 2021) [代码](https:\u002F\u002Fgithub.com\u002FMedia-Smart\u002Fvedatad?utm_source=catalyzex.com)\n15. (AVFusion) [请听我说：音频增强型时序动作定位的融合方法](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2106.14118v1.pdf) (arXiv 2021) [代码](https:\u002F\u002Fgithub.com\u002Fskelemoa\u002Ftal-hmo)\n16. [基于可迁移知识的多粒度聚合网络用于时序动作定位：提交至ActivityNet挑战赛2021](https:\u002F\u002Farxiv.org\u002Fabs\u002F2107.12618) (HACS挑战赛2021)\n17. [丰富局部与全局上下文以用于时序动作定位](https:\u002F\u002Farxiv.org\u002Fabs\u002F2107.12960) (ICCV 2021)\n18. (CSA) [基于类别语义的动作检测注意力机制](https:\u002F\u002Farxiv.org\u002Fabs\u002F2109.02613) (ICCV 2021)\n19. (SP-TAD) [迈向高质量的稀疏提案时序动作检测](https:\u002F\u002Farxiv.org\u002Fabs\u002F2109.08847) (arXiv 2021) [代码](https:\u002F\u002Fgithub.com\u002Fwjn922\u002FSP-TAD)\n20. [基于查询自适应变换器的少样本时序动作定位](https:\u002F\u002Farxiv.org\u002Fabs\u002F2110.10552) (BMVC 2021) [代码](https:\u002F\u002Fgithub.com\u002Fsauradip\u002FfewshotQAT) (少样本)\n21. [用于视频中时序动作定位的图卷积模块](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.00302) (TPAMI 2021)\n22. [MS-TCT：用于动作检测的多尺度时序卷积变换器](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.03902) (arXiv 2021)\n23. (VSGN) [用于时序动作定位的视频自拼接图网络](https:\u002F\u002Farxiv.org\u002Fabs\u002F2011.14598) (ICCV 2021) [代码](https:\u002F\u002Fgithub.com\u002Fcoolbay\u002FVSGN)\n24. (MUSES) [多镜头时序事件定位：一个基准](https:\u002F\u002Farxiv.org\u002Fabs\u002F2012.09434) (CVPR 2021) [项目](https:\u002F\u002Fsongbai.site\u002Fmuses\u002F) [代码](https:\u002F\u002Fgithub.com\u002Fxlliu7\u002FMUSES) [数据集](https:\u002F\u002Fsongbai.site\u002Fmuses\u002F)\n\n## 2020年\n\n1. (G-TAD) **G-TAD：用于时序动作检测的子图定位** (CVPR 2020) [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1911.11462) [代码.PyTorch](https:\u002F\u002Fgithub.com\u002Ffrostinassiky\u002Fgtad) [视频](https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=BlPxnDcykUo)\n2. (AGCN-P-3DCNNs) **基于图注意力的提案3D卷积网络用于动作检测** (AAAI 2020) [论文](https:\u002F\u002Fwww.aaai.org\u002FPapers\u002FAAAI\u002F2020GB\u002FAAAI-LiJ.1424.pdf)\n3. (PBRNet) **用于时序动作检测的渐进式边界精炼网络** (AAAI 2020) [论文](https:\u002F\u002Fwww.aaai.org\u002FPapers\u002FAAAI\u002F2020GB\u002FAAAI-LiuQ.4870.pdf)\n4. (TsaNet) **尺度很重要：用于在未修剪视频中精确动作定位的时序尺度聚合网络** (ICME 2020) [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1908.00707)\n5. **约束动作定位中的时序关系** (arXiv 2020) [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2002.07358)\n6. (CBR-Net) **CBR-Net：用于动作检测的级联边界精炼网络：提交至ActivityNet挑战赛2020（任务1）** (ActivityNet挑战赛2020) [论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2006.07526v2)\n7. [基于方差感知网络的时序动作定位](https:\u002F\u002Farxiv.org\u002Fabs\u002F2008.11254) (arXiv 2020)\n8. [单阶段时序动作定位网络中的边界不确定性](https:\u002F\u002Farxiv.org\u002Fabs\u002F2008.11170) (arXiv 2020，技术报告)\n9. [重新审视时序动作定位中的锚机制](https:\u002F\u002Farxiv.org\u002Fabs\u002F2008.09837) (Tip 2020) [代码.PyTorch](https:\u002F\u002Fgithub.com\u002FVividLe\u002FA2Net?utm_source=catalyzex.com)\n10. (C-TCN) [用于动作定位的深层概念级时序卷积网络](https:\u002F\u002Farxiv.org\u002Fabs\u002F1908.09442) (ACM MM 2020) [代码.PaddlePaddle](https:\u002F\u002Fgithub.com\u002FPaddlePaddle\u002Fmodels\u002Ftree\u002Fdevelop\u002FPaddleCV\u002Fvideo)\n11. (MLTPN) [用于动作检测的多层级时序金字塔网络](https:\u002F\u002Farxiv.org\u002Fabs\u002F2008.03270) (PRCV 2020)\n12. (SALAD) [SALAD：用于动作检测的自我评估学习](https:\u002F\u002Farxiv.org\u002Fabs\u002F2011.06958) (arXiv 2020)\n\n## 2019年\n\n1. (CMS-RC3D) **用于行为检测的上下文多尺度区域卷积3D网络**（ICCVBIC 2019）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1801.09184)\n2. (TGM) **用于视频的时序高斯混合层**（ICML 2019）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1803.06316) [代码.PyTorch](https:\u002F\u002Fgithub.com\u002Fpiergiaj\u002Ftgm-icml19)\n3. (Decouple-SSAD) **单阶段时序动作检测中的定位与分类解耦**（ICME 2019）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1904.07442) [代码.TensorFlow](https:\u002F\u002Fgithub.com\u002FHYPJUDY\u002FDecouple-SSAD)\n4. **时序动作定位中的特征表示与训练策略探索**（ICIP 2019）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1905.10608)\n5. (PGCN) **用于时序动作定位的图卷积网络**（ICCV 2019）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1909.03252) [代码.PyTorch](https:\u002F\u002Fgithub.com\u002FAlvin-Zeng\u002FPGCN)\n6. (S-2D-TAN) **学习稀疏2D时序相邻网络以进行时序动作定位**（ICCV 2019）（ICCV 2019 HACS时序动作定位挑战赛优胜方案）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1912.03612)  \n   - (2D-TAN) **学习2D时序相邻网络以实现自然语言描述的动作片段定位**（AAAI 2020）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1912.03590) [代码.PyTorch](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002F2D-TAN)\n7. (LCDC) **在特征空间中学习运动：用于细粒度动作检测的局部一致可变形卷积网络**（ICCV 2019）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1811.08815) [幻灯片](https:\u002F\u002Fknmac.github.io\u002Fprojects\u002Flcdc\u002FLCDC_slides_extended.pdf) [代码.TensorFlow](https:\u002F\u002Fgithub.com\u002Fknmac\u002FLCDC_release)\n8. (BLP) **BLP——边界似然精确定位网络，用于精确的时序动作定位**（ICASSP 2019）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1811.02189)\n9. (GTAN) **用于动作定位的高斯时序感知网络**（CVPR 2019）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1909.03877)\n10. **利用长短期依赖关系进行时序动作定位**（arXiv 2019）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1911.01060)\n11. **用于时序动作定位的关系注意力机制**（IEEE Trans TMM 2019）[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F8933113\u002Fversions)\n12. (AFO-TAD) **AFO-TAD：无锚点单阶段时序动作检测器**（arXiv 2019）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1910.08250)\n13. (DBS) **未修剪视频中时序动作检测的视频印记分割**（AAAI 2019）[论文](https:\u002F\u002Fwww.aaai.org\u002Fojs\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F4846)\n\n## 2018年\n\n1. **诊断时序动作检测器中的错误**（ECCV 2018）[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fpapers\u002FHumam_Alwassel_Diagnosing_Error_in_ECCV_2018_paper.pdf)\n2. (ETP) **通过演化时序提案实现精确的时序动作定位**（ICMR 2018）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1804.04803)\n3. (Action Search) **动作搜索：在视频中识别动作及其在时序动作定位中的应用**（ECCV 2018）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1706.04269) [代码.TensorFlow](https:\u002F\u002Fgithub.com\u002FHumamAlwassel\u002Faction-search)\n4. (TAL-Net) **重新思考用于时序动作定位的Faster R-CNN架构**（CVPR 2018）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1804.07667)\n5. **通过学习序列匹配网络实现单次动作定位**（CVPR 2018）[论文](http:\u002F\u002Fwww.porikli.com\u002Fmysite\u002Fpdfs\u002Fporikli%202018%20-%20One-shot%20action%20localization%20by%20learning%20sequence%20matching%20network.pdf)\n6. **通过联合识别-验证进行时序动作检测**（arXiv 2018）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1810.08375)\n7. (TPC) **探索时序保持网络以实现精确的时序动作定位**（AAAI 2018）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1708.03280)\n8. (SAP) **基于强化学习的自适应提案模型用于时序动作检测**（AAAI 2018）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1706.07251) [代码.Torch](https:\u002F\u002Fgithub.com\u002Fhjjpku\u002FAction_Detection_DQN)\n\n## 2017年\n\n1. (TCN) **用于视频中活动定位的时序上下文网络**（ICCV 2017）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1708.02349) [代码.caffe](https:\u002F\u002Fgithub.com\u002Fvdavid70619\u002FTCN)\n2. (SSN) **使用结构化片段网络进行时序动作检测**（ICCV 2017）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1704.06228) [代码.PyTorch](https:\u002F\u002Fgithub.com\u002Fyjxiong\u002Faction-detection)\n3. (R-C3D) **R-C3D：用于时序活动检测的区域卷积3D网络**（ICCV 2017）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1703.07814) [代码.caffe](https:\u002F\u002Fgithub.com\u002FVisionLearningGroup\u002FR-C3D) [代码.PyTorch](https:\u002F\u002Fgithub.com\u002Fsunnyxiaohu\u002FR-C3D.pytorch)\n4. (TCNs) **用于动作分割和检测的时序卷积网络**（CVPR 2017）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1611.05267) [代码.TensorFlow](https:\u002F\u002Fgithub.com\u002Fcolincsl\u002FTemporalConvolutionalNetworks)\n5. (SMS) **通过结构化最大值之和进行时序动作定位**（CVPR 2017）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1704.04671) [代码](https:\u002F\u002Fgithub.com\u002Fshallowyuan\u002Fstruct-max-sum)\n6. (SCC) **SCC：语义上下文级联以实现高效的动作检测**（CVPR 2017）[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fpapers\u002FHeilbron_SCC_Semantic_Context_CVPR_2017_paper.pdf)\n7. (CDC) **CDC：卷积-反卷积网络用于未修剪视频中精确的时序动作定位**（CVPR 2017）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1703.01515) [代码](https:\u002F\u002Fbitbucket.org\u002Fcolumbiadvmm\u002Fcdc\u002Fsrc\u002Fmaster\u002F) [项目](http:\u002F\u002Fwww.ee.columbia.edu\u002Fln\u002Fdvmm\u002FresearchProjects\u002Fcdc\u002Fcdc.html)\n8. (SS-TAD) **端到端、单流的未修剪视频中时序动作检测**（BMVC 2017）[论文](http:\u002F\u002Fvision.stanford.edu\u002Fpdf\u002Fbuch2017bmvc.pdf) [代码.PyTorch](https:\u002F\u002Fgithub.com\u002Fshyamal-b\u002Fss-tad\u002F)\n9. (CBR) **用于时序动作检测的级联边界回归**（BMVC 2017）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1705.01180) [代码.TensorFlow](https:\u002F\u002Fgithub.com\u002Fjiyanggao\u002FCBR)\n10. (SSAD) **单次时序动作检测**（ACM MM 2017）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1710.06236)\n\n## 之前\n\n1. (PSDF) **基于分数分布金字塔特征的时序动作定位**（CVPR 2016）[论文](https:\u002F\u002Fwww.zpascal.net\u002Fcvpr2016\u002FYuan_Temporal_Action_Localization_CVPR_2016_paper.pdf)\n2. **使用统计语言模型进行时序动作检测**（CVPR 2016）[论文](https:\u002F\u002Fwww.zpascal.net\u002Fcvpr2016\u002FRichard_Temporal_Action_Detection_CVPR_2016_paper.pdf) [代码](https:\u002F\u002Fgithub.com\u002Falexanderrichard\u002Fsquirrel)\n3. (S-CNN) **通过多阶段CNN在未修剪视频中进行时序动作定位**（CVPR 2016）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1601.02129) [代码](https:\u002F\u002Fgithub.com\u002Fzhengshou\u002Fscnn\u002F) [项目](http:\u002F\u002Fwww.ee.columbia.edu\u002Fln\u002Fdvmm\u002FresearchProjects\u002Fcdc\u002Fscnn.html)\n4. **从视频帧瞥见中端到端学习动作检测**（CVPR 2016）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1511.06984) [代码](https:\u002F\u002Fgithub.com\u002Fsyyeung\u002Fframeglimpses)\n\n----\n# **论文：弱监督时序动作检测**\n\n## 2026年\n1. (VLPO) [弱监督时序动作定位中的视觉-语言偏好优化](https:\u002F\u002Fopenreview.net\u002Fforum?id=ENwxBjOlAR)（提交至ICLR 2026）\n2. [通过文本精炼与对齐提升点标注时序动作定位性能](https:\u002F\u002Farxiv.org\u002Fabs\u002F2602.01257)（arXiv 2026）\n3. [探索点级弱监督时序动作定位中的时间一致性](https:\u002F\u002Farxiv.org\u002Fabs\u002F2602.05718)（arXiv 2026）\n4. [MSLU] [为弱监督时序动作定位建模语义与定位不确定性](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F11370967)（TCSVT 2026）\n\n## 2025年\n\n1. (AAPL) [面向时序动作检测的无关动作点级监督](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.21205)（AAAI 2025）[代码](https:\u002F\u002Fgithub.com\u002Fsmy-nec\u002FAAPL)\n2. (NoCo) [从噪声校正视角重新思考伪标签引导的弱监督时序动作定位学习](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.11124)（AAAI 2025）\n3. (SAL) [用于弱监督时序动作定位的多级语义与自适应动作性学习](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0893608024008347)（NN 2025）[代码](https:\u002F\u002Fgithub.com\u002Flizhilin-ustc\u002FSAL)\n4. (SDANet) [用于弱监督时序动作定位的片段间差异注意力网络](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10856540)（TMM 2025）\n5. (grounding) [面向点标注自然语言视频定位的协同时间一致性学习](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.17651)（arXiv 2025）\n6. [弥合差距：利用PseudoFormer实现从弱监督到全监督的时序动作定位](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.14860)（CVPR 2025）\n7. (MLLM4WTAL) [基于多模态大语言模型引导的双先验协同学习的弱监督时序动作定位](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fhtml\u002FZhang_Weakly_Supervised_Temporal_Action_Localization_via_Dual-Prior_Collaborative_Learning_Guided_CVPR_2025_paper.html)（CVPR 2025）\n8. (QROT) [通过整合查询重构与最优传输提升点标注时序动作定位性能](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fhtml\u002FLiu_Boosting_Point-Supervised_Temporal_Action_Localization_through_Integrating_Query_Reformation_and_CVPR_2025_paper.html)（CVPR 2025）\n9. (ActionDiff) [用于弱监督时序动作定位的动作间扩散网络](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F11197057)（TMM 2025）\n10. [CL-WTAL：基于多尺度对比学习的弱监督复杂时序动作定位](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F11272450)（TCSVT 2025）\n\n## 2024年\n1. (ISSF) [通过推断片段特征亲和力进行弱监督时序动作定位](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.12332)（AAAI 2024）\n2. (HR-Pro) [HR-Pro：基于层次可靠性传播的点标注时序动作定位](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.12608)（AAAI 2024）[代码](https:\u002F\u002Fgithub.com\u002Fpipixin321\u002FHR-Pro)\n3. [STAT：迈向可泛化的时序动作定位](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.13311)（Arxiv 2024）\n4. (TSPNet) [针对点级弱监督时序动作定位，将置信度与时间显著性信息重新对齐](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FXia_Realigning_Confidence_with_Temporal_Saliency_Information_for_Point-Level_Weakly-Supervised_Temporal_CVPR_2024_paper.html)（CVPR 2024）[代码](https:\u002F\u002Fgithub.com\u002Fzyxia1009\u002FCVPR2024-TSPNet)\n5. (M2PT) [采用多模态高原Transformer的弱监督时序动作定位](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024W\u002FL3D-IVU\u002Fhtml\u002FHu_Weakly-Supervised_Temporal_Action_Localization_with_Multi-Modal_Plateau_Transformers_CVPRW_2024_paper.html)（CVPR Workshop 2024）\n6. (EPNet) [用于弱监督时序动作定位的集成原型网络](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10479157)（TNNLS 2024）\n7. (FuSTAL) [用于弱监督时序动作定位的全阶段伪标签质量提升](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.08971)（arXiv 2024）[代码](https:\u002F\u002Fgithub.com\u002Ffqhank\u002FFuSTAL)\n8. (PVLR) [用于弱监督时序动作定位的概率型视觉-语言表征](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.05955)（ACM MM 2024）[代码](https:\u002F\u002Fgithub.com\u002Fsejong-rcv\u002FPVLR)\n9. (zero-shot) [迈向完备性：面向零样本时序动作定位的可泛化动作提案生成器](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.13777)（ICPR 2024）[代码](https:\u002F\u002Fgithub.com\u002FRun542968\u002FGAP)\n10. (SMBD) [用于点标注时序动作定位的分步多粒度边界检测器](https:\u002F\u002Feccv.ecva.net\u002Fvirtual\u002F2024\u002Fposter\u002F390)（ECCV 2024）\n11. [基于大型视觉-语言模型置信度的零样本动作定位](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.14340)（arXiv 2024）\n12. [MLLM能否指导弱监督时序动作定位任务？](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.08466)（arXiv 2024）\n13. [用于弱监督时序动作定位的基于不确定性的广义证据融合与混合多头注意力机制](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.19418)（arXiv 2024）[代码](https:\u002F\u002Fgithub.com\u002Fheyuanpengpku\u002FGUEF\u002Ftree\u002Fmain)\n14. (SQL-Net) (point) [SQL-Net：用于点标注时序动作定位的语义查询学习](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10814700)（TMM 2024）\n15. (AFPS) [采用动作性引导的假阳性抑制进行弱监督时序动作定位](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0893608024002314?via%3Dihub)（NN 2024）[代码](https:\u002F\u002Fgithub.com\u002Flizhilin-ustc\u002FAFPS)\n16. (point) [用于单帧监督时序动作定位的邻居引导伪标签生成与精炼](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F10478311)（TIP 2024）\n\n## 2023年\n1. (ASCN) [一种用于弱监督时序动作定位的新型动作显著性和上下文感知网络](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10007033)（TMM 2023）\n2. (TFE-DCN) [用于弱监督时序动作定位的时序特征增强空洞卷积网络](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FWACV2023\u002Fhtml\u002FZhou_Temporal_Feature_Enhancement_Dilated_Convolution_Network_for_Weakly-Supervised_Temporal_Action_WACV_2023_paper.html)（WACV 2023）\n3. (JCDNet) [JCDNet：用于弱监督时序动作定位的共性与确定性阶段联合网络](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.17294)（Arxiv 2023）\n4. (P-MIL) [基于提议的多实例学习用于弱监督时序动作定位](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FRen_Proposal-Based_Multiple_Instance_Learning_for_Weakly-Supervised_Temporal_Action_Localization_CVPR_2023_paper.html)（CVPR 2023）[代码](https:\u002F\u002Fgithub.com\u002FRenHuan1999\u002FCVPR2023_P-MIL)\n5. [具有语义感知机制的双流网络用于弱监督时序动作定位](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FWang_Two-Stream_Networks_for_Weakly-Supervised_Temporal_Action_Localization_With_Semantic-Aware_Mechanisms_CVPR_2023_paper.html)（CVPR 2023）\n6. [利用文本信息提升弱监督时序动作定位](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.00607)（CVPR 2023）[代码](https:\u002F\u002Fgithub.com\u002FlgzlIlIlI\u002FBoosting-WTAL)\n7. (PivoTAL) [PivoTAL：面向弱监督时序动作定位的先验驱动监督](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FRizve_PivoTAL_Prior-Driven_Supervision_for_Weakly-Supervised_Temporal_Action_Localization_CVPR_2023_paper.html)（CVPR 2023）\n8. [通过弥合伪标签中的训练-测试差距来改进弱监督时序动作定位](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fpapers\u002FZhou_Improving_Weakly_Supervised_Temporal_Action_Localization_by_Bridging_Train-Test_Gap_CVPR_2023_paper.pdf)（CVPR 2023）[代码](https:\u002F\u002Fgithub.com\u002Fzhou745\u002FGauFuse_WSTAL)\n9. (MTP) [用于弱监督时序动作定位的多种时序池化机制](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3567828)（TOMM 2023）\n10. (VQK-Net) [用于弱监督时序动作定位的视频特异性查询-键注意力建模](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.04186)\n11. (DFE) [用于弱监督时序动作定位的双重特征增强](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10096383)（ICASSP 2023）\n12. (FBA-Net) [用于弱监督时序动作定位的前景、背景和动作协同建模网络](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10115434)（TCSVT 2023）\n13. (Bi-SCC) [具有双向语义一致性约束的弱监督时序动作定位](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10115234)（TNNLS 2023）\n14. (F3-Net) [用于弱监督时序动作定位的特征弱化、上下文化与判别](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10091234)（TMM 2023）[代码](https:\u002F\u002Fmoniruzzamanmd.github.io\u002FF3-Net\u002F)\n15. (LPR) [用于弱监督时序动作定位的学习提议感知重新排序](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10144792)（TCSVT 2023）\n16. (STCL-Net) [用于弱监督时序动作定位的语义与时空上下文相关性学习](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10155179)（TPAMI 2023）\n17. [将视觉-语言预训练蒸馏以协同弱监督时序动作定位](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.09335)（CVPR 2023）\n18. [基于分层结构的潜在注意力建模的弱监督动作定位](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.09946)（ICCV 2023）\n19. [跨视频上下文知识的探索与利用以减少弱监督时序动作定位中的歧义](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.12609)（TCSVT 2023）\n20. (SPL-Loc) [用于点级弱监督时序动作定位的子动作原型学习](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.09060)（arXiv 2023）\n21. (DDG-Net) [DDG-Net：用于弱监督时序动作定位的可区分性驱动图网络](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fhtml\u002FTang_DDG-Net_Discriminability-Driven_Graph_Network_for_Weakly-supervised_Temporal_Action_Localization_ICCV_2023_paper.html)（ICCV 2023）[代码](https:\u002F\u002Fgithub.com\u002FXiaojunTang22\u002FICCV2023-DDGNet)\n22. [基于提议的时序动作定位，采用点级监督](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.05511)（BMVC 2023）\n23. (LPR) [LPR：通过再训练实现点级时序动作定位的学习](https:\u002F\u002Flink.springer.com\u002Farticle\u002F10.1007\u002Fs00530-023-01128-4)（MMSJ 2023）\n24. (POTLoc) [POTLoc：面向点级监督的时序动作定位的伪标签导向Transformer](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.13585)（arXiv 2023）\n25. (ADM-Loc) [ADM-Loc：用于点级监督时序动作定位的动作性分布建模](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.15916)（arXiv 2023）\n26. [重新审视弱监督时序动作定位中的前景与背景分离：基于聚类的方法](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fhtml\u002FLiu_Revisiting_Foreground_and_Background_Separation_in_Weakly-supervised_Temporal_Action_Localization_ICCV_2023_paper.html)（ICCV 2023）[代码](https:\u002F\u002Fgithub.com\u002FQinying-Liu\u002FCASE)\n27. [用于点级弱监督时序动作定位的子动作原型学习](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.09060)（arXiv 2023）\n28. (AICL) [用于弱监督时序动作定位的动作性不一致性引导对比学习](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F25237)（AAAI 2023）[代码](https:\u002F\u002Fgithub.com\u002Flizhilin-ustc\u002FAAAI2023-AICL)\n\n## 2022年\n1. (ACGNet) [ACGNet：用于弱监督时序动作定位的动作互补图网络](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2112.10977.pdf)（AAAI 2022）\n2. (RSKP) [基于代表性片段知识传播的弱监督时序动作定位](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2203.02925.pdf)（CVPR 2022）[代码](https:\u002F\u002Fgithub.com\u002FLeonHLJ\u002FRSKP)\n3. (ASM-Loc) [ASM-Loc：面向弱监督时序动作定位的动作感知片段建模](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.15187)（CVPR 2022）[代码](https:\u002F\u002Fgithub.com\u002Fboheumd\u002FASM-Loc)\n4. (FTCL) [用于弱监督时序动作定位的细粒度时序对比学习](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.16800)（CVPR 2022）[代码](https:\u002F\u002Fgithub.com\u002FMengyuanChen21\u002FCVPR2022-FTCL)\n5. (C3BN) [邻居之间凸组合一致性用于弱监督动作定位](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.00400)（arXiv 2022）\n6. (DCC) [探索去噪跨视频对比用于弱监督时序动作定位](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FLi_Exploring_Denoised_Cross-Video_Contrast_for_Weakly-Supervised_Temporal_Action_Localization_CVPR_2022_paper.pdf)（CVPR 2022）\n7. (HAAN) [基于层次化原子动作的细粒度视频弱监督时序动作检测](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.11805)（ECCV 2022）[代码](https:\u002F\u002Fgithub.com\u002Flizhi1104\u002FHAAN)\n8. (STALE) (**零样本**) [通过视觉-语言提示进行零样本时序动作检测](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.08184)（ECCV 2022）[代码](https:\u002F\u002Fgithub.com\u002Fsauradip\u002Fstale)\n9. (SMEN) [慢动作很重要：用于弱监督时序动作定位的慢动作增强网络](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.11324)（TCSVT 2022）\n10. [用于单帧监督时序动作定位的膨胀-腐蚀方法](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.06348)（arXiv 2022）\n11. (AMS) [自适应互监督用于弱监督时序动作定位](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.02357)（TMM 2022）\n12. (DELU) [用于弱监督时序动作定位的双重证据学习](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-031-19772-7_12)（ECCV 2022）[代码](https:\u002F\u002Fgithub.com\u002FMengyuanChen21\u002FECCV2022-DELU)\n\n## 2021年\n1. (HAM-Net) [一种用于弱监督时序动作定位的混合注意力机制](https:\u002F\u002Farxiv.org\u002Fabs\u002F2101.00545)。（AAAI 2021）\n2. [用于弱监督动作定位的跨注意力音频-视觉融合](https:\u002F\u002Fopenreview.net\u002Fforum?id=hWr3e3r-oH5)（ICLR 2021）\n3. [基于不确定性建模的弱监督时序动作定位](https:\u002F\u002Farxiv.org\u002Fabs\u002F2006.07006)（AAAI 2021）[代码](https:\u002F\u002Fgithub.com\u002FPilhyeon\u002FWTAL-Uncertainty-Modeling)\n4. (TS-PCA) [未修剪视频中未标记背景的优势](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.13183)（CVPR 2021）[代码](https:\u002F\u002Fgithub.com\u002Faliyun\u002FThe-Blessings-of-Unlabeled-Background-in-Untrimmed-Videos)\n5. (ACSNet) [ACSNet：用于弱监督时序动作定位的动作-上下文分离网络](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.15088)（AAAI 2021）\n6. (CoLA) [CoLA：基于片段对比学习的弱监督时序动作定位](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.16392)（CVPR 2021）\n7. [通过学习动作与上下文的显式子空间实现弱监督时序动作定位](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.16155)（AAAI 2021）\n8. [ACM-Net：用于弱监督时序动作定位的动作上下文建模网络](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.02967)（arXiv 2021，已提交至Tip）[代码](https:\u002F\u002Fgithub.com\u002Fispc-lab\u002FACM-Net)\n9. (AUMN) [用于弱监督时序动作定位的动作单元记忆网络](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.14135)（CVPR 2021）\n10. (ASL) [视频中的弱监督动作选择学习](https:\u002F\u002Farxiv.org\u002Fabs\u002F2105.02439)（CVPR 2021）\n11. (ActShufNet) [用于弱监督时序动作定位的动作洗牌](https:\u002F\u002Farxiv.org\u002Fabs\u002F2105.04208)（arXiv 2021）\n12. [无需知道边界即可进行少样本动作定位](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.04150)（arXiv 2021）\n13. [用于弱监督时序动作检测的不确定性引导协作训练](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fhtml\u002FYang_Uncertainty_Guided_Collaborative_Training_for_Weakly_Supervised_Temporal_Action_Detection_CVPR_2021_paper.html)（CVPR 2021）\n14. [双流共识网络：提交至HACS挑战赛2021弱监督学习赛道](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.10829)（CVPRW 2021）\n15. [通过局部-全局背景建模实现弱监督时序动作定位](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.11811)（CVPRW 2021）\n16. [用于弱监督时序动作定位的跨模态共识网络](https:\u002F\u002Farxiv.org\u002Fabs\u002F2107.12589)（ACM MM 2021）[代码](https:\u002F\u002Fgithub.com\u002Fharlanhong\u002FMM2021-CO2-Net)\n17. [从点云中学习动作完整性以实现弱监督时序动作定位](https:\u002F\u002Farxiv.org\u002Fabs\u002F2108.05029)（ICCV 2021）[代码](https:\u002F\u002Fgithub.com\u002FPilhyeon\u002FLearning-Action-Completeness-from-Points)\n18. [用于弱监督时序动作定位的深度运动先验](https:\u002F\u002Farxiv.org\u002Fabs\u002F2108.05607)（已提交至Tip 2021）[项目](https:\u002F\u002Fsites.google.com\u002Fview\u002Fmengcao\u002Fpublication\u002Fdmp-net?authuser=0)\n19. [用于弱监督时序动作定位的前景-动作一致性网络](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FHuang_Foreground-Action_Consistency_Network_for_Weakly_Supervised_Temporal_Action_Localization_ICCV_2021_paper.pdf)（ICCV 2021）\n20. (BackTAL) [背景点击监督用于时序动作定位](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.12449)（TPAMI 2021）[代码](https:\u002F\u002Fgithub.com\u002FVividLe\u002FBackTAL)\n21. (ACN) [用于弱监督时序动作定位的动作连贯性网络](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F9404867)（TMM 2021）\n22. [用于单帧时序动作定位的分而治之法](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FJu_Divide_and_Conquer_for_Single-Frame_Temporal_Action_Localization_ICCV_2021_paper.pdf)（ICCV 2021）\n\n## 2020年\n\n1. (WSGN) **用于动作检测的弱监督高斯网络**（WACV 2020）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1904.07774)\n2. **基于深度度量学习的弱监督时序动作定位**（WACV 2020）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2001.07793)\n3. **动作图：利用图卷积网络进行弱监督动作定位**（WACV 2020）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2002.01449)\n4. (DGAM) **基于生成式注意力建模的弱监督动作定位**（CVPR 2020）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2003.12424) [代码.PyTorch](https:\u002F\u002Fgithub.com\u002Fbfshi\u002FDGAM-Weakly-Supervised-Action-Localization)\n5. (EM-MIL) **基于期望-最大化多实例学习的弱监督动作定位**（ECCV 2020）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2004.00163)\n6. **用于弱监督时序动作定位的关系原型网络**（AAAI 2020）[论文](https:\u002F\u002Faaai.org\u002FPapers\u002FAAAI\u002F2020GB\u002FAAAI-HuangL.1235.pdf)\n7. (BaS-Net) **用于弱监督时序动作定位的背景抑制网络**（AAAI 2020）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1911.09963) [代码.PyTorch](https:\u002F\u002Fgithub.com\u002FPilhyeon\u002FBaSNet-pytorch)\n8. **基于不确定性估计的背景建模用于弱监督动作定位**（arXiv 2020）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2006.07006) [代码.PyTorch](https:\u002F\u002Fgithub.com\u002FPilhyeon\u002FBackground-Modeling-via-Uncertainty-Estimation)\n9. (A2CL-PT) **用于弱监督时序活动定位的对抗性背景感知损失**（ECCV 2020）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2007.06643) [代码.PyTorch](https:\u002F\u002Fgithub.com\u002FMichiganCOG\u002FA2CL-PT)\n10. **使用片段级标签的弱监督时序动作定位**（arXiv 2020）\n11. (ECM) **等效分类映射用于弱监督时序动作定位**（arXiv 2020 → TPAMI 2022）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2008.07728v1)\n12. [用于弱监督时序动作定位的双流一致性网络](https:\u002F\u002Farxiv.org\u002Fabs\u002F2010.11594v1)（ECCV 2020亮点论文）\n13. [无监督视频动作定位中的时序协同注意力模型学习](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2020\u002Fhtml\u002FGong_Learning_Temporal_Co-Attention_Models_for_Unsupervised_Video_Action_Localization_CVPR_2020_paper.html)（CVPR 2020）\n14. [基于背景感知网络的动作完整性建模用于弱监督时序动作定位](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3394171.3413687)（ACM MM 2020）\n15. (D2-Net) [D2-Net：通过判别嵌入和去噪激活实现弱监督动作定位]（arXiv 2020）（THUMOS'14 mAP@0.5：35.9）\n16. (SF-Net) [SF-Net：单帧监督用于时序动作定位]（arXiv 2003.06845）（ECCV 2020）[代码.PyTorch](https:\u002F\u002Fgithub.com\u002FFlowerfan\u002FSF-Net)\n17. [点级时序动作定位：连接全监督提案与弱监督损失]（arXiv 2012.08236）（arXiv 2020）\n18. [基于可迁移知识的多粒度融合网络用于弱监督时序动作检测]（IEEE Xplore，文档编号：9105103，关键词#keywords）（TMM 2020）\n19. [ActionBytes：从修剪后的视频中学习以定位动作]（CVPR 2020）\n\n## 2019年\n\n1. (AdapNet) **AdapNet：用于弱监督动作识别与定位的自适应分解编码器-解码器网络**（IEEE神经网络与学习系统汇刊）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1911.11961)\n2. **打破赢者通吃：用于弱监督时序动作定位的迭代优胜者剔除网络**（IEEE图像处理汇刊）[论文](https:\u002F\u002Ftanmingkui.github.io\u002Ffiles\u002Fpublications\u002FBreaking.pdf)\n3. **基于发生次数学习的弱监督时序定位**（ICML 2019）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1905.07293) [代码.TensorFlow](https:\u002F\u002Fgithub.com\u002FSchroeterJulien\u002FICML-2019-Weakly-Supervised-Temporal-Localization-via-Occurrence-Count-Learning)\n4. (MAAN) **用于弱监督学习的边缘化平均注意力网络**（ICLR 2019）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1905.08586) [代码.PyTorch](https:\u002F\u002Fgithub.com\u002Fyyuanad\u002FMAAN)\n5. **基于背景建模的弱监督动作定位**（ICCV 2019）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1908.06552)\n6. (TSM) **用于弱监督动作检测的时间结构挖掘**（ICCV 2019）[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fpapers\u002FYu_Temporal_Structure_Mining_for_Weakly_Supervised_Action_Detection_ICCV_2019_paper.pdf)\n7. (CleanNet) **通过对比评估网络实现弱监督时序动作定位**（ICCV 2019）[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fhtml\u002FLiu_Weakly_Supervised_Temporal_Action_Localization_Through_Contrast_Based_Evaluation_Networks_ICCV_2019_paper.html)\n8. (3C-Net) **3C-Net：类别计数与中心损失用于弱监督动作定位**（ICCV 2019）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1908.08216) [代码.PyTorch](https:\u002F\u002Fgithub.com\u002Fnaraysa\u002F3c-net)\n9. (CMCS) **用于弱监督时序动作定位的完整性建模与上下文分离**（CVPR 2019）[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FLiu_Completeness_Modeling_and_Context_Separation_for_Weakly_Supervised_Temporal_Action_CVPR_2019_paper.pdf) [代码.PyTorch](https:\u002F\u002Fgithub.com\u002FFinspire13\u002FCMCS-Temporal-Action-Localization)\n10. (RefineLoc) **RefineLoc：用于弱监督动作定位的迭代精炼**（arXiv 2019）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1904.00227) [主页](http:\u002F\u002Fhumamalwassel.com\u002Fpublication\u002Frefineloc\u002F)\n11. (ASSG) **用于弱监督时序动作定位的对抗性种子序列扩展**（ACM MM 2019）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1908.02422)\n12. (TSRNet) **在弱监督下从未修剪视频中学习可迁移的自我注意表征用于动作识别**（AAAI 2019）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1902.07370)\n13. (STAR) **用于弱监督多动作检测的分离式时序组装循环网络**（AAAI 2019）[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1811.07460)\n\n## 2018年\n\n1. [基于片段的时序池化网络的弱监督时序动作检测](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-030-04212-7_37)（ICONIP 2018）\n2. (W-TALC) [W-TALC：弱监督时序活动定位与分类]（ECCV 2018）[代码.PyTorch](https:\u002F\u002Fgithub.com\u002Fsujoyp\u002Fwtalc-pytorch?utm_source=catalyzex.com)\n3. (AutoLoc) [AutoLoc：弱监督时序动作定位]（ECCV 2018）[代码](https:\u002F\u002Fgithub.com\u002Fzhengshou\u002FAutoLoc?utm_source=catalyzex.com)\n4. (STPN) [稀疏时序池化网络的弱监督动作定位]（CVPR 2018）[代码](https:\u002F\u002Fgithub.com\u002Fdemianzhang\u002Fweakly-action-localization?utm_source=catalyzex.com)\n5. [逐步擦除、逐一收集：一种弱监督时序动作检测器]（ACM MM 2018）\n6. (CPMN) [用于弱监督时序动作定位的级联金字塔挖掘网络]（accv 2018）\n\n## 2017年\n\n1. (Hide-and-Seek) [捉迷藏：迫使网络在弱监督目标和动作定位中更加细致]（ICCV 2017）\n2. (UntrimmedNets) [用于弱监督动作识别与检测的UntrimmedNets]（CVPR 2017）[代码](https:\u002F\u002Fgithub.com\u002Fwanglimin\u002FUntrimmedNet)\n\n----\n# **论文：在线动作检测**\n\n## 2026年\n1. (COAD) [来自第一人称视频的连续在线动作检测]（即将发表于ICLR 2026）\n2. (MOAD) [回溯Mamba：通过层次化记忆压缩恢复关键时序上下文，用于在线动作检测]（AAAI 2026）\n\n## 2025年\n1. (TOAD) [文本驱动的在线动作检测]（Integrated Computer-Aided Engineering 2025）[代码](https:\u002F\u002Fgithub.com\u002F3dperceptionlab\u002Ftoad)\n2. (CMeRT) [面向在线动作检测的上下文增强型记忆精炼Transformer]（CVPR 2025）[代码](https:\u002F\u002Fgithub.com\u002Fpangzhan27\u002FCMeRT)\n3. [视觉与意图提升大语言模型在长期动作预测中的表现]（arXiv 2025）\n4. (PTMA) [跨视角在线动作检测中的概率性时序掩码注意力]（TMM 2025）\n5. (CDM-Tr) [用于在线动作检测与预测的长短时协同决策Transformer]（PR 2025）\n6. (TPT) [贯穿式程序化Transformer，用于在线动作检测与预测]（TCSVT 2025）\n7. (BiOMamba) [BiOMamba：基于Mamba的先向后向时序建模，用于在线动作检测与预测]（ACM MM 2025）\n\n## 2024年\n1. (JOADAA) [JOADAA：联合在线动作检测与动作预测]（WACV 2024）\n2. [对象感知的第一人称在线动作检测]（CVPRW 2024）\n3. [ActionSwitch：流式视频中同时动作的类无关检测]（ECCV 2024）[代码](https:\u002F\u002Fgithub.com\u002FmusicalOffering\u002FActionSwitch-release)\n4. (MATR) [基于记忆增强Transformer的在线时序动作定位]（ECCV 2024）[代码](https:\u002F\u002Fskhcjh231.github.io\u002FMATR_project\u002F)\n5. (HAT) [HAT：历史增强锚点Transformer，用于在线时序动作定位]（ECCV 2024）[代码](https:\u002F\u002Fgithub.com\u002Fsakibreza\u002FECCV24-HAT\u002F)\n6. [不同层级教师对在线动作检测的渐进式知识蒸馏]（TMM 2024）\n7. [OnlineTAS：时序动作分割的在线基线]（NeurIPS 2024）\n8. (OV-OAD) [视频-文本预训练是否有助于开放词汇在线动作检测？]（NeurIPS 2024）[代码](https:\u002F\u002Fgithub.com\u002FZQSIAT\u002FOV-OAD)\n9. [贝叶斯证据深度学习用于在线动作检测]（ECCV 2024）\n\n## 2023年\n1. (recognation) (GliTr) [GliTr：具有时空一致性的瞥视Transformer，用于在线动作预测]（WACV 2023）\n2. (E2E-LOAD) [E2E-LOAD：端到端长视频在线动作检测]（ICCV 2023）[代码](https:\u002F\u002Fgithub.com\u002Fsqiangcao99\u002FE2E-LOAD)\n3. (MiniROAD) [MiniROAD：用于在线动作检测的极简RNN框架]（ICCV 2023）[代码](https:\u002F\u002Fgithub.com\u002Fjbistanbul\u002FMiniROAD)\n4. (MAT) [用于在线动作理解的记忆与预测Transformer]（ICCV 2023）[代码](https:\u002F\u002Fgithub.com\u002FEcho0125\u002FMemory-and-Anticipation-Transformer)\n5. [通过对比学习学习未来表征的在线动作检测]（ICME 2023）\n6. (HCM) [HCM：基于硬视频片段挖掘的在线动作检测]（TMM 2023）\n7. (DFAformer) [DFAformer：一种双滤波辅助Transformer，用于流式视频中高效的在线动作检测]（PRCV 2023）\n\n## 2022年\n1. (Colar) [Colar：通过参考示例实现高效在线动作检测](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.01057)（CVPR 2022）[代码](https:\u002F\u002Fgithub.com\u002FVividLe\u002FOnline-Action-Detection)\n2. (GateHUB) [GateHUB：带有背景抑制的门控历史单元用于在线动作检测](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.04668)（CVPR 2022）\n3. [基于循环窗口的级联Transformer用于在线动作检测](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2208.14209.pdf)（TPAMI 2022）\n4. (TeSTra) [基于时间平滑Transformer的实时在线视频检测](https:\u002F\u002Farxiv.org\u002Fabs\u002F2209.09236)（ECCV 2022）[代码](https:\u002F\u002Fgithub.com\u002Fzhaoyue-zephyrus\u002FTeSTra)\n5. (OAT) [用于在线时序动作定位的滑动窗口方案](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-031-19830-4_37) [代码](https:\u002F\u002Fgithub.com\u002FYHKimGithub\u002FOAT-OSN)\n6. (SimOn) [SimOn：一种用于在线时序动作定位的简单框架](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2211.04905.pdf)（arXiv 2022）[代码](https:\u002F\u002Fgithub.com\u002FTuanTNG\u002FSimOn)\n7. (survey) [视频中的人类动作在线检测与预测：综述](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fabs\u002Fpii\u002FS0925231222003617?via%3Dihub)\n8. [基于不确定性的时空注意力机制用于在线动作检测](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2022\u002Fpapers_ECCV\u002Fpapers\u002F136640068.pdf)（ECCV 2022）\n9. (PPKD) [特权知识蒸馏用于在线动作检测](https:\u002F\u002Farxiv.org\u002Fabs\u002F2011.09158)（PR 2022）\n10. [信息提升网络用于在线动作检测与预测](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9857490)（CVPR W 2022）\n11. (2PESNet) [2PESNet：迈向时序动作定位的在线处理](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0031320322003521)（PR 2022）\n\n## 2021年\n\n1. (WOAD) [WOAD：未修剪视频中的弱监督在线动作检测](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fhtml\u002FGao_WOAD_Weakly_Supervised_Online_Action_Detection_in_Untrimmed_Videos_CVPR_2021_paper.html)（CVPR 2021）\n2. (OadTR) [OadTR：基于Transformer的在线动作检测](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.11149)（ICCV 2021）[代码](https:\u002F\u002Fgithub.com\u002Fwangxiang1230\u002FOadTR)\n3. (CAG-QIL) [CAG-QIL：基于Q模仿学习的上下文感知动作分组用于在线时序动作定位](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FKang_CAG-QIL_Context-Aware_Actionness_Grouping_via_Q_Imitation_Learning_for_Online_ICCV_2021_paper.pdf)（ICCV 2021）\n4. (LSTR) [用于在线动作检测的长短期Transformer](https:\u002F\u002Farxiv.org\u002Fabs\u002F2107.03377)（NeurIPS 2021）[代码](https:\u002F\u002Fgithub.com\u002Famazon-science\u002Flong-short-term-transformer)\n5. (TRN) [用于在线动作检测的时序循环网络](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9009797)（ICCV 2019）[代码](https:\u002F\u002Fgithub.com\u002Fxumingze0308\u002FTRN.pytorch)\n6. [pre awesome](https:\u002F\u002Fgithub.com\u002Fwangxiang1230\u002FAwesome-Online-Action-Detection)\n\n----\n# **半监督**\n\n## 2024年\n1. (APL) [面向半监督时序动作定位的自适应伪标签学习](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.07673)（ECCV 2024）\n\n## 2023年\n1. (NPL) [从噪声伪标签中学习用于半监督时序动作定位](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fhtml\u002FXia_Learning_from_Noisy_Pseudo_Labels_for_Semi-Supervised_Temporal_Action_Localization_ICCV_2023_paper.html)（ICCV 2023）[代码](https:\u002F\u002Fgithub.com\u002Fkunnxia\u002FNPL)\n\n## 2022年\n1. (AL-STAL) [具有有效评分函数的主动学习用于半监督时序动作定位](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.14856)（Displays 2022）\n2. (SPOT) [无提案掩码的半监督时序动作检测](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.07059)（ECCV 2022）[代码](https:\u002F\u002Fgithub.com\u002Fsauradip\u002FSPOT)\n\n## 2021年\n1. (SSTAP) [用于半监督时序动作提案的自监督学习](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.03214)（CVPR 2021）[代码](https:\u002F\u002Fgithub.com\u002Fwangxiang1230\u002FSSTAP)\n2. [多级监督下的时序动作检测](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FShi_Temporal_Action_Detection_With_Multi-Level_Supervision_ICCV_2021_paper.pdf)（ICCV 2021）[代码](https:\u002F\u002Fgithub.com\u002Fbfshi\u002FSSAD_OSAD)\n3. (KFC) [KFC：一种用于半监督时序动作定位的高效框架](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F9500051)（Tip 2021）\n\n## 2019年\n1. [用更少的标注学习时序动作提案](https:\u002F\u002Farxiv.org\u002Fabs\u002F1910.01286)（ICCV 2019）\n2. (TTC-Loc) [迈向半监督时序动作定位的训练-测试一致性](https:\u002F\u002Farxiv.org\u002Fabs\u002F1910.11285v3)（arXiv 2019）\n\n----\n# **开放词汇时序动作检测**\n\n## 2026年\n1. (PSFTR) [用于零样本时序动作定位的渐进式语义融合Transformer](https:\u002F\u002Fopenreview.net\u002Fforum?id=at3UEJzCRc)（即将提交至ICLR 2026）\n2. (zero-shot) [TF-CADE：面向零样本时序动作检测的前景集中型文本-视频对齐]()（CVPR 2026）\n3. (zero-shot) [双向时序敏感适配用于广义零样本时序动作定位](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F11396058)（TCSVT 2026）\n4. [分解与迁移：CoT提示增强的对齐用于开放词汇时序动作检测](https:\u002F\u002Farxiv.org\u002Fabs\u002F2603.24030)（CVPR 2026）\n\n## 2025年\n1. (zero-shot, FreeZAD) [无需训练的零样本时序动作检测，借助视觉-语言模型](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.13795)（arXiv 2025）\n2. (STOV-TAL) [探索自训练在开放词汇时序动作定位中的可扩展性](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.07024)（WACV 2025）[代码](https:\u002F\u002Fgithub.com\u002FHYUNJS\u002FSTOV-TAL)\n3. [用于第一人称视角视频的零样本时序交互定位](https:\u002F\u002Farxiv.org\u002Fabs\u002F2506.03662)（arXiv 2025）[代码](https:\u002F\u002Fgithub.com\u002FIRMVLab\u002FEgoLoc)\n\n## 2024年\n1. [利用时间多尺度与动作标签特征的单阶段开放词汇时序动作检测](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.19542)（FG 2024）\n2. (OVFormer) [基于多模态引导的开放词汇时序动作定位](https:\u002F\u002Fbmva-archive.org.uk\u002Fbmvc\u002F2024\u002Fpapers\u002FPaper_1013\u002Fpaper.pdf)（BMVC 2024）\n3. (OV-OAD) [视频-文本预训练是否有助于开放词汇在线动作检测？](https:\u002F\u002Fnips.cc\u002Fvirtual\u002F2024\u002Fposter\u002F95303)（NeurIPS 2024）\n4. (DeTAL) [DeTAL：采用解耦网络的开放词汇时序动作定位](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10517407)（TPAMI 2024）[代码](https:\u002F\u002Fgithub.com\u002Fvsislab\u002FDeTAL)\n5. [基于迭代视觉提示的开放词汇动作定位](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.17422)（arXiv 2024）\n6. (T3AL) [测试时零样本时序动作定位](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.05426)（CVPR 2024）[代码](https:\u002F\u002Fgithub.com\u002Fbenedettaliberatori\u002FT3AL)\n7. (ZEETAD) [ZEETAD：将预训练视觉-语言模型适配用于零样本端到端时序动作检测](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FWACV2024\u002Fpapers\u002FPhan_ZEETAD_Adapting_Pretrained_Vision-Language_Model_for_Zero-Shot_End-to-End_Temporal_Action_WACV_2024_paper.pdf)（WACV 2024）\n8. [通过大型视觉-语言模型置信度实现零样本动作定位](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.14340)（arXiv 2024）\n9. (Ti-FAD) [面向零样本时序动作检测的文本注入注意力与前景感知建模](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper_files\u002Fpaper\u002F2024\u002Fhash\u002F13250eb13871b3c2c0a0667b54bad165-Abstract-Conference.html)（NeurIPS 2024）[代码](https:\u002F\u002Fgithub.com\u002FYearangLee\u002FTi-FAD)\n10. (mProTEA) [通过学习多模态提示和文本增强的动作性实现零样本时序动作检测](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F10556637)（TCSVT 2024）\n11. (GRIZAL) [GRIZAL：生成式先验引导的零样本时序动作定位](https:\u002F\u002Faclanthology.org\u002F2024.emnlp-main.1061.pdf)（EMNLP 2024）[代码](https:\u002F\u002Fgithub.com\u002FCandleLabAI\u002FGRIZAL-EMNLP2024)\n\n## 2023年\n\n1. (CELL) [面向开放世界弱监督时序动作定位的级联证据学习](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FChen_Cascade_Evidential_Learning_for_Open-World_Weakly-Supervised_Temporal_Action_Localization_CVPR_2023_paper.html)（CVPR 2023）\n2. (OW-TAL) [OW-TAL：为开放世界时序动作定位学习未知人类活动](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0031320322005076)（PR 2023）\n3. (TN-ZSTAD) [TN-ZSTAD：用于零样本时序活动检测的可迁移网络](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9797852)（TAPMI 2023）\n4. [面向低样本时序动作定位的多模态提示](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.11732)（arXiv 2023）\n\n## 2022年及之前\n\n1. [使用现成图像-文本特征的开放词汇时序动作检测](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2212.10596.pdf)（arxiv 2022）\n2. (OpenTAL) [OpenTAL：迈向开放集时序动作定位](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2203.05114.pdf)（CVPR 2022）[代码](https:\u002F\u002Fwww.rit.edu\u002Factionlab\u002Fopental)\n3. [通过提示视觉-语言模型实现高效视频理解](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.04478)（ECCV 2022）[代码](https:\u002F\u002Fju-chen.github.io\u002Fefficient-prompt\u002F)\n4. (STALE) (**零样本**) [通过视觉-语言提示实现零样本时序动作检测](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.08184)（ECCV 2022）[代码](https:\u002F\u002Fgithub.com\u002Fsauradip\u002Fstale)\n5. [零样本自然语言视频定位](https:\u002F\u002Farxiv.org\u002Fabs\u002F2110.00428)（ICCV 2021）\n6. (ZSTAD) [ZSTAD：零样本时序活动检测](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2020\u002Fpapers\u002FZhang_ZSTAD_Zero-Shot_Temporal_Activity_Detection_CVPR_2020_paper.pdf)（CVPR 2020）","# Awesome-Temporal-Action-Detection 快速上手指南\n\n本仓库是一个关于**时序动作检测（Temporal Action Detection）**、**弱监督\u002F半监督检测**、**时序动作提案生成（TAPG）**以及**开放词汇检测**的论文与资源汇总列表。它本身不是一个单一的可执行软件，而是指向各个具体算法（如 BSN, BMN, RTD-Net 等）的代码库和论文链接。\n\n以下指南将指导你如何利用该列表找到目标模型，并搭建通用的开发环境以运行相关代码。\n\n## 1. 环境准备\n\n大多数列出的现代时序动作检测模型基于深度学习框架（PyTorch 或 TensorFlow）。以下是推荐的通用环境配置：\n\n*   **操作系统**: Linux (Ubuntu 18.04\u002F20.04 推荐) 或 macOS\n*   **Python**: 3.7 - 3.9 (根据具体模型要求，建议 3.8)\n*   **GPU**: NVIDIA GPU (推荐显存 >= 8GB)，需安装对应的 CUDA 驱动\n*   **核心依赖**:\n    *   PyTorch (主流选择) 或 TensorFlow\n    *   NumPy, SciPy\n    *   OpenCV-Python (`cv2`)\n    *   Pandas, Pickle (用于处理 ActivityNet 等数据集标注)\n\n> **国内加速建议**：\n> 推荐使用清华源或阿里源安装 Python 包，以提升下载速度。\n> ```bash\n> pip install -i https:\u002F\u002Fpypi.tuna.tsinghua.edu.cn\u002Fsimple \u003Cpackage_name>\n> ```\n\n## 2. 安装步骤\n\n由于本仓库是资源列表，你需要先从中选择具体的算法项目（例如 `BMN` 或 `RTD-Net`），然后进入该项目目录进行安装。以下为基于主流 PyTorch 模型的通用安装流程：\n\n### 第一步：克隆目标项目代码\n在 [README](https:\u002F\u002Fgithub.com\u002Fzhenyingfang\u002FAwesome-Temporal-Action-Detection-Temporal-Action-Proposal-Generation) 中找到你感兴趣的模型（例如 2019 年的 **BMN** 或 2021 年的 **RTD-Net**），点击其对应的 `[code]` 链接进入官方仓库。\n\n假设我们选择一个典型的 PyTorch 项目：\n```bash\ngit clone https:\u002F\u002Fgithub.com\u002FJJBOY\u002FBMN-Boundary-Matching-Network.git\ncd BMN-Boundary-Matching-Network\n```\n\n### 第二步：创建虚拟环境并安装依赖\n```bash\n# 创建虚拟环境\nconda create -n tad_env python=3.8\nconda activate tad_env\n\n# 安装 PyTorch (根据CUDA版本选择，此处以CUDA 11.1为例，国内用户可使用清华源)\npip install torch==1.9.0+cu111 torchvision==0.10.0+cu111 -f https:\u002F\u002Fdownload.pytorch.org\u002Fwhl\u002Ftorch_stable.html\n\n# 安装其他依赖 (通常项目根目录有 requirements.txt)\npip install -r requirements.txt -i https:\u002F\u002Fpypi.tuna.tsinghua.edu.cn\u002Fsimple\n\n# 若没有 requirements.txt，手动安装常用包\npip install opencv-python numpy pandas scipy tensorboardX -i https:\u002F\u002Fpypi.tuna.tsinghua.edu.cn\u002Fsimple\n```\n\n### 第三步：准备数据集\n时序动作检测通常使用 **ActivityNet-1.3** 或 **THUMOS14** 数据集。\n1. 下载视频文件和标注文件（JSON格式）。\n2. 按照项目说明提取视频特征（如 TSN 特征）或直接使用原始视频（取决于模型架构）。\n3. 修改配置文件中的数据路径。\n\n## 3. 基本使用\n\n以下以典型的 **训练** 和 **评估** 流程为例（具体命令参数需参考所选项目的 `README`）：\n\n### 场景 A：训练模型 (Training)\n使用预提取的特征或原始视频开始训练。\n```bash\n# 示例：启动训练脚本，指定配置文件\npython main.py --config config\u002Fbmn_activitynet.yaml --mode train\n```\n\n### 场景 B：评估模型 (Evaluation)\n加载预训练权重并在测试集上生成检测结果。\n```bash\n# 示例：运行评估脚本\npython main.py --config config\u002Fbmn_activitynet.yaml --mode eval --checkpoint checkpoints\u002Fbmn_best.pth\n```\n\n### 场景 C：获取预训练模型\n本列表在 **\"about pretrained model\"** 章节提供了多个强大的预训练编码器，可用于提升下游任务性能：\n*   **BSP**: Boundary-sensitive Pre-training (ICCV 2021)\n*   **TSP**: Temporally-Sensitive Pretraining (ICCVW 2021)\n*   **UP-TAL**: Unsupervised Pre-training (CVPR 2022) - [代码链接](https:\u002F\u002Fgithub.com\u002Fzhang-can\u002FUP-TAL)\n\n下载相应权重后，将其放入项目的 `checkpoints` 目录，并在配置文件中指定路径即可微调使用。\n\n---\n**提示**：由于该列表涵盖了从 2016 年到 2026 年（预测\u002F最新）的众多论文，不同年份的模型架构差异较大（如从 CNN 到 Transformer 再到 Mamba）。请务必进入具体论文的 GitHub 页面查看其特有的依赖和运行指令。","某视频内容平台的技术团队正致力于从海量用户上传的长视频中，自动提取出“烹饪教学”、“健身动作”等关键片段以构建结构化索引。\n\n### 没有 Awesome-Temporal-Action-Detection-Temporal-Action-Proposal-Generation 时\n- **人工标注成本极高**：面对数万小时的未剪辑视频，依赖人工逐帧标记动作起止时间，耗时数月且难以扩展。\n- **传统算法精度不足**：现有基础模型难以区分背景噪声与细微动作边界，导致提取的片段往往包含大量无效画面或截断关键步骤。\n- **弱监督场景束手无策**：对于仅有视频标题而无详细时间戳标注的数据，缺乏有效的弱监督学习方案，导致大量数据无法被利用。\n- **新类别识别困难**：每当需要新增一种动作类型（如“瑜伽拉伸”），必须重新收集大量标注数据训练模型，无法实现开放词汇检测。\n\n### 使用 Awesome-Temporal-Action-Detection-Temporal-Action-Proposal-Generation 后\n- **自动化提案生成**：利用集成的时序动作提案生成（TAPG）算法，自动精准定位潜在动作片段，将人工审核工作量减少 90% 以上。\n- **边界定位更精准**：采用如 BCNet 或 SMBG 等先进模型，显著提升了动作开始与结束帧的检测精度，确保提取片段干净完整。\n- **解锁弱监督潜力**：直接应用弱监督时序检测方案，仅需视频级标签即可训练高精度模型，盘活了平台存量无细粒度标注数据。\n- **灵活拓展新类别**：借助开放词汇检测能力，无需重新训练即可通过文本描述识别新动作类型，大幅缩短业务上线周期。\n\nAwesome-Temporal-Action-Detection-Temporal-Action-Proposal-Generation 通过整合前沿的时序定位与提案生成技术，将非结构化视频转化为高价值结构化数据的效率提升了数量级。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fzhenyingfang_Awesome-Temporal-Action-Detection-Temporal-Action-Proposal-Generation_4caf4c96.png","zhenyingfang","Zhenying Fang","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002Fzhenyingfang_584ede90.jpg","0.0",null,"hefei","zhenyingfang@outlook.com","https:\u002F\u002Fzhenyingfang.github.io","https:\u002F\u002Fgithub.com\u002Fzhenyingfang",578,43,"2026-04-09T05:04:08",5,"","未说明",{"notes":93,"python":91,"dependencies":94},"该仓库是一个论文和代码资源的汇总列表（Awesome List），而非单一的独立软件工具。它收录了从 2016 年到 2026 年（预测\u002F占位）的多个关于时序动作检测、提议生成及相关任务的学术项目。每个列出的项目（如 BSN, BMN, RTD-Net 等）都有自己独立的代码仓库链接和技术栈要求（部分基于 TensorFlow，部分基于 PyTorch 或 PaddlePaddle）。因此，本仓库本身没有统一的运行环境、GPU 或依赖库需求。用户需根据具体想要复现的某篇论文，访问其对应的子项目链接以获取详细的环境配置说明。",[],[16,29],"2026-03-27T02:49:30.150509","2026-04-12T07:55:24.391548",[99,104,109],{"id":100,"question_zh":101,"answer_zh":102,"source_url":103},30411,"该项目是否有计划跟进视频时空动作检测（spatio-temporal action detection）领域？","目前暂无计划进入时空动作检测领域。这主要取决于计算资源的限制，因此暂时不会跟进该方向。","https:\u002F\u002Fgithub.com\u002Fzhenyingfang\u002FAwesome-Temporal-Action-Detection-Temporal-Action-Proposal-Generation\u002Fissues\u002F8",{"id":105,"question_zh":106,"answer_zh":107,"source_url":108},30412,"如何请求更新列表中特定论文（如 MUSES 和 TadTR）的详细信息？","用户可以在 Issue 中提供论文的标题、发表会议\u002F期刊年份、arXiv 链接、代码仓库链接以及项目主页或数据集链接。维护者在收到这些信息并确认是优秀工作后，会及时更新列表。例如，提供 TadTR (TIP 2022) 和 MUSES (CVPR 2021) 的相关链接后，维护者已完成更新。","https:\u002F\u002Fgithub.com\u002Fzhenyingfang\u002FAwesome-Temporal-Action-Detection-Temporal-Action-Proposal-Generation\u002Fissues\u002F3",{"id":110,"question_zh":111,"answer_zh":112,"source_url":113},30413,"如果发现新的动作检测相关论文被顶级会议（如 ECCV）录用，该如何提交？","用户可以直接在 Issue 中列出新录用的论文清单，包含论文标题和 arXiv 链接。维护者表示感谢后会将其添加到列表中。例如，有用户提交了 5 篇 ECCV 2022 录用的时序动作检测论文，维护者随后已全部添加。","https:\u002F\u002Fgithub.com\u002Fzhenyingfang\u002FAwesome-Temporal-Action-Detection-Temporal-Action-Proposal-Generation\u002Fissues\u002F2",[]]