[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-DavidZhangdw--Visual-Tracking-Development":3,"tool-DavidZhangdw--Visual-Tracking-Development":61},[4,18,26,36,44,52],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":17},4358,"openclaw","openclaw\u002Fopenclaw","OpenClaw 是一款专为个人打造的本地化 AI 助手，旨在让你在自己的设备上拥有完全可控的智能伙伴。它打破了传统 AI 助手局限于特定网页或应用的束缚，能够直接接入你日常使用的各类通讯渠道，包括微信、WhatsApp、Telegram、Discord、iMessage 等数十种平台。无论你在哪个聊天软件中发送消息，OpenClaw 都能即时响应，甚至支持在 macOS、iOS 和 Android 设备上进行语音交互，并提供实时的画布渲染功能供你操控。\n\n这款工具主要解决了用户对数据隐私、响应速度以及“始终在线”体验的需求。通过将 AI 部署在本地，用户无需依赖云端服务即可享受快速、私密的智能辅助，真正实现了“你的数据，你做主”。其独特的技术亮点在于强大的网关架构，将控制平面与核心助手分离，确保跨平台通信的流畅性与扩展性。\n\nOpenClaw 非常适合希望构建个性化工作流的技术爱好者、开发者，以及注重隐私保护且不愿被单一生态绑定的普通用户。只要具备基础的终端操作能力（支持 macOS、Linux 及 Windows WSL2），即可通过简单的命令行引导完成部署。如果你渴望拥有一个懂你",349277,3,"2026-04-06T06:32:30",[13,14,15,16],"Agent","开发框架","图像","数据工具","ready",{"id":19,"name":20,"github_repo":21,"description_zh":22,"stars":23,"difficulty_score":10,"last_commit_at":24,"category_tags":25,"status":17},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,"2026-04-05T11:01:52",[14,15,13],{"id":27,"name":28,"github_repo":29,"description_zh":30,"stars":31,"difficulty_score":32,"last_commit_at":33,"category_tags":34,"status":17},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",141543,2,"2026-04-06T11:32:54",[14,13,35],"语言模型",{"id":37,"name":38,"github_repo":39,"description_zh":40,"stars":41,"difficulty_score":32,"last_commit_at":42,"category_tags":43,"status":17},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",107888,"2026-04-06T11:32:50",[14,15,13],{"id":45,"name":46,"github_repo":47,"description_zh":48,"stars":49,"difficulty_score":10,"last_commit_at":50,"category_tags":51,"status":17},4487,"LLMs-from-scratch","rasbt\u002FLLMs-from-scratch","LLMs-from-scratch 是一个基于 PyTorch 的开源教育项目，旨在引导用户从零开始一步步构建一个类似 ChatGPT 的大型语言模型（LLM）。它不仅是同名技术著作的官方代码库，更提供了一套完整的实践方案，涵盖模型开发、预训练及微调的全过程。\n\n该项目主要解决了大模型领域“黑盒化”的学习痛点。许多开发者虽能调用现成模型，却难以深入理解其内部架构与训练机制。通过亲手编写每一行核心代码，用户能够透彻掌握 Transformer 架构、注意力机制等关键原理，从而真正理解大模型是如何“思考”的。此外，项目还包含了加载大型预训练权重进行微调的代码，帮助用户将理论知识延伸至实际应用。\n\nLLMs-from-scratch 特别适合希望深入底层原理的 AI 开发者、研究人员以及计算机专业的学生。对于不满足于仅使用 API，而是渴望探究模型构建细节的技术人员而言，这是极佳的学习资源。其独特的技术亮点在于“循序渐进”的教学设计：将复杂的系统工程拆解为清晰的步骤，配合详细的图表与示例，让构建一个虽小但功能完备的大模型变得触手可及。无论你是想夯实理论基础，还是为未来研发更大规模的模型做准备",90106,"2026-04-06T11:19:32",[35,15,13,14],{"id":53,"name":54,"github_repo":55,"description_zh":56,"stars":57,"difficulty_score":10,"last_commit_at":58,"category_tags":59,"status":17},4292,"Deep-Live-Cam","hacksider\u002FDeep-Live-Cam","Deep-Live-Cam 是一款专注于实时换脸与视频生成的开源工具，用户仅需一张静态照片，即可通过“一键操作”实现摄像头画面的即时变脸或制作深度伪造视频。它有效解决了传统换脸技术流程繁琐、对硬件配置要求极高以及难以实时预览的痛点，让高质量的数字内容创作变得触手可及。\n\n这款工具不仅适合开发者和技术研究人员探索算法边界，更因其极简的操作逻辑（仅需三步：选脸、选摄像头、启动），广泛适用于普通用户、内容创作者、设计师及直播主播。无论是为了动画角色定制、服装展示模特替换，还是制作趣味短视频和直播互动，Deep-Live-Cam 都能提供流畅的支持。\n\n其核心技术亮点在于强大的实时处理能力，支持口型遮罩（Mouth Mask）以保留使用者原始的嘴部动作，确保表情自然精准；同时具备“人脸映射”功能，可同时对画面中的多个主体应用不同面孔。此外，项目内置了严格的内容安全过滤机制，自动拦截涉及裸露、暴力等不当素材，并倡导用户在获得授权及明确标注的前提下合规使用，体现了技术发展与伦理责任的平衡。",88924,"2026-04-06T03:28:53",[14,15,13,60],"视频",{"id":62,"github_repo":63,"name":64,"description_en":65,"description_zh":66,"ai_summary_zh":66,"readme_en":67,"readme_zh":68,"quickstart_zh":69,"use_case_zh":70,"hero_image_url":71,"owner_login":72,"owner_name":73,"owner_avatar_url":74,"owner_bio":75,"owner_company":76,"owner_location":75,"owner_email":75,"owner_twitter":75,"owner_website":75,"owner_url":77,"languages":78,"stars":83,"forks":84,"last_commit_at":85,"license":75,"difficulty_score":86,"env_os":87,"env_gpu":88,"env_ram":88,"env_deps":89,"category_tags":92,"github_topics":94,"view_count":32,"oss_zip_url":75,"oss_zip_packed_at":75,"status":17,"created_at":98,"updated_at":99,"faqs":100,"releases":101},4383,"DavidZhangdw\u002FVisual-Tracking-Development","Visual-Tracking-Development","Visual Object Tracking","Visual-Tracking-Development 是一个专注于视觉目标跟踪领域的开源协作项目，旨在汇聚全球研究力量，共同推动该技术的演进。它主要解决了计算机视觉中“如何在连续视频帧中精准锁定并跟随特定物体”的核心难题，广泛应用于智能监控、自动驾驶及人机交互等场景。\n\n该项目特别适合计算机视觉领域的研究人员、算法开发者以及希望深入探索跟踪技术的学生使用。与其说它是一个单一的软件工具，不如说它是一个丰富的学术资源库与发展平台。其独特亮点在于系统性地整理了从传统判别式滤波器到最新大模型（如 SAM、SAM 2）结合的前沿论文与代码资源，涵盖了综述调研、评估视角及多模态视频理解等多个维度。此外，项目发起人积极寻求合作伙伴，为有志于在该领域进行深度创新的研究者提供了宝贵的交流契机。无论你是想快速掌握行业动态，还是寻找新的科研突破口，Visual-Tracking-Development 都能提供坚实的技术支撑与广阔的协作空间。","# Visual-Tracking-Development\n\n### Collaborators Seeking\n\nI'm looking for collaborators to do research and help advance visual tracking.\n\nIf you are interested in this project, please feel free to contact me (davidzhang@zjnu.edu.cn).\n\n## Papers\n\n### :star2: Recommendations :star2:\n\n- **VOTSurvey:** Sajid Javed, Martin Danelljan, Fahad Shahbaz Khan, Muhammad Haris Khan, Michael Felsberg, Jiri Matas.\u003Cbr \u002F>\n  \"Visual Object Tracking with Discriminative Filters and Siamese Networks: A Survey and Outlook.\" TAPMI (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.02838)] \n\n- **VOTBook:**  Xin Zhao, Shiyu Hu, Xu-Cheng Yin.\u003Cbr \u002F>\n  \"Visual Object Tracking: An Evaluation Perspective.\" Springer (2025).\n  [[paper](https:\u002F\u002Flink.springer.com\u002Fbook\u002F10.1007\u002F978-981-96-4558-9)]\n\n- **VOTSurvey:** Sajid Javed, Martin Danelljan, Fahad Shahbaz Khan, Muhammad Haris Khan, Michael Felsberg, Jiri Matas.\u003Cbr \u002F>\n  \"Visual Object Tracking with Discriminative Filters and Siamese Networks: A Survey and Outlook.\" TAPMI (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.02838)]\n  \n- **DL4VT:** Seyed Mojtaba Marvasti-Zadeh, Li Cheng, Senior Member, Hossein Ghanei-Yakhdan, Shohreh Kasaei, Senior Member.\u003Cbr \u002F>\n  \"Deep Learning for Visual Tracking: A Comprehensive Survey.\" ArXiv (2021).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1912.00535.pdf)] \n  [[code](https:\u002F\u002Fgithub.com\u002FMMarvasti\u002FDeep-Learning-for-Visual-Tracking-Survey)]\n\n- **SAMURAI:** Cheng-Yen Yang, Hsiang-Wei Huang, Zhongyu Jiang, Wenhao Chai, Jenq-Neng Hwang.\u003Cbr \u002F>\n  \"SAMURAI: Motion-Aware Memory for Training-Free Visual Object Tracking with SAM 2.\" TIP (2026).\n  [[arxiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.11922)]\n  [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F11351313)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fyangchris11\u002Fsamurai)]\n  \n- **SAM:** Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Dollár, Ross Girshick.\u003Cbr \u002F>\n  \"Segment Anything.\" ArXiv (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2304.02643v1.pdf)] \n  [[homepage](https:\u002F\u002Fsegment-anything.com\u002F)] \n  [[code](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fsegment-anything)]\n  \n- **TAM:** Jinyu Yang, Mingqi Gao, Zhe Li, Shang Gao, Fangjing Wang, Feng Zheng.\u003Cbr \u002F>\n  \"Track Anything: Segment Anything Meets Videos.\" ArXiv (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.11968)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fgaomingqi\u002FTrack-Anything)]\n  \n- **SAM-Track:** Yangming Cheng, Liulei Li, Yuanyou Xu, Xiaodi Li, Zongxin Yang, Wenguan Wang, Yi Yang.\u003Cbr \u002F>\n  \"Segment-and-Track Anything.\" ArXiv (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.06558)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fz-x-yang\u002FSegment-and-Track-Anything)]\n  \n- **SEEM:** Xueyan Zou, Jianwei Yang, Hao Zhang, Feng Li, Linjie Li, Jianfeng Gao, Yong Jae Lee.\u003Cbr \u002F>\n  \"Segment Everything Everywhere All at Once.\" ArXiv (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2304.06718v1.pdf)] \n  [[code](https:\u002F\u002Fgithub.com\u002FUX-Decoder\u002FSegment-Everything-Everywhere-All-At-Once)]\n\n- **SAM-PT:** Frano Rajič, Lei Ke, Yu-Wing Tai, Chi-Keung Tang, Martin Danelljan, Fisher Yu.\u003Cbr \u002F>\n  \"Segment Anything Meets Point Tracking.\" ArXiv (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.01197)] \n  [[code](https:\u002Fgithub.com\u002Fsyscv\u002Fsam-pt)]\n  \n- **ReviewLLM:** Jiaqi Wang, Zhengliang Liu, Lin Zhao, Zihao Wu, Chong Ma, Sigang Yu, Haixing Dai.\u003Cbr \u002F>\n  \"Review of Large Vision Models and Visual Prompt Engineering.\" ArXiv (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.00855)]\n  [[code](https:\u002F\u002Fgithub.com\u002Fxxx)]\n  \n- **ChatVideo:** Junke Wang, Dongdong Chen, Chong Luo, Xiyang Dai, Lu Yuan, Zuxuan Wu, Yu-Gang Jiang.\u003Cbr \u002F>\n  \"ChatVideo: A Tracklet-centric Multimodal and Versatile Video Understanding System.\" ArXiv (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.14407)] \n  [[code](https:\u002F\u002Fwww.wangjunke.info\u002FChatVideo\u002F)]\n  \n- **Video-ChatGPT:** Muhammad Maaz, Hanoona Rasheed, Salman Khan, Fahad Shahbaz Khan.\u003Cbr \u002F>\n  \"Video-ChatGPT: Towards Detailed Video Understanding via Large Vision and Language Models.\" ArXiv (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.05424)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fmbzuai-oryx\u002FVideo-ChatGPT)]\n  \n- **SegGPT:** Xinlong Wang, Xiaosong Zhang, Yue Cao, Wen Wang, Chunhua Shen, Tiejun Huang.\u003Cbr \u002F>\n  \"SegGPT: Segmenting Everything In Context.\" ArXiv (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.03284)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fbaaivision\u002FPainter)]\n\n\n### AAAI 2026\n\n- **SATA:** Tianlu Zhang, Qiang Zhang, Guiguang Ding, Jungong Han.\u003Cbr \u002F>\n  \"Tracking and Segmenting Anything in Any Modality.\" AAAI (2026).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2511.19475)]\n  [[code]( )]\n\n- **LUART:** Yun Xiao, Yuhang Wang, Jiandong Jin, Wangkang Zhang, Chenglong Li.\u003Cbr \u002F>\n  \"Unaligned UAV RGBT Tracking: A Largescale Benchmark and A Novel Approach.\" AAAI (2026).\n  [[paper]( )] \n  [[code](https:\u002F\u002Fgithub.com\u002FNOP1224\u002FUnaligned_RGBT_Tracking)]\n\n- **CADTrack:** Hao Li, Yuhao Wang, Xiantao Hu, Wenning Hao, Pingping Zhang, Dong Wang, Huchuan Lu.\u003Cbr \u002F>\n  \"CADTrack: Learning Contextual Aggregation with Deformable Alignment for Robust RGBT Tracking.\" AAAI (2026).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2511.17967)] \n  [[code](https:\u002F\u002Fgithub.com\u002FIdolLab\u002FCADTrack)]\n\n- **AlignTrack:** Chuanyu Sun, Jiqing Zhang, Yang Wang, Yuanchen Wang, Yutong Jiang, Baocai Yin, Xin Yang.\u003Cbr \u002F>\n  \"AlignTrack: Top-Down Spatiotemporal Resolution Alignment for RGB-Event Visual Tracking.\" AAAI (2026).\n  [[paper]( )] \n  [[code](https:\u002F\u002Fgithub.com\u002Fscy0712\u002FAlignTrack)]\n\n- **MoDTrack:** Hongtao Yang, Bineng Zhong, Qihua Liang, Xiantao Hu, Yufei Tan, Haiying Xia, Shuxiang Song.\u003Cbr \u002F>\n  \"Motion-Aware Object Tracking via Motion and Geometry-Aware Cues.\" AAAI (2026).\n  [[paper]( )] \n  [[code]( )]\n\n- **MUTrack:** Weijing Wu, Qihua Liang, Bineng Zhong, Xiaohu Tang, Yufei Tan, Ning Li, Yuanliang Xue.\u003Cbr \u002F>\n  \"MUTrack: A Memory-Aware Unified Representation Framework for Visual Trackings.\" AAAI (2026).\n  [[paper]( )] \n  [[code]( )]\n\n- **ADTrack:** Guangtong Zhang, Bineng Zhong, Shirui Yang, Yang Wang, Tian Bai.\u003Cbr \u002F>\n  \"Aware Distillation for Robust Vision-Language Tracking Under Linguistic Sparsity.\" AAAI (2026).\n  [[paper]( )] \n  [[code]( )]\n  \n- **MFDP:** Shilei Wang, Pujian Lai, Dong Gao, Jifeng Ning, Gong Cheng.\u003Cbr \u002F>\n  \"Exploring Modality-Aware Fusion and Decoupled Temporal Propagation for Multi-Modal Object Tracking.\" AAAI (2026).\n  [[paper]( )] \n  [[code]( )]\n\n- **STDTrack:** Junze Shi, Yang Yu, Jian Shi, Haibo Luo.\u003Cbr \u002F>\n  \"Exploring Reliable Spatiotemporal Dependencies for Efficient Visual Tracking.\" AAAI (2026).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2601.09078)] \n  [[code]( )]  \n\n- **AMTrack:** Ge Ying, Dawei Zhang, Chengzhuan Yang, Wei Liu, Sang-Woon Jeon, Hua Wang, Changqin Huang, Zhonglong Zheng.\u003Cbr \u002F>\n  \"Exploiting All Mamba Fusion for Efficient RGB-D Tracking.\" AAAI (2026).\n  [[paper]( )] \n  [[code]( )]\n\n- **GOLA:** Zekai Shao, Yufan Hu, Jingyuan Liu, Bin Fan, Hongmin Liu.\u003Cbr \u002F>\n  \"Group Orthogonal Low-Rank Adaptation for RGB-T Tracking.\" AAAI (2026).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2512.05359)] \n  [[code](https:\u002F\u002Fgithub.com\u002FMelanTech\u002FGOLA)]\n\n- **SFPT:** Jiahao Wang, Fang Liu, Hao Wang, Shuo Li, Xiyi Wang, Puhua Chen.\u003Cbr \u002F>\n  \"Semantic Feature Purification for Adversarially-Aware RGB-T Tracking.\" AAAI (2026).\n  [[paper]( )] \n  [[code]( )]\n\n- **HTTrack:** Jiahao Wang, Fang Liu, Licheng Jiao, Hao Wang, Shuo Li, Xiyi Wang, Lingling Li, Puhua Chen, Xu Liu.\u003Cbr \u002F>\n  \"HTTrack: Learning to Perceive Targets via Historical Trajectories in Satellite Video Tracking.\" AAAI (2026).\n  [[paper]( )] \n  [[code]( )]\n\n- **AerialMind:** Chenglizhao Chen, Shaofeng Liang, Runwei Guan, Xiaolou Sun, Haocheng Zhao, Haiyun Jiang, Tao Huang, Henghui Ding, Qing-Long Han.\u003Cbr \u002F>\n  \"AerialMind: Towards Referring Multi-Object Tracking in UAV Scenarios.\" AAAI (2026).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2511.21053)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fshawnliang420\u002FAerialMind)]\n\n- **SAM2-OV:** Yangkai Chen, Qiangqiang Wu, Guangyao Li, Junlong Gao, Guanglin Niu, Hanzi Wang.\u003Cbr \u002F>\n  \"SAM2-OV: A Novel Detection-Only Tuning Paradigm for Open-Vocabulary Multi-Object Tracking.\" AAAI (2026).\n  [[paper]( )] \n  [[code]( )]\n\n- **SAM2MOT:** Junjie Jiang, Zelin Wang, Manqi Zhao, Yin Li, DongSheng Jiang.\u003Cbr \u002F>\n  \"SAM2MOT: A Novel Paradigm of Multi-Object Tracking by Segmentation.\" AAAI (2026).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.04519)] \n  [[code](https:\u002F\u002Fgithub.com\u002FTripleJoy\u002FSAM2MOT)]\n\n  \n### ICLR 2026\n\n- **FARTrack:** Guijie Wang, Tong Lin, Yifan Bai, Anjia Cao, Shiyi Liang, Wangbo Zhao, Xing Wei.\u003Cbr \u002F>\n  \"FARTrack: Fast Autoregressive Visual Tracking with High Performance.\" ICLR (2026).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2602.03214)]\n  [[code](https:\u002F\u002Fgithub.com\u002Fwangguijiepedeval\u002FFARTrack)]\n\n- **GOT-Edit:** Shih-Fang Chen, Jun-Cheng Chen, I-Hong Jhuo, Yen-Yu Lin.\u003Cbr \u002F>\n  \"GOT-Edit: Geometry-Aware Generic Object Tracking via Online Model Editing.\" ICLR (2026).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2602.08550)]\n  [[code](https:\u002F\u002Fgithub.com\u002Fchenshihfang\u002FGOT)]\n  \n\n### NeurIPS 2025\n\n- **RGBDT500:** Xue-Feng Zhu, Tianyang Xu, Yifan Pan, Jinjie Gu, Xi Li, Jiwen Lu, Xiao-Jun Wu, Josef Kittler.\u003Cbr \u002F>\n  \"Collaborating Vision, Depth, and Thermal Signals for Multi-Modal Tracking.\" NeurIPS (2025).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2509.24741)] \n  [[code](https:\u002F\u002Fxuefeng-zhu5.github.io\u002FRGBDT500\u002F)]\n\n- **MMOT:** Tianhao Li, Tingfa Xu, Ying Wang, Haolin Qin, Xu Lin, Jianan Li.\u003Cbr \u002F>\n  \"MMOT: The First Challenging Benchmark for Drone-based Multispectral Multi-Object Tracking.\" NeurIPS (2025).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2510.12565)] \n  [[code](https:\u002F\u002Fgithub.com\u002FAnnzstbl\u002FMMOT)]\n  \n- **SpikeFET:** Jingjun Yang, Liangwei Fan, Jinpu Zhang, Xiangkai Lian, Hui Shen, Dewen Hu.\u003Cbr \u002F>\n  \"Fully Spiking Neural Networks for Unified Frame-Event Object Tracking.\" NeurIPS (2025).\n  [[paper](https:\u002F\u002Fopenreview.net\u002Fforum?id=FooiwsnEH9)] \n  [[code](https:\u002F\u002Fgithub.com\u002FNoctis-A\u002FSpikeFET)]\n\n- **LoRATv2:** Liting Lin, Heng Fan, Zhipeng Zhang, Yuqing Huang, Yaowei Wang, Yong Xu, Haibin Ling.\u003Cbr \u002F>\n  \"LoRATv2: Enabling Low-Cost Temporal Modeling in One-Stream Trackers.\" NeurIPS (2025).\n  [[paper](https:\u002F\u002Fopenreview.net\u002Fforum?id=q06YjUj0FB)] \n  [[code](https:\u002F\u002Fgithub.com\u002FLitingLin\u002FLoRATv2)]\n\n- **DSATrack:** Xinyu Zhou, Tongxin Pan, Lingyi Hong, Pinxue Guo, Haijing Guo, Zhaoyu Chen, Kaixun Jiang, Wenqiang Zhang.\u003Cbr \u002F>\n  \"Dynamic Semantic-Aware Correlation Modeling for UAV Tracking.\" NeurIPS (2025).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2510.21351)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fzxyyxzz\u002FDSATrack)]\n\n\n\n### ICCV 2025\n\n- **UMDATrack:** Siyuan Yao, Rui Zhu, Ziqi Wang, Wenqi Ren, Yanyang Yan, Xiaochun Cao.\u003Cbr \u002F>\n  \"UMDATrack: Unified Multi-Domain Adaptive Tracking Under Adverse Weather Conditions.\" ICCV (2025).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.00648)] \n  [[code](https:\u002F\u002Fgithub.com\u002FZ-Z188\u002FUMDATrack)]\n\n- **XTrack:** Yuedong Tan, Zongwei Wu, Yuqian Fu, Zhuyun Zhou, Guolei Sun, Eduard Zamfi, Chao Ma, Danda Pani Paudel, Luc Van Gool, Radu Timofte.\u003Cbr \u002F>\n  \"XTrack: Multimodal Training Boosts RGB-X Video Object Trackers.\" ICCV (2025).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.17773)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fsupertyd\u002FXTrack)]\n\n- **FlexTrack:** Yuedong Tan, Jiawei Shao, Eduard Zamfir, Ruanjun Li, Zhaochong An, Chao Ma, Danda Paudel, Luc Van Gool, Radu Timofte, Zongwei Wu.\u003Cbr \u002F>\n  \"What You Have is What You Track: Adaptive and Robust Multimodal Tracking.\" ICCV (2025).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.05899)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fsupertyd\u002FFlexTrack)]\n\n- **TUEs:** Qiangqiang Wu, Yi Yu, Chenqi Kong, Ziquan Liu, Jia Wan, Haoliang Li, Alex C. Kot, Antoni B. Chan.\u003Cbr \u002F>\n  \"Temporal Unlearnable Examples: Preventing Personal Video Data from Unauthorized Exploitation by Object Tracking.\" ICCV (2025).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.07483)] \n  [[code]( )]\n\n- **ATCTrack:** Xiaokun Feng, Shiyu Hu, Xuchen Li, Dailing Zhang, Meiqi Wu, Jing Zhang, Xiaotang Chen, Kaiqi Huang.\u003Cbr \u002F>\n  \"ATCTrack: Aligning Target-Context Cues with Dynamic Target States for Robust Vision-Language Tracking.\" ICCV (2025).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.19875)] \n  [[code](https:\u002F\u002Fgithub.com\u002FXiaokunFeng\u002FATCTrack)]\n\n- **CAT:** Yongsheng Yuan, Jie Zhao, Dong Wang, Huchuan Lu.\u003Cbr \u002F>\n  \"CAT: A Unified Click-and-Track Framework for Realistic Tracking.\" ICCV (2025).\n  [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2025\u002Fhtml\u002FYuan_CAT_A_Unified_Click-and-Track_Framework_for_Realistic_Tracking_ICCV_2025_paper.html)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fysyuann\u002FCAT)]\n\n- **CompressTracker:** Lingyi Hong, Jinglun Li, Xinyu Zhou, Shilin Yan, Pinxue Guo, Kaixun Jiang, Zhaoyu Chen, Shuyong Gao, Wei Zhang, Hong Lu, Wenqiang Zhang.\u003Cbr \u002F>\n  \"General Compression Framework for Efficient Transformer Object Tracking.\" ICCV (2025).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.17564)] \n  [[code](https:\u002F\u002Fgithub.com\u002FLingyiHongfd\u002FCompressTracker)]\n\n- **SMSTracker:** Sixian Chan, Zedong Li, Wenhao Li, Shijian Lyu, Chunhua Shen, Xiaoqin Zhang.\u003Cbr \u002F>\n  \"SMSTracker: Tri-path Score Mask Sigma Fusion for Multi-Modal Tracking.\" ICCV (2025).\n  [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2025\u002Fhtml\u002FChan_SMSTracker_Tri-path_Score_Mask_Sigma_Fusion_for_Multi-Modal_Tracking_ICCV_2025_paper.html)] \n  [[code](https:\u002F\u002Fgithub.com\u002FLeezed525\u002FSMSTracker)]\n  \n\n  \n### CVPR 2025\n\n- **ARPTrack:** Shiyi Liang, Yifan Bai, Yihong Gong, Xing Wei.\u003Cbr \u002F>\n  \"Autoregressive Sequential Pretraining for Visual Tracking.\" CVPR (2025).\n  [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fhtml\u002FLiang_Autoregressive_Sequential_Pretraining_for_Visual_Tracking_CVPR_2025_paper.html)] \n  [[code](https:\u002F\u002Farptrack.github.io\u002F)]\n\n- **DreamTrack:** Mingzhe Guo, Weiping Tan, Wenyu Ran, Liping Jing, Zhipeng Zhang.\u003Cbr \u002F>\n  \"DreamTrack: Dreaming the Future for Multimodal Visual Object Tracking.\" CVPR (2025).\n  [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fhtml\u002FGuo_DreamTrack_Dreaming_the_Future_for_Multimodal_Visual_Object_Tracking_CVPR_2025_paper.html)] \n  [[code]( )]\n\n- **MamTrack:** Chuanyu Sun, Jiqing Zhang, Yang Wang, Huilin Ge, Qianchen Xia, Baocai Yin, Xin Yang.\u003Cbr \u002F>\n  \"Exploring Historical Information for RGBE Visual Tracking with Mamba.\" CVPR (2025).\n  [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fhtml\u002FSun_Exploring_Historical_Information_for_RGBE_Visual_Tracking_with_Mamba_CVPR_2025_paper.html)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fscy0712\u002FMamTrack)]\n  \n- **PURA:** Zekai Shao, Yufan Hu, Bin Fan, Hongmin Liu.\u003Cbr \u002F>\n  \"PURA: Parameter Update-Recovery Test-Time Adaption for RGB-T Tracking.\" CVPR (2025).\n  [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fhtml\u002FShao_PURA_Parameter_Update-Recovery_Test-Time_Adaption_for_RGB-T_Tracking_CVPR_2025_paper.html)] \n  [[code](https:\u002F\u002Fmelantech.github.io\u002FPURA)]\n\n- **ACAttack:** Xinyu Xiang, Qinglong Yan, Hao Zhang, Jiayi Ma.\u003Cbr \u002F>\n  \"ACAttack: Adaptive Cross Attacking RGB-T Tracker via Multi-Modal Response Decoupling.\" CVPR (2025).\n  [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fhtml\u002FXiang_ACAttack_Adaptive_Cross_Attacking_RGB-T_Tracker_via_Multi-Modal_Response_Decoupling_CVPR_2025_paper.html)] \n  [[code](https:\u002F\u002Fgithub.com\u002FXinyu-Xiang\u002FACAttack)]\n  \n- **MITracker:** Mengjie Xu, Yitao Zhu, Haotian Jiang, Jiaming Li, Zhenrong Shen, Sheng Wang, Haolin Huang, Xinyu Wang, Qing Yang, Han Zhang, Qian Wang.\u003Cbr \u002F>\n  \"MITracker: Multi-View Integration for Visual Object Tracking.\" CVPR (2025).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.20111)] \n  [[code](https:\u002F\u002Fmii-laboratory.github.io\u002FMITracker\u002F)]\n\n- **SPMTrack:** Wenrui Cai, Qingjie Liu, Yunhong Wang.\u003Cbr \u002F>\n  \"SPMTrack: Spatio-Temporal Parameter-Efficient Fine-Tuning with Mixture of Experts for Scalable Visual Tracking.\" CVPR (2025).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.18338)] \n  [[code](https:\u002F\u002Fgithub.com\u002FWenRuiCai\u002FSPMTrack)]\n  \n- **ORTrack :** You Wu, Xucheng Wang, Xiangyang Yang, Mengyuan Liu, Dan Zeng, Hengzhou Ye, Shuiwang Li.\u003Cbr \u002F>\n  \"Learning Occlusion-Robust Vision Transformers for Real-Time UAV Tracking.\" CVPR (2025).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.09228)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fwuyou3474\u002FORTrack)]\n\n- **SGLATrack:** Chaocan Xue, Bineng Zhong, Qihua Liang, Yaozong Zheng, Ning Li, Yuanliang Xue, Shuxiang Song.\u003Cbr \u002F>\n  \"Similarity-Guided Layer-Adaptive Vision Transformer for UAV Tracking.\" CVPR (2025).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.06625)] \n  [[code](https:\u002F\u002Fgithub.com\u002FGXNU-ZhongLab\u002FSGLATrack)]\n\n- **DUTrack:** Xiaohai Li, Bineng Zhong, Qihua Liang, Zhiyi Mo, Jian Nong, Shuxiang Song.\u003Cbr \u002F>\n  \"Dynamic Updates for Language Adaptation in Visual-Language Tracking.\" CVPR (2025).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.06621)] \n  [[code](https:\u002F\u002Fgithub.com\u002FGXNU-ZhongLab\u002FDUTrack)]\n\n- **MambaVLT:** Xinqi Liu, Li Zhou, Zikun Zhou, Jianqiu Chen, Zhenyu He.\u003Cbr \u002F>\n  \"MambaVLT: Time-Evolving Multimodal State Space Model for Vision-Language Tracking.\" CVPR (2025).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.15459)] \n  [[code]( )]\n\n- **Mono3DVLT:** Hongkai Wei, Yang Yang, Shijie Sun, Mingtao Feng, Xiangyu Song, Qi Lei, Hongli Hu, Rong Wang, Huansheng Song, Naveed Akhtar, Ajmal Saeed Mian.\u003Cbr \u002F>\n  \"Mono3DVLT: Monocular-Video-Based 3D Visual Language Tracking.\" CVPR (2025).\n  [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fhtml\u002FWei_Mono3DVLT_Monocular-Video-Based_3D_Visual_Language_Tracking_CVPR_2025_paper.html)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fhongkai-wei\u002FMono3DVLT)]\n  \n- **EdgeTAM:** Chong Zhou, Chenchen Zhu, Yunyang Xiong, Saksham Suri, Fanyi Xiao, Lemeng Wu, Raghuraman Krishnamoorthi, Bo Dai, Chen Change Loy, Vikas Chandra, Bilge Soran.\u003Cbr \u002F>\n  \"EdgeTAM: On-Device Track Anything Model.\" CVPR (2025).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.07256)] \n  [[code](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FEdgeTAM)]\n\n- **DAM4SAM:** Jovana Videnovic, Alan Lukezic, Matej Kristan.\u003Cbr \u002F>\n  \"A Distractor-Aware Memory for Visual Object Tracking with SAM2.\" CVPR (2025).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.17576)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fjovanavidenovic\u002FDAM4SAM)]\n\n- **MUST:** Haolin Qin, Tingfa Xu, Tianhao Li, Zhenxiang Chen, Tao Feng, Jianan Li.\u003Cbr \u002F>\n  \"MUST: The First Dataset and Unified Framework for Multispectral UAV Single Object Tracking.\" CVPR (2025).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.17699)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fq2479036243\u002FMUST-Multispectral-UAV-Single-Object-Tracking)]\n\n- **ETAP:** Friedhelm Hamann, Daniel Gehrig, Filbert Febryanto, Kostas Daniilidis, Guillermo Gallego.\u003Cbr \u002F>\n  \"ETAP: Event-based Tracking of Any Point.\" CVPR (2025).\n  [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fhtml\u002FHamann_ETAP_Event-based_Tracking_of_Any_Point_CVPR_2025_paper.html)] \n  [[code](https:\u002F\u002Fgithub.com\u002Ftub-rip\u002FETAP)]\n\n- **Chrono:** Inès Hyeonsu Kim, Seokju Cho, Jiahui Huang, Jung Yi, Joon-Young Lee, Seungryong Kim.\u003Cbr \u002F>\n  \"Exploring Temporally-Aware Features for Point Tracking.\" CVPR (2025).\n  [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fhtml\u002FKim_Exploring_Temporally-Aware_Features_for_Point_Tracking_CVPR_2025_paper.html)] \n  [[code](https:\u002F\u002Fcvlab-kaist.github.io\u002FChrono\u002F)]\n\n- **Tracktention:** Zihang Lai, Andrea Vedaldi.\u003Cbr \u002F>\n  \"Tracktention: Leveraging Point Tracking to Attend Videos Faster and Better.\" CVPR (2025).\n  [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fhtml\u002FLai_Tracktention_Leveraging_Point_Tracking_to_Attend_Videos_Faster_and_Better_CVPR_2025_paper.html)] \n  [[code](https:\u002F\u002Fzlai0.github.io\u002FTrackTention\u002F)]\n\n- **TimeTracker:** Haoyue Liu, Jinghan Xu, Yi Chang, Hanyu Zhou, Haozhi Zhao, Lin Wang, Luxin Yan.\u003Cbr \u002F>\n  \"TimeTracker: Event-based Continuous Point Tracking for Video Frame Interpolation with Non-linear Motion.\" CVPR (2025).\n  [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fhtml\u002FLiu_TimeTracker_Event-based_Continuous_Point_Tracking_for_Video_Frame_Interpolation_with_CVPR_2025_paper.html)] \n  [[code]( )]\n  \n- **ADMCMT:** Huijie Fan, Yu Qiao, Yihao Zhen, Tinghui Zhao, Baojie Fan, Qiang Wang.\u003Cbr \u002F>\n  \"All-Day Multi-Camera Multi-Target Tracking.\" CVPR (2025).\n  [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fhtml\u002FFan_All-Day_Multi-Camera_Multi-Target_Tracking_CVPR_2025_paper.html)] \n  [[code](https:\u002F\u002Fgithub.com\u002FQTRACKY\u002FADMCMT)]\n\n- **OmniTrack:** Kai Luo, Hao Shi, Sheng Wu, Fei Teng, Mengfei Duan, Chang Huang, Yuhang Wang, Kaiwei Wang, Kailun Yang.\u003Cbr \u002F>\n  \"Omnidirectional Multi-Object Tracking.\" CVPR (2025).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.04565)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fxifen523\u002FOmniTrack)]\n\n- **DFormerv2:** Bo-Wen Yin, Jiao-Long Cao, Ming-Ming Cheng, Qibin Hou.\u003Cbr \u002F>\n  \"DFormerv2: Geometry Self-Attention for RGBD Semantic Segmentation.\" CVPR (2025).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.04701)] \n  [[code](https:\u002F\u002Fgithub.com\u002FVCIP-RGBD\u002FDFormer)]\n  \n- **JTD-UAV:** Yifan Wang, Jian Zhao, Zhaoxin Fan, Xin Zhang, Xuecheng Wu, Yudian Zhang, Lei Jin, Xinyue Li, Gang Wang, Mengxi Jia, Ping Hu, Zheng Zhu, Xuelong Li.\u003Cbr \u002F>\n  \"JTD-UAV: MLLM-Enhanced Joint Tracking and Description Framework for Anti-UAV Systems .\" CVPR (2025).\n  [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fhtml\u002FWang_JTD-UAV_MLLM-Enhanced_Joint_Tracking_and_Description_Framework_for_Anti-UAV_Systems_CVPR_2025_paper.html)] \n  [[code]( )]\n  \n\n### ICML 2025\n\n- **MPT:** Jie Zhao, Xin Chen, Yongsheng Yuan, Michael Felsberg, Dong Wang, Huchuan Lu.\u003Cbr \u002F>\n  \"Efficient Motion Prompt Learning for Robust Visual Tracking.\" ICML (2025).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.16321)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fzj5559\u002FMotion-Prompt-Tracking)]\n\n- **CSTrack:** Xiaokun Feng, Dailing Zhang, Shiyu Hu, Xuchen Li, Meiqi Wu, Jing Zhang, Xiaotang Chen, Kaiqi Huang.\u003Cbr \u002F>\n  \"CSTrack: Enhancing RGB-X Tracking via Compact Spatiotemporal Features.\" ICML (2025).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.19434)] \n  [[code](https:\u002F\u002Fgithub.com\u002FXiaokunFeng\u002FCSTrack)]\n  \n\n\n### ACM MM 2025\n\n- **RSTrack:** Fansheng Zeng, Bineng Zhong, Haiying Xia, Yufei Tan, Xiantao Hu, Liangtao Shi, Shuxiang Song.\u003Cbr \u002F>\n  \"Explicit Context Reasoning with Supervision for Visual Tracking.\" ACM MM (2025).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.16191)] \n  [[code](https:\u002F\u002Fgithub.com\u002FGXNU-ZhongLab\u002FRSTrack)]\n\n- **UniBench300:** Zhangyong Tang, Tianyang Xu, Xuefeng Zhu, Chunyang Cheng, Tao Zhou, Xiaojun Wu, Josef Kittler.\u003Cbr \u002F>\n  \"Serial Over Parallel: Learning Continual Unification for Multi-Modal Visual Object Tracking and Benchmarking.\" ACM MM (2025).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2508.10655)] \n  [[code](https:\u002F\u002Fgithub.com\u002FZhangyong-Tang\u002FUniBench300)]\n\n- **Gen4Track:** Jiawei Ge, Xinyu Zhang, Jiuxin Cao, Xuelin Zhu, Weijia Liu, Qingqing Gao, Biwei Cao, Kun Wang, Chang Liu, Bo Liu, Chen Feng, Ioannis Patras.\u003Cbr \u002F>\n  \"Gen4Track: A Tuning-free Data Augmentation Framework via Self-correcting Diffusion Model for Vision-Language Tracking.\" ACM MM (2025).\n  [[paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3746027.3754956)] \n  [[code]( )]\n\n- **FA3T:** Jiahao Wang, Fang Liu, Licheng Jiao, Hao Wang, Shuo Li, Lingling Li, Puhua Chen, Xu Liu, Xinyi Wang.\u003Cbr \u002F>\n  \"FA3T: Feature-Aware Adversarial Attacks for Multi-modal Tracking.\" ACM MM (2025).\n  [[paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3746027.3755155)] \n  [[code]( )]\n\n- **MST:** Shilei Wang, Gong Cheng, Pujian Lai, Dong Gao, Junwei Han.\u003Cbr \u002F>\n  \"Multi-State Tracker: Enhancing Efficient Object Tracking via Multi-State Specialization and Interaction.\" ACM MM (2025).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2508.11531)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fwsumel\u002FMST)]\n\n\n\n### IJCAI 2025\n\n- **FastSeqTrack:** Dongdong Li, Zhinan Gao, Yangliu Kuai, Rui Chen.\u003Cbr \u002F>\n  \"Exploring Effcient and Effective Sequence Learning for Visual Object Tracking.\" IJCAI (2025).\n  [[paper](https:\u002F\u002Fijcai-preprints.s3.us-west-1.amazonaws.com\u002F2025\u002F3672.pdf)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fvision4drones\u002FFastSeqTrack)]\n\n- **SSTrack:** Yutong Kou, Shubo Lin, Liang Li, Bing Li, Weiming Hu, Jin Gao.\u003Cbr \u002F>\n  \"SSTrack: Sample-interval Scheduling for Lightweight Visual Object Tracking.\" IJCAI (2025).\n  [[paper](https:\u002F\u002Fijcai-preprints.s3.us-west-1.amazonaws.com\u002F2025\u002F3314.pdf)] \n  [[code](https:\u002F\u002Fgithub.com\u002FKou-99\u002FSSTrack)]\n\n- **TUMFNet:** Zhaodong Ding, Chenglong Li, Shengqing Miao, Jin Tang.\u003Cbr \u002F>\n  \"Template-based Uncertainty Multimodal Fusion Network for RGBT Tracking.\" IJCAI (2025).\n  [[paper](https:\u002F\u002Fijcai-preprints.s3.us-west-1.amazonaws.com\u002F2025\u002F2815.pdf)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fdongdong2061\u002FIJCAI25-TUMFNet)]\n\n- **GDSTrack:** Shenglan Li, Rui Yao, Yong Zhou, Hancheng Zhu, Kunyang Sun, Bing Liu, Zhiwen Shao, Jiaqi Zhao.\u003Cbr \u002F>\n  \"Modality-Guided Dynamic Graph Fusion and Temporal Diffusion for Self-Supervised RGB-T Tracking.\" IJCAI (2025).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.03507)] \n  [[code](https:\u002F\u002Fgithub.com\u002FLiShenglana\u002FGDSTrack)]\n\n\n### AAAI 2025\n\n- **STTrack:** Xiantao Hu, Ying Tai, Xu Zhao, Chen Zhao, Zhenyu Zhang, Jun Li, Bineng Zhong, Jian Yang.\u003Cbr \u002F>\n  \"Exploiting Multimodal Spatial-temporal Patterns for Video Object Tracking.\" AAAI (2025).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.15691)] \n  [[code](https:\u002F\u002Fgithub.com\u002FNJU-PCALab\u002FSTTrack)]\n\n- **SUTrack:** Xin Chen, Ben Kang, Wanting Geng, Jiawen Zhu, Yi Liu, Dong Wang, Huchuan Lu.\u003Cbr \u002F>\n  \"SUTrack: Towards Simple and Unified Single Object Tracking.\" AAAI (2025).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.19138)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fchenxin-dlut\u002FSUTrack)]\n\n- **MIMTrack:** Xingmei Wang, Guohao Nie, Jiaxiang Meng, Zining Yan.\u003Cbr \u002F>\n  \"MIMTrack: In-Context Tracking via Masked Image Modeling.\" AAAI (2025).\n  [[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F32860)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fchenxin-dlut\u002FSUTrack)]\n  \n- **AINet:** Andong Lu, Wanyu Wang, Chenglong Li, Jin Tang, Bin Luo.\u003Cbr \u002F>\n  \"RGBT Tracking via All-layer Multimodal Interactions with Progressive Fusion Mamba.\" AAAI (2025).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.08827)] \n  [[code]( )]\n\n- **CMS:** Xinyu Xiang, Qinglong Yan, Hao Zhang, Jianfeng Ding, Han Xu, Zhongyuan Wang, Jiayi Ma.\u003Cbr \u002F>\n  \"Cross-Modal Stealth: A Coarse-to-Fine Attack Framework for RGB-T Tracker .\" AAAI (2025).\n  [[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F32931)] \n  [[code](https:\u002F\u002Fgithub.com\u002FXinyu-Xiang\u002FCMS)]\n  \n- **CAFormer:** Yun Xiao, Jiacong Zhao, Andong Lu, Chenglong Li, Yin Lin, Bing Yin, Cong Liu.\u003Cbr \u002F>\n  \"Cross-modulated Attention Transformer for RGBT Tracking.\" AAAI (2025).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.02222)] \n  [[code]( )]\n  \n- **TemTrack:** Jinxia Xie, Bineng Zhong, Qihua Liang, Ning Li, Zhiyi Mo, Shuxiang Song.\u003Cbr \u002F>\n  \"Robust Tracking via Mamba-based Context-aware Token Learning.\" AAAI (2025).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.13611)] \n  [[code](https:\u002F\u002Fgithub.com\u002FGXNU-ZhongLab\u002FTemTrack)]\n\n- **LMTrack:** Chenlong Xu, Bineng Zhong, Qihua Liang, Yaozong Zheng, Guorong Li, Shuxiang Song.\u003Cbr \u002F>\n  \"Less is More: Token Context-aware Learning for Object Tracking.\" AAAI (2025).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.00758)] \n  [[code](https:\u002F\u002Fgithub.com\u002FXuChenLong\u002FLMTrack)]\n  \n- **MambaLCT:** Xiaohai Li, Bineng Zhong, Qihua Liang, Guorong Li, Zhiyi Mo, Shuxiang Song.\u003Cbr \u002F>\n  \"MambaLCT: Boosting Tracking via Long-term Context State Space Model.\" AAAI (2025).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.13615)] \n  [[code](https:\u002F\u002Fgithub.com\u002FGXNU-ZhongLab\u002FMambaLCT)]\n\n- **SSTrack:** Yaozong Zheng , Bineng Zhong, Qihua Liang, Ning Li, Shuxiang Song.\u003Cbr \u002F>\n  \"Decoupled Spatio-Temporal Consistency Learning for Self-Supervised Tracking.\" AAAI (2025).\n  [[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F33155)] \n  [[code](https:\u002F\u002Fgithub.com\u002FGXNU-ZhongLab\u002FSSTrack)]\n  \n- **MCITrack:** Ben Kang, Xin Chen, Simiao Lai, Yang Liu, Yi Liu, Dong Wang.\u003Cbr \u002F>\n  \"Exploring Enhanced Contextual Information for Video-Level Object Tracking.\" AAAI (2025).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.11023)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fkangben258\u002FMCITrack\u002F)]\n  \n- **AsymTrack:** Jiawen Zhu, Huayi Tang, Xin Chen, Xinying Wang, Dong Wang, Huchuan Lu.\u003Cbr \u002F>\n  \"Two-stream Beats One-stream: Asymmetric Siamese Network for Efficient Visual Tracking.\" AAAI (2025).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.00516)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fjiawen-zhu\u002FAsymTrack)]\n\n- **LVPTrack:** Hongjing Wu, Siyuan Yao, Feng Huang, Shu Wang, Linchao Zhang, Zhuoran Zheng, Wenqi Ren.\u003Cbr \u002F>\n  \"LVPTrack: High Performance Domain Adaptive UAV Tracking with Label Aligned Visual Prompt Tuning.\" AAAI (2025).\n  [[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F32906)] \n  [[code]( )]\n\n- **MM-Tracker:** Mufeng Yao, Jinlong Peng, Qingdong He, Bo Peng, Hao Chen, Mingmin Chi, Chao Liu.\u003Cbr \u002F>\n  \"MM-Tracker: Motion Mamba for UAV-platform Multiple Object Tracking.\" AAAI (2025).\n  [[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F33019)] \n  [[code](https:\u002F\u002Fgithub.com\u002FYaoMufeng\u002FMMTracker)]\n  \n- **PSOT:** Zhangbin Li, Jinxing Zhou, Jing Zhang, Shengeng Tang, Kun Li, Dan Guo.\u003Cbr \u002F>\n  \"Patch-level Sounding Object Tracking for Audio-Visual Question Answering.\" AAAI (2025).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.10749)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fjiawen-zhu\u002FAsymTrack)]\n  \n\n### ICASSP 2025\n\n- **MFDA:** Zhiheng Li, Weng Zhimin, Yuehuan Wang.\u003Cbr \u002F>\n  \"Multi-view Feature Discrepancy Attack for Single Object Tracking.\" ICASSP (2025).\n  [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10889698)] \n  [[code]( )]\n\n- **CGTrack:** Weihong Li, Xiaoqiong Liu, Heng Fan, Libo Zhang.\u003Cbr \u002F>\n  \"CGTrack: Cascade Gating Network with Hierarchical Feature Aggregation for UAV Tracking.\" ICRA (2025).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.05936)] \n  [[code](https:\u002F\u002Fgithub.com\u002FNightwatch-Fox11\u002FCGTrack)]\n\n- **CLTrack:** Bin Chen, Shenglong Hu, Gang Dong, Lingyan Liang, Dongchao Wen, Kaihua Zhang.\u003Cbr \u002F>\n  \"Continuously Learning Video-level Object Tokens for Robust UAV tracking.\" ICASSP (2025).\n  [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10887950)] \n  [[code]( )]\n  \n- **LunarTracking:** Mohammed Leo, Ding Zhang, Hai-Tao Zheng, Haiye Lin.\u003Cbr \u002F>\n  \"Lunar Tracking: A New Benchmark For Nighttime Tiny Object Tracking.\" ICASSP (2025).\n  [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10890681)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fkk123321x\u002FLunarTracking)]\n\n- **EHDA:** Qiao Li, Kanlun Tan, Qiao Liu, Di Yuan, Xin Li, Yunpeng Liu.\u003Cbr \u002F>\n  \"Efficient Hierarchical Domain Adaptive Thermal Infrared Tracking.\" ICASSP (2025).\n  [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10890354)] \n  [[code]( )]\n\n- **PDTrack:** Yeqiang Liu, Weiran Li, Yanhao Ding, Zhenbo Li.\u003Cbr \u002F>\n  \"PDTrack: Progressive Distance Association for Multiple Object Tracking.\" ICASSP (2025).\n  [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10888323)] \n  [[code]( )]\n  \n- **RSM:** Riran Cheng, Xupeng Wang, Ferdous Sohel, Hang Lei.\u003Cbr \u002F>\n  \"RSM: Refined Saliency Map For Explainable 3D Object Tracking.\" ICASSP (2025).\n  [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10887798)] \n  [[code]( )]\n\n- **LRPD:** Qingkuo Hu, Yichen Li, Wenbin Yu.\u003Cbr \u002F>\n  \"Exploiting Multimodal Prompt Learning and Distillation for RGB-T Tracking.\" ICMR (2025).\n  [[paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3731715.3733332)] \n  [[code]( )]\n\n- **VSS:** Pengfei Wei, Liu Qiao, Zhenyu He, Di Yuan.\u003Cbr \u002F>\n  \"A Multi-Stream Visual-Spectral-Spatial Adaptive Hyperspectral Object Tracking.\" ICMR (2025).\n  [[paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3731715.3733262)] \n  [[code]( )]\n\n- **DARTer:** Xuzhao Li, Xuchen Li, Shiyu Hu.\u003Cbr \u002F>\n  \"DARTer: Dynamic Adaptive Representation Tracker for Nighttime UAV Tracking.\" ICMR (2025).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.00752)] \n  [[code]( )]\n\n\n  \n### NeurIPS 2024\n\n- **ChatTracker:** Yiming Sun, Fan Yu, Shaoxiang Chen, Yu Zhang, Junwei Huang, Chenhui Li, Yang Li, Changbo Wang.\u003Cbr \u002F>\n  \"ChatTracker: Enhancing Visual Tracking Performance via Chatting with Multimodal Large Language Model.\" NeurIPS (2024).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.01756)] \n  [[code]( )]\n\n- **WebUOT-1M:** Chunhui Zhang, Li Liu, Guanjie Huang, Hao Wen, Xi Zhou, Yanfeng Wang.\u003Cbr \u002F>\n  \"WebUOT-1M: Advancing Deep Underwater Object Tracking with A Million-Scale Benchmark.\" NeurIPS (2024).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.19818)] \n  [[code](https:\u002F\u002Fgithub.com\u002F983632847\u002FAwesome-Multimodal-Object-Tracking)]\n\n- **VastTrack:** Liang Peng, Junyuan Gao, Xinran Liu, Weihong Li, Shaohua Dong, Zhipeng Zhang, Heng Fan, Libo Zhang.\u003Cbr \u002F>\n  \"VastTrack: Vast Category Visual Object Tracking.\" NeurIPS (2024).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.03493)] \n  [[code](https:\u002F\u002Fgithub.com\u002FHengLan\u002FVastTrack)]\n  \n- **DeTrack:** Xinyu Zhou, Jinglun Li, Lingyi Hong, Kaixun Jiang, Pinxue Guo, Weifeng Ge, Wenqiang Zhang.\u003Cbr \u002F>\n  \"DeTrack: In-model Latent Denoising Learning for Visual Object Tracking.\" NeurIPS (2024).\n  [[paper](https:\u002F\u002Fopenreview.net\u002Fforum?id=ZJjuNF0olj)] \n  [[code]( )]\n\n- **CSAM:** Tianlu Zhang, Kurt Debattista, Qiang Zhang, Guiguang Ding, Jungong Han.\u003Cbr \u002F>\n  \"Revisiting Motion Information for RGB-Event Tracking with MOT Philosophy.\" NeurIPS (2024).\n  [[paper](https:\u002F\u002Fopenreview.net\u002Fforum?id=bzGAELYOyL)] \n  [[code]( )]\n\n- **DINTR:** Pha Nguyen, Ngan Le, Jackson Cothren, Alper Yilmaz, Khoa Luu.\u003Cbr \u002F>\n  \"DINTR: Tracking via Diffusion-based Interpolation.\" NeurIPS (2024).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.10053)] \n  [[code]( )]\n\n- **UAV3D:** Hui Ye, Rajshekhar Sunderraman, Shihao Ji.\u003Cbr \u002F>\n  \"UAV3D: A Large-scale 3D Perception Benchmark for Unmanned Aerial  Vehicles.\" NeurIPS (2024).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.11125)] \n  [[code](https:\u002F\u002Fhuiyegit.github.io\u002FUAV3D_Benchmark\u002F)]\n  \n- **MemVLT:** Xiaokun Feng, Xuchen Li, Shiyu Hu, Dailing Zhang, Meiqi Wu, Xiaotang Chen, Kaiqi Huang.\u003Cbr \u002F>\n  \"MemVLT: Vision-Language Tracking with Adaptive Memory-based Prompts.\" NeurIPS (2024).\n  [[paper](https:\u002F\u002Fopenreview.net\u002Fforum?id=ZK1CZXKgG5)] \n  [[code](https:\u002F\u002Fgithub.com\u002FXiaokunFeng\u002FMemVLT)]\n\n- **CPDTrack:** Dailing Zhang, Shiyu Hu, Xiaokun Feng, Xuchen Li, Meiqi Wu, Kaiqi Huang.\u003Cbr \u002F>\n  \"Beyond Accuracy: Tracking more like Human via Visual Search.\" NeurIPS (2024).\n  [[paper](https:\u002F\u002Fopenreview.net\u002Fforum?id=LezAEImfoc)] \n  [[code](https:\u002F\u002Fgithub.com\u002FZhangDailing8\u002FCPDTrack)]\n\n\n\n### ECCV 2024\n\n- **Diff-Tracker:** Zhengbo Zhang, Li Xu, Duo Peng, Hossein Rahmani, Jun Liu.\u003Cbr \u002F>\n  \"Diff-Tracker: Text-to-Image Diffusion Models are Unsupervised Trackers.\" ECCV (2024).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.08394)] \n  [[code]( )]\n\n- **LoRAT:** Liting Lin, Heng Fan, Zhipeng Zhang, Yaowei Wang, Yong Xu, Haibin Ling.\u003Cbr \u002F>\n  \"Tracking Meets LoRA: Faster Training, Larger Model, Stronger Performance.\" ECCV (2024).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.05231)] \n  [[code](https:\u002F\u002Fgithub.com\u002FLitingLin\u002FLoRAT)]\n\n- **VideoMamba:** Kunchang Li, Xinhao Li, Yi Wang, Yinan He, Yali Wang, Limin Wang, Yu Qiao.\u003Cbr \u002F>\n  \"VideoMamba: State Space Model for Efficient Video Understanding.\" ECCV (2024).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.06977)] \n  [[code](https:\u002F\u002Fhuggingface.co\u002FOpenGVLab\u002FVideoMamba)]\n\n- **DINO-Tracker:** Narek Tumanyan, Assaf Singer, Shai Bagon, Tali Dekel.\u003Cbr \u002F>\n  \"DINO-Tracker: Taming DINO for Self-Supervised Point Tracking in a Single Video.\" ECCV (2024).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.14548v1)] \n  [[code](https:\u002F\u002Fdino-tracker.github.io\u002F)]\n\n- **DecoMotion:** Rui Li, Dong Liu.\u003Cbr \u002F>\n  \"Decomposition Betters Tracking Everything Everywhere.\" ECCV (2024).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.06531)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fqianduoduolr\u002FDecoMotion)]\n\n- **Elysium:** Han Wang, Yanjie Wang, Yongjie Ye, Yuxiang Nie, Can Huang.\u003Cbr \u002F>\n  \"Elysium: Exploring Object-level Perception in Videos via MLLM.\" ECCV (2024).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.02049)] \n  [[code](https:\u002F\u002Fgithub.com\u002FHon-Wong\u002FElysium)]\n  \n- **HVTrack:** Qiao Wu, Kun Sun, Pei An, Mathieu Salzmann, Yanning Zhang, Jiaqi Yang.\u003Cbr \u002F>\n  \"3D Single-object Tracking in Point Clouds with High Temporal Variation.\" ECCV (2024).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.02049)] \n  [[code]( )]\n\n- **AADN:** Zhewei Wu, Ruilong Yu, Qihe Liu, Shuying Cheng, Shilin Qiu, Shijie Zhou.\u003Cbr \u002F>\n  \"Enhancing Tracking Robustness with Auxiliary Adversarial Defense Networks.\" ECCV (2024).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.17976)] \n  [[code](https:\u002F\u002Fgithub.com\u002F)]\n\n  \n### CVPR 2024\n\n- **MASA:** Siyuan Li, Lei Ke, Martin Danelljan, Luigi Piccinelli, Mattia Segu, Luc Van Gool, Fisher Yu.\u003Cbr \u002F>\n  \"Matching Anything by Segmenting Anything.\" CVPR (2024).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.04221)] \n  [[code](https:\u002F\u002Fmatchinganything.github.io\u002F)]\n  \n- **OneTracker:** Lingyi Hong, Shilin Yan, Renrui Zhang, Wanyun Li, Xinyu Zhou, Pinxue Guo, Kaixun Jiang, Yiting Cheng, Jinglun Li, Zhaoyu Chen, Wenqiang Zhang.\u003Cbr \u002F>\n  \"OneTracker: Unifying Visual Object Tracking with Foundation Models and Efficient Tuning.\" CVPR (2024).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.09634)] \n  [[code](https:\u002F\u002F)]\n\n- **ARTrackV2:** Yifan Bai, Zeyang Zhao, Yihong Gong, Xing Wei.\u003Cbr \u002F>\n  \"ARTrackV2: Prompting Autoregressive Tracker Where to Look and How to Describe.\" CVPR (2024).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.17133)] \n  [[code](https:\u002F\u002Fartrackv2.github.io\u002F)]\n\n- **DiffusionTrack:** Fei Xie, Zhongdao Wang, Chao Ma.\u003Cbr \u002F>\n  \"DiffusionTrack: Point Set Diffusion Model for Visual Object Tracking.\" CVPR (2024).\n  [[paper](https:\u002F\u002Farxiv.org\u002F)] \n  [[code](https:\u002F\u002F)]\n\n- **RTracker:** Yuqing Huang, Xin Li, Zikun Zhou, Yaowei Wang, Zhenyu He, Ming-Hsuan Yang.\u003Cbr \u002F>\n  \"RTracker: Recoverable Tracking via PN Tree Structured Memory.\" CVPR (2024).\n  [[paper](https:\u002F\u002Farxiv.org\u002F)] \n  [[code](https:\u002F\u002F)]\n\n- **NetTrack:** Guangze Zheng, Shijie Lin, Haobo Zuo, Changhong Fu, Jia Pan.\u003Cbr \u002F>\n  \"NetTrack: Tracking Highly Dynamic Objects with a Net.\" CVPR (2024).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.11186)] \n  [[code](https:\u002F\u002Fgeorge-zhuang.github.io\u002Fnettrack\u002F)]\n\n- **Un-Track:** Zongwei Wu, Jilai Zheng, Xiangxuan Ren, Florin-Alexandru Vasluianu, Chao Ma, Danda Pani Paudel, Luc Van Gool, Radu Timofte.\u003Cbr \u002F>\n  \"Single-Model and Any-Modality for Video Object Tracking.\" CVPR (2024).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.15851)] \n  [[code](https:\u002F\u002Fgithub.com\u002FZongwei97\u002FUnTrack)]\n\n- **HIPTrack:** Wenrui Cai, Qingjie Liu, Yunhong Wang.\u003Cbr \u002F>\n  \"HIPTrack: Visual Tracking with Historical Prompts.\" CVPR (2024).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.02072)] \n  [[code](https:\u002F\u002Fxxx)]\n\n- **AQATrack:** Jinxia Xie, Bineng Zhong, Zhiyi Mo, Shengping Zhang, Liangtao Shi, Shuxiang Song, Rongrong Ji.\u003Cbr \u002F>\n  \"Autoregressive Queries for Adaptive Tracking with Spatio-Temporal Transformers.\" CVPR (2024).\n  [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FXie_Autoregressive_Queries_for_Adaptive_Tracking_with_Spatio-Temporal_Transformers_CVPR_2024_paper.html)] \n  [[code](https:\u002F\u002Fgithub.com\u002FGXNU-ZhongLab\u002FAQATrack)]\n\n- **MMA:** Lingxiao Yang, Ru-Yuan Zhang, Yanchen Wang, Xiaohua Xie.\u003Cbr \u002F>\n  \"MMA: Multi-Modal Adapter for Vision-Language Models.\" CVPR (2024).\n  [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FYang_MMA_Multi-Modal_Adapter_for_Vision-Language_Models_CVPR_2024_paper.html)] \n  [[code](https:\u002F\u002Fgithub.com\u002FZjjConan\u002FMulti-Modal-Adapter)]\n\n- **SDSTrack:** Xiaojun Hou, Jiazheng Xing, Yijie Qian, Yaowei Guo, Shuo Xin, Junhao Chen, Kai Tang, Mengmeng Wang, Zhengkai Jiang, Liang Liu, Yong Liu.\u003Cbr \u002F>\n  \"SDSTrack: Self-Distillation Symmetric Adapter Learning for Multi-Modal Visual Object Tracking.\" CVPR (2024).\n  [[paper](https:\u002F\u002Farxiv.org\u002F)] \n  [[code](https:\u002F\u002F)]\n\n- **HDETrack:** Xiao Wang, Shiao Wang, Chuanming Tang, Lin Zhu, Bo Jiang, Yonghong Tian, Jin Tang.\u003Cbr \u002F>\n  \"Event Stream-based Visual Object Tracking: A High-Resolution Benchmark Dataset and A Novel Baseline.\" CVPR (2024).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.14611)] \n  [[code](https:\u002F\u002Fgithub.com\u002FEvent-AHU\u002FEventVOT_Benchmark)]\n\n- **CAI:** Yanyan Shao, Shuting He, Qi Ye, Yuchao Feng, Wenhan Luo, Jiming Chen.\u003Cbr \u002F>\n  \"Context-Aware Integration of Language and Visual References for Natural Language Tracking.\" CVPR (2024).\n  [[paper](https:\u002F\u002Farxiv.org\u002F)] \n  [[code](https:\u002F\u002F)]\n\n- **ResampleTrack:** Xuhong Ren, Jianlang Chen, Yue Cao, Wanli Xue, Qing Guo, Lei Ma, Jianjun Zhao, Shenyong Chen.\u003Cbr \u002F>\n  \"ResampleTrack: Online Resampling for Adversarially Robust Visual Tracking.\" CVPR (2024).\n  [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024W\u002FAdvML\u002Fhtml\u002FRen_ResampleTrack_Online_Resampling_for_Adversarially_Robust_Visual_Tracking_CVPRW_2024_paper.html)] \n  [[code]( )]\n  \n\n### WACV 2024\n\n- **ContrasTR:** Pierre-François De Plaen, Nicola Marinello, Marc Proesmans, Tinne Tuytelaars, Luc Van Gool.\u003Cbr \u002F>\n  \"Contrastive Learning for Multi-Object Tracking With Transformers.\" WACV (2024).\n  [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FWACV2024\u002Fpapers\u002FDe_Plaen_Contrastive_Learning_for_Multi-Object_Tracking_With_Transformers_WACV_2024_paper.pdf)] \n  [[code]()]\n\n- **LaGOT:** Christoph Mayer, Martin Danelljan, Ming-Hsuan Yang, Vittorio Ferrari, Luc Van Gool, Alina Kuznetsova.\u003Cbr \u002F>\n  \"Beyond SOT: It's Time to Track Multiple Generic Objects at Once.\" WACV (2024).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.11920)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fvisionml\u002Fpytracking)]\n\n- **SMAT:** Goutam Yelluru Gopal, Maria A. Amer.\u003Cbr \u002F>\n  \"Separable Self and Mixed Attention Transformers for Efficient Object Tracking.\" WACV (2024).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.03979)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fgoutamyg\u002FSMAT)]\n  \n- **DATr:** Jie Zhao, Johan Edstedt, Michael Felsberg, Dong Wang, Huchuan Lu.\u003Cbr \u002F>\n  \"Leveraging the Power of Data Augmentation for Transformer-based Tracking.\" WACV (2024).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.08264)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fzj5559\u002FDATr)]\n  \n### AAAI 2024\n\n- **GMMT:** Zhangyong Tang, Tianyang Xu, Xuefeng Zhu, Xiao-Jun Wu, Josef Kittler.\u003Cbr \u002F>\n  \"Generative-based Fusion Mechanism for Multi-Modal Tracking.\" AAAI (2024).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.01728)] \n  [[code](https:\u002F\u002Fgithub.com\u002FZhangyong-Tang\u002FGMMT)]\n\n- **ODTrack:** Yaozong Zheng, Bineng Zhong, Qihua Liang, Zhiyi Mo, Shengping Zhang, Xianxian Li.\u003Cbr \u002F>\n  \"ODTrack: Online Dense Temporal Token Learning for Visual Tracking.\" AAAI (2024).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.01686)] \n  [[code](https:\u002F\u002Fgithub.com\u002FGXNU-ZhongLab\u002FODTrack)]\n\n - **EVPTrack:** Liangtao Shi, Bineng Zhong, Qihua Liang, Ning Li, Shengping Zhang, Xianxian Li.\u003Cbr \u002F>\n  \"Explicit Visual Prompts for Visual Object Tracking.\" AAAI (2024).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.03142)] \n  [[code](https:\u002F\u002Fgithub.com\u002FGXNU-ZhongLab\u002FEVPTrack)] \n  \n- **BAT:** Bing Cao, Junliang Guo, Pengfei Zhu, Qinghua Hu.\u003Cbr \u002F>\n  \"Bi-directional Adapter for Multimodal Tracking.\" AAAI (2024).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.10611)] \n  [[code](https:\u002F\u002Fgithub.com\u002FSparkTempest\u002FBAT)]\n\n- **TATrack:** Hongyu Wang, Xiaotao Liu, Yifan Li, Meng Sun, Dian Yuan, Jing Liu.\u003Cbr \u002F>\n  \"Temporal Adaptive RGBT Tracking with Modality Prompt.\" AAAI (2024).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.01244)] \n  [[code]()]\n\n- **Hybrid-SORT:** Mingzhan Yang, Guangxin Han, Bin Yan, Wenhua Zhang, Jinqing Qi, Huchuan Lu, Dong Wang.\u003Cbr \u002F>\n  \"Hybrid-SORT: Weak Cues Matter for Online Multi-Object Tracking.\" AAAI (2024).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.00783)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fymzis69\u002FHybirdSORT)]\n\n\n### ArXiv 2024\n\n- **SeqTrack3D:** Yu Lin, Zhiheng Li, Yubo Cui, Zheng Fang.\u003Cbr \u002F>\n  \"SeqTrack3D: Exploring Sequence Information for Robust 3D Point Cloud Tracking.\" ICRA (2024).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.16249)] \n  [[code](https:\u002F\u002Fgithub.com\u002Faron-lin\u002Fseqtrack3d)]\n  \n- **VAT:** Guangtong Zhang, Qihua Liang, Zhiyi Mo, Ning Li, Bineng Zhong.\u003Cbr \u002F>\n  \"Visual Adapt For RGBD Tracking.\" ICASSP (2024).\n  [[paper](https:\u002F\u002Farxiv.org )] \n  [[code](https:\u002F\u002Fgithub.com\u002F )]\n\n- **UVLTrack:** Yinchao Ma, Yuyang Tang, Wenfei Yang, Tianzhu Zhang, Jinpeng Zhang, Mengxue Kang.\u003Cbr \u002F>\n  \"Unifying Visual and Vision-Language Tracking via Contrastive Learning.\" ArXiv (2024).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.11228)] \n  [[code](https:\u002F\u002Fgithub.com\u002FOpenSpaceAI\u002FUVLTrack)]\n\n- **SuperSBT:** Fei Xie, Wankou Yang, Chunyu Wang, Lei Chu, Yue Cao, Chao Ma, Wenjun Zeng.\u003Cbr \u002F>\n  \"Correlation-Embedded Transformer Tracking: A Single-Branch Framework.\" ArXiv (2024).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.12743)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fphiphiphi31\u002FSBT)]\n  \n\n### NeurIPS 2023\n\n- **MixFormerV2:** Yutao Cui, Tianhui Song, Gangshan Wu, Limin Wang.\u003Cbr \u002F>\n  \"MixFormerV2: Efficient Fully Transformer Tracking.\" NeurIPS (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.15896)] \n  [[code](https:\u002F\u002Fgithub.com\u002FMCG-NJU\u002FMixFormerV2)]\n  \n- **ZoomTrack:** Yutong Kou, Jin Gao, Bing Li, Gang Wang, Weiming Hu, Yizheng Wang, Liang Li.\u003Cbr \u002F>\n  \"ZoomTrack: Target-aware Non-uniform Resizing for Efficient Visual Tracking.\" NeurIPS (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.10071)] \n  [[code](https:\u002F\u002Fgithub.com\u002FKou-99\u002FZoomTrack)]\n\n- **Type-to-Track:** Pha Nguyen, Kha Gia Quach, Kris Kitani, Khoa Luu.\u003Cbr \u002F>\n  \"Type-to-Track: Retrieve Any Object via Prompt-based Tracking.\" NeurIPS (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.13495)] \n  [[code](https:\u002F\u002Fuark-cviu.github.io\u002FType-to-Track)]\n\n- **MGIT:** Shiyu Hu, Dailin Zhang, Meiqi Wu, Xiaokun Feng, Xuchen Li, Xin Zhao, Kaiqi Huang.\u003Cbr \u002F>\n  \"A Multi-modal Global Instance Tracking Benchmark (MGIT): Better Locating Target in Complex Spatio-temporal and Causal Relationship.\" NeurIPS (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002Fxxxxx.xx)] \n  [[code](http:\u002F\u002Fvideocube.aitestunion.com\u002F)]\n\n\n### ICCV 2023\n\n- **VTDNet:** Thomas E. Huang, Yifan Liu, Luc Van Gool, Fisher Yu.\u003Cbr \u002F>\n  \"Video Task Decathlon: Unifying Image and Video Tasks in Autonomous Driving.\" ICCV (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.04422)] \n  [[code](https:\u002F\u002Fwww.vis.xyz\u002Fpub\u002Fvtd)]\n  \n- **HiT:** Ben Kang, Xin Chen, Dong Wang, Houwen Peng, Huchuan Lu.\u003Cbr \u002F>\n  \"Exploring Lightweight Hierarchical Vision Transformers for Efficient Visual Tracking.\" ICCV (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.06904)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fkangben258\u002FHiT)]\n\n- **ROMTrack:** Yidong Cai, Jie Liu, Jie Tang, Gangshan Wu.\u003Cbr \u002F>\n  \"Robust Object Modeling for Visual Tracking.\" ICCV (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.05140)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fdawnyc\u002FROMTrack)]\n\n- **F-BDMTrack:** Dawei Yang, Jianfeng He, Yinchao Ma, Qianjin Yu, Tianzhu Zhang.\u003Cbr \u002F>\n  \"Foreground-Background Distribution Modeling Transformer for Visual Object Tracking.\" ICCV (2023).\n  [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FYang_Foreground-Background_Distribution_Modeling_Transformer_for_Visual_Object_Tracking_ICCV_2023_paper.pdf)] \n  [[code]()]\n\n- **MITS:** Yuanyou Xu, Zongxin Yang, Yi Yang.\u003Cbr \u002F>\n  \"Integrating Boxes and Masks: A Multi-Object Framework for Unified Visual Tracking and Segmentation.\" ICCV (2023).\n  [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FXu_Integrating_Boxes_and_Masks_A_Multi-Object_Framework_for_Unified_Visual_ICCV_2023_paper.pdf)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fyoxu515\u002FMITS)]\n\n- **Aba-ViTrack:** Shuiwang Li, Yangxiang Yang, Dan Zeng, Xucheng Wang.\u003Cbr \u002F>\n  \"Adaptive and Background-Aware Vision Transformer for Real-Time UAV Tracking.\" ICCV (2023).\n  [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FLi_Adaptive_and_Background-Aware_Vision_Transformer_for_Real-Time_UAV_Tracking_ICCV_2023_paper.pdf)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fxyyang317\u002FAba-ViTrack)]\n  \n- **Omnimotion:** Qianqian Wang, Yen-Yu Chang, Ruojin Cai, Zhengqi Li, Bharath Hariharan, Aleksander Holynski, Noah Snavely.\u003Cbr \u002F>\n  \"Tracking Everything Everywhere All at Once.\" ICCV (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.05422)] \n  [[code](https:\u002F\u002Fomnimotion.github.io\u002F)]\n  \n- **DEVA:** Ho Kei Cheng, Seoung Wug Oh, Brian Price, Alexander Schwing, Joon-Young Lee.\u003Cbr \u002F>\n  \"Tracking Anything with Decoupled Video Segmentation.\" ICCV (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.03903)] \n  [[code](https:\u002F\u002Fhkchengrex.github.io\u002FTracking-Anything-with-DEVA)]\n\n- **CiteTracker:** Xin Li, Yuqing Huang, Zhenyu He, Yaowei Wang, Huchuan Lu, Ming-Hsuan Yang.\u003Cbr \u002F>\n  \"CiteTracker: Correlating Image and Text for Visual Tracking.\" ICCV (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.11322)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fxinli\u002Fcitetracker)]\n\n- **DecoupleTNL:** Ding Ma, Xiangqian Wu.\u003Cbr \u002F>\n  \"Tracking by Natural Language Specification with Long Short-term Context Decoupling.\" ICCV (2023).\n  [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FMa_Tracking_by_Natural_Language_Specification_with_Long_Short-term_Context_Decoupling_ICCV_2023_paper.pdf)] \n  [[code]()]\n  \n- **PVT++:** Bowen Li, Ziyuan Huang, Junjie Ye, Yiming Li, Sebastian Scherer, Hang Zhao, Changhong Fu.\u003Cbr \u002F>\n  \"PVT++: A Simple End-to-End Latency-Aware Visual Tracking Framework.\" ICCV (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.11629)] \n  [[code](https:\u002F\u002Fgithub.com\u002FJaraxxus-Me\u002FPVT_pp)]\n\n- **SyncTrack:** Teli Ma, Mengmeng Wang, Jimin Xiao, Huifeng Wu, Yong Liu.\u003Cbr \u002F>\n  \"Synchronize Feature Extracting and Matching: A Single Branch Framework for 3D Object Tracking.\" ICCV (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.12549)] \n  [[code](https:\u002F\u002Fxxxxx)]\n  \n- **360VOT:** Huajian Huang, Yinzhe Xu, Yingshu Chen, Sai-Kit Yeung.\u003Cbr \u002F>\n  \"360VOT: A New Benchmark Dataset for Omnidirectional Visual Object Tracking.\" ICCV (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.14630)] \n  [[code](https:\u002F\u002F360vot.hkustvgd.com\u002F)]\n\n- **PlanarTrack:** Xinran Liu, Xiaoqiong Liu, Ziruo Yi, Xin Zhou, Thanh Le, Libo Zhang, Yan Huang, Qing Yang, Heng Fan.\u003Cbr \u002F>\n  \"PlanarTrack: A Large-scale Challenging Benchmark for Planar Object Tracking.\" ICCV (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.07625)] \n  [[code](https:\u002F\u002Fhengfan2010.github.io\u002Fprojects\u002FPlanarTrack\u002F)]\n       \n### CVPR 2023\n\n- **X-Decoder:** Xueyan Zou, Zi-Yi Dou, Jianwei Yang, Zhe Gan, Linjie Li, Chunyuan Li, Xiyang Dai, Harkirat Behl, Jianfeng Wang, Lu Yuan, Nanyun Peng, Lijuan Wang, Yong Jae Lee, Jianfeng Gao.\u003Cbr \u002F>\n  \"Generalized Decoding for Pixel, Image, and Language.\" CVPR (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.11270)] \n  [[code](https:\u002F\u002Fx-decoder-vl.github.io\u002F)]\n  \n- **UNINEXT:** Bin Yan, Yi Jiang, Jiannan Wu, Dong Wang, Ping Luo, Zuhuan Yuan, Huchuan Lu.\u003Cbr \u002F>\n  \"Universial Instance Perception as Object Discovery and Retrieval.\" CVPR (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.06674)] \n  [[code](https:\u002F\u002Fgithub.com\u002FMasterBin-IIAU\u002FUNINEXT)]\n  \n- **OmniTracker:** Junke Wang, Dongdong Chen, Zuxuan Wu, Chong Luo, Xiyang Dai, Lu Yuan, Yu-Gang Jiang.\u003Cbr \u002F>\n  \"OmniTracker: Unifying Object Tracking by Tracking-with-Detection.\" CVPR (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.12079)] \n  [[code](https:\u002F\u002Fgithub.com\u002F)]\n\n- **SUSHI:** Orcun Cetintas, Guillem Brasó, Laura Leal-Taixé.\u003Cbr \u002F>\n  \"Unifying Short and Long-Term Tracking with Graph Hierarchies.\" CVPR (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.03038)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fdvl-tum\u002FSUSHI)]\n  \n- **DropMAE:** Qiangqiang Wu, Tianyu Yang, Ziquan Liu, Baoyuan Wu, Ying Shan, Antoni B. Chan.\u003Cbr \u002F>\n  \"DropMAE: Masked Autoencoders with Spatial-Attention Dropout for Tracking Tasks.\" CVPR (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.00571)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fjimmy-dq\u002FDropMAE)]\n  \n- **VideoTrack:** Fei Xie, Lei Chu, Jiahao Li, Yan Lu, Chao Ma.\u003Cbr \u002F>\n  \"VideoTrack: Learning to Track Objects via Video Transformer.\" CVPR (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002Fx)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fphiphiphi31\u002FVideoTrack)]\n  \n- **SwinV2:** Zhenda Xie, Zigang Geng, Jingcheng Hu, Zheng Zhang, Han Hu, Yue Cao.\u003Cbr \u002F>\n  \"Revealing the Dark Secrets of Masked Image Modeling.\" CVPR (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.13543)] \n  [[code](https:\u002F\u002Fgithub.com\u002FSwinTransformer\u002FMIM-Depth-Estimation)]\n  \n- **ViPT:** Jiawen Zhu, Simiao Lai, Xin Chen, Dong Wang, Huchuan Lu.\u003Cbr \u002F>\n  \"Visual Prompt Multi-Modal Tracking.\" CVPR (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.10826)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fjiawen-zhu\u002FViPT)]\n  \n - **JointNLT:** Li Zhou, Zikun Zhou, Kaige Mao, Zhenyu He.\u003Cbr \u002F>\n  \"Joint Visual Grounding and Tracking with Natural Language Specification.\" CVPR (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.12027)] \n  [[code](https:\u002F\u002Fgithub.com\u002Flizhou-cs\u002FJointNLT)]\n  \n - **ARKitTrack:** Haojie Zhao, Junsong Chen, Lijun Wang, Huchuan Lu.\u003Cbr \u002F>\n  \"ARKitTrack: A New Diverse Dataset for Tracking Using Mobile RGB-D Data.\" CVPR (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.13885)] \n  [[code](https:\u002F\u002Farkittrack.github.io\u002F)]\n  \n - **GRM:** Shenyuan Gao, Chunluan Zhou, Jun Zhang.\u003Cbr \u002F>\n  \"Generalized Relation Modeling for Transformer Tracking.\" CVPR (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2303.16580v1.pdf)] \n  [[code](https:\u002F\u002Fgithub.com\u002FLittle-Podi\u002FGRM)]\n  \n - **ARTrack:** Xing Wei, Yifan Bai, Yongchao Zheng, Dahu Shi, Yihong Gong.\u003Cbr \u002F>\n  \"Autoregressive Visual Tracking.\" CVPR (2023).\n  [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FWei_Autoregressive_Visual_Tracking_CVPR_2023_paper.html)] \n  [[code](https:\u002F\u002Fgithub.com\u002FMIV-XJTU\u002FARTrack)]\n  \n - **MAT:** Haojie Zhao, Dong Wang, Huchuan Lu.\u003Cbr \u002F>\n  \"Representation Learning for Visual Object Tracking by Masked Appearance Transfer.\" CVPR (2023).\n  [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FZhao_Representation_Learning_for_Visual_Object_Tracking_by_Masked_Appearance_Transfer_CVPR_2023_paper.html)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fdifhnp\u002FMAT)]\n  \n - **EMT:** Jinyu Yang, Shang Gao, Zhe Li, Feng Zheng, Aleš Leonardis.\u003Cbr \u002F>\n  \"Resource-Efficient RGBD Aerial Tracking.\" CVPR (2023).\n  [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FYang_Resource-Efficient_RGBD_Aerial_Tracking_CVPR_2023_paper.html)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fyjybuaa\u002FRGBDAerialTracking)]\n  \n - **TBSI:** Tianrui Hui, Zizheng Xun, Fengguang Peng, Junshi Huang, Xiaoming Wei, Xiaolin Wei, Jiao Dai, Jizhong Han, Si Liu.\u003Cbr \u002F>\n  \"Bridging Search Region Interaction With Template for RGB-T Tracking.\" CVPR (2023).\n  [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FHui_Bridging_Search_Region_Interaction_With_Template_for_RGB-T_Tracking_CVPR_2023_paper.html)] \n  [[code](https:\u002F\u002Fgithub.com\u002FRyanHTR\u002FTBSI)]\n  \n - **VisTracker:** Xianghui Xie, Bharat Lal Bhatnagar, Gerard Pons-Moll.\u003Cbr \u002F>\n  \"Visibility Aware Human-Object Interaction Tracking from Single RGB Camera.\" CVPR (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.16479v1)] \n  [[code](https:\u002F\u002Fvirtualhumans.mpi-inf.mpg.de\u002FVisTracker\u002F)]\n  \n - **OVTrack:** Siyuan Li, Tobias Fischer, Lei Ke, Henghui Ding, Martin Danelljan, Fisher Yu.\u003Cbr \u002F>\n  \"OVTrack: Open-Vocabulary Multiple Object Tracking.\" CVPR (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.08408)] \n  [[code](https:\u002F\u002Fwww.vis.xyz\u002Fpub\u002Fovtrack\u002F)]\n  \n - **SeqTrack:** Xin Chen, Houwen Peng, Dong Wang, Huchuan Lu, Han Hu.\u003Cbr \u002F>\n  \"SeqTrack: Sequence to Sequence Learning for Visual Object Tracking.\" CVPR (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.14394)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002FVideoX)]\n  \n - **ImageBind:** Rohit Girdhar, Alaaeldin El-Nouby, Zhuang Liu, Mannat Singh, Kalyan Vasudev Alwala, Armand Joulin, Ishan Misra.\u003Cbr \u002F>\n  \"IMAGEBIND: One Embedding Space To Bind Them All.\" CVPR (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.05665)] \n  [[code](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FImageBind)]\n  \n - **TCOW:** Basile Van Hoorick, Pavel Tokmakov, Simon Stent, Jie Li, Carl Vondrick.\u003Cbr \u002F>\n  \"Tracking through Containers and Occluders in the Wild.\" CVPR (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.03052)] \n  [[code](https:\u002F\u002Ftcow.cs.columbia.edu\u002F)]\n  \n\n### ArXiv 2023\n\n- **UTrack:** Jie Gao, Bineng Zhong, Yan Chen.\u003Cbr \u002F>\n  \"Unambiguous Object Tracking by Exploiting Target Cues.\" ACM MM (2023).\n  [[paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3581783.3612240)] \n  [[code]()]\n\n- **UPVPT:** Guangtong Zhang, Qihua Liang, Ning Li, Zhiyi Mo, Bineng Zhong.\u003Cbr \u002F>\n  \"Robust Tracking via Unifying Pretrain-Finetuning and Visual Prompt Tuning.\" ACM MMAsia (2023).\n  [[paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3595916.3626410)] \n  [[code]()]\n\n- **TAO-Amodal:** Cheng-Yen Hsieh, Tarasha Khurana, Achal Dave, Deva Ramanan.\u003Cbr \u002F>\n  \"Tracking Any Object Amodally.\" ArXiv (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.12433)] \n  [[code](https:\u002F\u002Ftao-amodal.github.io\u002F)]\n\n- **HQTrack:** Jiawen Zhu, Zhenyu Chen, Zeqi Hao, Shijie Chang, Lu Zhang, Dong Wang, Huchuan Lu, Bin Luo, Jun-Yan He, Jin-Peng Lan, Hanyuan Chen, Chenyang Li.\u003Cbr \u002F>\n  \"Tracking Anything in High Quality.\" ArXiv (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.13974)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fjiawen-zhu\u002FHQTrack)]\n\n- **MMTrack:** Yaozong Zheng, Bineng Zhong, Qihua Liang, Guorong Li, Rongrong Ji, Xianxian Li.\u003Cbr \u002F>\n  \"Towards Unified Token Learning for Vision-Language Tracking.\" TCSVT (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.14103)] \n  [[code](https:\u002F\u002Fgithub.com\u002FAzong-HQU\u002FMMTrack)]\n\n- **OVLM:** Huanlong Zhang, Jingchao Wang, Jianwei Zhang, Tianzhu Zhang, Bineng Zhong.\u003Cbr \u002F>\n  \"One-stream Vision-Language Memory Network for Object Tracking.\" TMM (2023).\n  [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F10149530)] \n  [[code]( )]\n\n- **All-in-One:** Chunhui Zhang, Xin Sun, Li Liu, Yiqian Yang, Qiong Liu, Xi Zhou, Yanfeng Wang.\u003Cbr \u002F>\n  \"All in One: Exploring Unified Vision-Language Tracking with Multi-Modal Alignment.\" ACM MM (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.03373)] \n  [[code]( )]\n  \n- **MPLT:** Yang Luo, Xiqing Guo, Hui Feng, Lei Ao.\u003Cbr \u002F>\n  \"RGB-T Tracking via Multi-Modal Mutual Prompt Learning.\" ArXiv (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.16386)] \n  [[code](https:\u002F\u002Fgithub.com\u002FHusterYoung\u002FMPLT)]\n  \n- **DCPT:** Jiawen Zhu, Huayi Tang, Zhi-Qi Cheng, Jun-Yan He, Bin Luo, Shihao Qiu, Shengming Li, Huchuan Lu.\u003Cbr \u002F>\n  \"DCPT: Darkness Clue-Prompted Tracking in Nighttime UAVs.\" ArXiv (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.10491)] \n  [[code](https:\u002F\u002Fxxx)]\n\n- **SRT:** Tianpeng Liu, Jing Li, Jia Wu, Lefei Zhang, Jun Chang, Jun Wan, Lezhi Lian.\u003Cbr \u002F>\n  \"Tracking with Saliency Region Transformer.\" TIP (2023).\n  [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F10359476)] \n  [[code](https:\u002F\u002Fgithub.xxxxx)]\n\n- **TATrans:** Pujian Lai, Meili Zhang, Gong Cheng, Shengyang Li, Xiankai Huang, Junwei Han.\u003Cbr \u002F>\n  \"Target-aware Transformer for Satellite Video Object Tracking.\" TGRS (2023).\n  [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10342836)] \n  [[code](https:\u002F\u002Fgithub.com\u002Flaybebe\u002FTATrans_SVOT)]\n\n- **STRtrack:** Shaochuan Zhao, Tianyang Xu, Xiaojun Wu, Josef Kittler.\u003Cbr \u002F>\n  \"A Spatio-Temporal Robust Tracker with Spatial-Channel Transformer and Jitter Suppression.\" IJCV (2023).\n  [[paper](https:\u002F\u002Flink.springer.com\u002Farticle\u002F10.1007\u002Fs11263-023-01902-x)] \n  [[code](https:\u002F\u002Fxxx)]\n\n- **CoTracker:** Nikita Karaev, Ignacio Rocco, Benjamin Graham, Natalia Neverova, Andrea Vedaldi, Christian Rupprecht.\u003Cbr \u002F>\n  \"CoTracker: It is Better to Track Together.\" ArXiv (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.07635)] \n  [[code](https:\u002F\u002Fco-tracker.github.io\u002F)]\n  \n- **LiteTrack:** Qingmao Wei, Bi Zeng, Jianqi Liu, Li He, Guotian Zeng.\u003Cbr \u002F>\n  \"LiteTrack: Layer Pruning with Asynchronous Feature Extraction for Lightweight and Efficient Visual Tracking.\" ArXiv (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.09249)] \n  [[code](https:\u002F\u002Fgithub.com\u002FTsingWei\u002FLiteTrack)]\n  \n- **LightFC:** Li Yunfeng, Wang Bo, Li Ye, Liu Zhuoyan, Wu Xueyi.\u003Cbr \u002F>\n  \"Lightweight Full-Convolutional Siamese Tracker.\" ArXiv (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.05392)] \n  [[code](https:\u002F\u002Fgithub.com\u002FLiYunfengLYF\u002FLightFC)]\n\n- **DETRrack:** Qingmao Wei, Bi Zeng, Guotian Zeng.\u003Cbr \u002F>\n  \"Efficient Training for Visual Tracking with Deformable Transformer.\" ArXiv (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.02676)] \n  [[code](https:xxx)]\n\n- **JN:** Qingmao Wei, Bi Zeng, Guotian Zeng.\u003Cbr \u002F>\n  \"Towards Efficient Training with Negative Samples in Visual Tracking.\" ArXiv (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.02903)] \n  [[code](hxx)]\n\n- **COHA:** Zhiyu Zhu, Junhui Hou, Dapeng Oliver Wu.\u003Cbr \u002F>\n  \"Cross-modal Orthogonal High-rank Augmentation for RGB-Event Transformer-trackers.\" ArXiv (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.04129)] \n  [[code](https:\u002Fxx)]\n  \n- **SparseTrack:** Zelin Liu, Xinggang Wang, Cheng Wang, Wenyu Liu, Xiang Bai.\u003Cbr \u002F>\n  \"SparseTrack: Multi-Object Tracking by Performing Scene Decomposition based on Pseudo-Depth.\" ArXiv (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.05238)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fhustvl\u002FSparseTrack)]\n    \n- **TransSOT:** Janani Thangavel, Thanikasalam Kokul, Amirthalingam Ramanan, Subha Fernando.\u003Cbr \u002F>\n  \"Transformers in Single Object Tracking: An Experimental Survey.\" ArXiv (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.11867)] \n  [[code]()]\n  \n- **ProFormer:** Yabin Zhu, Chenglong Li, Xiao Wang, Jin Tang, Zhixiang Huang.\u003Cbr \u002F>\n  \"RGBT Tracking via Progressive Fusion Transformer with Dynamically Guided Learning.\" ArXiv (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.14778)] \n  [[code]()]\n\n- **SOTVerse:** Shiyu Hu, Xin Zhao, Kaiqi Huang.\u003Cbr \u002F>\n  \"SOTVerse: A User-defined Task Space of Single Object Tracking.\" IJCV (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.07414)] \n  [[code](http:\u002F\u002Fmetaverse.aitestunion.com\u002Fsotverse)]\n\n- **TSMTrack:** Chuanming Tang, Qintao Hu, Gaofan Zhou, Jinzhen Yao, Jianlin Zhang, Yongmei Huang, Qixiang Ye.\u003Cbr \u002F>\n  \"Transformer Sub-Patch Matching for High-Performance Visual Object Tracking.\" TITS (2023).\n  [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F10101686)] \n  [[code](https:\u002Fxx)]\n\n- **TADS:** Xin Li, Wenjie Pei, Yaowei Wang, Zhenyu He, Huchuan Lu, Ming-Hsuan Yang.\u003Cbr \u002F>\n  \"Self-Supervised Tracking via Target-Aware Data Synthesis.\" TNNLS (2023).\n  [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F10004981)] \n  [[code]()]\n  \n\n### IJCAI 2023\n\n- **OSP2B:** Jiahao Nie, Zhiwei He, Yuxiang Yang, Zhengyi Bao, Mingyu Gao, Jing Zhang.\u003Cbr \u002F>\n  \"OSP2B: One-Stage Point-to-Box Network for 3D Siamese Tracking.\" IJCAI (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.11584)] \n  [[code](https:\u002F\u002Fgithub.com\u002FHaozheQi\u002FP2B)]\n  \n  \n### WACV 2023\n\n- **MVT:** Goutam Yelluru Gopal, Maria A. Amer.\u003Cbr \u002F>\n  \"Mobile Vision Transformer-based Visual Object Tracking.\" BMVC (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.05829)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fgoutamyg\u002FMVT)]\n  \n- **E.T.Track:** Philippe Blatter, Menelaos Kanakis, Martin Danelljan, Luc Van Gool.\u003Cbr \u002F>\n  \"Efficient Visual Tracking with Exemplar Transformers.\" WACV (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.09686)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fpblatter\u002Fettrack)]\n  \n\n### AAAI 2023\n\n- **CTTrack:** Zikai Song, Run Luo, Junqing Yu, Yi-Ping Phoebe Chen, Wei Yang.\u003Cbr \u002F>\n  \"Compact Transformer Tracker with Correlative Masked Modeling.\" AAAI (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.10938)] \n  [[code](https:\u002F\u002Fgithub.com\u002FHUSTDML\u002FCTTrack)]\n  \n- **TATrack:** Kaijie He, Canlong Zhang, Sheng Xie, Zhixin Li, Zhiwen Wang.\u003Cbr \u002F>\n  \"Target-Aware Tracking with Long-term Context Attention.\" AAAI (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.13840)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fhekaijie123\u002FTATrack)]\n  \n- **RGBD1K:** Xue-Feng Zhu, Tianyang Xu, Zhangyong Tang, Zucheng Wu, Haodong Liu, Xiao Yang, Xiao-Jun Wu, Josef Kittler.\u003Cbr \u002F>\n  \"RGBD1K: A Large-scale Dataset and Benchmark for RGB-D Object Tracking.\" AAAI (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.09787)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fxuefeng-zhu5\u002FRGBD1K)]\n\n- **GdaTFT:** Yun Liang; Qiaoqiao Li; Fumian Long.\u003Cbr \u002F>\n  \"Global Dilated Attention and Target Focusing Network for Robust Tracking.\" AAAI (2023).\n  [[paper](https:\u002F\u002Funderline.io\u002Flecture\u002F69278-global-dilated-attention-and-target-focusing-network-for-robust-tracking)] \n  [[code](https:\u002F\u002Fgithub.com\u002F)]\n  \n- **GLT-T:** Jiahao Nie, Zhiwei He, Yuxiang Yang, Mingyu Gao, Jing Zhang.\u003Cbr \u002F>\n  \"GLT-T: Global-Local Transformer Voting for 3D Single Object Tracking in Point Clouds.\" AAAI (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.10927)] \n  [[extended](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.00242)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fhaooozi\u002FGLT-T)]\n  \n- **RSPT:** Fangwei Zhong, Xiao Bi, Yudi Zhang, Wei Zhang, Yizhou Wang.\u003Cbr \u002F>\n  \"RSPT: Reconstruct Surroundings and Predict Trajectories for Generalizable Active Object Tracking.\" AAAI (2023).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.03623)] \n  [[code](https:\u002F\u002Fsites.google.com\u002Fview\u002Faot-rspt)]\n  \n### NeurIPS 2022\n\n- **SwinTrack:** Liting Lin, Heng Fan, Yong Xu, Haibin Ling.\u003Cbr \u002F>\n  \"SwinTrack: A Simple and Strong Baseline for Transformer Tracking.\" NeurIPS (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.00995)] \n  [[code](https:\u002F\u002Fgithub.com\u002FLitingLin\u002FSwinTrack)]\n  \n- **VLTrack:** Mingzhe Guo, Zhipeng Zhang, Heng Fan, Liping Jing.\u003Cbr \u002F>\n  \"Divert More Attention to Vision-Language Tracking.\" NeurIPS (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.01076)] \n  [[code](https:\u002F\u002Fgithub.com\u002FJudasDie\u002FSOTS)]\n  \n- **GKB:** Zhiyu Zhu, Junhui Hou, Xianqiang Lyu.\u003Cbr \u002F>\n  \"Leaning Graph-embedded Key-event Back-tracing for Object Tracking in Event Clouds.\" NeurIPS (2022).\n  [[paper](https:\u002F\u002Fnips.cc\u002FConferences\u002F2022\u002FSchedule?showEvent=54651)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fxxxx)]\n  \n- **TAP-Vid:** Carl Doersch, Ankush Gupta, Larisa Markeeva, Lucas Smaira, Yusuf Aytar, Andrew Zisserman, Yi Yang.\u003Cbr \u002F>\n  \"TAP-Vid: A Benchmark for Tracking Any Point in a Video.\" NeurIPS (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.03726)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fdeepmind\u002Ftapnet)]\n\n  \n### ECCV 2022\n\n- **OSTrack:** Botao Ye, Hong Chang, Bingpeng Ma, Shiguang Shan.\u003Cbr \u002F>\n  \"Joint Feature Learning and Relation Modeling for Tracking: A One-Stream Framework.\" ECCV (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.11991)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fbotaoye\u002FOSTrack)]\n  \n- **Unicorn:** Bin Yan, Yi Jiang, Peize Sun, Dong Wang, Zehuan Yuan, Ping Luo, Huchuan Lu.\u003Cbr \u002F>\n  \"Unicorn: Towards Grand Unification of Object Tracking.\" ECCV (2022) Oral.\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.07078)] \n  [[code](https:\u002F\u002Fgithub.com\u002FMasterBin-IIAU\u002FUnicorn)]\n  \n- **SimTrack:** Boyu Chen, Peixia Li, Lei Bai, Lei Qiao, Qiuhong Shen, Bo Li, Weihao Gan, Wei Wu, Wanli Ouyang.\u003Cbr \u002F>\n  \"Backbone is All Your Need: A Simplified Architecture for Visual Object Tracking.\" ECCV (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.05328)] \n  [[code](https:\u002F\u002Fgithub.com\u002FLPXTT\u002FSimTrack)]\n  \n- **CIA:** Zhixiong Pi, Weitao Wan, Chong Sun, Changxin Gao, Nong Sang, Chen Li.\u003Cbr \u002F>\n  \"Hierarchical Feature Embedding for Visual Tracking.\" ECCV (2022).\n  [[paper](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2022\u002Fpapers_ECCV\u002Fhtml\u002F4400_ECCV_2022_paper.php)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fzxgravity\u002FCIA)]\n  \n- **RTS:** Matthieu Paul,Martin Danelljan,Christoph Mayer,Luc Van Gool.\u003Cbr \u002F>\n  \"Robust Visual Tracking by Segmentation.\" ECCV (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.11191)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fvisionml\u002Fpytracking)]\n  \n- **AiATrack:** Shenyuan Gao, Chunluan Zhou, Chao Ma, Xinggang Wang, Junsong Yuan.\u003Cbr \u002F>\n  \"AiATrack: Attention in Attention for Transformer Visual Tracking.\" ECCV (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.09603)] \n  [[code](https:\u002F\u002Fgithub.com\u002FLittle-Podi\u002FAiATrack)]\n\n- **SLTtrack:** Minji Kim, Seungkwan Lee, Jungseul Ok, Bohyung Han, Minsu Cho.\u003Cbr \u002F>\n  \"Towards Sequence-Level Training for Visual Tracking.\" ECCV (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.05810)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fbyminji\u002FSLTtrack)]\n  \n- **FEAR:** Vasyl Borsuk, Roman Vei, Orest Kupyn, Tetiana Martyniuk, Igor Krashenyi, Jiři Matas.\u003Cbr \u002F>\n  \"FEAR: Fast, Efficient, Accurate and Robust Visual Tracker.\" ECCV (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2112.07957.pdf)] \n  [[code](https:\u002F\u002Fxxxxxxx)]\n  \n- **PersonPath22:** Bing Shuai, Alessandro Bergamo, Uta Buechler, Andrew Berneshawi, Alyssa Boden, Joseph Tighe.\u003Cbr \u002F>\n  \"Large Scale Real-World Multi-Person Tracking.\" ECCV (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.02175)] \n  [[code](https:\u002F\u002Famazon-science.github.io\u002Ftracking-dataset\u002Fpersonpath22.html)]\n  \n- **STNet:** Le Hui, Lingpeng Wang, Linghua Tang, Kaihao Lan, Jin Xie, Jian Yang.\u003Cbr \u002F>\n  \"3D Siamese Transformer Network for Single Object Tracking on Point Clouds.\" ECCV (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.11995)] \n  [[code](https:\u002F\u002Fgithub.com\u002Ffpthink\u002FSTNet)]\n  \n- **P3AFormer:** Zelin Zhao, Ze Wu, Yueqing Zhuang, Boxun Li, Jiaya Jia.\u003Cbr \u002F>\n  \"Tracking Objects as Pixel-wise Distributions.\" ECCV (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.05518)] \n  [[code](https:\u002F\u002Fsjtuytc.github.io\u002Fzelin_pages\u002Fp3aformer.html)]\n  \n- **TETer:** Siyuan Li, Martin Danelljan, Henghui Ding, Thomas E. Huang, Fisher Yu.\u003Cbr \u002F>\n  \"Tracking Every Thing in the Wild.\" ECCV (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.12978)] \n  [[code](http:\u002F\u002Fvis.xyz\u002Fpub\u002Ftet)]\n  \n- **ByteTrack:** Yifu Zhang, Peize Sun, Yi Jiang, Dongdong Yu, Zehuan Yuan, Ping Luo, Wenyu Liu, Xinggang Wang.\u003Cbr \u002F>\n  \"ByteTrack: Multi-Object Tracking by Associating Every Detection Box.\" ECCV (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2110.06864v2.pdf)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fifzhang\u002FByteTrack)]\n\n- **MOTR:** Fangao Zeng, Bin Dong, Yuang Zhang, Tiancai Wang, Xiangyu Zhang, Yichen Wei.\u003Cbr \u002F>\n  \"MOTR: End-to-End Multiple-Object Tracking with Transformer.\" ECCV (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2105.03247)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fmegvii-research\u002FMOTR)]\n  \n- **MTracker:** Yifu Zhang, Chunyu Wang, Xinggang Wang, Wenjun Zeng, Wenyu Liu.\u003Cbr \u002F>\n  \"Robust Multi-Object Tracking by Marginal Inference.\" ECCV (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.03727)] \n  [[code](https:\u002F\u002Fxxxxxxx)]\n  \n\n  \n  \n### CVPR 2022\n\n- **MixFormer:** Yutao Cui, Jiang Cheng, Limin Wang, Gangshan Wu.\u003Cbr \u002F>\n  \"MixFormer: End-to-End Tracking with Iterative Mixed Attention.\" CVPR (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.11082)] \n  [[code](https:\u002F\u002Fgithub.com\u002FMCG-NJU\u002FMixFormer)]\n  \n- **OWTB:** Yang Liu, Idil Esen Zulfikar, Jonathon Luiten, Achal Dave, Deva Ramanan, Bastian Leibe, Aljoša Ošep, Laura Leal-Taixé.\u003Cbr \u002F>\n  \"Opening up Open-World Tracking.\" CVPR (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.11221)] \n  [[code](https:\u002F\u002Fopenworldtracking.github.io\u002F)]\n  \n- **UTT:** Fan Ma, Mike Zheng Shou, Linchao Zhu, Haoqi Fan, Yilei Xu, Yi Yang, Zhicheng Yan.\u003Cbr \u002F>\n  \"Unified Transformer Tracker for Object Tracking.\" CVPR (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.15175)] \n  [[code](https:\u002F\u002Fgithub.com\u002FFlowerfan\u002FTrackron)]\n  \n- **CSWinTT:** Zikai Song, Junqing Yu, Yi-Ping Phoebe Chen, Wei Yang.\u003Cbr \u002F>\n  \"Transformer Tracking with Cyclic Shifting Window Attention.\" CVPR (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.03806)] \n  [[code](https:\u002F\u002Fgithub.com\u002FSkyeSong38\u002FCSWinTT)]\n  \n- **ToMP:** Christoph Mayer, Martin Danelljan, Goutam Bhat, Matthieu Paul, Danda Pani Paudel, Fisher Yu, Luc Van Gool.\u003Cbr \u002F>\n  \"Transforming Model Prediction for Tracking.\" CVPR (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.11192)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fvisionml\u002Fpytracking)]\n  \n- **TCTrack:** Ziang Cao, Ziyuan Huang, Liang Pan, Shiwei Zhang, Ziwei Liu, Changhong Fu.\u003Cbr \u002F>\n  \"TCTrack: Temporal Contexts for Aerial Tracking.\" CVPR (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.01885)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fvision4robotics\u002FTCTrack)]\n  \n- **SBT:** Fei Xie, Chunyu Wang, Guangting Wang, Yue Cao, Wankou Yang, Wenjun Zeng.\u003Cbr \u002F>\n  \"Correlation-Aware Deep Tracking.\" CVPR (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.01666)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fphiphiphi31\u002FSuperSBT)]\n  \n- **AdaRS:** Yihao Li, Jun Yu, Zhongpeng Cai, Yuwen Pan.\u003Cbr \u002F>\n  \"Cross-Modal Target Retrieval for Tracking by Natural Language.\" CVPR (2022).\n  [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022W\u002FODRUM\u002Fhtml\u002FLi_Cross-Modal_Target_Retrieval_for_Tracking_by_Natural_Language_CVPRW_2022_paper.html)] \n  [[code](xxxx)]\n  \n- **STNet:** Jiqing Zhang, Bo Dong, Haiwei Zhang, Jianchuan Ding, Felix Heide, Baocai Yin, Xin Yang.\u003Cbr \u002F>\n  \"Spiking Transformers for Event-based Single Object Tracking.\" CVPR (2022).\n  [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fhtml\u002FZhang_Spiking_Transformers_for_Event-Based_Single_Object_Tracking_CVPR_2022_paper.html)] \n  [[code](https:\u002F\u002Fgithub.com\u002FJee-King\u002FCVPR2022_STNet)]\n  \n- **VTUAV:** Pengyu Zhang, Jie Zhao, Dong Wang, Huchuan Lu, Xiang Ruan.\u003Cbr \u002F>\n  \"Visible-Thermal UAV Tracking: A Large-Scale Benchmark and New Baseline.\" CVPR (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.04120)] \n  [[code](https:\u002F\u002Fzhang-pengyu.github.io\u002FDUT-VTUAV\u002F)]\n  \n- **UAVMOT:** Shuai Liu, Xin Li, Huchuan Lu, You He.\u003Cbr \u002F>\n  \"Multi-Object Tracking Meets Moving UAV.\" CVPR (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002Fxxxx.xxxx)] \n  [[code](https:\u002F\u002Fgithub.com\u002FLiuShuaiyr\u002FUAVMOT)]\n  \n- **GTR:** Xingyi Zhou, Tianwei Yin, Vladlen Koltun, Phillip Krähenbühl.\u003Cbr \u002F>\n  \"Global Tracking Transformers.\" CVPR (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.13250)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fxingyizhou\u002FGTR)]\n  \n- **GTELT:** Zikun Zhou, Jianqiu Chen, Wenjie Pei, Kaige Mao, Hongpeng Wang, Zhenyu He.\u003Cbr \u002F>\n  \"Global Tracking via Ensemble of Local Trackers.\" CVPR (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.16092)] \n  [[code](https:\u002F\u002Fgithub.com\u002FZikunZhou\u002FGTELT)]\n  \n- **RBO:** Feng Tang, Qiang Ling.\u003Cbr \u002F>\n  \"Ranking-based Siamese Visual Tracking.\" CVPR (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2205.11761.pdf)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fsansanfree\u002FRBO)]\n  \n- **ULAST:** Qiuhong Shen, Lei Qiao, Jinyang Guo, Peixia Li, Xin Li, Bo Li, Weitao Feng, Weihao Gan, Wei Wu, Wanli Ouyang.\u003Cbr \u002F>\n  \"Unsupervised Learning of Accurate Siamese Tracking.\" CVPR (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.01475)] \n  [[code](https:\u002F\u002Fgithub.com\u002FFlorinShum\u002FULAST)]\n  \n- **UDAT:** Junjie Ye, Changhong Fu, Guangze Zheng, Danda Pani Paudel, Guang Chen.\u003Cbr \u002F>\n  \"Unsupervised Domain Adaptation for Nighttime Aerial Tracking.\" CVPR (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.10541)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fvision4robotics\u002FUDAT)]\n  \n- **M2Track:** Chaoda Zheng, Xu Yan, Haiming Zhang, Baoyuan Wang, Shenghui Cheng, Shuguang Cui, Zhen Li.\u003Cbr \u002F>\n  \"Beyond 3D Siamese Tracking: A Motion-Centric Paradigm for 3D Single Object Tracking in Point Clouds.\" CVPR (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.01730)] \n  [[code](https:\u002F\u002Fgithub.com\u002FGhostish\u002FOpen3DSOT)]\n  \n\n### IJCAI 2022\n\n- **InBN:** Mingzhe Guo, Zhipeng Zhang, Heng Fan, Liping Jing, Yilin Lyu, Bing Li, Weiming Hu.\u003Cbr \u002F>\n  \"Learning Target-aware Representation for Visual Tracking via Informative Interactions.\" IJCAI (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2201.02526)] \n  [[code](https:\u002F\u002Fxxxxxxx)]\n  \n- **SparseTT:** Zhihong Fu, Zehua Fu, Qingjie Liu, Zehua Fu, Yunhong Wang.\u003Cbr \u002F>\n  \"SparseTT: Visual Tracking with Sparse Transformers.\" IJCAI (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.03776)] \n  [[code](https:\u002F\u002Fgithub.com\u002Ffzh0917\u002FSparseTT)]\n  \n- **HybTransT:** Ilchae Jung, Minji Kim, Eunhyeok Park, Bohyung Han.\u003Cbr \u002F>\n  \"Online Hybrid Lightweight Representations Learning: Its Application to Visual Tracking.\" IJCAI (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.11179)] \n  [[code](https:\u002F\u002Fgithub.com\u002Ffzh0917\u002FSparseTT)]\n  \n  \n### MICCAI 2022\n\n- **TLT:** Wen Tang, Han Kang, Haoyue Zhang, Pengxin Yu, Corey W. Arnold, Rongguo Zhang.\u003Cbr \u002F>\n  \"Transformer Lesion Tracker.\" MICCAI (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.06252)] \n  [[code](https:\u002F\u002Fgithub.com\u002FTangWen920812\u002FTLT)]\n  \n  \n### ArXiv 2022\n \n- **ProTrack:** Jinyu Yang, Zhe Li, Feng Zheng, Aleš Leonardis, Jingkuan Song.\u003Cbr \u002F>\n  \"Prompting for Multi-Modal Tracking.\" ACM MM (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.14571)] \n  [[code](https:\u002F\u002F)]\n  \n- **GATransT:** Libo Wang, Si Chen, Zhen Wang, Da-Han Wang, Shunzhi Zhu.\u003Cbr \u002F>\n  \"Graph Attention Transformer Network for Robust Visual Tracking.\" ICONIP (2022).\n  [[paper](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-981-99-1639-9_14)] \n  [[code]()]\n\n- **SiamTDN:** Yanjie Liang, Penghui Zhao, Yifei Hao, Hanzi Wang.\u003Cbr \u002F>\n  \"Siamese Template Diffusion Networks for Robust Visual Tracking.\" ICME (2022).\n  [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9859929)] \n  [[code]()]\n  \n- **TAT:** Kaihao Lan, Haobo Jiang, Jin Xie.\u003Cbr \u002F>\n  \"Temporal-aware Siamese Tracker: Integrate Temporal Context for 3D Object Tracking.\" ACCV (2022).\n  [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FACCV2022\u002Fhtml\u002FLan_Temporal-aware_Siamese_Tracker_Integrate_Temporal_Context_for_3D_Object_Tracking_ACCV_2022_paper.html)] \n  [[code](https:\u002F\u002Fgithub.com\u002Ftqsdyy\u002FTAT)]\n  \n - **COESOT:** Chuanming Tang, Xiao Wang, Ju Huang, Bo Jiang, Lin Zhu, Jianlin Zhang, Yaowei Wang, Yonghong Tian.\u003Cbr \u002F>\n  \"Revisiting Color-Event based Tracking: A Unified Network, Dataset, and Metric.\" ArXiv (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.11010)] \n  [[code](COESOT)]\n  \n- **WATB:** Fasheng Wang, Ping Cao, Fu Li, Xing Wang, Bing He, Fuming Sun.\u003Cbr \u002F>\n  \"WATB: Wild Animal Tracking Benchmark.\" IJCV (2022).\n  [[paper](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002Fs11263-022-01732-3.pdf?pdf=button)] \n  [[code](https:\u002F\u002Fw-1995.github.io\u002F)]\n  \n- **UAV2UAV:** Yong Wang, Zirong Huang, Robert Laganière, Huanlong Zhang, Lu Ding.\u003Cbr \u002F>\n  \"A UAV to UAV tracking benchmark.\" KBS (2023).\n  [[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS095070512201293X)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fhapless19\u002FUAV2UAV-dataset)]\n  \n- **UOT100:** K. Panetta, L. Kezebou, V. Oludare, and S. S. Agaian.\u003Cbr \u002F>\n  \"Comprehensive Underwater Object Tracking Benchmark Dataset and Underwater Image Enhancement With GAN.\" IEEE JOE (2022).\n  [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9499961)] \n  [[code](https:\u002F\u002Fwww.kaggle.com\u002Fdatasets\u002Flandrykezebou\u002Fuot100-underwater-object-tracking-dataset)]\n  \n- **NeighborTrack:** Yu-Hsi Chen, Chien-Yao Wang, Cheng-Yun Yang, Hung-Shuo Chang, Youn-Long Lin, Yung-Yu Chuang, Hong-Yuan Mark Liao.\u003Cbr \u002F>\n  \"NeighborTrack: Improving Single Object Tracking by Bipartite Matching with Neighbor Tracklets.\" ArXiv (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2211.06663.pdf)] \n  [[code](https   )]\n  \n- **MTTSiam:** Ali Sekhavati, Won-Sook Lee.\u003Cbr \u002F>\n  \"Multi-Template Temporal Siamese Network for Long-Term Object Tracking.\" ArXiv (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.13812)] \n  [[code](https:\u002F\u002Fgithub.com\u002FAliGreen0\u002FMTTSiam)]\n  \n- **PruningInTracking:** Saksham Aggarwal, Taneesh Gupta, Pawan Kumar Sahu, Arnav Chavan, Rishabh Tiwari, Dilip K. Prasad, Deepak K. Gupta.\u003Cbr \u002F>\n  \"On designing light-weight object trackers through network pruning: Use CNNs or transformers?.\" ArXiv (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.13769)] \n  [[code](https   )]\n  \n- **ProContEXT:** Jin-Peng Lan, Zhi-Qi Cheng, Jun-Yan He, Chenyang Li, Bin Luo, Xu Bao, Wangmeng Xiang, Yifeng Geng, Xuansong Xie.\u003Cbr \u002F>\n  \"ProContEXT: Exploring Progressive Context Transformer for Tracking.\" ArXiv (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.15511)] \n  [[code](https:\u002F\u002Fdrive.google.com\u002Fdrive\u002Ffolders\u002F18kHdBNEwvbk8S4-mwHaI-mw5w6cK-pyY?usp=sharing)]\n  \n- **TSFMO:** Zhewen Zhang, Fuliang Wu, Yuming Qiu, Jingdong Liang, Shuiwang Li.\u003Cbr \u002F>\n  \"Tracking Small and Fast Moving Objects: A Benchmark.\" ArXiv (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2209.04284)] \n  [[code](https:\u002F\u002Fgithub.com\u002FCodeOfGithub\u002FS-KeepTrack)]\n  \n- **SFTransT:** Chuanming Tang, Xiao Wang, Yuanchao Bai, Zhe Wu, Jianlin Zhang, Yongmei Huang.\u003Cbr \u002F>\n  \"Learning Spatial-Frequency Transformer for Visual Object Tracking.\" ArXiv (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.08829)] \n  [[code](https:\u002F\u002Fgithub.com\u002FTchuanm\u002FSFTransT.git)]\n  \n- **DMTracker:** Shang Gao, Jinyu Yang, Zhe Li, Feng Zheng, Aleš Leonardis, Jingkuan Song.\u003Cbr \u002F>\n  \"Learning Dual-Fused Modality-Aware Representations for RGBD Tracking.\" ArXiv (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.03055)] \n  [[code](https:\u002F\u002Fgithub.com\u002FShangGaoG\u002FDMTracker)]\n  \n- **AVisT:** Mubashir Noman, Wafa Al Ghallabi, Daniya Najiha, Christoph Mayer, Akshay Dudhane, Martin Danelljan, Hisham Cholakkal, Salman Khan, Luc Van Gool, Fahad Shahbaz Khan.\u003Cbr \u002F>\n  \"AVisT: A Benchmark for Visual Object Tracking in Adverse Visibility.\" ArXiv (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.06888)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fvisionml\u002Fpytracking)]\n\n- **RGBDReview:** Jinyu Yang, Zhe Li, Song Yan, Feng Zheng, Aleš Leonardis, Joni-Kristian Kämäräinen, Ling Shao.\u003Cbr \u002F>\n  \"RGBD Object Tracking: An In-depth Review.\" ArXiv (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.14134)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fmemoryunreal\u002FRGBD-tracking-review)]\n  \n- **TOT\u002FMKDNet:** Yabin Zhu, Chenglong Li, Yao Liu, Xiao Wang, Jin Tang, Bin Luo, Zhixiang Huang.\u003Cbr \u002F>\n  \"Tiny Object Tracking: A Large-scale Dataset and A Baseline.\" ArXiv (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.05659)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fmmic-lcl\u002FDatasets-and-benchmark-code)]\n  \n- **WebUAV-3M:** Chunhui Zhang, Guanjie Huang, Li Liu, Shan Huang, Yinan Yang, Yuxuan Zhang, Xiang Wan, Shiming Ge.\u003Cbr \u002F>\n  \"WebUAV-3M: A Benchmark Unveiling the Power of Million-Scale Deep UAV Tracking.\" ArXiv (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2201.07425)] \n  [[code](https:\u002F\u002Fgithub.com\u002F983632847\u002FWebUAV-3M)]\n  \n- **SiamTracking4UAV:** Changhong Fu, Kunhan Lu, Guangze Zheng, Junjie Ye, Ziang Cao, Bowen Li.\u003Cbr \u002F>\n  \"Siamese Object Tracking for Unmanned Aerial Vehicle: A Review and Comprehensive Analysis.\" ArXiv (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.04281)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fvision4robotics\u002FSiameseTracking4UAV)]\n  \n- **SOTSurvey:** Zahra Soleimanitaleb, Mohammad Ali Keyvanrad.\u003Cbr \u002F>\n  \"Single Object Tracking: A Survey of Methods, Datasets, and Evaluation Metrics.\" ArXiv (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2201.13066)] \n  \n- **SOTRearch:** Ruize Han, Wei Feng, Qing Guo, Qinghua Hu.\u003Cbr \u002F>\n  \"Single Object Tracking Research: A Survey.\" ArXiv (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.11410)] \n  \n- **VOTSurvey:** Fei Chen, Xiaodong Wang, Yunxiang Zhao, Shaohe Lv, Xin Niu.\u003Cbr \u002F>\n  \"Visual object tracking: A survey.\" CVIU (2022).\n  [[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS1077314222001011?dgcid=author)] \n    \n- **HCAT:** Xin Chen, Dong Wang, Dongdong Li, Huchuan Lu.\u003Cbr \u002F>\n  \"Efficient Visual Tracking via Hierarchical Cross-Attention Transformer.\" ArXiv (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.13537)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fchenxin-dlut\u002FHCAT)]\n  \n- **TransT-M:** Xin Chen, Bin Yan, Jiawen Zhu, Dong Wang, Huchuan Lu.\u003Cbr \u002F>\n  \"High-Performance Transformer Tracking.\" ArXiv (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.13533)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fchenxin-dlut\u002FTransT-M)]\n  \n- **RGBDT:** Jinyu Yang, Zhe Li, Song Yan, Feng Zheng, Aleš Leonardis, Joni-Kristian Kämäräinen, Ling Shao.\u003Cbr \u002F>\n  \"RGBD Object Tracking: An In-depth Review.\" ArXiv (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.14134)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fmemoryunreal\u002FRGBD-tracking-review)]\n  \n- **DST:** Yao Sui, Guanghui Wang, Li Zhang.\u003Cbr \u002F>\n  \"In Defense of Subspace Tracker: Orthogonal Embedding for Visual Tracking.\" ArXiv (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.07927)] \n  [[code](https:\u002F\u002Fxxxxxxx)]\n  \n- **DUT-Anti-UAV:** Jie Zhao, Jingshu Zhang, Dongdong Li, Dong Wang.\u003Cbr \u002F>\n  \"Vision-based Anti-UAV Detection and Tracking.\" TITS (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.10851)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fwangdongdut\u002FDUT-Anti-UAV)]\n  \n- **CoCoLoT:** Matteo Dunnhofer, Christian Micheloni.\u003Cbr \u002F>\n  \"CoCoLoT: Combining Complementary Trackers in Long-Term Visual Tracking.\" ICPR (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.04261)] \n  [[code](https:\u002F\u002Fxxxxxxx)]\n  \n- **EUSA:** Siao Liu, Zhaoyu Chen, Wei Li, Jiwei Zhu, Jiafeng Wang, Wenqiang Zhang, Zhongxue Gan.\u003Cbr \u002F>\n  \"Efficient universal shuffle attack for visual object tracking.\" ICASSP (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.06898)] \n  [[code](https:\u002F\u002Fxxxxxxx)]\n  \n- **ITB:** Xin Li, Qiao Liu, Wenjie Pei, Qiuhong Shen, Yaowei Wang, Huchuan Lu, Ming-Hsuan Yang.\u003Cbr \u002F>\n  \"An Informative Tracking Benchmark.\" ArXiv (2021).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.06467)] \n  [[code](https:\u002F\u002Fgithub.com\u002FXinLi-zn\u002FInformative-tracking-benchmark)]\n  \n- **VisEvent:** Xiao Wang, Jianing Li, Lin Zhu, Zhipeng Zhang, Zhe Chen, Xin Li, Yaowei Wang, Yonghong Tian, Feng Wu.\u003Cbr \u002F>\n  \"VisEvent: Reliable Object Tracking via Collaboration of Frame and Event Flows.\" ArXiv (2021).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2108.05015)] \n  [[code](https:\u002F\u002Fsites.google.com\u002Fview\u002Fviseventtrack\u002F)]\n  \n- **TrTr:** Moju Zhao, Kei Okada, Masayuki Inaba.\u003Cbr \u002F>\n  \"TrTr: Visual Tracking with Transformer.\" ArXiv (2021).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2105.03817.pdf)] \n  [[code](https:\u002F\u002Fgithub.com\u002Ftongtybj\u002FTrTr)]\n\n- **TS-RCN:** Ning Zhang, Jingen Liu, Ke Wang, Dan Zeng, Tao Mei.\u003Cbr \u002F>\n  \"Robust Visual Object Tracking with Two-Stream Residual Convolutional Networks.\" ArXiv (2020).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2005.06536.pdf)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fxxxxx\u002Fxxxx)]\n  \n- **FCOT:** Yutao Cui, Cheng Jiang, Limin Wang, Gangshan Wu.\u003Cbr \u002F>\n  \"Fully Convolutional Online Tracking.\" ArXiv (2020).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2004.07109)] \n  [[code](https:\u002F\u002Fgithub.com\u002FMCG-NJU\u002FFCOT)]\n  \n  \n### AAAI 2022\n\n- **HDN:** Xinrui Zhan, Yueran Liu, jianke Zhu, Yang Li.\u003Cbr \u002F>\n  \"Homography Decomposition Networks for Planar Object Tracking.\" AAAI (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2112.07909.pdf)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fzhanxinrui\u002FHDN)]\n\n- **MArMOT:** Chenglong Li, Tianhao Zhu, Lei Liu, Xiaonan Si, Zilin Fan, Sulan Zhai.\u003Cbr \u002F>\n  \"Cross-Modal Object Tracking: Modality-Aware Representations and a Unified Benchmark.\" AAAI (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.04264)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fxxxxx\u002FMArMOT)]\n\n- **APFNet:** Yun Xiao, Mengmeng Yang, Chenglong Li, Lei Liu, Jin Tang.\u003Cbr \u002F>\n  \"Attribute-based Progressive Fusion Network for RGBT Tracking.\" AAAI (2022).\n  [[paper](https:\u002F\u002Fgithub.com\u002Fyangmengmeng1997\u002FAPFNet\u002Ftree\u002Fmain\u002FPaper)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fyangmengmeng1997\u002FAPFNet)]\n\n- **TAV:** Tahar Allouche, Jerome Lang, Florian Yger.\u003Cbr \u002F>\n  \"Truth-Tracking via Approval Voting: Size Matters.\" AAAI (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.04387)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fzhanxinrui\u002FHDN)]\n  \n  \n### ICLR 2022\n\n- **FSBA:** Yiming Li, Haoxiang Zhong, Xingjun Ma, Yong Jiang, Shu-Tao Xia.\u003Cbr \u002F>\n  \"Few-Shot Backdoor Attacks on Visual Object Tracking.\" ICLR (2022).\n  [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=qSV5CuSaK_a)] \n  [[code](https:\u002F\u002Fwww.dropbox.com\u002Fs\u002Fnfg7en8azc1cvz3\u002Fcodes_FSBA_ICLR22.zip?dl=0)]\n  \n  \n### ICRA 2022\n\n- **Ad2Attack:** Changhong Fu, Sihang Li, Xinnan Yuan, Junjie Ye, Ziang Cao, Fangqiang Ding.\u003Cbr \u002F>\n  \"Ad2Attack: Adaptive Adversarial Attack on Real-Time UAV Tracking.\" ICRA (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.01516)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fvision4robotics\u002FAd2Attack)]\n \n- **SCT:** Junjie Ye, Changhong Fu, Ziang Cao, Shan An, Guangze Zheng, Bowen Li.\u003Cbr \u002F>\n  \"Tracker Meets Night: A Transformer Enhancer for UAV Tracking.\" ICRA\u002FRAL (2022).\n  [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9696362)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fvision4robotics\u002FSCT)]\n\n- **SiamX:** Huajian Huang, Sai-Kit Yeung.\u003Cbr \u002F>\n  \"SiamX: An Efficient Long-term Tracker Using Cross-level Feature Correlation and Adaptive Tracking Scheme.\" ICRA (2022).\n  [[paper](https:\u002F\u002Fhuajianup.github.io\u002Fresearch\u002FSiamX\u002FSiamX_ICRA2022_final.pdf)] \n  [[code](https:\u002F\u002Fhuajianup.github.io\u002Fresearch\u002FSiamX\u002F)]\n \n \n### WACV 2022\n\n- **SiamTPN:** Daitao Xing, Nikolaos Evangeliou, Athanasios Tsoukalas, Anthony Tzes.\u003Cbr \u002F>\n  \"Siamese Transformer Pyramid Networks for Real-Time UAV Tracking.\" WACV (2022).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2110.08822.pdf)] \n  [[code](https:\u002F\u002Fgithub.com\u002FRISC-NYUAD\u002FSiamTPNTracker)]\n  \n### ICCV 2021\n\n- **STARK:** Bin Yan, Houwen Peng, Jianlong Fu, Dong Wang, Huchuan Lu.\u003Cbr \u002F>\n  \"Learning Spatio-Temporal Transformer for Visual Tracking.\" ICCV (2021).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2103.17154.pdf)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fresearchmm\u002FStark)]\n  \n- **AutoMatch:**  Zhang Zhipeng, Liu Yihao, Wang Xiao, Li Bing, Hu Weiming. \u003Cbr \u002F>\n  \"Learn to Match: Automatic Matching Network Design for Visual Tracking.\" ICCV (2021).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2108.00803.pdf)]\n  [[code](https:\u002F\u002Fgithub.com\u002FJudasDie\u002FSOTS)]\n  \n- **DDT:** Bin Yu, Ming Tang, Linyu Zheng, Guibo Zhu, Jinqiao Wang.\u003Cbr \u002F>\n  \"High-Performance Discriminative Tracking with Transformers.\" ICCV (2021).\n  [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FYu_High-Performance_Discriminative_Tracking_With_Transformers_ICCV_2021_paper.pdf)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fxxxx\u002Fxxxx)]\n  \n- **HiFT:**  Ziang Cao, Changhong Fu, Junjie Ye, Bowen Li, Yiming Li. \u003Cbr \u002F>\n  \"HiFT: Hierarchical Feature Transformer for Aerial Tracking.\" ICCV (2021).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2108.00202.pdf)]\n  [[code](https:\u002F\u002Fgithub.com\u002Fvision4robotics\u002FHiFT)]\n  \n- **DualTFR:**  Fei Xie, Chunyu Wang, Guangting Wang, Wankou Yang, Wenjun Zeng. \u003Cbr \u002F>\n  \"Learning Tracking Representations via Dual-Branch Fully Transformer Networks.\" ICCVW (2021).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.02571)]\n  [[code](https:\u002F\u002Fgithub.com\u002Fphiphiphi31\u002FDualTFR)]\n  \n- **DMB:**  Fei Xie, Wankou Yang, Kaihua Zhang, Bo Liu, Wanli Xue, Wangmeng Zuo. \u003Cbr \u002F> \n  \"Learning Spatio-Appearance Memory Network for High-Performance Visual Tracking.\" ICCVW (2021).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2009.09669.pdf)]\n  [[code](https:\u002F\u002Fgithub.com\u002Fphiphiphi31\u002FDMB)]\n\n- **KeepTrack:** Christoph Mayer, Martin Danelljan, Danda Pani Paudel, Luc Van Gool.\u003Cbr \u002F>\n  \"Learning Target Candidate Association to Keep Track of What Not to Track.\" ICCV (2021).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.16556)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fvisionml\u002Fpytracking)]\n\n- **SAOT:** Zikun Zhou, Wenjie Pei, Xin Li, Hongpeng Wang, Feng Zheng, Zhenyu He. \u003Cbr \u002F>\n  \"Saliency-Associated Object Tracking.\" ICCV (2021).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2108.03637.pdf)]\n  [[code](https:\u002F\u002Fgithub.com\u002FZikunZhou\u002FSAOT)]\n \n- **MLVSNet:** Zhoutao Wang, Qian Xie, Yu-Kun Lai, Jing Wu, Kun Long , Jun Wang. \u003Cbr \u002F>\n  \"MLVSNet: Multi-level Voting Siamese Network for 3D Visual Tracking.\" ICCV (2021).\n  [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FWang_MLVSNet_Multi-Level_Voting_Siamese_Network_for_3D_Visual_Tracking_ICCV_2021_paper.pdf)]\n  [[code](https:\u002F\u002Fgithub.com\u002FCodeWZT\u002FMLVSNet)]\n  \n - **EFTrack:** Jiqing Zhang, Xin Yang, Yingkai Fu, Xiaopeng Wei, Baocai Yin, Bo Dong. \u003Cbr \u002F>\n  \"Object Tracking by Jointly Exploiting Frame and Event Domain.\" ICCV (2021).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2109.09052)]\n  [[code](https:\u002F\u002Fgithub.com\u002FJee-King\u002FICCV2021_Event_Frame_Tracking)]\n  \n - **Box2Mask:** Bin Zhao, Goutam Bhat, Martin Danelljan, Luc Van Gool, Radu Timofte. \u003Cbr \u002F>\n  \"Generating Masks from Boxes by Mining Spatio-Temporal Consistencies in Videos.\" ICCV (2021).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2101.02196)]\n  [[code](https:\u002F\u002Fgithub.com\u002Fvisionml\u002Fpytracking)]\n  \n- **DepthTrack:** Song Yan, Jinyu Yang, Jani Käpylä, Feng Zheng, Aleš Leonardis, Joni-Kristian Kämäräinen. \u003Cbr \u002F>\n  \"DepthTrack : Unveiling the Power of RGBD Tracking.\" ICCV (2021).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2108.13962)]\n  [[code](https:\u002F\u002Fgithub.com\u002Fxiaozai\u002FDeT)]\n  \n- **USOT:** Jilai Zheng, Chao Ma, Houwen Peng, Xiaokang Yang. \u003Cbr \u002F>\n  \"Learning to Track Objects from Unlabeled Videos.\" ICCV (2021).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2108.12711)]\n  [[code](https:\u002F\u002Fgithub.com\u002FVISION-SJTU\u002FUSOT)]\n  \n- **TOTB:** Heng Fan, Halady Akhilesha Miththanthaya, Harshit, Siranjiv Ramana Rajan, Xiaoqiong Liu, Zhilin Zou, Yuewei Lin, Haibin Ling. \u003Cbr \u002F>\n  \"Transparent Object Tracking Benchmark.\" ICCV (2021).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2011.10875)]\n  [[code](https:\u002F\u002Fhengfan2010.github.io\u002Fprojects\u002FTOTB\u002F)]\n  \n- **TREK-150:** Matteo Dunnhofer, Antonino Furnari, Giovanni Maria Farinella, Christian Micheloni. \u003Cbr \u002F>\n  \"Is First Person Vision Challenging for Object Tracking?.\" ICCVW (2021).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2108.13665)]\n  [[code](https:\u002F\u002Fmachinelearning.uniud.it\u002Fdatasets\u002Ftrek150\u002F)]\n  [[toolkit](https:\u002F\u002Fgithub.com\u002Fmatteo-dunnhofer\u002FTREK-150-toolkit)]\n  \n- **VASR:** Kenan Dai, Jie Zhao, Lijun Wang, Dong Wang, Jianhua Li, Huchuan Lu, Xuesheng Qian, Xiaoyun Yang. \u003Cbr \u002F>\n  \"Video Annotation for Visual Tracking via Selection and Refinement.\" ICCV (2021).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2108.03821.pdf)]\n  [[code](https:\u002F\u002Fgithub.com\u002FDaikenan\u002FVASR)]\n  \n- **BAT:** Chaoda Zheng, Xu Yan, Jiantao Gao, Weibing Zhao, Wei Zhang, Zhen Li, Shuguang Cui. \u003Cbr \u002F>\n  \"Box-Aware Feature Enhancement for Single Object Tracking on Point Clouds.\" ICCV (2021).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2108.04728.pdf)]\n  [[code](https:\u002F\u002Fgithub.com\u002FGhostish\u002FBAT)]\n  \n- **ABA:** Qing Guo, Ziyi Cheng, Felix Juefei-Xu, Lei Ma, Xiaofei Xie, Yang Liu, Jianjun Zhao. \u003Cbr \u002F>\n  \"Learning to Adversarially Blur Visual Object Tracking.\" ICCV (2021).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2107.12085)]\n  [[code](https:\u002F\u002Fgithub.com\u002Ftsingqguo\u002FABA)]\n  \n  \n### CVPR 2021\n\n- **TransT:** Xin Chen, Bin Yan, Jiawen Zhu, Dong Wang, Xiaoyun yang, Huchuan Lu. \u003Cbr \u002F>\n  \"Transformer Tracking.\" CVPR (2021).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.15436)]\n  [[code](https:\u002F\u002Fgithub.com\u002Fchenxin-dlut\u002FTransT)]\n  \n- **Alpha-Refine:** Bin Yan, Xinyu Zhang, Dong Wang, Huchuan Lu, Xiaoyun Yang. \u003Cbr \u002F>\n  \"Alpha-Refine: Boosting Tracking Performance by Precise Bounding Box Estimation.\" CVPR (2021).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1911.12836.pdf)]\n  [[code](https:\u002F\u002Fgithub.com\u002FMasterBin-IIAU\u002FAlphaRefine)]\n  \n- **LightTrack:** Bin Yan, Houwen Peng, Kan Wu, Dong Wang, Jianlong Fu, Huchuan Lu. \u003Cbr \u002F>\n  \"LightTrack: Finding Lightweight Neural Networks for Object Tracking via One-Shot Architecture Search.\" CVPR (2021).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.14545)]\n  [[code](https:\u002F\u002Fgithub.com\u002Fcvpr-2021\u002Flighttrack)]\n  \n- **TrTrack:** Ning Wang, Wengang Zhou, Jie Wang, Houqiang Li. \u003Cbr \u002F>\n  \"Transformer Meets Tracker: Exploiting Temporal Context for Robust Visual Tracking.\" CVPR (2021).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2103.11681.pdf)]\n  [[code](https:\u002F\u002Fgithub.com\u002F594422814\u002FTransformerTrack)]\n  \n- **STMTrack:** Zhihong Fu, Qingjie Liu, Zehua Fu, Yunhong Wang. \u003Cbr \u002F>\n  \"STMTrack: Template-free Visual Tracking with Space-time Memory Networks.\" CVPR (2021).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.00324)]\n  [[code](https:\u002F\u002Fgithub.com\u002Ffzh0917\u002FSTMTrack)]\n  \n- **SiamGAT:** Dongyan Guo, Yanyan Shao, Ying Cui, Zhenhua Wang, Liyan Zhang, Chunhua Shen.\u003Cbr \u002F>\n  \"Graph Attention Tracking.\" CVPR (2021).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2011.11204)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fohhhyeahhh\u002FSiamGAT)]\n  \n- **SiamACM:** Wencheng Han, Xingping Dong, Fahad Shahbaz Khan, Ling Shao, Jianbing Shen.\u003Cbr \u002F>\n  \"Learning to Fuse Asymmetric Feature Maps in Siamese Trackers.\" CVPR (2021).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2012.02776.pdf)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fwencheng256\u002FSiamBAN-ACM)]\n  \n- **PST:** Gunhee Nam, Miran Heo, Seoung Wug Oh, Joon-Young Lee, Seon Joo Kim.\u003Cbr \u002F>\n  \"Polygonal Point Set Tracking.\" CVPR (2021).\n  [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fpapers\u002FNam_Polygonal_Point_Set_Tracking_CVPR_2021_paper.pdf)] \n  [[code](https:\u002F\u002Fgithub.com\u002FPST)]\n  \n- **PUL:** Qiangqiang Wu, Jia Wan, Antoni B. Chan. \u003Cbr \u002F>\n  \"Progressive Unsupervised Learning for Visual Object Tracking.\" CVPR (2021).\n  [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fpapers\u002FWu_Progressive_Unsupervised_Learning_for_Visual_Object_Tracking_CVPR_2021_paper.pdf)]\n  [[code](https:\u002F\u002Fgithub.com\u002FPUL)]\n  \n- **CapsuleRRT:** Ding Ma, Xiangqian Wu. \u003Cbr \u002F>\n  \"CapsuleRRT: Relationships-Aware Regression Tracking via Capsules.\" CVPR (2021).\n  [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fpapers\u002FMa_CapsuleRRT_Relationships-Aware_Regression_Tracking_via_Capsules_CVPR_2021_paper.pdf)]\n  [[code](https:\u002F\u002Fgithub.com\u002FCapsuleRRT)]\n  \n- **Semi-Track:** Yang Fu, Sifei Liu, Umar Iqbal, Shalini De Mello, Humphrey Shi, Jan Kautz.\u003Cbr \u002F>\n  \"Learning to Track Instances without Video Annotations.\" CVPR (2021).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2104.00287.pdf)] \n  [[code](https:\u002F\u002Foasisyang.github.io\u002Fprojects\u002Fsemi-track\u002Findex.html)]\n\n- **RE-Siam:** Deepak K. Gupta, Devanshu Arya, Efstratios Gavves. \u003Cbr \u002F>\n  \"Rotation Equivariant Siamese Networks for Tracking.\" CVPR (2021).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2012.13078)]\n  [[code](https:\u002F\u002Fgithub.com\u002Fdkgupta90\u002Fre-siamnet)]\n  \n- **SiamNLP:** Qi Feng, Vitaly Ablavsky, Qinxun Bai, Stan Sclaroff. \u003Cbr \u002F>\n  \"Siamese Natural Language Tracker: Tracking by Natural Language Descriptions with Siamese Trackers.\" CVPR (2021).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1912.02048v2)]\n  [[code](https:\u002F\u002Fgithub.com\u002Ffredfung007\u002Fsnlt)]\n  \n- **LangTrackBenchmark:** Xiao Wang, Xiujun Shu, Zhipeng Zhang, Bo Jiang, Yaowei Wang, Yonghong Tian, Feng Wu. \u003Cbr \u002F>\n  \"Towards More Flexible and Accurate Object Tracking with Natural Language: Algorithms and Benchmark.\" CVPR (2021).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2103.16746.pdf)]\n  [[code](https:\u002F\u002Fsites.google.com\u002Fview\u002Flangtrackbenchmark\u002F)]\n  \n- **DroneCrowd:** Longyin Wen, Dawei Du, Pengfei Zhu, Qinghua Hu, Qilong Wang, Liefeng Bo, Siwei Lyu. \u003Cbr \u002F>\n  \"Detection, Tracking, and Counting Meets Drones in Crowds: A Benchmark.\" CVPR (2021).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2105.02440.pdf)]\n  [[code](https:\u002F\u002Fgithub.com\u002FVisDrone\u002FDroneCrowd)]\n  \n- **DMTrack:** Zikai Zhang, Bineng Zhong, Shengping Zhang, Zhenjun Tang, Xin Liu, Zhaoxiang Zhang. \u003Cbr \u002F>\n  \"Distractor-Aware Fast Tracking via Dynamic Convolutions and MOT Philosophy.\" CVPR (2021).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.12041)]\n  [[code](https:\u002F\u002Fgithub.com\u002Fhqucv\u002Fdmtrack)]\n  \n- **LF-Siam:** Siyuan Cheng, Bineng Zhong, Guorong Li, Xin Liu, Zhenjun Tang, Xianxian Li, Jing Wang. \u003Cbr \u002F>\n  \"Learning to Filter: Siamese Relation Network for Robust Tracking.\" CVPR (2021).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.00829)]\n  [[code](https:\u002F\u002Fgithub.com\u002Fhqucv\u002Fsiamrn)]\n  \n- **IoU Attack:** Shuai Jia, Yibing Song, Chao Ma, Xiaokang Yang. \u003Cbr \u002F>\n  \"IoU Attack: Towards Temporally Coherent Black-Box Adversarial Attack for Visual Object Tracking.\" CVPR (2021).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.14938)]\n  [[code](https:\u002F\u002Fgithub.com\u002FVISION-SJTU\u002FIoUattack)]\n  \n- **MeanShift++:** Jennifer Jang, Heinrich Jiang. \u003Cbr \u002F>\n  \"MeanShift++: Extremely Fast Mode-Seeking With Applications to Segmentation and Object Tracking.\" CVPR (2021).\n  [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fpapers\u002FJang_MeanShift_Extremely_Fast_Mode-Seeking_With_Applications_to_Segmentation_and_Object_CVPR_2021_paper.pdf)]\n  [[code](https:\u002F\u002Fgithub.com\u002FMeanShift++)]\n  \n  \n### IROS 2021\n\n- **CRACT:** Heng Fan, Haibin Ling.\u003Cbr \u002F>\n  \"CRACT: Cascaded Regression-Align-Classification for Robust Visual Tracking.\" IROS (2020).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2011.12483)] \n\n- **SiamAPN++:** Ziang Cao, Changhong Fu, Junjie Ye, Bowen Li, Yiming Li.\u003Cbr \u002F>\n  \"SiamAPN++: Siamese Attentional Aggregation Network for Real-Time UAV Tracking.\" IROS (2021).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2106.08816.pdf)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fvision4robotics\u002FSiamAPN)]\n\n- **DarkLighter:** Junjie Ye, Changhong Fu, Guangze Zheng, Ziang Cao, Bowen Li.\u003Cbr \u002F>\n  \"DarkLighter: Light Up the Darkness for UAV Tracking.\" IROS (2021).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2107.14389.pdf)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fvision4robotics\u002FDarkLighter)]\n  \n- **PTT:** Jiayao Shan, Sifan Zhou, Zheng Fang, Yubo Cui.\u003Cbr \u002F>\n  \"PTT: Point-Track-Transformer Module for 3D Single Object Tracking in Point Clouds.\" IROS (2021).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2108.06455)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fshanjiayao\u002FPTT)]\n  \n  \n### NeurIPS 2021\n\n- **PathTrack:** Drew Linsley, Girik Malik, Junkyung Kim, Lakshmi Narasimhan Govindarajan, Ennio Mingolla, Thomas Serre.\u003Cbr \u002F>\n  \"Tracking Without Re-recognition in Humans and Machines.\" NeurIPS (2021).\n  [[paper](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2021\u002Fhash\u002Fa2557a7b2e94197ff767970b67041697-Abstract.html)] \n  [[code](http:\u002F\u002Fbit.ly\u002FInTcircuit)]\n  \n- **UniTrack:** Zhongdao Wang, Hengshuang Zhao, Ya-Li Li, Shengjin Wang, Philip Torr, Luca Bertinetto.\u003Cbr \u002F>\n  \"Do Different Tracking Tasks Require Different Appearance Models?.\" NeurIPS (2021).\n  [[paper](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2021\u002Fhash\u002F06997f04a7db92466a2baa6ebc8b872d-Abstract.html)] \n  [[code](https:\u002F\u002Fzhongdao.github.io\u002FUniTrack\u002F)]\n\n  \n### WACV 2021\n\n- **MART:** Heng Fan, Haibin Ling.\u003Cbr \u002F>\n  \"MART: Motion-Aware Recurrent Neural Network for Robust Visual Tracking.\" WACV (2021).\n  [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FWACV2021\u002Fpapers\u002FFan_MART_Motion-Aware_Recurrent_Neural_Network_for_Robust_Visual_Tracking_WACV_2021_paper.pdf)] \n  [[code](https:\u002F\u002Fhengfan2010.github.io\u002Fprojects\u002FMART\u002FMART.htm)]\n  \n- **SiamSE:** Ivan Sosnovik, Artem Moskalev, Arnold Smeulders.\u003Cbr \u002F>\n  \"Scale Equivariance Improves Siamese Tracking.\" WACV (2021).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2007.09115.pdf)] \n  [[code](https:\u002F\u002Fgithub.com\u002FISosnovik\u002FSiamSE)]\n  \n- **TracKlinic:** Heng Fan, Fan Yang, Peng Chu, Yuewei Lin, Lin Yuan, Haibin Ling. \u003Cbr \u002F>\n  \"TracKlinic: Diagnosis of Challenge Factors in Visual Tracking.\" WACV (2021).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1911.07959)]\n  [[code](https:\u002F\u002Fhengfan2010.github.io\u002Fprojects\u002FTracKlinic\u002FTracKlinic.htm.)]\n  \n  \n### AAAI 2021\n\n- **MUG:** Lijun Zhou, Antoine Ledent, Qintao Hu, Ting Liu, Jianlin Zhang, Marius Kloft.\u003Cbr \u002F>\n  \"Model Uncertainty Guides Visual Object Tracking.\" AAAI (2021).\n  [[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F16473)] \n  \n- **UPA:** Li Ding, Yongwei Wang, Kaiwen Yuan, Minyang Jiang, Ping Wang, Hua Huang, Z. Jane Wang. \u003Cbr \u002F>\n  \"Towards Universal Physical Attacks on Single Object Tracking.\" AAAI (2021).\n  [[paper](https:\u002F\u002Fwww.aaai.org\u002FAAAI21Papers\u002FAAAI-2606.DingL.pdf)]\n\n- **PACNet:** Dawei Zhang, Zhonglong Zheng, Riheng Jia, Minglu Li.\u003Cbr \u002F>\n  \"Visual Tracking via Hierarchical Deep Reinforcement Learning.\" AAAI (2021).\n  [[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F16443)] \n  \n- **MSANet:** Xuesong Chen, Canmiao Fu, Feng Zheng, Yong Zhao, Hongsheng Li, Ping Luo, Guo-Jun Qi. \u003Cbr \u002F>\n  \"A Unified Multi-Scenario Attacking Network for Visual Object Tracking.\" AAAI (2021).\n  [[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F16195)]\n  \n\n### Others 2021\n\n- **SiamAPN:** Changhong Fu, Ziang Cao, Yiming Li, Junjie Ye, Chen Feng.\u003Cbr \u002F>\n  \"Onboard Real-Time Aerial Tracking with Efficient Siamese Anchor Proposal Network.\" IEEE TGRS (2021).\n  [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9477413)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fvision4robotics\u002FSiamAPN)]\n  \n- **CCR:** Shiming Ge, Chunhui Zhang, Shikun Li, Dan Zeng, Dacheng Tao.\u003Cbr \u002F>\n  \"Cascaded Correlation Refinement for Robust Deep Tracking.\" IEEE TNNLS (2021).\n  [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9069312)] \n  [[code](https:\u002F\u002Fgithub.com\u002F983632847\u002FCCR)]\n  \n- **CHASE:** Seyed Mojtaba Marvasti-Zadeh, Javad Khaghani, Li Cheng, Hossein Ghanei-Yakhdan, Shohreh Kasaei.\u003Cbr \u002F>\n  \"CHASE: Robust Visual Tracking via Cell-Level Differentiable Neural Architecture Search.\" BMVC (2021).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2107.03463)] \n  \n### ECCV 2020\n\n- **Ocean:** Zhipeng Zhang, Houwen Peng, Jianlong Fu, Bing Li, Weiming Hu. \u003Cbr \u002F>\n  \"Ocean: Object-aware Anchor-free Tracking.\" ECCV (2020).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2006.10721.pdf)]\n  [[code](https:\u002F\u002Fgithub.com\u002Fresearchmm\u002FTracKit)]\n  \n- **KYS:** Goutam Bhat, Martin Danelljan, Luc Van Gool, Radu Timofte. \u003Cbr \u002F>\n  \"Know Your Surroundings: Exploiting Scene Information for Object Tracking.\" ECCV (2020).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2003.11014v1.pdf)]\n  [[code](https:\u002F\u002Fgithub.com\u002Fvisionml\u002Fpytracking)]\n  \n- **PGNet:** Bingyan Liao, Chenye Wang, Yayun Wang, Yaonong Wang, Jun Yin. \u003Cbr \u002F>\n  \"PG-Net: Pixel to Global Matching Network for Visual Tracking.\" ECCV (2020).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2003.11014)]\n  \n- **STN:** Yuan Liu, Ruoteng Li, Yu Cheng, Robby T.Tan, Xiubao Sui. \u003Cbr \u002F>\n  \"Object Tracking using Spatio-Temporal Networks for Future Prediction Location.\" ECCV (2020).\n  [[paper](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2020\u002Fpapers_ECCV\u002Fpapers\u002F123670001.pdf)]\n  \n- **RPT:** Ziang Ma, Linyuan Wang, Haitao Zhang, Wei Lu, Jun Yin. \u003Cbr \u002F>\n  \"RPT: Learning Point Set Representation for Siamese Visual Tracking.\" ECCVW (2020).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2008.03467)]\n  [[code](https:\u002F\u002Fgithub.com\u002Fzhanght021\u002FRPT)]\n  \n- **CenterTrack:** Xingyi Zhou, Vladlen Koltun, and Philipp Krahenbuhl. \u003Cbr \u002F>\n  \"Tracking objects as points.\" ECCV (2020).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2004.01177)]\n  [[code](https:\u002F\u002Fgithub.com\u002Fxingyizhou\u002FCenterTrack)]\n  \n- **PointTracker:** Zhenbo Xu, Wei Zhang, Xiao Tan, Wei Yang, Huan Huang, Shilei Wen, Errui Ding, Liusheng Huang. \u003Cbr \u002F>\n  \"Segment as Points for Efficient Online Multi-Object Tracking and Segmentation.\" ECCV (2020).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2007.01550)]\n  [[code](https:\u002F\u002Fgithub.com\u002FdetectRecog\u002FPointTrack)]\n  \n- **DCFST:** Linyu Zheng, Ming Tang, Yingying Chen, Jinqiao Wang, Hanqing Lu. \u003Cbr \u002F>\n  \"Learning Feature Embeddings for Discriminant Model based Tracking.\" ECCV (2020).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1906.10414)]\n  [[code](https:\u002F\u002Fgithub.com\u002FnoneUmbrella\u002FDCFST)]\n  \n- **CLNet:** Xingping Dong, Jianbing Shen, Ling Shao, Fatih Porikli. \u003Cbr \u002F>\n  \"CLNet: A Compact Latent Network for Fast Adjusting Siamese Tracker.\" ECCV (2020).\n  [[paper](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2020\u002Fpapers_ECCV\u002Fpapers\u002F123650375.pdf)]\n  [[code](https:\u002F\u002Fgithub.com\u002Fxingpingdong\u002FCLNet-tracking)]\n  \n- **RTAA:** Shuai Jia, Chao Ma, Yibing Song, Xiaokang Yang. \u003Cbr \u002F>\n  \"Robust Tracking against Adversarial Attacks.\" ECCV (2020).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2007.09919)]\n  [[code](https:\u002F\u002Fgithub.com\u002Fjoshuajss\u002FRTAA)]\n  \n- **EAA:** Siyuan Liang, Xingxing Wei, Siyuan Yao, Xiaochun Cao. \u003Cbr \u002F>\n  \"Efficient Adversarial Attacks for Visual Object Tracking.\" ECCV (2020).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2008.00217)]\n\n- **SPARK:** Qing Guo, Xiaofei Xie, Felix Juefei-Xu, Lei Ma, Zhongguo Li, Wanli Xue, Wei Feng, Yang Liu. \u003Cbr \u002F>\n  \"SPARK: Spatial-aware Online Incremental Attack Against Visual Tracking.\" ECCV (2020).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1910.08681.pdf)]\n  \n- **CAT:** Chenglong Li, Lei Liu, Andong Lu, Qing Ji, Jin Tang. \u003Cbr \u002F>\n  \"Challenge-Aware RGBT Tracking.\" ECCV (2020).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2007.13143)]\n\n- **JDE:** Zhongdao Wang, Liang Zheng, Yixuan Liu, Shengjin Wang. \u003Cbr \u002F>\n  \"Towards Real-Time Multi-Object Tracking.\" ECCV (2020).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1909.12605v1.pdf)]\n  [[code](https:\u002F\u002Fgitee.com\u002Fmat026\u002FTowards-Realtime-MOT)]\n  \n- **Chained-Tracker:** Jinlong Peng, Changan Wang, Fangbin Wan, Yang Wu, Yabiao Wang, Ying Tai, Chengjie Wang, Jilin Li, Feiyue Huang, Yanwei Fu. \u003Cbr \u002F>\n  \"Chained-Tracker: Chaining Paired Attentive Regression Results for End-to-End Joint Multiple-Object Detection and Tracking.\" ECCV (2020).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2007.14557.pdf)]\n  [[code](https:\u002F\u002Fgithub.com\u002Fpjl1995\u002FCTracker)]\n  \n- **TAO:** Achal Dave, Tarasha Khurana, Pavel Tokmakov, Cordelia Schmid, Deva Ramanan. \u003Cbr \u002F>\n  \"TAO: A Large-scale Benchmark for Tracking Any Object.\" ECCV (2020).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2005.10356)]\n  [[code](http:\u002F\u002Ftaodataset.org\u002F)]\n\n### CVPR2020\n\n* **MAML:** Guangting Wang, Chong Luo, Xiaoyan Sun, Zhiwei Xiong, Wenjun Zeng.\u003Cbr \u002F>\n  \"Tracking by Instance Detection: A Meta-Learning Approach.\" CVPR (2020 **Oral**).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2004.00830v1.pdf)]\n\n* **Siam R-CNN:** Paul Voigtlaender, Jonathon Luiten, Philip H.S. Torr, Bastian Leibe.\u003Cbr \u002F>\n  \"Siam R-CNN: Visual Tracking by Re-Detection.\" CVPR (2020).\n  [[BoLTVOS](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1904.04552.pdf)] \n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1911.12836.pdf)] \n  [[code](https:\u002F\u002Fwww.vision.rwth-aachen.de\u002Fpage\u002Fsiamrcnn)]\n\n* **D3S:** Alan Lukežič, Jiří Matas, Matej Kristan.\u003Cbr \u002F>\n  \"D3S – A Discriminative Single Shot Segmentation Tracker.\" CVPR (2020).\n  [[paper](http:\u002F\u002Farxiv.org\u002Fpdf\u002F1911.08862v2.pdf)]\n  [[code](https:\u002F\u002Fgithub.com\u002Falanlukezic\u002Fd3s)]\n\n* **PrDiMP:** Martin Danelljan, Luc Van Gool, Radu Timofte.\u003Cbr \u002F>\n  \"Probabilistic Regression for Visual Tracking.\" CVPR (2020).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2003.12565v1.pdf)]\n  [[code](https:\u002F\u002Fgithub.com\u002Fvisionml\u002Fpytracking)]\n\n* **ROAM:** Tianyu Yang, Pengfei Xu, Runbo Hu, Hua Chai, Antoni B. Chan.\u003Cbr \u002F>\n  \"ROAM: Recurrently Optimizing Tracking Model.\" CVPR (2020).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1907.12006v3.pdf)]\n  [[code](https:\u002F\u002Fgithub.com\u002Fskyoung\u002FROAM)]\n\n* **AutoTrack:** Yiming Li, Changhong Fu, Fangqiang Ding, Ziyuan Huang, Geng Lu.\u003Cbr \u002F>\n  \"AutoTrack: Towards High-Performance Visual Tracking for UAV with Automatic Spatio-Temporal Regularization.\" CVPR (2020).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2003.12949.pdf)]\n  [[code](https:\u002F\u002Fgithub.com\u002Fvision4robotics\u002FAutoTrack)]\n\n* **SiamBAN:** Zedu Chen, Bineng Zhong, Guorong Li, Shengping Zhang, Rongrong Ji.\u003Cbr \u002F>\n  \"Siamese Box Adaptive Network for Visual Tracking.\" CVPR (2020).\n  [[paper](http:\u002F\u002Farxiv.org\u002Fpdf\u002F1911.08862v2.pdf)]\n  [[code](https:\u002F\u002Fgithub.com\u002Fhqucv\u002Fsiamban)]\n\n* **SiamCAR:** Dongyan Guo, Jun Wang, Ying Cui, Zhenhua Wang, Shengyong Chen.\u003Cbr \u002F>\n  \"SiamCAR: Siamese Fully Convolutional Classification and Regression for Visual Tracking.\" CVPR (2020).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1911.07241)]\n  [[code](https:\u002F\u002Fgithub.com\u002Fohhhyeahhh\u002FSiamCAR)]\n\n* **SiamAttn:** Yuechen Yu, Yilei Xiong, Weilin Huang, Matthew R. Scott. \u003Cbr \u002F>\n  \"Deformable Siamese Attention Networks for Visual Object Tracking.\" CVPR (2020).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2004.06711v1.pdf)]\n  \n* **CSA:** Bin Yan, Dong Wang, Huchuan Lu, Xiaoyun Yang.\u003Cbr \u002F>\n  \"Cooling-Shrinking Attack: Blinding the Tracker with Imperceptible Noises.\" CVPR (2020).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2003.09595)]\n  [[code](https:\u002F\u002Fgithub.com\u002FMasterBin-IIAU\u002FCSA)]\n\n* **LTMU:** Kenan Dai, Yunhua Zhang, Dong Wang, Jianhua Li, Huchuan Lu, Xiaoyun Yang.\u003Cbr \u002F>\n  \"High-Performance Long-Term Tracking with Meta-Updater.\" CVPR (2020).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2004.00305)]\n  [[code](https:\u002F\u002Fgithub.com\u002FDaikenan\u002FLTMU)]\n  \n* **MAST:** Zihang Lai, Erika Lu, Weidi Xie.\u003Cbr \u002F>\n  \"MAST: A Memory-Augmented Self-supervised Tracker.\" CVPR (2020).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2002.07793)]\n  [[code](https:\u002F\u002Fgithub.com\u002Fzlai0\u002FMAST)]\n  \n* **CGACD:** Fei Du, Peng Liu, Wei Zhao, Xianglong Tang.\u003Cbr \u002F>\n  \"Correlation-Guided Attention for Corner Detection Based Visual Tracking.\" CVPR (2020).\n  [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2020\u002Fpapers\u002FDu_Correlation-Guided_Attention_for_Corner_Detection_Based_Visual_Tracking_CVPR_2020_paper.pdf)]\n  [[code](https:\u002F\u002Fgithub.com\u002Ffeiaxyt\u002FCGACD)]\n\n### IJCAI 2020\n\n- **TLPG-Tracker:** Siyuan Li, Zhi Zhang, Ziyu Liu, Anna Wang, Linglong Qiu, Feng Du. \u003Cbr \u002F>\n  \"TLPG-Tracker: Joint Learning of Target Localization and Proposal Generation for Visual Tracking.\" IJCAI (2020).\n  [[paper](https:\u002F\u002Fwww.ijcai.org\u002FProceedings\u002F2020\u002F99)]\n  \n- **E3SN:** Meng Lan, Yipeng Zhang, Qinning Xu, Lefei Zhang. \u003Cbr \u002F>\n  \"E3SN: Efficient End-to-End Siamese Network for Video Object Segmentation.\" IJCAI (2020).\n  [[paper](https:\u002F\u002Fwww.ijcai.org\u002FProceedings\u002F2020\u002F98)]\n  \n### AAAI 2020\n\n- **SiamFC++:** Yinda Xu, Zeyu Wang, Zuoxin Li, Ye Yuan, Gang Yu. \u003Cbr \u002F>\n  \"SiamFC++: Towards Robust and Accurate Visual Tracking with Target Estimation Guidelines.\" AAAI (2020).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1911.06188v4.pdf)]\n  [[code](https:\u002F\u002Fgithub.com\u002FMegviiDetection\u002Fvideo_analyst)]\n  \n- **DROL:** Jinghao Zhou, Peng Wang, Haoyang Sun. \u003Cbr \u002F>\n  \"Discriminative and Robust Online Learning for Siamese Visual Tracking.\" AAAI (2020).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1909.02959)]\n  [[code](https:\u002F\u002Fgithub.com\u002Fshallowtoil\u002FDROL)]\n  \n- **POST:** Ning Wang, Wengang Zhou, Guojun Qi, Houqiang Li. \u003Cbr \u002F>\n  \"POST: POlicy-Based Switch Tracking.\" AAAI (2020).\n  [[paper](https:\u002F\u002Fojs.aaai.org\u002F\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F6899)]\n  \n- **SPS:** Qintao Hu, Lijun Zhou, Xiaoxiao Wang, Yao Mao, Jianlin Zhang, Qixiang Ye. \u003Cbr \u002F>\n  \"SPSTracker: Sub-Peak Suppression of Response Map for Robust Object Tracking.\" AAAI (2020).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1912.00597.pdf)]\n  [[code](https:\u002F\u002Fwww.ctolib.com\u002Fhttps:\u002F\u002Fgithub.com\u002FTrackerLB\u002FSPSTracker)]\n  \n- **RPOT:** Yifan Yang, Guorong Li, Yuankai Qi, Qingming Huang. \u003Cbr \u002F>\n  \"Release the Power of Online-Training for Robust Visual Tracking.\" AAAI (2020).\n  [[paper](https:\u002F\u002Fojs.aaai.org\u002F\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F6956)]\n  \n- **MetaRTT:** Ilchae Jung, Kihyun You, Hyeonwoo Noh, Minsu Cho, Bohyung Han. \u003Cbr \u002F>\n  \"Real-Time Object Tracking via Meta-Learning: Efficient Model Adaptation and One-Shot Channel Pruning.\" AAAI (2020).\n  [[paper](https:\u002F\u002Fojs.aaai.org\u002F\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F6779)]\n  \n- **GlobalTrack:** Lianghua Huang, Xin Zhao, Kaiqi Huang. \u003Cbr \u002F>\n  \"GlobalTrack: A Simple and Strong Baseline for Long-term Tracking.\" AAAI (2020).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1912.08531)]\n  [[code](https:\u002F\u002Fgithub.com\u002Fhuanglianghua\u002FGlobalTrack)]\n\n### Others 2020\n\n* **VTT:** Tianling Bian, Yang Hua, Tao Song, Zhengui Xue, Ruhui Ma, Neil Robertson, Haibing Guan.\u003Cbr \u002F>\n  \"VTT: Long-term Visual Tracking with Transformers.\" ICPR 2020. \n  [[paper](https:\u002F\u002Fpure.qub.ac.uk\u002Fen\u002Fpublications\u002Fvtt-long-term-visual-tracking-with-transformers)]\n  [[code](https:\u002F\u002Fgithub.com\u002FVisualTrackingVLL)]\n  \n* **COMET:** Seyed Mojtaba Marvasti-Zadeh, Javad Khaghani, Hossein Ghanei-Yakhdan, Shohreh Kasaei, and Li Cheng.\u003Cbr \u002F>\n  \"COMET: Context-aware iOu-guided network for sMall objEct Tracking.\" ACCV 2020. \n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2006.02597.pdf)]\n  [[code](https:\u002F\u002Fgithub.com\u002FVisualTrackingVLL)]\n  \n* **SiamKPN:** Qiang Li, Zekui Qin, Wenbo Zhang, Wen Zheng.\u003Cbr \u002F>\n  \"Siamese Keypoint Prediction Network for Visual Object Tracking.\" ArXiv 2020. \n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2006.04078)]\n  [[code](https:\u002F\u002Fgithub.com\u002FZekuiQin\u002FSiamKPN)]\n\n* **SiamCAN:** Wenzhang Zhou, Longyin Wen, Libo Zhang, Dawei Du, Tiejian Luo, Yanjun Wu. \u003Cbr \u002F>\n  \"SiamMan: Siamese Motion-aware Network for Visual Tracking.\" TIP 2020. \n  [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F1912.05515v2)]\n  [[paper_new](https:\u002F\u002Farxiv.org\u002Fabs\u002F1912.05515v2)]\n  [[code](https:\u002F\u002Fisrc.iscas.ac.cn\u002Fgitlab\u002Fresearch\u002Fsiamcan)]\n  \n### ICCV 2019\n\n* **DiMP:** Goutam Bhat, Martin Danelljan, Luc Van Gool, Radu Timofte.\u003Cbr \u002F>\n  \"Learning Discriminative Model Prediction for Tracking.\" ICCV (2019 **oral**). \n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fpapers\u002FBhat_Learning_Discriminative_Model_Prediction_for_Tracking_ICCV_2019_paper.pdf)]\n  [[code](https:\u002F\u002Fgithub.com\u002Fvisionml\u002Fpytracking)]\n\n* **GradNet:** Peixia Li, Boyu Chen, Wanli Ouyang, Dong Wang, Xiaoyun Yang, Huchuan Lu. \u003Cbr \u002F>\n  \"GradNet: Gradient-Guided Network for Visual Object Tracking.\" ICCV (2019 **oral**).\n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fpapers\u002FLi_GradNet_Gradient-Guided_Network_for_Visual_Object_Tracking_ICCV_2019_paper.pdf)]\n  [[code](https:\u002F\u002Fgithub.com\u002FLPXTT\u002FGradNet-Tensorflow)]\n\n* **MLT:** Janghoon Choi, Junseok Kwon, Kyoung Mu Lee. \u003Cbr \u002F>\n  \"Deep Meta Learning for Real-Time Target-Aware Visual Tracking.\" ICCV (2019).\n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fpapers\u002FChoi_Deep_Meta_Learning_for_Real-Time_Target-Aware_Visual_Tracking_ICCV_2019_paper.pdf)]\n\n* **SPLT:** Bin Yan, Haojie Zhao, Dong Wang, Huchuan Lu, Xiaoyun Yang \u003Cbr \u002F>\n  \"'Skimming-Perusal' Tracking: A Framework for Real-Time and Robust Long-Term Tracking.\" ICCV (2019).\n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fpapers\u002FYan_Skimming-Perusal_Tracking_A_Framework_for_Real-Time_and_Robust_Long-Term_Tracking_ICCV_2019_paper.pdf)]\n  [[code](https:\u002F\u002Fgithub.com\u002Fiiau-tracker\u002FSPLT)]\n\n* **ARCF:** Ziyuan Huang, Changhong Fu, Yiming Li, Fuling Lin, Peng Lu. \u003Cbr \u002F>\n  \"Learning Aberrance Repressed Correlation Filters for Real-Time UAV Tracking.\" ICCV (2019).\n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fpapers\u002FHuang_Learning_Aberrance_Repressed_Correlation_Filters_for_Real-Time_UAV_Tracking_ICCV_2019_paper.pdf)]\n  [[code](https:\u002F\u002Fgithub.com\u002Fvision4robotics\u002FARCF-tracker)]\n\n* **BGDT:** Lianghua Huang, Xin Zhao, Kaiqi Huang. \u003Cbr \u002F>\n  \"Bridging the Gap Between Detection and Tracking: A Unified Approach.\" ICCV (2019).\n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fpapers\u002FHuang_Bridging_the_Gap_Between_Detection_and_Tracking_A_Unified_Approach_ICCV_2019_paper.pdf)]\n\n* **PAT:** Rey Reza Wiyatno, Anqi Xu. \u003Cbr \u002F>\n  \"Physical Adversarial Textures That Fool Visual Object Tracking.\" ICCV (2019).\n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fpapers\u002FWiyatno_Physical_Adversarial_Textures_That_Fool_Visual_Object_Tracking_ICCV_2019_paper.pdf)]\n\n* **GFS-DCF:** Tianyang Xu, Zhen-Hua Feng, Xiao-Jun Wu, Josef Kittler. \u003Cbr \u002F>\n  \"Joint Group Feature Selection and Discriminative Filter Learning for Robust Visual Object Tracking.\" ICCV (2019).\n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fpapers\u002FXu_Joint_Group_Feature_Selection_and_Discriminative_Filter_Learning_for_Robust_ICCV_2019_paper.pdf)]\n  [[code](https:\u002F\u002Fgithub.com\u002FXU-TIANYANG\u002FGFS-DCF)]\n\n* **CDTB:** Alan Lukežič, Ugur Kart, Jani Käpylä, Ahmed Durmush, Joni-Kristian Kämäräinen, Jiří Matas, Matej Kristan. \u003Cbr \u002F>\n  \"CDTB: A Color and Depth Visual Object Tracking Dataset and Benchmark.\" ICCV (2019).\n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fpapers\u002FLukezic_CDTB_A_Color_and_Depth_Visual_Object_Tracking_Dataset_and_ICCV_2019_paper.pdf)]\n  \n* **fdKCF:** Linyu Zheng, Ming Tang, Yingying Chen, Jinqiao Wang, Hanqing Lu. \u003Cbr \u002F>\n  \"Fast-deepKCF Without Boundary Effect.\" ICCV (2019).\n  [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fpapers\u002FZheng_Fast-deepKCF_Without_Boundary_Effect_ICCV_2019_paper.pdf)]\n\n* **VOT2019:** Kristan, Matej, et al.\u003Cbr \u002F>\n  \"The Seventh Visual Object Tracking VOT2019 Challenge Results.\" ICCV workshops (2019).\n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCVW_2019\u002Fpapers\u002FVOT\u002FKristan_The_Seventh_Visual_Object_Tracking_VOT2019_Challenge_Results_ICCVW_2019_paper.pdf)]\n\n### CVPR2019\n\n* **SiamMask:** Qiang Wang, Li Zhang, Luca Bertinetto, Weiming Hu, Philip H.S. Torr.\u003Cbr \u002F>\n  \"Fast Online Object Tracking and Segmentation: A Unifying Approach.\" CVPR (2019). \n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1812.05050.pdf)]\n  [[project](http:\u002F\u002Fwww.robots.ox.ac.uk\u002F~qwang\u002FSiamMask\u002F)]\n  [[code](https:\u002F\u002Fgithub.com\u002Ffoolwood\u002FSiamMask)]\n\n* **SiamRPN++:** Bo Li, Wei Wu, Qiang Wang, Fangyi Zhang, Junliang Xing, Junjie Yan.\u003Cbr \u002F>\n  \"SiamRPN++: Evolution of Siamese Visual Tracking with Very Deep Networks.\" CVPR (2019 **oral**). \n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FLi_SiamRPN_Evolution_of_Siamese_Visual_Tracking_With_Very_Deep_Networks_CVPR_2019_paper.pdf)]\n  [[project](http:\u002F\u002Fbo-li.info\u002FSiamRPN++\u002F)]\n\n* **ATOM:** Martin Danelljan, Goutam Bhat, Fahad Shahbaz Khan, Michael Felsberg. \u003Cbr \u002F>\n  \"ATOM: Accurate Tracking by Overlap Maximization.\" CVPR (2019 **oral**). \n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FDanelljan_ATOM_Accurate_Tracking_by_Overlap_Maximization_CVPR_2019_paper.pdf)]\n  [[code](https:\u002F\u002Fgithub.com\u002Fvisionml\u002Fpytracking)]\n\n* **SiamDW:** Zhipeng Zhang, Houwen Peng.\u003Cbr \u002F>\n  \"Deeper and Wider Siamese Networks for Real-Time Visual Tracking.\" CVPR (2019 **oral**). \n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FZhang_Deeper_and_Wider_Siamese_Networks_for_Real-Time_Visual_Tracking_CVPR_2019_paper.pdf)]\n  [[code](https:\u002F\u002Fgithub.com\u002Fresearchmm\u002FSiamDW)]\n\n* **GCT:** Junyu Gao, Tianzhu Zhang, Changsheng Xu.\u003Cbr \u002F>\n  \"Graph Convolutional Tracking.\" CVPR (2019 **oral**).\n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FGao_Graph_Convolutional_Tracking_CVPR_2019_paper.pdf)]\n  [[code](https:\u002F\u002Fgithub.com\u002Fresearchmm\u002FSiamDW)]\n\n* **ASRCF:** Kenan Dai, Dong Wang, Huchuan Lu, Chong Sun, Jianhua Li. \u003Cbr \u002F>\n  \"Visual Tracking via Adaptive Spatially-Regularized Correlation Filters.\" CVPR (2019 **oral**).\n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FDai_Visual_Tracking_via_Adaptive_Spatially-Regularized_Correlation_Filters_CVPR_2019_paper.pdf)]\n  [[code](https:\u002F\u002Fgithub.com\u002FDaikenan\u002FASRCF)]\n\n* **UDT:** Ning Wang, Yibing Song, Chao Ma, Wengang Zhou, Wei Liu, Houqiang Li.\u003Cbr \u002F>\n  \"Unsupervised Deep Tracking.\" CVPR (2019). \n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1904.01828.pdf)]\n  [[code](https:\u002F\u002Fgithub.com\u002F594422814\u002FUDT)]\n\n* **TADT:** Xin Li, Chao Ma, Baoyuan Wu, Zhenyu He, Ming-Hsuan Yang.\u003Cbr \u002F>\n  \"Target-Aware Deep Tracking.\" CVPR (2019). \n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1904.01772.pdf)]\n  [[project](https:\u002F\u002Fxinli-zn.github.io\u002FTADT-project-page\u002F)]\n  [[code](https:\u002F\u002Fgithub.com\u002FXinLi-zn\u002FTADT)]\n\n* **C-RPN:** Heng Fan, Haibin Ling.\u003Cbr \u002F>\n  \"Siamese Cascaded Region Proposal Networks for Real-Time Visual Tracking.\" CVPR (2019). \n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FFan_Siamese_Cascaded_Region_Proposal_Networks_for_Real-Time_Visual_Tracking_CVPR_2019_paper.pdf)]\n\n* **SPM:** Guangting Wang, Chong Luo, Zhiwei Xiong, Wenjun Zeng.\u003Cbr \u002F>\n  \"SPM-Tracker: Series-Parallel Matching for Real-Time Visual Object Tracking.\" CVPR (2019). \n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FWang_SPM-Tracker_Series-Parallel_Matching_for_Real-Time_Visual_Object_Tracking_CVPR_2019_paper.pdf)]\n\n* **OTR:** Ugur Kart, Alan Lukezic, Matej Kristan, Joni-Kristian Kamarainen, Jiri Matas. \u003Cbr \u002F>\n  \"Object Tracking by Reconstruction with View-Specific Discriminative Correlation Filters.\" CVPR (2019). \n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FKart_Object_Tracking_by_Reconstruction_With_View-Specific_Discriminative_Correlation_Filters_CVPR_2019_paper.pdf)]\n  [[code](https:\u002F\u002Fgithub.com\u002Fugurkart\u002FOTR)]\n\n* **RPCF:** Yuxuan Sun, Chong Sun, Dong Wang, Huchuan Lu, You He. \u003Cbr \u002F>\n  \"ROI Pooled Correlation Filters for Visual Tracking.\" CVPR (2019).\n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FSun_ROI_Pooled_Correlation_Filters_for_Visual_Tracking_CVPR_2019_paper.pdf)]\n\n* **LaSOT:** Heng Fan, Liting Lin, Fan Yang, Peng Chu, Ge Deng, Sijia Yu, Hexin Bai, Yong Xu, Chunyuan Liao, Haibin Ling.\u003Cbr \u002F>\n  \"LaSOT: A High-quality Benchmark for Large-scale Single Object Tracking.\" CVPR (2019). \n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1809.07845.pdf)]\n  [[project](https:\u002F\u002Fcis.temple.edu\u002Flasot\u002F)]\n\n### AAAI2019\n\n* **LDES:** Yang Li, Jianke Zhu, Steven C.H. Hoi, Wenjie Song, Zhefeng Wang, Hantang Liu.\u003Cbr \u002F>\n  \"Robust Estimation of Similarity Transformation for Visual Object Tracking.\" AAAI (2019). \n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1712.05231.pdf)]\n  [[code](https:\u002F\u002Fgithub.com\u002Fihpdep\u002FLDES)] \n  \n* **ANT:** Yuankai Qi, Shengping Zhang, Weigang Zhang, Li Su, Qingming Huang, Ming-Hsuan Yang.\u003Cbr \u002F>\n  \"Learning Attribute-Specific Representations for Visual Tracking.\" AAAI (2019). \n  [[paper](https:\u002F\u002Ffaculty.ucmerced.edu\u002Fmhyang\u002Fpapers\u002Faaai2019_tracking.pdf)]\n  \n* **Re2EMA:** Jianglei Huang, Wengang Zhou.\u003Cbr \u002F>\n  \"Re2EMA: Regularized and Reinitialized Exponential Moving Average for Target Model Update in Object Tracking.\" AAAI (2019). \n  [[paper](https:\u002F\u002Fojs.aaai.org\u002F\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F4862)]\n\n### NIPS2018\n\n* **DAT:** Shi Pu, Yibing Song, Chao Ma, Honggang Zhang, Ming-Hsuan Yang.\u003Cbr \u002F>\n  \"Deep Attentive Tracking via Reciprocative Learning.\" NIPS (2018). \n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1810.03851.pdf)] \n  [[project](https:\u002F\u002Fybsong00.github.io\u002Fnips18_tracking\u002Findex)] \n  [[code](https:\u002F\u002Fgithub.com\u002Fshipubupt\u002FNIPS2018)] \n\n### ECCV2018\n\n* **UPDT:** Goutam Bhat, Joakim Johnander, Martin Danelljan, Fahad Shahbaz Khan, Michael Felsberg.\u003Cbr \u002F>\n  \"Unveiling the Power of Deep Tracking.\" ECCV (2018). \n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fpapers\u002FGoutam_Bhat_Unveiling_the_Power_ECCV_2018_paper.pdf)]  \n\n* **DaSiamRPN:** Zheng Zhu, Qiang Wang, Bo Li, Wu Wei, Junjie Yan, Weiming Hu.\u003Cbr \u002F>\n  \"Distractor-aware Siamese Networks for Visual Object Tracking.\" ECCV (2018).\n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fpapers\u002FZheng_Zhu_Distractor-aware_Siamese_Networks_ECCV_2018_paper.pdf)]\n  [[github](https:\u002F\u002Fgithub.com\u002Ffoolwood\u002FDaSiamRPN)]\n  \n* **SiamMCF:** Henrique Morimitsu.\u003Cbr \u002F>\n  \"Multiple Context Features in Siamese Networks for Visual Object Tracking.\" ECCV (2018).\n  [[paper](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007%2F978-3-030-11009-3_6.pdf)]\n  [[github](https:\u002F\u002Fgithub.com\u002Fhmorimitsu\u002Fsiam-mcf)]\n\n* **SACF:** Mengdan Zhang, Qiang Wang, Junliang Xing, Jin Gao, Peixi Peng, Weiming Hu, Steve Maybank.\u003Cbr \u002F>\n  \"Visual Tracking via Spatially Aligned Correlation Filters Network.\" ECCV (2018).\n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fpapers\u002Fmengdan_zhang_Visual_Tracking_via_ECCV_2018_paper.pdf)]\n\n* **RTINet:** Yingjie Yao, Xiaohe Wu, Lei Zhang, Shiguang Shan, Wangmeng Zuo.\u003Cbr \u002F>\n  \"Joint Representation and Truncated Inference Learning for Correlation Filter based Tracking.\" ECCV (2018).\n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fpapers\u002FYingjie_Yao_Joint_Representation_and_ECCV_2018_paper.pdf)]\n\n* **Meta-Tracker:** Eunbyung Park, Alexander C. Berg.\u003Cbr \u002F>\n  \"Meta-Tracker: Fast and Robust Online Adaptation for Visual Object Trackers.\"\n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fpapers\u002FEunbyung_Park_Meta-Tracker_Fast_and_ECCV_2018_paper.pdf)]\n  [[github](https:\u002F\u002Fgithub.com\u002Fsilverbottlep\u002Fmeta_trackers)]\n\n* **DSLT:** Xiankai Lu, Chao Ma*, Bingbing Ni, Xiaokang Yang, Ian Reid, Ming-Hsuan Yang.\u003Cbr \u002F>\n  \"Deep Regression Tracking with Shrinkage Loss.\" ECCV (2018).\n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fpapers\u002FXiankai_Lu_Deep_Regression_Tracking_ECCV_2018_paper.pdf)]\n  [[github](https:\u002F\u002Fgithub.com\u002Fchaoma99\u002FDSLT)]\n\n* **DRL-IS:** Liangliang Ren, Xin Yuan, Jiwen Lu, Ming Yang, Jie Zhou.\u003Cbr \u002F>\n  \"Deep Reinforcement Learning with Iterative Shift for Visual Tracking.\" ECCV (2018).\n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fpapers\u002FLiangliang_Ren_Deep_Reinforcement_Learning_ECCV_2018_paper.pdf)]\n\n* **RT-MDNet:** Ilchae Jung, Jeany Son, Mooyeol Baek, Bohyung Han.\u003Cbr \u002F>\n  \"Real-Time MDNet.\" ECCV (2018).\n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fpapers\u002FIlchae_Jung_Real-Time_MDNet_ECCV_2018_paper.pdf)]\n\n* **ACT:** Boyu Chen, Dong Wang, Peixia Li, Huchuan Lu.\u003Cbr \u002F>\n  \"Real-time 'Actor-Critic' Tracking.\" ECCV (2018).\n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fpapers\u002FBoyu_Chen_Real-time_Actor-Critic_Tracking_ECCV_2018_paper.pdf)]\n  [[github](https:\u002F\u002Fgithub.com\u002Fbychen515\u002FACT)]\n\n* **StructSiam:** Yunhua Zhang, Lijun Wang, Dong Wang, Mengyang Feng, Huchuan Lu, Jinqing Qi.\u003Cbr \u002F>\n  \"Structured Siamese Network for Real-Time Visual Tracking.\" ECCV (2018).\n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fpapers\u002FYunhua_Zhang_Structured_Siamese_Network_ECCV_2018_paper.pdf)]\n\n* **MemTrack:** Tianyu Yang, Antoni B. Chan.\u003Cbr \u002F>\n  \"Learning Dynamic Memory Networks for Object Tracking.\" ECCV (2018).\n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fpapers\u002FTianyu_Yang_Learning_Dynamic_Memory_ECCV_2018_paper.pdf)]\n\n* **SiamFC-tri:** Xingping Dong, Jianbing Shen.\u003Cbr \u002F>\n  \"Triplet Loss in Siamese Network for Object Tracking.\" ECCV (2018).\n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fpapers\u002FXingping_Dong_Triplet_Loss_with_ECCV_2018_paper.pdf)]\n  [[github](https:\u002F\u002Fgithub.com\u002Fshenjianbing\u002FTripletTracking)]\n\n* **OxUvA long-term dataset+benchmark:** Jack Valmadre, Luca Bertinetto, João F. Henriques, Ran Tao, Andrea Vedaldi, Arnold Smeulders, Philip Torr, Efstratios Gavves.\u003Cbr \u002F>\n  \"Long-term Tracking in the Wild: a Benchmark.\" ECCV (2018).\n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fpapers\u002FEfstratios_Gavves_Long-term_Tracking_in_ECCV_2018_paper.pdf)]\n  [[project](https:\u002F\u002Foxuva.github.io\u002Flong-term-tracking-benchmark\u002F)]\n\n* **TrackingNet:** Matthias Müller, Adel Bibi, Silvio Giancola, Salman Al-Subaihi, Bernard Ghanem.\u003Cbr \u002F>\n  \"TrackingNet: A Large-Scale Dataset and Benchmark for Object Tracking in the Wild.\" ECCV (2018).\n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fpapers\u002FMatthias_Muller_TrackingNet_A_Large-Scale_ECCV_2018_paper.pdf)] \n  [[project](http:\u002F\u002Ftracking-net.org\u002F)]\n\n\n### CVPR2018\n\n* **VITAL:** Yibing Song, Chao Ma, Xiaohe Wu, Lijun Gong, Linchao Bao, Wangmeng Zuo, Chunhua Shen, Rynson Lau, and Ming-Hsuan Yang.\n  \"VITAL: VIsual Tracking via Adversarial Learning.\" CVPR (2018 **Spotlight**). \n  [[project](https:\u002F\u002Fybsong00.github.io\u002Fcvpr18_tracking\u002Findex)]\n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fpapers\u002FSong_VITAL_VIsual_Tracking_CVPR_2018_paper.pdf)]\n  [[github](https:\u002F\u002Fgithub.com\u002Fybsong00\u002FVital_release)]\n\n* **LSART:** Chong Sun, Dong Wang, Huchuan Lu, Ming-Hsuan Yang.\n  \"Learning Spatial-Aware Regressions for Visual Tracking.\" CVPR (2018 **Spotlight**). \n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fpapers\u002FSun_Learning_Spatial-Aware_Regressions_CVPR_2018_paper.pdf)]\n\n* **SiamRPN:** Bo Li, Wei Wu, Zheng Zhu, Junjie Yan.\n  \"High Performance Visual Tracking with Siamese Region Proposal Network.\" CVPR (2018 **Spotlight**). \n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fpapers\u002FLi_High_Performance_Visual_CVPR_2018_paper.pdf)]\n\n* **TRACA:** Jongwon Choi, Hyung Jin Chang, Tobias Fischer, Sangdoo Yun, Kyuewang Lee, Jiyeoup Jeong, Yiannis Demiris, Jin Young Choi.\n  \"Context-aware Deep Feature Compression for High-speed Visual Tracking.\" CVPR (2018). \n  [[project](https:\u002F\u002Fsites.google.com\u002Fsite\u002Fjwchoivision\u002F)]\n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fpapers\u002FChoi_Context-Aware_Deep_Feature_CVPR_2018_paper.pdf)]\n\n* **RASNet:** Qiang Wang, Zhu Teng, Junliang Xing, Jin Gao, Weiming Hu, Stephen Maybank.\n  \"Learning Attentions: Residual Attentional Siamese Network for High Performance Online Visual Tracking.\" CVPR 2018. \n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fpapers\u002FWang_Learning_Attentions_Residual_CVPR_2018_paper.pdf)]\n\n* **SA-Siam:** Anfeng He, Chong Luo, Xinmei Tian, Wenjun Zeng.\n  \"A Twofold Siamese Network for Real-Time Object Tracking.\" CVPR (2018). \n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fpapers\u002FHe_A_Twofold_Siamese_CVPR_2018_paper.pdf)]\n\n* **STRCF:** Feng Li, Cheng Tian, Wangmeng Zuo, Lei Zhang, Ming-Hsuan Yang.\n  \"Learning Spatial-Temporal Regularized Correlation Filters for Visual Tracking.\" CVPR (2018). \n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fpapers\u002FLi_Learning_Spatial-Temporal_Regularized_CVPR_2018_paper.pdf)]\n  [[github](https:\u002F\u002Fgithub.com\u002Flifeng9472\u002FSTRCF)]\n\n* **FlowTrack:** Zheng Zhu, Wei Wu, Wei Zou, Junjie Yan.\n  \"End-to-end Flow Correlation Tracking with Spatial-temporal Attention.\" CVPR (2018). \n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fpapers\u002FZhu_End-to-End_Flow_Correlation_CVPR_2018_paper.pdf)]\n\n* **DEDT:** Kourosh Meshgi, Shigeyuki Oba, Shin Ishii.\n  \"Efficient Diverse Ensemble for Discriminative Co-Tracking.\" CVPR (2018). \n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fpapers\u002FMeshgi_Efficient_Diverse_Ensemble_CVPR_2018_paper.pdf)]\n\n* **SINT++:** Xiao Wang, Chenglong Li, Bin Luo, Jin Tang.\n  \"SINT++: Robust Visual Tracking via Adversarial Positive Instance Generation.\" CVPR (2018).\n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fpapers\u002FWang_SINT_Robust_Visual_CVPR_2018_paper.pdf)]\n\n* **DRT:** Chong Sun, Dong Wang, Huchuan Lu, Ming-Hsuan Yang.\n  \"Correlation Tracking via Joint Discrimination and Reliability Learning.\" CVPR (2018). \n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fpapers\u002FSun_Correlation_Tracking_via_CVPR_2018_paper.pdf)]\n\n* **MCCT:** Ning Wang, Wengang Zhou, Qi Tian, Richang Hong, Meng Wang, Houqiang Li.\n  \"Multi-Cue Correlation Filters for Robust Visual Tracking.\" CVPR (2018). \n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fpapers\u002FWang_Multi-Cue_Correlation_Filters_CVPR_2018_paper.pdf)]\n  [[github](https:\u002F\u002Fgithub.com\u002F594422814\u002FMCCT)]\n\n* **MKCF:** Ming Tang, Bin Yu, Fan Zhang, Jinqiao Wang.\n  \"High-speed Tracking with Multi-kernel Correlation Filters.\" CVPR (2018).\n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fpapers\u002FTang_High-Speed_Tracking_With_CVPR_2018_paper.pdf)]\n\n* **HP:** Xingping Dong, Jianbing Shen, Wenguan Wang, Yu, Liu, Ling Shao, and Fatih Porikli.\n  \"Hyperparameter Optimization for Tracking with Continuous Deep Q-Learning.\" CVPR (2018).\n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fpapers\u002FDong_Hyperparameter_Optimization_for_CVPR_2018_paper.pdf)]\n\n### NIPS2017\n\n* **HART:** Adam R. Kosiorek, Alex Bewley, Ingmar Posner. \n  \"Hierarchical Attentive Recurrent Tracking.\" NIPS (2017). \n  [[paper](https:\u002F\u002Fpapers.nips.cc\u002Fpaper\u002F6898-hierarchical-attentive-recurrent-tracking.pdf)]\n  [[github](https:\u002F\u002Fgithub.com\u002Fakosiorek\u002Fhart)]\n\n\n### ICCV2017\n\n* **CREST:** Yibing Song, Chao Ma, Lijun Gong, Jiawei Zhang, Rynson Lau, Ming-Hsuan Yang. \n  \"CREST: Convolutional Residual Learning for Visual Tracking.\" ICCV (2017 **Spotlight**). \n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fpapers\u002FSong_CREST_Convolutional_Residual_ICCV_2017_paper.pdf)]\n  [[project](http:\u002F\u002Fwww.cs.cityu.edu.hk\u002F~yibisong\u002Ficcv17\u002Findex.html)]\n  [[github](https:\u002F\u002Fgithub.com\u002Fybsong00\u002FCREST-Release)]\n\n* **EAST:** Chen Huang, Simon Lucey, Deva Ramanan.\n  \"Learning Policies for Adaptive Tracking with Deep Feature Cascades.\" ICCV (2017 **Spotlight**). \n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fpapers\u002FHuang_Learning_Policies_for_ICCV_2017_paper.pdf)]\n  [[supp](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fsupplemental\u002FHuang_Learning_Policies_for_ICCV_2017_supplemental.zip)]\n\n* **PTAV:** Heng Fan and Haibin Ling. \n  \"Parallel Tracking and Verifying: A Framework for Real-Time and High Accuracy Visual Tracking.\" ICCV (2017). \n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fpapers\u002FFan_Parallel_Tracking_and_ICCV_2017_paper.pdf)]\n  [[supp](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fsupplemental\u002FFan_Parallel_Tracking_and_ICCV_2017_supplemental.pdf)]\n  [[project](http:\u002F\u002Fwww.dabi.temple.edu\u002F~hbling\u002Fcode\u002FPTAV\u002Fptav.htm)]\n  [[code](http:\u002F\u002Fwww.dabi.temple.edu\u002F~hbling\u002Fcode\u002FPTAV\u002Fserial_ptav_v1.zip)]\n\n* **BACF:** Hamed Kiani Galoogahi, Ashton Fagg, Simon Lucey. \n  \"Learning Background-Aware Correlation Filters for Visual Tracking.\" ICCV (2017). \n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fpapers\u002FGaloogahi_Learning_Background-Aware_Correlation_ICCV_2017_paper.pdf)]\n  [[supp](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fsupplemental\u002FGaloogahi_Learning_Background-Aware_Correlation_ICCV_2017_supplemental.pdf)]\n  [[code](http:\u002F\u002Fwww.hamedkiani.com\u002Fuploads\u002F5\u002F1\u002F8\u002F8\u002F51882963\u002Fbacf_toupload.zip)]\n  [[project](http:\u002F\u002Fwww.hamedkiani.com\u002Fbacf.html)]\n\n* **TSN:** Zhu Teng, Junliang Xing, Qiang Wang, Congyan Lang, Songhe Feng and Yi Jin.\n  \"Robust Object Tracking based on Temporal and Spatial Deep Networks.\" ICCV (2017).\n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fpapers\u002FTeng_Robust_Object_Tracking_ICCV_2017_paper.pdf)]\n\n* **p-tracker:** James Supančič, III; Deva Ramanan.\n  \"Tracking as Online Decision-Making: Learning a Policy From Streaming Videos With Reinforcement Learning.\" ICCV (2017).\n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fpapers\u002FSupancic_Tracking_as_Online_ICCV_2017_paper.pdf)]\n  [[supp](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fsupplemental\u002FSupancic_Tracking_as_Online_ICCV_2017_supplemental.pdf)]\n\n* **DSiam:** Qing Guo; Wei Feng; Ce Zhou; Rui Huang; Liang Wan; Song Wang.\n  \"Learning Dynamic Siamese Network for Visual Object Tracking.\" ICCV (2017).\n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fpapers\u002FGuo_Learning_Dynamic_Siamese_ICCV_2017_paper.pdf)]\n  [[github](https:\u002F\u002Fgithub.com\u002Ftsingqguo\u002FDSiam)]\n\n* **SP-KCF:** Xin Sun; Ngai-Man Cheung; Hongxun Yao; Yiluan Guo.\n  \"Non-Rigid Object Tracking via Deformable Patches Using Shape-Preserved KCF and Level Sets.\" ICCV (2017).\n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fpapers\u002FSun_Non-Rigid_Object_Tracking_ICCV_2017_paper.pdf)]\n\n* **UCT:** Zheng Zhu, Guan Huang, Wei Zou, Dalong Du, Chang Huang.\n  \"UCT: Learning Unified Convolutional Networks for Real-Time Visual Tracking.\" ICCV workshop (2017).\n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017_workshops\u002Fpapers\u002Fw28\u002FZhu_UCT_Learning_Unified_ICCV_2017_paper.pdf)]\n\n* Tobias Bottger, Patrick Follmann.\n  \"The Benefits of Evaluating Tracker Performance Using Pixel-Wise Segmentations.\" ICCV workshop (2017).\n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017_workshops\u002Fpapers\u002Fw28\u002FBottger_The_Benefits_of_ICCV_2017_paper.pdf)]\n\n* **CFWCR:** Zhiqun He, Yingruo Fan, Junfei Zhuang, Yuan Dong, HongLiang Bai.\n  \"Correlation Filters With Weighted Convolution Responses.\" ICCV workshop (2017).\n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017_workshops\u002Fpapers\u002Fw28\u002FHe_Correlation_Filters_With_ICCV_2017_paper.pdf)]\n  [[github](https:\u002F\u002Fgithub.com\u002Fhe010103\u002FCFWCR)]\n\n* **IBCCF:** Feng Li, Yingjie Yao, Peihua Li, David Zhang, Wangmeng Zuo, Ming-Hsuan Yang.\n  \"Integrating Boundary and Center Correlation Filters for Visual Tracking With Aspect Ratio Variation.\" ICCV workshop (2017).\n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017_workshops\u002Fpapers\u002Fw28\u002FLi_Integrating_Boundary_and_ICCV_2017_paper.pdf)]\n  [[github](https:\u002F\u002Fgithub.com\u002Flifeng9472\u002FIBCCF)]\n\n* **RFL:** Tianyu Yang, Antoni B. Chan.\n  \"Recurrent Filter Learning for Visual Tracking.\" ICCV workshop (2017).\n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017_workshops\u002Fpapers\u002Fw28\u002FYang_Recurrent_Filter_Learning_ICCV_2017_paper.pdf)]\n\n\n### CVPR2017\n\n* **ECO:** Martin Danelljan, Goutam Bhat, Fahad Shahbaz Khan, Michael Felsberg. \n  \"ECO: Efficient Convolution Operators for Tracking.\" CVPR (2017). \n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fpapers\u002FDanelljan_ECO_Efficient_Convolution_CVPR_2017_paper.pdf)]\n  [[supp](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fsupplemental\u002FDanelljan_ECO_Efficient_Convolution_2017_CVPR_supplemental.pdf)]\n  [[project](http:\u002F\u002Fwww.cvl.isy.liu.se\u002Fresearch\u002Fobjrec\u002Fvisualtracking\u002Fecotrack\u002Findex.html)]\n  [[github](https:\u002F\u002Fgithub.com\u002Fmartin-danelljan\u002FECO)]\n\n* **CFNet:** Jack Valmadre, Luca Bertinetto, João F. Henriques, Andrea Vedaldi, Philip H. S. Torr.\n  \"End-to-end representation learning for Correlation Filter based tracking.\" CVPR (2017). \n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fpapers\u002FValmadre_End-To-End_Representation_Learning_CVPR_2017_paper.pdf)]\n  [[supp](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fsupplemental\u002FValmadre_End-To-End_Representation_Learning_2017_CVPR_supplemental.pdf)]\n  [[project](http:\u002F\u002Fwww.robots.ox.ac.uk\u002F~luca\u002Fcfnet.html)]\n  [[github](https:\u002F\u002Fgithub.com\u002Fbertinetto\u002Fcfnet)]\n\n* **CACF:** Matthias Mueller, Neil Smith, Bernard Ghanem. \n  \"Context-Aware Correlation Filter Tracking.\" CVPR (2017 **oral**). \n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fpapers\u002FMueller_Context-Aware_Correlation_Filter_CVPR_2017_paper.pdf)]\n  [[supp](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fsupplemental\u002FMueller_Context-Aware_Correlation_Filter_2017_CVPR_supplemental.zip)]\n  [[project](https:\u002F\u002Fivul.kaust.edu.sa\u002FPages\u002Fpub-ca-cf-tracking.aspx)]\n  [[code](https:\u002F\u002Fgithub.com\u002Fthias15\u002FContext-Aware-CF-Tracking)]\n\n* **RaF:** Le Zhang, Jagannadan Varadarajan, Ponnuthurai Nagaratnam Suganthan, Narendra Ahuja and Pierre Moulin\n  \"Robust Visual Tracking Using Oblique Random Forests.\" CVPR (2017). \n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fpapers\u002FZhang_Robust_Visual_Tracking_CVPR_2017_paper.pdf)]\n  [[supp](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fsupplemental\u002FZhang_Robust_Visual_Tracking_2017_CVPR_supplemental.pdf)]\n  [[project](https:\u002F\u002Fsites.google.com\u002Fsite\u002Fzhangleuestc\u002Fincremental-oblique-random-forest)]\n  [[code](https:\u002F\u002Fgithub.com\u002FZhangLeUestc\u002FIncremental-Oblique-Random-Forest)]\n\n* **MCPF:** Tianzhu Zhang, Changsheng Xu, Ming-Hsuan Yang. \n  \"Multi-Task Correlation Particle Filter for Robust Object Tracking.\" CVPR (2017). \n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fpapers\u002FZhang_Multi-Task_Correlation_Particle_CVPR_2017_paper.pdf)]\n  [[project](http:\u002F\u002Fnlpr-web.ia.ac.cn\u002Fmmc\u002Fhomepage\u002Ftzzhang\u002Fmcpf.html)]\n  [[code](http:\u002F\u002Fnlpr-web.ia.ac.cn\u002Fmmc\u002Fhomepage\u002Ftzzhang\u002Fmcpf.html)]\n\n* **ACFN:** Jongwon Choi, Hyung Jin Chang, Sangdoo Yun, Tobias Fischer, Yiannis Demiris, and Jin Young Choi.\n  \"Attentional Correlation Filter Network for Adaptive Visual Tracking.\" CVPR (2017).\n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fpapers\u002FChoi_Attentional_Correlation_Filter_CVPR_2017_paper.pdf)]\n  [[supp](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fsupplemental\u002FChoi_Attentional_Correlation_Filter_2017_CVPR_supplemental.pdf)]\n  [[project](https:\u002F\u002Fsites.google.com\u002Fsite\u002Fjwchoivision\u002Fhome\u002Facfn-1)]\n  [[test code](https:\u002F\u002Fdrive.google.com\u002Ffile\u002Fd\u002F0B0ZkG8zaRQoLQUswbW9qSWFaU0U\u002Fview?usp=drive_web)]\n  [[training code](https:\u002F\u002Fdrive.google.com\u002Ffile\u002Fd\u002F0B0ZkG8zaRQoLZVVranBnbHlydnM\u002Fview?usp=drive_web)]\n\n* **LMCF:** Mengmeng Wang, Yong Liu, Zeyi Huang. \n  \"Large Margin Object Tracking with Circulant Feature Maps.\" CVPR (2017). \n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fpapers\u002FWang_Large_Margin_Object_CVPR_2017_paper.pdf)]\n  [[zhihu](https:\u002F\u002Fzhuanlan.zhihu.com\u002Fp\u002F25761718)]\n\n* **ADNet:** Sangdoo Yun, Jongwon Choi, Youngjoon Yoo, Kimin Yun, Jin Young Choi.\n  \"Action-Decision Networks for Visual Tracking with Deep Reinforcement Learning.\" CVPR (2017 **Spotlight**). \n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fpapers\u002FYun_Action-Decision_Networks_for_CVPR_2017_paper.pdf)]\n  [[supp](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fsupplemental\u002FYun_Action-Decision_Networks_for_2017_CVPR_supplemental.pdf)]\n  [[project](https:\u002F\u002Fsites.google.com\u002Fview\u002Fcvpr2017-adnet)]\n\n* **CSR-DCF:** Alan Lukežič, Tomáš Vojíř, Luka Čehovin, Jiří Matas, Matej Kristan. \n  \"Discriminative Correlation Filter with Channel and Spatial Reliability.\" CVPR (2017). \n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fpapers\u002FLukezic_Discriminative_Correlation_Filter_CVPR_2017_paper.pdf)]\n  [[supp](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fsupplemental\u002FLukezic_Discriminative_Correlation_Filter_2017_CVPR_supplemental.pdf)]\n  [[code](https:\u002F\u002Fgithub.com\u002Falanlukezic\u002Fcsr-dcf)]\n\n* **BranchOut:** Bohyung Han, Jack Sim, Hartwig Adam.\n  \"BranchOut: Regularization for Online Ensemble Tracking with Convolutional Neural Networks.\" CVPR (2017). \n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fpapers\u002FHan_BranchOut_Regularization_for_CVPR_2017_paper.pdf)]\n\n* **AMCT:** Donghun Yeo, Jeany Son, Bohyung Han, Joonhee Han.\n  \"Superpixel-based Tracking-by-Segmentation using Markov Chains.\" CVPR (2017).\n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fpapers\u002FYeo_Superpixel-Based_Tracking-By-Segmentation_Using_CVPR_2017_paper.pdf)]\n\n* **SANet:** Heng Fan, Haibin Ling. \n  \"SANet: Structure-Aware Network for Visual Tracking.\" CVPRW (2017). \n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1611.06878.pdf)]\n  [[project](http:\u002F\u002Fwww.dabi.temple.edu\u002F~hbling\u002Fcode\u002FSANet\u002FSANet.html)]\n  [[code](http:\u002F\u002Fwww.dabi.temple.edu\u002F~hbling\u002Fcode\u002FSANet\u002Fsanet_code.zip)]\n\n### ECCV2016\n\n* **SiameseFC:** Luca Bertinetto, Jack Valmadre, João F. Henriques, Andrea Vedaldi, Philip H.S. Torr. \n  \"Fully-Convolutional Siamese Networks for Object Tracking.\" ECCV workshop (2016). \n  [[paper](http:\u002F\u002F120.52.73.78\u002Farxiv.org\u002Fpdf\u002F1606.09549v2.pdf)]\n  [[project](http:\u002F\u002Fwww.robots.ox.ac.uk\u002F~luca\u002Fsiamese-fc.html)]\n  [[github](https:\u002F\u002Fgithub.com\u002Fbertinetto\u002Fsiamese-fc)]\n\n* **GOTURN:** David Held, Sebastian Thrun, Silvio Savarese. \n  \"Learning to Track at 100 FPS with Deep Regression Networks.\" ECCV (2016). \n  [[paper](http:\u002F\u002Fdavheld.github.io\u002FGOTURN\u002FGOTURN.pdf)]\n  [[project](http:\u002F\u002Fdavheld.github.io\u002FGOTURN\u002FGOTURN.html)]\n  [[github](https:\u002F\u002Fgithub.com\u002Fdavheld\u002FGOTURN)]\n\n* **C-COT:** Martin Danelljan, Andreas Robinson, Fahad Khan, Michael Felsberg. \n  \"Beyond Correlation Filters: Learning Continuous Convolution Operators for Visual Tracking.\" ECCV (2016). \n  [[paper](http:\u002F\u002Fwww.cvl.isy.liu.se\u002Fresearch\u002Fobjrec\u002Fvisualtracking\u002Fconttrack\u002FC-COT_ECCV16.pdf)]\n  [[project](http:\u002F\u002Fwww.cvl.isy.liu.se\u002Fresearch\u002Fobjrec\u002Fvisualtracking\u002Fconttrack\u002Findex.html)]\n  [[github](https:\u002F\u002Fgithub.com\u002Fmartin-danelljan\u002FContinuous-ConvOp)]\n\n* **CF+AT:** Adel Bibi, Matthias Mueller, and Bernard Ghanem. \n  \"Target Response Adaptation for Correlation Filter Tracking.\" ECCV (2016). \n  [[paper](http:\u002F\u002Fwww.adelbibi.com\u002Fpapers\u002FECCV2016\u002FTarget_Adap.pdf)]\n  [[project](https:\u002F\u002Fivul.kaust.edu.sa\u002FPages\u002Fpub-target-response-adaptation.aspx)]\n  [[github](https:\u002F\u002Fgithub.com\u002Fadelbibi\u002FTarget-Response-Adaptation-for-Correlation-Filter-Tracking)]\n\n* Yao Sui, Ziming Zhang,  Guanghui Wang, Yafei Tang, Li Zhang. \n  \"Real-Time Visual Tracking: Promoting the Robustness of Correlation Filter Learning.\" ECCV (2016). \n  [[paper](http:\u002F\u002F120.52.73.78\u002Farxiv.org\u002Fpdf\u002F1608.08173.pdf)]\n\n* Yao Sui, Guanghui Wang, Yafei Tang, Li Zhang. \n  \"Tracking Completion.\" ECCV (2016). \n  [[paper](http:\u002F\u002F120.52.73.78\u002Farxiv.org\u002Fpdf\u002F1608.08171v1.pdf)]\n\n### CVPR2016\n\n* **MDNet:** Nam, Hyeonseob, and Bohyung Han. \n  \"Learning Multi-Domain Convolutional Neural Networks for Visual Tracking.\" CVPR (2016).\n  [[paper](http:\u002F\u002Farxiv.org\u002Fpdf\u002F1510.07945v2.pdf)]\n  [[VOT_presentation](http:\u002F\u002Fvotchallenge.net\u002Fvot2015\u002Fdownload\u002Fpresentation_Hyeonseob.pdf)]\n  [[project](http:\u002F\u002Fcvlab.postech.ac.kr\u002Fresearch\u002Fmdnet\u002F)]\n  [[github](https:\u002F\u002Fgithub.com\u002FHyeonseobNam\u002FMDNet)]\n\n* **SINT:** Ran Tao, Efstratios Gavves, Arnold W.M. Smeulders. \n  \"Siamese Instance Search for Tracking.\" CVPR (2016).\n  [[paper](https:\u002F\u002Fstaff.science.uva.nl\u002Fr.tao\u002Fpub\u002FTaoCVPR2016.pdf)]\n  [[project](https:\u002F\u002Fstaff.fnwi.uva.nl\u002Fr.tao\u002Fprojects\u002FSINT\u002FSINT_proj.html)]\n\n* **SCT:** Jongwon Choi, Hyung Jin Chang, Jiyeoup Jeong, Yiannis Demiris, and Jin Young Choi.\n  \"Visual Tracking Using Attention-Modulated Disintegration and Integration.\" CVPR (2016).\n  [[paper](http:\u002F\u002Fwww.cv-foundation.org\u002Fopenaccess\u002Fcontent_cvpr_2016\u002Fpapers\u002FChoi_Visual_Tracking_Using_CVPR_2016_paper.pdf)]\n  [[project](https:\u002F\u002Fsites.google.com\u002Fsite\u002Fjwchoivision\u002Fhome\u002Fsct)]\n\n* **STCT:** Lijun Wang, Wanli Ouyang, Xiaogang Wang, and Huchuan Lu.\n  \"STCT: Sequentially Training Convolutional Networks for Visual Tracking.\" CVPR (2016).\n  [[paper](http:\u002F\u002Fwww.ee.cuhk.edu.hk\u002F~wlouyang\u002FPapers\u002FWangLJ_CVPR16.pdf)]\n  [[github](https:\u002F\u002Fgithub.com\u002Fscott89\u002FSTCT)]\n\n* **SRDCFdecon:** Martin Danelljan, Gustav Häger, Fahad Khan, Michael Felsberg. \n  \"Adaptive Decontamination of the Training Set: A Unified Formulation for Discriminative Visual Tracking.\" CVPR (2016).\n  [[paper](https:\u002F\u002Fwww.cvl.isy.liu.se\u002Fresearch\u002Fobjrec\u002Fvisualtracking\u002Fdecontrack\u002FAdaptiveDecon_CVPR16.pdf)]\n  [[project](https:\u002F\u002Fwww.cvl.isy.liu.se\u002Fresearch\u002Fobjrec\u002Fvisualtracking\u002Fdecontrack\u002Findex.html)]\n\n* **HDT:** Yuankai Qi, Shengping Zhang, Lei Qin, Hongxun Yao, Qingming Huang, Jongwoo Lim, Ming-Hsuan Yang. \n  \"Hedged Deep Tracking.\" CVPR (2016). \n  [[paper](http:\u002F\u002Ffaculty.ucmerced.edu\u002Fmhyang\u002Fpapers\u002Fcvpr16_hedge_tracking.pdf)]\n  [[project](https:\u002F\u002Fsites.google.com\u002Fsite\u002Fyuankiqi\u002Fhdt\u002F)]\n\n* **Staple:** Luca Bertinetto, Jack Valmadre, Stuart Golodetz, Ondrej Miksik, Philip H.S. Torr. \n  \"Staple: Complementary Learners for Real-Time Tracking.\" CVPR (2016). \n  [[paper](http:\u002F\u002F120.52.73.75\u002Farxiv.org\u002Fpdf\u002F1512.01355v2.pdf)]\n  [[project](http:\u002F\u002Fwww.robots.ox.ac.uk\u002F~luca\u002Fstaple.html)]\n  [[github](https:\u002F\u002Fgithub.com\u002Fbertinetto\u002Fstaple)]\n\n* **EBT:** Gao Zhu, Fatih Porikli, and Hongdong Li.\n  \"Beyond Local Search: Tracking Objects Everywhere with Instance-Specific Proposals.\" CVPR (2016). \n  [[paper](http:\u002F\u002Fwww.cv-foundation.org\u002Fopenaccess\u002Fcontent_cvpr_2016\u002Fpapers\u002FZhu_Beyond_Local_Search_CVPR_2016_paper.pdf)]\n  [[exe](http:\u002F\u002Fwww.votchallenge.net\u002Fvot2016\u002Fdownload\u002F02_EBT.zip)]\n\n* **DLSSVM:** Jifeng Ning, Jimei Yang, Shaojie Jiang, Lei Zhang and Ming-Hsuan Yang. \n  \"Object Tracking via Dual Linear Structured SVM and Explicit Feature Map.\" CVPR (2016). \n  [[paper](http:\u002F\u002Fwww4.comp.polyu.edu.hk\u002F~cslzhang\u002Fpaper\u002Fcvpr16\u002FDLSSVM.pdf)]\n  [[code](http:\u002F\u002Fwww4.comp.polyu.edu.hk\u002F~cslzhang\u002Fcode\u002FDLSSVM_CVPR.zip)]\n  [[project](http:\u002F\u002Fwww4.comp.polyu.edu.hk\u002F~cslzhang\u002FDLSSVM\u002FDLSSVM.htm)]\n\n### NIPS2016\n* **Learnet:** Luca Bertinetto, João F. Henriques, Jack Valmadre, Philip H. S. Torr, Andrea Vedaldi. \n  \"Learning feed-forward one-shot learners.\" NIPS (2016). \n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1606.05233v1.pdf)]\n\n### ICCV2015\n\n* **FCNT:** Lijun Wang, Wanli Ouyang, Xiaogang Wang, and Huchuan Lu. \n  \"Visual Tracking with Fully Convolutional Networks.\" ICCV (2015). \n  [[paper](http:\u002F\u002F202.118.75.4\u002Flu\u002FPaper\u002FICCV2015\u002Ficcv15_lijun.pdf)]\n  [[project](http:\u002F\u002Fscott89.github.io\u002FFCNT\u002F)]\n  [[github](https:\u002F\u002Fgithub.com\u002Fscott89\u002FFCNT)]\n\n* **SRDCF:** Martin Danelljan, Gustav Häger, Fahad Khan, Michael Felsberg. \n  \"Learning Spatially Regularized Correlation Filters for Visual Tracking.\" ICCV (2015). \n  [[paper](https:\u002F\u002Fwww.cvl.isy.liu.se\u002Fresearch\u002Fobjrec\u002Fvisualtracking\u002Fregvistrack\u002FSRDCF_ICCV15.pdf)]\n  [[project](https:\u002F\u002Fwww.cvl.isy.liu.se\u002Fresearch\u002Fobjrec\u002Fvisualtracking\u002Fregvistrack\u002F)]\n\n* **CF2:** Chao Ma, Jia-Bin Huang, Xiaokang Yang and Ming-Hsuan Yang.\n  \"Hierarchical Convolutional Features for Visual Tracking.\" ICCV (2015)\n  [[paper](http:\u002F\u002Ffaculty.ucmerced.edu\u002Fmhyang\u002Fpapers\u002Ficcv15_tracking.pdf)]\n  [[project](https:\u002F\u002Fsites.google.com\u002Fsite\u002Fjbhuang0604\u002Fpublications\u002Fcf2)]\n  [[github](https:\u002F\u002Fgithub.com\u002Fjbhuang0604\u002FCF2)]\n\n* Naiyan Wang, Jianping Shi, Dit-Yan Yeung and Jiaya Jia.\n  \"Understanding and Diagnosing Visual Tracking Systems.\" ICCV (2015). \n  [[paper](http:\u002F\u002Fwinsty.net\u002Fpapers\u002Fdiagnose.pdf)]\n  [[project](http:\u002F\u002Fwinsty.net\u002Ftracker_diagnose.html)]\n  [[code](http:\u002F\u002Fwinsty.net\u002Fdiagnose\u002Fdiagnose_code.zip)]\\\n\n* **DeepSRDCF:** Martin Danelljan, Gustav Häger, Fahad Khan, Michael Felsberg. \n  \"Convolutional Features for Correlation Filter Based Visual Tracking.\" ICCV workshop (2015). \n  [[paper](https:\u002F\u002Fwww.cvl.isy.liu.se\u002Fresearch\u002Fobjrec\u002Fvisualtracking\u002Fregvistrack\u002FConvDCF_ICCV15_VOTworkshop.pdf)]\n  [[project](https:\u002F\u002Fwww.cvl.isy.liu.se\u002Fresearch\u002Fobjrec\u002Fvisualtracking\u002Fregvistrack\u002F)]\n\n* **RAJSSC:** Mengdan Zhang, Junliang Xing, Jin Gao, Xinchu Shi, Qiang Wang, Weiming Hu. \n  \"Joint Scale-Spatial Correlation Tracking with Adaptive Rotation Estimation.\" ICCV workshop (2015). \n  [[paper](http:\u002F\u002Fwww.cv-foundation.org\u002F\u002Fopenaccess\u002Fcontent_iccv_2015_workshops\u002Fw14\u002Fpapers\u002FZhang_Joint_Scale-Spatial_Correlation_ICCV_2015_paper.pdf)]\n  [[poster](http:\u002F\u002Fwww.votchallenge.net\u002Fvot2015\u002Fdownload\u002Fposter_Mengdan_Zhang.pdf)]\n\n### CVPR2015\n\n* **MUSTer:** Zhibin Hong, Zhe Chen, Chaohui Wang, Xue Mei, Danil Prokhorov, Dacheng Tao. \n  \"MUlti-Store Tracker (MUSTer): A Cognitive Psychology Inspired Approach to Object Tracking.\" CVPR (2015). \n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2015\u002Fpapers\u002FHong_MUlti-Store_Tracker_MUSTer_2015_CVPR_paper.pdf)]\n  [[project](https:\u002F\u002Fsites.google.com\u002Fsite\u002Fmultistoretrackermuster\u002F)]\n\n* **LCT:** Chao Ma, Xiaokang Yang, Chongyang Zhang, Ming-Hsuan Yang.\n  \"Long-term Correlation Tracking.\" CVPR (2015).\n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2015\u002Fpapers\u002FMa_Long-Term_Correlation_Tracking_2015_CVPR_paper.pdf)]\n  [[project](https:\u002F\u002Fsites.google.com\u002Fsite\u002Fchaoma99\u002Fcvpr15_tracking)]\n  [[github](https:\u002F\u002Fgithub.com\u002Fchaoma99\u002Flct-tracker)]\n\n* **DAT:** Horst Possegger, Thomas Mauthner, and Horst Bischof. \n  \"In Defense of Color-based Model-free Tracking.\" CVPR (2015). \n  [[paper](https:\u002F\u002Flrs.icg.tugraz.at\u002Fpubs\u002Fpossegger_cvpr15.pdf)]\n  [[project](https:\u002F\u002Fwww.tugraz.at\u002Finstitute\u002Ficg\u002Fresearch\u002Fteam-bischof\u002Flrs\u002Fdownloads\u002Fdat)]\n  [[code](https:\u002F\u002Flrs.icg.tugraz.at\u002Fdownloads\u002Fdat-v1.0.zip)]\n\n* **RPT:** Yang Li, Jianke Zhu and Steven C.H. Hoi. \n  \"Reliable Patch Trackers: Robust Visual Tracking by Exploiting Reliable Patches.\" CVPR (2015). \n  [[paper](https:\u002F\u002Fgithub.com\u002Fihpdep\u002Fihpdep.github.io\u002Fraw\u002Fmaster\u002Fpapers\u002Fcvpr15_rpt.pdf)]\n  [[github](https:\u002F\u002Fgithub.com\u002Fihpdep\u002Frpt)]\n\n### ICML2015\n\n* **CNN-SVM:** Seunghoon Hong, Tackgeun You, Suha Kwak and Bohyung Han.\n  \"Online Tracking by Learning Discriminative Saliency Map with Convolutional Neural Network .\" ICML (2015)\n  [[paper](http:\u002F\u002F120.52.73.80\u002Farxiv.org\u002Fpdf\u002F1502.06796.pdf)]\n  [[project](http:\u002F\u002Fcvlab.postech.ac.kr\u002Fresearch\u002FCNN_SVM\u002F)]\n\n### BMVC2014\n\n* **DSST:** Martin Danelljan, Gustav Häger, Fahad Shahbaz Khan and Michael Felsberg. \n  \"Accurate Scale Estimation for Robust Visual Tracking.\" BMVC (2014).\n  [[paper](http:\u002F\u002Fwww.cvl.isy.liu.se\u002Fresearch\u002Fobjrec\u002Fvisualtracking\u002Fscalvistrack\u002FScaleTracking_BMVC14.pdf)]\n  [[PAMI](http:\u002F\u002Fwww.cvl.isy.liu.se\u002Fen\u002Fresearch\u002Fobjrec\u002Fvisualtracking\u002Fscalvistrack\u002FDSST_TPAMI.pdf)]\n  [[project](http:\u002F\u002Fwww.cvl.isy.liu.se\u002Fen\u002Fresearch\u002Fobjrec\u002Fvisualtracking\u002Fscalvistrack\u002Findex.html)]\n\n### ECCV2014\n\n* **MEEM:** Jianming Zhang, Shugao Ma, and Stan Sclaroff.\n  \"MEEM: Robust Tracking via Multiple Experts using Entropy Minimization.\" ECCV (2014).\n  [[paper](http:\u002F\u002Fcs-people.bu.edu\u002Fjmzhang\u002FMEEM\u002FMEEM-eccv-preprint.pdf)]\n  [[project](http:\u002F\u002Fcs-people.bu.edu\u002Fjmzhang\u002FMEEM\u002FMEEM.html)]\n\n* **TGPR:** Jin Gao, Haibin Ling, Weiming Hu, Junliang Xing.\n  \"Transfer Learning Based Visual Tracking with Gaussian Process Regression.\" ECCV (2014).\n  [[paper](http:\u002F\u002Fwww.dabi.temple.edu\u002F~hbling\u002Fpublication\u002Ftgpr-eccv14.pdf)]\n  [[project](http:\u002F\u002Fwww.dabi.temple.edu\u002F~hbling\u002Fcode\u002FTGPR.htm)]\n\n* **STC:** Kaihua Zhang, Lei Zhang, Ming-Hsuan Yang, David Zhang.\n  \"Fast Tracking via Spatio-Temporal Context Learning.\" ECCV (2014).\n  [[paper](http:\u002F\u002Farxiv.org\u002Fpdf\u002F1311.1939v1.pdf)]\n  [[project](http:\u002F\u002Fwww4.comp.polyu.edu.hk\u002F~cslzhang\u002FSTC\u002FSTC.htm)]\n\n* **SAMF:** Yang Li, Jianke Zhu.\n  \"A Scale Adaptive Kernel Correlation Filter Tracker with Feature Integration.\" ECCV workshop (2014).\n  [[paper](http:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007%2F978-3-319-16181-5_18.pdf)]\n  [[github](https:\u002F\u002Fgithub.com\u002Fihpdep\u002Fsamf)]\n\n### NIPS2013\n\n* **DLT:** Naiyan Wang and Dit-Yan Yeung. \n  \"Learning A Deep Compact Image Representation for Visual Tracking.\" NIPS (2013). \n  [[paper](http:\u002F\u002Fwinsty.net\u002Fpapers\u002Fdlt.pdf)]\n  [[project](http:\u002F\u002Fwinsty.net\u002Fdlt.html)]\n  [[code](http:\u002F\u002Fwinsty.net\u002Fdlt\u002FDLTcode.zip)]\n \n ### PAMI & IJCV & TIP\n\n* **MCPF:** Tianzhu Zhang, Changsheng Xu, Ming-Hsuan Yang.\n    \" Learning Multi-task Correlation Particle Filters for Visual Tracking.\" TPAMI (2017).\n      [[paper]]\n      [[project](http:\u002F\u002Fnlpr-web.ia.ac.cn\u002Fmmc\u002Fhomepage\u002Ftzzhang\u002Flmcpf.html)]\n      [[code](http:\u002F\u002Fnlpr-web.ia.ac.cn\u002Fmmc\u002Fhomepage\u002Ftzzhang\u002FProject_Tianzhu\u002Fzhang_mcpf\u002FSource_Code\u002FSource_Code.zip)] \n\n* **RSST:** Tianzhu Zhang, Changsheng Xu, Ming-Hsuan Yang.\n  \" Robust Structural Sparse Tracking.\" TPAMI (2017).\n  [[paper]]\n  [[project](http:\u002F\u002Fnlpr-web.ia.ac.cn\u002Fmmc\u002Fhomepage\u002Ftzzhang\u002Frsst.html)]\n  [[code](http:\u002F\u002Fnlpr-web.ia.ac.cn\u002Fmmc\u002Fhomepage\u002Ftzzhang\u002FProject_Tianzhu\u002Fzhang_RSST\u002FRSSTDeep\u002FRSSTDeep_Code.zip)] \n\n* **fDSST:** Martin Danelljan, Gustav Häger, Fahad Khan, Michael Felsberg.\n  \"Discriminative Scale Space Tracking.\" TPAMI (2017).\n  [[paper](http:\u002F\u002Fwww.cvl.isy.liu.se\u002Fresearch\u002Fobjrec\u002Fvisualtracking\u002Fscalvistrack\u002FDSST_TPAMI.pdf)]\n  [[project](http:\u002F\u002Fwww.cvl.isy.liu.se\u002Fresearch\u002Fobjrec\u002Fvisualtracking\u002Fscalvistrack\u002Findex.html)]\n  [[code](http:\u002F\u002Fwww.cvl.isy.liu.se\u002Fresearch\u002Fobjrec\u002Fvisualtracking\u002Fscalvistrack\u002FfDSST_code.zip)] \n\n* **KCF:** João F. Henriques, Rui Caseiro, Pedro Martins, Jorge Batista. \n  \"High-Speed Tracking with Kernelized Correlation Filters.\" TPAMI (2015).\n  [[paper](http:\u002F\u002Fwww.robots.ox.ac.uk\u002F~joao\u002Fpublications\u002Fhenriques_tpami2015.pdf)]\n  [[project](http:\u002F\u002Fwww.robots.ox.ac.uk\u002F~joao\u002Fcirculant\u002F)]\n\n* **CLRST:** Tianzhu Zhang, Si Liu, Narendra Ahuja, Ming-Hsuan Yang, Bernard Ghanem.  \n  \"Robust Visual Tracking Via Consistent Low-Rank Sparse Learning.\" IJCV (2015). \n  [[paper](http:\u002F\u002Fnlpr-web.ia.ac.cn\u002Fmmc\u002Fhomepage\u002Ftzzhang\u002Ftianzhu%20zhang_files\u002FJournal%20Articles\u002FIJCV15_zhang_Low-Rank%20Sparse%20Learning.pdf)]\n  [[project](http:\u002F\u002Fnlpr-web.ia.ac.cn\u002Fmmc\u002Fhomepage\u002Ftzzhang\u002FProject_Tianzhu\u002Fzhang_IJCV14\u002FRobust%20Visual%20Tracking%20Via%20Consistent%20Low-Rank%20Sparse.html)]\n  [[code](http:\u002F\u002Fnlpr-web.ia.ac.cn\u002Fmmc\u002Fhomepage\u002Ftzzhang\u002FProject_Tianzhu\u002Fzhang_IJCV14\u002Fmaterial\u002FLRT_Code.zip)]\n\n* **DNT:** Zhizhen Chi, Hongyang Li, Huchuan Lu, Ming-Hsuan Yang. \n  \"Dual Deep Network for Visual Tracking.\" TIP (2017). \n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1612.06053v1.pdf)]\n\n* **DRT:** Junyu Gao, Tianzhu Zhang, Xiaoshan Yang, Changsheng Xu. \n  \"Deep Relative Tracking.\" TIP (2017). \n  [[paper](http:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F7828108\u002F)]\n\n* **BIT:** Bolun Cai, Xiangmin Xu, Xiaofen Xing, Kui Jia, Jie Miao, Dacheng Tao.\n  \"BIT: Biologically Inspired Tracker.\" TIP (2016). \n  [[paper](http:\u002F\u002Fcaibolun.github.io\u002Fpapers\u002FBIT_TIP.pdf)]\n  [[project](http:\u002F\u002Fcaibolun.github.io\u002FBIT\u002Findex.html)]\n  [[github](https:\u002F\u002Fgithub.com\u002Fcaibolun\u002FBIT)]\n\n* **CNT:** Kaihua Zhang, Qingshan Liu, Yi Wu, Minghsuan Yang. \n  \"Robust Visual Tracking via Convolutional Networks Without Training.\" TIP (2016). \n  [[paper](http:\u002F\u002Fkaihuazhang.net\u002FCNT.pdf)]\n  [[code](http:\u002F\u002Fkaihuazhang.net\u002FCNT_matlab.rar)]\n  \n\n\n## Benchmark\n\n* **LaSOT:** Heng Fan, Liting Lin, Fan Yang, Peng Chu, Ge Deng, Sijia Yu, Hexin Bai, Yong Xu, Chunyuan Liao, Haibin Ling.\n  \"LaSOT: A High-quality Benchmark for Large-scale Single Object Tracking..\" CVPR (2019). \n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1809.07845.pdf)]\n  [[project](https:\u002F\u002Fcis.temple.edu\u002Flasot\u002F)]\n\n* **OxUvA long-term dataset+benchmark:** Jack Valmadre, Luca Bertinetto, João F. Henriques, Ran Tao, Andrea Vedaldi, Arnold Smeulders, Philip Torr, Efstratios Gavves.\u003Cbr \u002F>\n  \"Long-term Tracking in the Wild: a Benchmark.\" ECCV (2018).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1803.09502.pdf)]\n  [[project](https:\u002F\u002Foxuva.github.io\u002Flong-term-tracking-benchmark\u002F)]\n\n* **TrackingNet:** Matthias Müller, Adel Bibi, Silvio Giancola, Salman Al-Subaihi, Bernard Ghanem.\u003Cbr \u002F>\n  \"TrackingNet: A Large-Scale Dataset and Benchmark for Object Tracking in the Wild.\" ECCV (2018).\n  [[project](https:\u002F\u002Fsilviogiancola.github.io\u002Fpublication\u002F2018-03-trackingnet\u002Fdetails\u002F)]\n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1803.10794.pdf)] \n\n* **UAVDT:** Dawei Du, Yuankai Qi, Hongyang Yu, Yifang Yang, Kaiwen Duan, GuoRong Li, Weigang Zhang,  Weihai; Qingming Huang, Qi Tian.\u003Cbr \u002F>\n  \"The Unmanned Aerial Vehicle Benchmark: Object Detection and Tracking.\" ECCV (2018).\n  [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1804.00518.pdf)]\n\n* **Dataset-AMP:** Luka Čehovin Zajc; Alan Lukežič; Aleš Leonardis; Matej Kristan.\n  \"Beyond Standard Benchmarks: Parameterizing Performance Evaluation in Visual Object Tracking.\" ICCV (2017).\n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fpapers\u002FZajc_Beyond_Standard_Benchmarks_ICCV_2017_paper.pdf)]\n\n* **Dataset-Nfs:** Hamed Kiani Galoogahi, Ashton Fagg, Chen Huang, Deva Ramanan and Simon Lucey.\n  \"Need for Speed: A Benchmark for Higher Frame Rate Object Tracking.\" ICCV (2017)\n  [[paper](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fpapers\u002FGaloogahi_Need_for_Speed_ICCV_2017_paper.pdf)]\n  [[supp](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fsupplemental\u002FGaloogahi_Need_for_Speed_ICCV_2017_supplemental.pdf)]\n  [[project](http:\u002F\u002Fci2cv.net\u002Fnfs\u002Findex.html)]\n\n* **Dataset-DTB70:** Siyi Li, Dit-Yan Yeung.\n  \"Visual Object Tracking for Unmanned Aerial Vehicles: A Benchmark and New Motion Models.\" AAAI (2017)\n  [[paper](http:\u002F\u002Faaai.org\u002Focs\u002Findex.php\u002FAAAI\u002FAAAI17\u002Fpaper\u002Fview\u002F14338\u002F14292)]\n  [[project](https:\u002F\u002Fgithub.com\u002Fflyers\u002Fdrone-tracking)]\n  [[dataset](https:\u002F\u002Fwww.dropbox.com\u002Fs\u002Fs1fj99s2six4lrs\u002FDTB70.tar.gz?dl=0)]\n\n* **Dataset-UAV123:** Matthias Mueller, Neil Smith and Bernard Ghanem.\n  \"A Benchmark and Simulator for UAV Tracking.\" ECCV (2016)\n  [[paper](https:\u002F\u002Fivul.kaust.edu.sa\u002FDocuments\u002FPublications\u002F2016\u002FA%20Benchmark%20and%20Simulator%20for%20UAV%20Tracking.pdf)]\n  [[project](https:\u002F\u002Fivul.kaust.edu.sa\u002FPages\u002Fpub-benchmark-simulator-uav.aspx)]\n  [[dataset](https:\u002F\u002Fivul.kaust.edu.sa\u002FPages\u002FDataset-UAV123.aspx)]\n\n* **Dataset-TColor-128:** Pengpeng Liang, Erik Blasch, Haibin Ling.\n  \"Encoding color information for visual tracking: Algorithms and benchmark.\" TIP (2015)\n  [[paper](http:\u002F\u002Fwww.dabi.temple.edu\u002F~hbling\u002Fpublication\u002FTColor-128.pdf)]\n  [[project](http:\u002F\u002Fwww.dabi.temple.edu\u002F~hbling\u002Fdata\u002FTColor-128\u002FTColor-128.html)]\n  [[dataset](http:\u002F\u002Fwww.dabi.temple.edu\u002F~hbling\u002Fdata\u002FTColor-128\u002FTemple-color-128.zip)]\n\n* **Dataset-NUS-PRO:** Annan Li, Min Lin, Yi Wu, Ming-Hsuan Yang, and Shuicheng Yan.\n  \"NUS-PRO: A New Visual Tracking Challenge.\" PAMI (2015)\n  [[paper](http:\u002F\u002Ffaculty.ucmerced.edu\u002Fmhyang\u002Fpapers\u002Fpami15_nus_pro.pdf)]\n  [[project](https:\u002F\u002Fsites.google.com\u002Fsite\u002Fli00annan\u002Fnus-pro)]\n  [[Data_360](https:\u002F\u002Fd9fca6.lc.yunpan.cn\u002Flk\u002FcqKIc6DU3t2eJ)(code:bf28)]\n  [[Data_baidu]](https:\u002F\u002Fpan.baidu.com\u002Fs\u002F1pJHvbSn#list\u002Fpath=%2F)]\n  [[View_360](https:\u002F\u002F6aa275.lc.yunpan.cn\u002Flk\u002FcqK479PfzDrPX)(code:515a)]\n  [[View_baidu]](https:\u002F\u002Fpan.baidu.com\u002Fs\u002F1hqKXcuK)]\n\n* **Dataset-PTB:** Shuran Song and Jianxiong Xiao.\n  \"Tracking Revisited using RGBD Camera: Unified Benchmark and Baselines.\" ICCV (2013)\n  [[paper](http:\u002F\u002Fvision.princeton.edu\u002Fprojects\u002F2013\u002Ftracking\u002Fpaper.pdf)]\n  [[project](http:\u002F\u002Ftracking.cs.princeton.edu\u002F)]\n  [[5 validation](http:\u002F\u002Ftracking.cs.princeton.edu\u002FValidationSet.zip)]\n  [[95 evaluation](http:\u002F\u002Ftracking.cs.princeton.edu\u002FEvaluationSet.tgz)]\n\n* **Dataset-ALOV300+:** Arnold W. M. Smeulders, Dung M. Chu, Rita Cucchiara, Simone Calderara, Afshin Dehghan, Mubarak Shah.\n  \"Visual Tracking: An Experimental Survey.\" PAMI (2014)\n  [[paper](http:\u002F\u002Fcrcv.ucf.edu\u002Fpapers\u002FTracking_Survey.pdf)]\n  [[project](http:\u002F\u002Fimagelab.ing.unimore.it\u002Fdsm\u002F)]\n  [Mirror Link:ALOV300++ Dataset](http:\u002F\u002Fcrcv.ucf.edu\u002Fpeople\u002Fphd_students\u002Fafshin\u002FALOV300\u002FFrames.zip)\n  [Mirror Link:ALOV300++ Groundtruth](http:\u002F\u002Fcrcv.ucf.edu\u002Fpeople\u002Fphd_students\u002Fafshin\u002FALOV300\u002FGT.zip)\n\n* **OTB2013:** Wu, Yi, Jongwoo Lim, and Minghsuan Yang. \n  \"Online Object Tracking: A Benchmark.\" CVPR (2013).\n  [[paper](http:\u002F\u002Ffaculty.ucmerced.edu\u002Fmhyang\u002Fpapers\u002Fcvpr13_benchmark.pdf)]\n\n* **OTB2015:** Wu, Yi, Jongwoo Lim, and Minghsuan Yang. \n  \"Object Tracking Benchmark.\" TPAMI (2015).\n  [[paper](http:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=7001050&tag=1)]\n  [[project](http:\u002F\u002Fcvlab.hanyang.ac.kr\u002Ftracker_benchmark\u002Findex.html)]\n\n* **Dataset-VOT:**\n  **[[project](http:\u002F\u002Fwww.votchallenge.net\u002F)]**\n\n**[[VOT13_paper_ICCV](http:\u002F\u002Fwww.votchallenge.net\u002Fvot2013\u002FDownload\u002Fvot_2013_paper.pdf)]The Visual Object Tracking VOT2013 challenge results**\n\n**[[VOT14_paper_ECCV](http:\u002F\u002Fwww.votchallenge.net\u002Fvot2014\u002Fdownload\u002Fvot_2014_paper.pdf)]The Visual Object Tracking VOT2014 challenge results**\n\n**[[VOT15_paper_ICCV](http:\u002F\u002Fwww.votchallenge.net\u002Fvot2015\u002Fdownload\u002Fvot_2015_paper.pdf)]The Visual Object Tracking VOT2015 challenge results**\n\n**[[VOT16_paper_ECCV](http:\u002F\u002Fwww.votchallenge.net\u002Fvot2016\u002Fdownload\u002Fvot_2016_paper.pdf)]The Visual Object Tracking VOT2016 challenge results**\n\n**[[VOT17_paper_ICCV](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017_workshops\u002Fpapers\u002Fw28\u002FKristan_The_Visual_Object_ICCV_2017_paper.pdf)]The Visual Object Tracking VOT2017 challenge results**\n\n\n## Distinguished Researchers & Teams\nDistinguished visual tracking researchers who have published +3 papers which have a major impact on the field of visual tracking and are still active in the field of visual tracking.(Names listed in no particular order.)\n\n* [Ming-Hsuan Yang](http:\u002F\u002Ffaculty.ucmerced.edu\u002Fmhyang\u002F)\n* [Haibin Ling](http:\u002F\u002Fwww.dabi.temple.edu\u002F~hbling\u002F)\n* [Huchuan Lu](http:\u002F\u002Fice.dlut.edu.cn\u002Flu\u002F)\n* [Hongdong Li](http:\u002F\u002Fusers.cecs.anu.edu.au\u002F~hongdong\u002F)\n* [Lei Zhang](http:\u002F\u002Fwww4.comp.polyu.edu.hk\u002F~cslzhang\u002F)\n* [Matej Kristan](http:\u002F\u002Fwww.vicos.si\u002FPeople\u002FMatejk)\n* [João F. Henriques](http:\u002F\u002Fwww.robots.ox.ac.uk\u002F~joao\u002F)\n* [Martin Danelljan](http:\u002F\u002Fusers.isy.liu.se\u002Fcvl\u002Fmarda26\u002F)\n* [Kaihua Zhang](http:\u002F\u002Fkaihuazhang.net\u002F)\n* [Hamed Kiani](http:\u002F\u002Fwww.hamedkiani.com\u002F)\n* [Luca Bertinetto](http:\u002F\u002Fwww.robots.ox.ac.uk\u002F~luca\u002Findex.html)\n* [Tianzhu Zhang](http:\u002F\u002Fnlpr-web.ia.ac.cn\u002Fmmc\u002Fhomepage\u002Ftzzhang\u002Findex.html)\n* [Chao Ma](https:\u002F\u002Fwww.chaoma.info\u002F)\n* [Yibing Song](https:\u002F\u002Fybsong00.github.io\u002F)\n* [Dong Wang](http:\u002F\u002Fwww.escience.cn\u002Fpeople\u002Fwangdongdut\u002Findex.html)\n* [**Torr Vision Group**](http:\u002F\u002Fwww.robots.ox.ac.uk\u002F~tvg\u002Fpeople.php)\n* [**Computer Vision Laboratory, POSTECH**](http:\u002F\u002Fcvlab.postech.ac.kr\u002Flab\u002Findex.php)\n","# 视觉跟踪开发\n\n### 寻求合作者\n\n我正在寻找合作者，共同开展视觉跟踪研究并推动其发展。\n\n如果您对该项目感兴趣，请随时与我联系（davidzhang@zjnu.edu.cn）。\n\n## 论文\n\n### :star2: 推荐文献 :star2:\n\n- **VOTSurvey:** Sajid Javed, Martin Danelljan, Fahad Shahbaz Khan, Muhammad Haris Khan, Michael Felsberg, Jiri Matas.\u003Cbr \u002F>\n  “基于判别滤波器和暹罗网络的视觉目标跟踪：综述与展望。” TAPMI (2023)。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.02838)] \n\n- **VOTBook:** Xin Zhao, Shiyu Hu, Xu-Cheng Yin.\u003Cbr \u002F>\n  “视觉目标跟踪：一种评估视角。” Springer (2025)。\n  [[论文](https:\u002F\u002Flink.springer.com\u002Fbook\u002F10.1007\u002F978-981-96-4558-9)]\n\n- **VOTSurvey:** Sajid Javed, Martin Danelljan, Fahad Shahbaz Khan, Muhammad Haris Khan, Michael Felsberg, Jiri Matas.\u003Cbr \u002F>\n  “基于判别滤波器和暹罗网络的视觉目标跟踪：综述与展望。” TAPMI (2023)。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.02838)]\n  \n- **DL4VT:** Seyed Mojtaba Marvasti-Zadeh, Li Cheng, Hossein Ghanei-Yakhdan, Shohreh Kasaei.\u003Cbr \u002F>\n  “用于视觉跟踪的深度学习：综合综述。” ArXiv (2021)。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1912.00535.pdf)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FMMarvasti\u002FDeep-Learning-for-Visual-Tracking-Survey)]\n\n- **SAMURAI:** Cheng-Yen Yang, Hsiang-Wei Huang, Zhongyu Jiang, Wenhao Chai, Jenq-Neng Hwang.\u003Cbr \u002F>\n  “SAMURAI：基于SAM 2的无训练视觉目标跟踪中的运动感知记忆。” TIP (2026)。\n  [[arxiv](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.11922)]\n  [[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F11351313)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fyangchris11\u002Fsamurai)]\n  \n- **SAM:** Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Dollár, Ross Girshick.\u003Cbr \u002F>\n  “Segment Anything.” ArXiv (2023)。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2304.02643v1.pdf)] \n  [[官网](https:\u002F\u002Fsegment-anything.com\u002F)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fsegment-anything)]\n  \n- **TAM:** Jinyu Yang, Mingqi Gao, Zhe Li, Shang Gao, Fangjing Wang, Feng Zheng.\u003Cbr \u002F>\n  “Track Anything：Segment Anything与视频的结合。” ArXiv (2023)。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.11968)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fgaomingqi\u002FTrack-Anything)]\n  \n- **SAM-Track:** Yangming Cheng, Liulei Li, Yuanyou Xu, Xiaodi Li, Zongxin Yang, Wenguan Wang, Yi Yang.\u003Cbr \u002F>\n  “Segment-and-Track Anything。” ArXiv (2023)。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.06558)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fz-x-yang\u002FSegment-and-Track-Anything)]\n  \n- **SEEM:** Xueyan Zou, Jianwei Yang, Hao Zhang, Feng Li, Linjie Li, Jianfeng Gao, Yong Jae Lee.\u003Cbr \u002F>\n  “随时随地分割一切。” ArXiv (2023)。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2304.06718v1.pdf)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FUX-Decoder\u002FSegment-Everything-Everywhere-All-At-Once)]\n\n- **SAM-PT:** Frano Rajič, Lei Ke, Yu-Wing Tai, Chi-Keung Tang, Martin Danelljan, Fisher Yu.\u003Cbr \u002F>\n  “Segment Anything与点跟踪的结合。” ArXiv (2023)。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.01197)] \n  [[代码](https:\u002Fgithub.com\u002Fsyscv\u002Fsam-pt)]\n  \n- **ReviewLLM:** Jiaqi Wang, Zhengliang Liu, Lin Zhao, Zihao Wu, Chong Ma, Sigang Yu, Haixing Dai.\u003Cbr \u002F>\n  “大型视觉模型与视觉提示工程综述。” ArXiv (2023)。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.00855)]\n  [[代码](https:\u002F\u002Fgithub.com\u002Fxxx)]\n  \n- **ChatVideo:** Junke Wang, Dongdong Chen, Chong Luo, Xiyang Dai, Lu Yuan, Zuxuan Wu, Yu-Gang Jiang.\u003Cbr \u002F>\n  “ChatVideo：以轨迹为中心的多模态、多功能视频理解系统。” ArXiv (2023)。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.14407)] \n  [[代码](https:\u002F\u002Fwww.wangjunke.info\u002FChatVideo\u002F)]\n  \n- **Video-ChatGPT:** Muhammad Maaz, Hanoona Rasheed, Salman Khan, Fahad Shahbaz Khan.\u003Cbr \u002F>\n  “Video-ChatGPT：通过大型视觉和语言模型实现精细视频理解。” ArXiv (2023)。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.05424)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fmbzuai-oryx\u002FVideo-ChatGPT)]\n  \n- **SegGPT:** Xinlong Wang, Xiaosong Zhang, Yue Cao, Wen Wang, Chunhua Shen, Tiejun Huang.\u003Cbr \u002F>\n  “SegGPT：上下文中的万物分割。” ArXiv (2023)。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.03284)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fbaaivision\u002FPainter)]\n\n### AAAI 2026\n\n- **SATA:** 张天路、张强、丁贵光、韩俊功。\u003Cbr \u002F>\n  “在任意模态下追踪与分割任何目标。”AAAI（2026）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2511.19475)]\n  [[代码]( )]\n\n- **LUART:** 肖云、王宇航、金建东、张旺康、李成龙。\u003Cbr \u002F>\n  “非对齐无人机RGBT跟踪：大规模基准及一种新方法。”AAAI（2026）。\n  [[论文]( )] \n  [[代码](https:\u002F\u002Fgithub.com\u002FNOP1224\u002FUnaligned_RGBT_Tracking)]\n\n- **CADTrack:** 李浩、王宇豪、胡先涛、郝文宁、张平平、王栋、陆虎川。\u003Cbr \u002F>\n  “CADTrack：基于可变形对齐的上下文聚合学习，用于鲁棒的RGBT跟踪。”AAAI（2026）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2511.17967)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FIdolLab\u002FCADTrack)]\n\n- **AlignTrack:** 孙传宇、张继庆、王洋、王元辰、蒋宇彤、尹宝才、杨欣。\u003Cbr \u002F>\n  “AlignTrack：面向RGB-事件视觉跟踪的自顶向下时空分辨率对齐方法。”AAAI（2026）。\n  [[论文]( )] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fscy0712\u002FAlignTrack)]\n\n- **MoDTrack:** 杨洪涛、钟斌能、梁启华、胡先涛、谭宇飞、夏海英、宋书翔。\u003Cbr \u002F>\n  “基于运动与几何感知线索的运动感知目标跟踪。”AAAI（2026）。\n  [[论文]( )] \n  [[代码]( )]\n\n- **MUTrack:** 吴伟静、梁启华、钟斌能、唐晓虎、谭宇飞、李宁、薛元亮。\u003Cbr \u002F>\n  “MUTrack：一种面向视觉跟踪的记忆感知统一表征框架。”AAAI（2026）。\n  [[论文]( )] \n  [[代码]( )]\n\n- **ADTrack:** 张广通、钟斌能、杨世睿、王洋、白田。\u003Cbr \u002F>\n  “面向语言稀疏条件下的鲁棒视觉-语言跟踪的感知蒸馏。”AAAI（2026）。\n  [[论文]( )] \n  [[代码]( )]\n\n- **MFDP:** 王士磊、赖普健、高东、宁继峰、程功。\u003Cbr \u002F>\n  “探索多模态目标跟踪中的模态感知融合与解耦时序传播。”AAAI（2026）。\n  [[论文]( )] \n  [[代码]( )]\n\n- **STDTrack:** 史俊泽、于洋、史健、罗海波。\u003Cbr \u002F>\n  “探索可靠时空依赖关系以实现高效视觉跟踪。”AAAI（2026）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2601.09078)] \n  [[代码]( )]  \n\n- **AMTrack:** 应戈、张大伟、杨承转、刘伟、全相勋、王华、黄昌钦、郑中龙。\u003Cbr \u002F>\n  “利用All Mamba融合实现高效的RGB-D跟踪。”AAAI（2026）。\n  [[论文]( )] \n  [[代码]( )]\n\n- **GOLA:** 邵泽凯、胡宇凡、刘京源、范斌、刘宏民。\u003Cbr \u002F>\n  “面向RGB-T跟踪的群正交低秩适应。”AAAI（2026）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2512.05359)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FMelanTech\u002FGOLA)]\n\n- **SFPT:** 王嘉豪、刘芳、王浩、李硕、王希翼、陈普华。\u003Cbr \u002F>\n  “面向对抗感知的RGB-T跟踪的语义特征净化。”AAAI（2026）。\n  [[论文]( )] \n  [[代码]( )]\n\n- **HTTrack:** 王嘉豪、刘芳、焦立成、王浩、李硕、王希翼、李玲玲、陈普华、刘旭。\u003Cbr \u002F>\n  “HTTrack：通过历史轨迹学习目标感知的卫星视频跟踪。”AAAI（2026）。\n  [[论文]( )] \n  [[代码]( )]\n\n- **AerialMind:** 陈成志钊、梁绍峰、关润威、孙小楼、赵浩成、江海云、黄涛、丁恒辉、韩青龙。\u003Cbr \u002F>\n  “AerialMind：面向无人机场景的引用式多目标跟踪。”AAAI（2026）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2511.21053)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fshawnliang420\u002FAerialMind)]\n\n- **SAM2-OV:** 陈阳凯、吴强强、李光耀、高俊龙、牛广林、王汉子。\u003Cbr \u002F>\n  “SAM2-OV：一种面向开放词汇多目标跟踪的全新仅检测微调范式。”AAAI（2026）。\n  [[论文]( )] \n  [[代码]( )]\n\n- **SAM2MOT:** 姜俊杰、王泽林、赵曼琪、李茵、姜东升。\u003Cbr \u002F>\n  “SAM2MOT：一种基于分割的新型多目标跟踪范式。”AAAI（2026）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.04519)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FTripleJoy\u002FSAM2MOT)]\n\n  \n### ICLR 2026\n\n- **FARTrack:** 王桂杰、林通、白一凡、曹安佳、梁诗怡、赵王博、魏兴。\u003Cbr \u002F>\n  “FARTrack：高性能快速自回归视觉跟踪。”ICLR（2026）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2602.03214)]\n  [[代码](https:\u002F\u002Fgithub.com\u002Fwangguijiepedeval\u002FFARTrack)]\n\n- **GOT-Edit:** 陈诗芳、陈俊诚、卓伊弘、林延宇。\u003Cbr \u002F>\n  “GOT-Edit：通过在线模型编辑实现的几何感知通用目标跟踪。”ICLR（2026）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2602.08550)]\n  [[代码](https:\u002F\u002Fgithub.com\u002Fchenshihfang\u002FGOT)]\n\n### NeurIPS 2025\n\n- **RGBDT500:** 朱学锋、徐天阳、潘逸凡、顾金杰、李熙、卢继文、吴小军、约瑟夫·基特勒。\u003Cbr \u002F>\n  “协作视觉、深度与热信号进行多模态跟踪。”NeurIPS（2025）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2509.24741)] \n  [[代码](https:\u002F\u002Fxuefeng-zhu5.github.io\u002FRGBDT500\u002F)]\n\n- **MMOT:** 李天昊、许廷发、王莹、秦浩林、林旭、李佳楠。\u003Cbr \u002F>\n  “MMOT：首个面向无人机多光谱多目标跟踪的挑战性基准。”NeurIPS（2025）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2510.12565)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FAnnzstbl\u002FMMOT)]\n  \n- **SpikeFET:** 杨景俊、范良伟、张金浦、连向凯、沈辉、胡德文。\u003Cbr \u002F>\n  “用于统一帧-事件目标跟踪的全脉冲神经网络。”NeurIPS（2025）。\n  [[论文](https:\u002F\u002Fopenreview.net\u002Fforum?id=FooiwsnEH9)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FNoctis-A\u002FSpikeFET)]\n\n- **LoRATv2:** 林丽婷、范恒、张志鹏、黄玉清、王耀威、徐勇、凌海彬。\u003Cbr \u002F>\n  “LoRATv2：在单流跟踪器中实现低成本时序建模。”NeurIPS（2025）。\n  [[论文](https:\u002F\u002Fopenreview.net\u002Fforum?id=q06YjUj0FB)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FLitingLin\u002FLoRATv2)]\n\n- **DSATrack:** 周鑫宇、潘同欣、洪灵毅、郭品雪、郭海晶、陈兆宇、蒋凯迅、张文强。\u003Cbr \u002F>\n  “面向无人机跟踪的动态语义感知相关性建模。”NeurIPS（2025）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2510.21351)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fzxyyxzz\u002FDSATrack)]\n\n### ICCV 2025\n\n- **UMDATrack:** 姚思远、朱睿、王梓琪、任文奇、闫燕阳、曹晓春。\u003Cbr \u002F>\n  “UMDATrack：恶劣天气条件下统一的多域自适应跟踪。” ICCV（2025）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.00648)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FZ-Z188\u002FUMDATrack)]\n\n- **XTrack:** 谭岳东、吴宗伟、傅宇谦、周竹韵、孙国磊、爱德华·赞菲、马超、达达·帕尼·保德尔、吕克·范古尔、拉杜·蒂莫夫特。\u003Cbr \u002F>\n  “XTrack：多模态训练提升RGB-X视频目标跟踪器性能。” ICCV（2025）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.17773)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fsupertyd\u002FXTrack)]\n\n- **FlexTrack:** 谭岳东、邵嘉伟、爱德华·赞菲、李阮俊、安兆冲、马超、达达·保德尔、吕克·范古尔、拉杜·蒂莫夫特、吴宗伟。\u003Cbr \u002F>\n  “你拥有的就是你要跟踪的：自适应且鲁棒的多模态跟踪。” ICCV（2025）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.05899)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fsupertyd\u002FFlexTrack)]\n\n- **TUEs:** 吴强强、于毅、孔晨琦、刘子泉、万佳、李浩亮、Alex C. Kot、安东尼·B·陈。\u003Cbr \u002F>\n  “时间不可学习样本：防止个人视频数据被目标跟踪未经授权地利用。” ICCV（2025）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.07483)] \n  [[代码]( )]\n\n- **ATCTrack:** 冯晓坤、胡世宇、李旭辰、张黛玲、吴美琪、张静、陈小堂、黄凯奇。\u003Cbr \u002F>\n  “ATCTrack：通过动态目标状态对齐目标—上下文线索，实现鲁棒的视觉—语言跟踪。” ICCV（2025）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.19875)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FXiaokunFeng\u002FATCTrack)]\n\n- **CAT:** 袁永生、赵杰、王东、陆虎川。\u003Cbr \u002F>\n  “CAT：用于真实场景跟踪的统一点击式跟踪框架。” ICCV（2025）。\n  [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2025\u002Fhtml\u002FYuan_CAT_A_Unified_Click-and-Track_Framework_for_Realistic_Tracking_ICCV_2025_paper.html)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fysyuann\u002FCAT)]\n\n- **CompressTracker:** 洪凌翼、李景伦、周鑫宇、严士林、郭品雪、蒋凯勋、陈兆宇、高树勇、张伟、陆宏、张文强。\u003Cbr \u002F>\n  “高效Transformer目标跟踪的通用压缩框架。” ICCV（2025）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.17564)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FLingyiHongfd\u002FCompressTracker)]\n\n- **SMSTracker:** 陈思贤、李泽东、李文豪、吕世健、沈春华、张晓琴。\u003Cbr \u002F>\n  “SMSTracker：三路径分数掩码Sigma融合用于多模态跟踪。” ICCV（2025）。\n  [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2025\u002Fhtml\u002FChan_SMSTracker_Tri-path_Score_Mask_Sigma_Fusion_for_Multi-Modal_Tracking_ICCV_2025_paper.html)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FLeezed525\u002FSMSTracker)]\n  \n\n  \n### CVPR 2025\n\n- **ARPTrack:** 梁世义、白一凡、龚一弘、魏兴。\u003Cbr \u002F>\n  “用于视觉跟踪的自回归序列预训练。” CVPR（2025）。\n  [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fhtml\u002FLiang_Autoregressive_Sequential_Pretraining_for_Visual_Tracking_CVPR_2025_paper.html)] \n  [[代码](https:\u002F\u002Farptrack.github.io\u002F)]\n\n- **DreamTrack:** 郭明哲、谭伟平、冉文宇、景丽萍、张志鹏。\u003Cbr \u002F>\n  “DreamTrack：为多模态视觉目标跟踪梦想未来。” CVPR（2025）。\n  [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fhtml\u002FGuo_DreamTrack_Dreaming_the_Future_for_Multimodal_Visual_Object_Tracking_CVPR_2025_paper.html)] \n  [[代码]( )]\n\n- **MamTrack:** 孙传宇、张继青、王洋、葛慧琳、夏千晨、尹宝才、杨欣。\u003Cbr \u002F>\n  “利用Mamba探索历史信息进行RGBE视觉跟踪。” CVPR（2025）。\n  [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fhtml\u002FSun_Exploring_Historical_Information_for_RGBE_Visual_Tracking_with_Mamba_CVPR_2025_paper.html)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fscy0712\u002FMamTrack)]\n  \n- **PURA:** 邵泽凯、胡宇凡、樊斌、刘洪民。\u003Cbr \u002F>\n  “PURA：用于RGB-T跟踪的参数更新—恢复测试时自适应。” CVPR（2025）。\n  [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fhtml\u002FShao_PURA_Parameter_Update-Recovery_Test-Time_Adaption_for_RGB-T_Tracking_CVPR_2025_paper.html)] \n  [[代码](https:\u002F\u002Fmelantech.github.io\u002FPURA)]\n\n- **ACAttack:** 向新宇、严庆龙、张浩、马佳怡。\u003Cbr \u002F>\n  “ACAttack：通过多模态响应解耦实现自适应交叉攻击的RGB-T跟踪器。” CVPR（2025）。\n  [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fhtml\u002FXiang_ACAttack_Adaptive_Cross_Attacking_RGB-T_Tracker_via_Multi-Modal_Response_Decoupling_CVPR_2025_paper.html)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FXinyu-Xiang\u002FACAttack)]\n  \n- **MITracker:** 徐孟洁、朱逸涛、江浩天、李嘉铭、申振荣、王晟、黄浩霖、王欣宇、杨清、张涵、王倩。\u003Cbr \u002F>\n  “MITracker：用于视觉目标跟踪的多视角融合。” CVPR（2025）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.20111)] \n  [[代码](https:\u002F\u002Fmii-laboratory.github.io\u002FMITracker\u002F)]\n\n- **SPMTrack:** 蔡文瑞、刘庆杰、王云鸿。\u003Cbr \u002F>\n  “SPMTrack：具有专家混合的时空参数高效微调，用于可扩展的视觉跟踪。” CVPR（2025）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.18338)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FWenRuiCai\u002FSPMTrack)]\n  \n- **ORTrack：** 武友、王旭成、杨向阳、刘梦圆、曾丹、叶恒州、李水旺。\u003Cbr \u002F>\n  “学习遮挡鲁棒的视觉Transformer用于实时无人机跟踪。” CVPR（2025）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.09228)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fwuyou3474\u002FORTrack)]\n\n- **SGLATrack:** 薛超灿、钟彬能、梁启华、郑耀宗、李宁、薛元亮、宋书翔。\u003Cbr \u002F>\n  “用于无人机跟踪的相似性引导层自适应视觉Transformer。” CVPR（2025）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.06625)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FGXNU-ZhongLab\u002FSGLATrack)]\n\n- **DUTrack:** 李晓海、钟彬能、梁启华、莫志毅、农建、宋书翔。\u003Cbr \u002F>\n  “用于视觉—语言跟踪中语言适应的动态更新。” CVPR（2025）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.06621)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FGXNU-ZhongLab\u002FDUTrack)]\n\n- **MambaVLT:** 刘欣琪、周莉、周子坤、陈建秋、何振宇。\u003Cbr \u002F>\n  “MambaVLT：用于视觉—语言跟踪的时间演化多模态状态空间模型。” CVPR（2025）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.15459)] \n  [[代码]( )]\n\n- **Mono3DVLT:** 魏洪凯、杨洋、孙世杰、冯明涛、宋向宇、雷琪、胡红丽、王荣、宋焕生、纳维德·阿赫塔尔、阿吉马尔·赛义德·米安。\u003Cbr \u002F>\n  “Mono3DVLT：基于单目视频的三维视觉语言跟踪”。CVPR（2025）。\n  [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fhtml\u002FWei_Mono3DVLT_Monocular-Video-Based_3D_Visual_Language_Tracking_CVPR_2025_paper.html)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fhongkai-wei\u002FMono3DVLT)]\n\n- **EdgeTAM:** 周冲、朱晨晨、熊云阳、萨克沙姆·苏里、肖凡毅、吴乐萌、拉古拉曼·克里希纳穆尔蒂、戴博、陈昌礼、维卡斯·钱德拉、比尔盖·索兰。\u003Cbr \u002F>\n  “EdgeTAM：端侧万物跟踪模型”。CVPR（2025）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.07256)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FEdgeTAM)]\n\n- **DAM4SAM:** 约瓦娜·维德诺维奇、艾伦·卢克齐奇、马泰伊·克里斯坦。\u003Cbr \u002F>\n  “一种适用于SAM2的干扰物感知记忆用于视觉目标跟踪”。CVPR（2025）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.17576)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fjovanavidenovic\u002FDAM4SAM)]\n\n- **MUST:** 秦浩林、徐廷发、李天昊、陈振翔、冯涛、李佳楠。\u003Cbr \u002F>\n  “MUST：首个多光谱无人机单目标跟踪数据集及统一框架”。CVPR（2025）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.17699)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fq2479036243\u002FMUST-Multispectral-UAV-Single-Object-Tracking)]\n\n- **ETAP:** 弗里德海姆·哈曼、丹尼尔·格里格、菲尔伯特·费布里扬托、科斯塔斯·达尼利迪斯、吉列尔莫·加列戈。\u003Cbr \u002F>\n  “ETAP：基于事件的任意点跟踪”。CVPR（2025）。\n  [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fhtml\u002FHamann_ETAP_Event-based_Tracking_of_Any_Point_CVPR_2025_paper.html)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Ftub-rip\u002FETAP)]\n\n- **Chrono:** 金贤洙、曹锡柱、黄家辉、李正、李俊英、金承龙。\u003Cbr \u002F>\n  “探索时序感知特征用于点跟踪”。CVPR（2025）。\n  [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fhtml\u002FKim_Exploring_Temporally-Aware_Features_for_Point_Tracking_CVPR_2025_paper.html)] \n  [[代码](https:\u002F\u002Fcvlab-kaist.github.io\u002FChrono\u002F)]\n\n- **Tracktention:** 赖子航、安德烈亚·韦达尔迪。\u003Cbr \u002F>\n  “Tracktention：利用点跟踪更快更好地处理视频”。CVPR（2025）。\n  [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fhtml\u002FLai_Tracktention_Leveraging_Point_Tracking_to_Attend_Videos_Faster_and_Better_CVPR_2025_paper.html)] \n  [[代码](https:\u002F\u002Fzlai0.github.io\u002FTrackTention\u002F)]\n\n- **TimeTracker:** 刘浩悦、许景涵、常毅、周汉宇、赵浩志、王琳、严路欣。\u003Cbr \u002F>\n  “TimeTracker：基于事件的连续点跟踪，用于具有非线性运动的视频帧插值”。CVPR（2025）。\n  [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fhtml\u002FLiu_TimeTracker_Event-based_Continuous_Point_Tracking_for_Video_Frame_Interpolation_with_CVPR_2025_paper.html)] \n  [[代码]( )]\n\n- **ADMCMT:** 樊慧杰、乔宇、甄一豪、赵庭辉、范宝杰、王强。\u003Cbr \u002F>\n  “全天候多摄像头多目标跟踪”。CVPR（2025）。\n  [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fhtml\u002FFan_All-Day_Multi-Camera_Multi-Target_Tracking_CVPR_2025_paper.html)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FQTRACKY\u002FADMCMT)]\n\n- **OmniTrack:** 罗凯、史浩、吴盛、滕飞、段孟菲、黄畅、王宇航、王凯威、杨凯伦。\u003Cbr \u002F>\n  “全方位多目标跟踪”。CVPR（2025）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.04565)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fxifen523\u002FOmniTrack)]\n\n- **DFormerv2:** 尹博文、曹蛟龙、程明明、侯启斌。\u003Cbr \u002F>\n  “DFormerv2：用于RGBD语义分割的几何自注意力机制”。CVPR（2025）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.04701)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FVCIP-RGBD\u002FDFormer)]\n\n- **JTD-UAV:** 王一帆、赵健、范兆鑫、张欣、吴雪成、张雨典、靳磊、李心悦、王刚、贾梦溪、胡平、朱郑、李学龙。\u003Cbr \u002F>\n  “JTD-UAV：MLLM增强型联合跟踪与描述框架，用于反无人机系统”。CVPR（2025）。\n  [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fhtml\u002FWang_JTD-UAV_MLLM-Enhanced_Joint_Tracking_and_Description_Framework_for_Anti-UAV_Systems_CVPR_2025_paper.html)] \n  [[代码]( )]\n\n\n\n### ICML 2025\n\n- **MPT:** 赵杰、陈欣、袁永胜、迈克尔·费尔斯贝格、王东、陆虎川。\u003Cbr \u002F>\n  “用于鲁棒视觉跟踪的高效运动提示学习”。ICML（2025）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.16321)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fzj5559\u002FMotion-Prompt-Tracking)]\n\n- **CSTrack:** 冯晓坤、张大令、胡诗宇、李旭辰、吴美琪、张静、陈晓棠、黄凯琦。\u003Cbr \u002F>\n  “CSTrack：通过紧凑的时空特征提升RGB-X跟踪性能”。ICML（2025）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.19434)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FXiaokunFeng\u002FCSTrack)]\n  \n\n\n### ACM MM 2025\n\n- **RSTrack:** 曾凡胜、钟彬能、夏海英、谭宇飞、胡先涛、石良涛、宋书祥。\u003Cbr \u002F>\n  “在监督下进行显式上下文推理以用于视觉跟踪”。ACM MM（2025）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.16191)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FGXNU-ZhongLab\u002FRSTrack)]\n\n- **UniBench300:** 唐章勇、徐天阳、朱学峰、程春阳、周涛、吴晓军、约瑟夫·基特勒。\u003Cbr \u002F>\n  “串行优于并行：为多模态视觉目标跟踪与基准测试学习持续统一体验”。ACM MM（2025）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2508.10655)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FZhangyong-Tang\u002FUniBench300)]\n\n- **Gen4Track:** 葛嘉伟、张馨宇、曹九鑫、朱雪林、刘伟佳、高青青、曹碧薇、王坤、刘畅、刘博、冯晨、伊万尼斯·帕特拉斯。\u003Cbr \u002F>\n  “Gen4Track：通过自校正扩散模型实现无需调优的数据增强框架，用于视觉—语言跟踪”。ACM MM（2025）。\n  [[论文](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3746027.3754956)] \n  [[代码]( )]\n\n- **FA3T:** 王家豪、刘芳、焦立成、王浩、李硕、李玲玲、陈普华、刘旭、王新怡。\u003Cbr \u002F>\n  “FA3T：面向多模态跟踪的特征感知对抗攻击”。ACM MM（2025）。\n  [[论文](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3746027.3755155)] \n  [[代码]( )]\n\n- **MST:** 王士磊、程功、赖普建、高东、韩俊伟。\u003Cbr \u002F>\n  “多状态跟踪器：通过多状态专业化与交互提升高效目标跟踪能力”。ACM MM（2025）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2508.11531)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fwsumel\u002FMST)]\n\n### IJCAI 2025\n\n- **FastSeqTrack:** 李东东、高志南、蒯阳柳、陈锐。\u003Cbr \u002F>\n  “探索视觉目标跟踪中高效且有效的序列学习。” IJCAI（2025）。\n  [[论文](https:\u002F\u002Fijcai-preprints.s3.us-west-1.amazonaws.com\u002F2025\u002F3672.pdf)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fvision4drones\u002FFastSeqTrack)]\n\n- **SSTrack:** 寇宇彤、林书博、李亮、李冰、胡伟明、高进。\u003Cbr \u002F>\n  “SSTrack：用于轻量级视觉目标跟踪的样本间隔调度。” IJCAI（2025）。\n  [[论文](https:\u002F\u002Fijcai-preprints.s3.us-west-1.amazonaws.com\u002F2025\u002F3314.pdf)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FKou-99\u002FSSTrack)]\n\n- **TUMFNet:** 丁兆东、李成龙、缪圣清、唐进。\u003Cbr \u002F>\n  “基于模板的不确定性多模态融合网络用于RGBT跟踪。” IJCAI（2025）。\n  [[论文](https:\u002F\u002Fijcai-preprints.s3.us-west-1.amazonaws.com\u002F2025\u002F2815.pdf)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fdongdong2061\u002FIJCAI25-TUMFNet)]\n\n- **GDSTrack:** 李胜兰、姚睿、周勇、朱汉成、孙坤阳、刘兵、邵志文、赵佳琪。\u003Cbr \u002F>\n  “模态引导的动态图融合与时间扩散用于自监督RGB-T跟踪。” IJCAI（2025）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.03507)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FLiShenglana\u002FGDSTrack)]\n\n\n### AAAI 2025\n\n- **STTrack:** 胡先涛、邰英、赵旭、赵晨、张振宇、李俊、钟彬能、杨健。\u003Cbr \u002F>\n  “利用多模态时空模式进行视频目标跟踪。” AAAI（2025）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.15691)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FNJU-PCALab\u002FSTTrack)]\n\n- **SUTrack:** 陈鑫、康奔、耿婉婷、朱嘉雯、刘毅、王栋、陆虎川。\u003Cbr \u002F>\n  “SUTrack：迈向简单统一的单目标跟踪。” AAAI（2025）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.19138)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fchenxin-dlut\u002FSUTrack)]\n\n- **MIMTrack:** 王兴梅、聂国豪、孟家祥、严子宁。\u003Cbr \u002F>\n  “MIMTrack：通过掩码图像建模实现情境内跟踪。” AAAI（2025）。\n  [[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F32860)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fchenxin-dlut\u002FSUTrack)]\n  \n- **AINet:** 卢安东、王万宇、李成龙、唐进、罗斌。\u003Cbr \u002F>\n  “通过全层多模态交互与渐进式融合Mamba实现RGBT跟踪。” AAAI（2025）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.08827)] \n  [[代码]( )]\n\n- **CMS:** 向新宇、颜庆龙、张浩、丁建峰、徐涵、王中原、马佳怡。\u003Cbr \u002F>\n  “跨模态隐身：一种用于RGB-T跟踪器的粗细结合攻击框架。” AAAI（2025）。\n  [[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F32931)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FXinyu-Xiang\u002FCMS)]\n  \n- **CAFormer:** 肖云、赵家聪、卢安东、李成龙、林寅、尹冰、刘聪。\u003Cbr \u002F>\n  “用于RGBT跟踪的跨模态调制注意力Transformer。” AAAI（2025）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.02222)] \n  [[代码]( )]\n  \n- **TemTrack:** 谢金霞、钟彬能、梁启华、李宁、莫志义、宋淑香。\u003Cbr \u002F>\n  “通过基于Mamba的上下文感知标记学习实现鲁棒跟踪。” AAAI（2025）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.13611)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FGXNU-ZhongLab\u002FTemTrack)]\n\n- **LMTrack:** 徐辰龙、钟彬能、梁启华、郑耀宗、李国荣、宋淑香。\u003Cbr \u002F>\n  “少即是多：用于目标跟踪的标记上下文感知学习。” AAAI（2025）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.00758)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FXuChenLong\u002FLMTrack)]\n  \n- **MambaLCT:** 李晓海、钟彬能、梁启华、李国荣、莫志义、宋淑香。\u003Cbr \u002F>\n  “MambaLCT：通过长期上下文状态空间模型提升跟踪性能。” AAAI（2025）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.13615)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FGXNU-ZhongLab\u002FMambaLCT)]\n\n- **SSTrack:** 郑耀宗、钟彬能、梁启华、李宁、宋淑香。\u003Cbr \u002F>\n  “用于自监督跟踪的解耦时空一致性学习。” AAAI（2025）。\n  [[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F33155)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FGXNU-ZhongLab\u002FSSTrack)]\n  \n- **MCITrack:** 康奔、陈鑫、赖思淼、刘洋、刘毅、王栋。\u003Cbr \u002F>\n  “探索增强的情境信息用于视频级目标跟踪。” AAAI（2025）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.11023)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fkangben258\u002FMCITrack\u002F)]\n  \n- **AsymTrack:** 朱嘉雯、汤怀义、陈鑫、王欣颖、王栋、陆虎川。\u003Cbr \u002F>\n  “双流胜过单流：用于高效视觉跟踪的非对称暹罗网络。” AAAI（2025）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.00516)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fjiawen-zhu\u002FAsymTrack)]\n\n- **LVPTrack:** 吴洪景、姚思源、黄峰、王树、张林超、郑卓然、任文琪。\u003Cbr \u002F>\n  “LVPTrack：采用标签对齐视觉提示调优的高性能领域自适应无人机跟踪。” AAAI（2025）。\n  [[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F32906)] \n  [[代码]( )]\n\n- **MM-Tracker:** 姚牧峰、彭金龙、何青东、彭博、陈浩、迟明敏、刘超。\u003Cbr \u002F>\n  “MM-Tracker：用于无人机平台多目标跟踪的运动Mamba。” AAAI（2025）。\n  [[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F33019)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FYaoMufeng\u002FMMTracker)]\n  \n- **PSOT:** 李章斌、周金星、张静、唐生根、李坤、郭丹。\u003Cbr \u002F>\n  “用于视听问答的补丁级探测目标跟踪。” AAAI（2025）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.10749)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fjiawen-zhu\u002FAsymTrack)]\n\n### ICASSP 2025\n\n- **MFDA:** 李志恒、翁志敏、王月环。\u003Cbr \u002F>\n  “用于单目标跟踪的多视角特征差异攻击”。ICASSP（2025）。\n  [[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10889698)] \n  [[代码]( )]\n\n- **CGTrack:** 李伟宏、刘晓琼、范恒、张立博。\u003Cbr \u002F>\n  “CGTrack：基于分层特征聚合的级联门控网络用于无人机跟踪”。ICRA（2025）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.05936)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FNightwatch-Fox11\u002FCGTrack)]\n\n- **CLTrack:** 陈斌、胡盛龙、董刚、梁凌燕、文东超、张凯华。\u003Cbr \u002F>\n  “用于鲁棒无人机跟踪的连续学习视频级目标令牌”。ICASSP（2025）。\n  [[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10887950)] \n  [[代码]( )]\n\n- **LunarTracking:** 穆罕默德·利奥、张丁、郑海涛、林海叶。\u003Cbr \u002F>\n  “月球跟踪：夜间微小目标跟踪的新基准”。ICASSP（2025）。\n  [[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10890681)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fkk123321x\u002FLunarTracking)]\n\n- **EHDA:** 李乔、谭侃伦、刘乔、袁迪、李欣、刘云鹏。\u003Cbr \u002F>\n  “高效分层领域自适应热红外跟踪”。ICASSP（2025）。\n  [[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10890354)] \n  [[代码]( )]\n\n- **PDTrack:** 刘业强、李维然、丁彦浩、李振波。\u003Cbr \u002F>\n  “PDTrack：用于多目标跟踪的渐进式距离关联”。ICASSP（2025）。\n  [[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10888323)] \n  [[代码]( )]\n\n- **RSM:** 程日冉、王旭鹏、索黑尔·费尔杜斯、雷航。\u003Cbr \u002F>\n  “RSM：用于可解释3D目标跟踪的精炼显著性图”。ICASSP（2025）。\n  [[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10887798)] \n  [[代码]( )]\n\n- **LRPD:** 胡庆阔、李一晨、于文彬。\u003Cbr \u002F>\n  “利用多模态提示学习与蒸馏进行RGB-T跟踪”。ICMR（2025）。\n  [[论文](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3731715.3733332)] \n  [[代码]( )]\n\n- **VSS:** 魏鹏飞、乔刘、何振宇、袁迪。\u003Cbr \u002F>\n  “一种多流视觉-光谱-空间自适应高光谱目标跟踪”。ICMR（2025）。\n  [[论文](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3731715.3733262)] \n  [[代码]( )]\n\n- **DARTer:** 李旭照、李旭辰、胡世宇。\u003Cbr \u002F>\n  “DARTer：用于夜间无人机跟踪的动态自适应表征跟踪器”。ICMR（2025）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.00752)] \n  [[代码]( )]\n\n\n  \n### NeurIPS 2024\n\n- **ChatTracker:** 孙一鸣、于凡、陈绍祥、张宇、黄俊伟、李晨辉、李阳、王昌博。\u003Cbr \u002F>\n  “ChatTracker：通过与多模态大型语言模型对话提升视觉跟踪性能”。NeurIPS（2024）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.01756)] \n  [[代码]( )]\n\n- **WebUOT-1M:** 张春辉、刘莉、黄冠杰、温浩、周曦、王延峰。\u003Cbr \u002F>\n  “WebUOT-1M：以百万规模基准推进深海目标跟踪”。NeurIPS（2024）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.19818)] \n  [[代码](https:\u002F\u002Fgithub.com\u002F983632847\u002FAwesome-Multimodal-Object-Tracking)]\n\n- **VastTrack:** 彭亮、高俊源、刘欣然、李伟宏、董绍华、张志鹏、范恒、张立博。\u003Cbr \u002F>\n  “VastTrack：大规模类别视觉目标跟踪”。NeurIPS（2024）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.03493)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FHengLan\u002FVastTrack)]\n\n- **DeTrack:** 周新宇、李景伦、洪玲毅、蒋凯勋、郭品雪、葛伟峰、张文强。\u003Cbr \u002F>\n  “DeTrack：用于视觉目标跟踪的模型内潜在去噪学习”。NeurIPS（2024）。\n  [[论文](https:\u002F\u002Fopenreview.net\u002Fforum?id=ZJjuNF0olj)] \n  [[代码]( )]\n\n- **CSAM:** 张天禄、库尔特·德巴蒂斯塔、张强、丁贵广、韩宗功。\u003Cbr \u002F>\n  “以MOT理念重新审视运动信息，用于RGB-事件跟踪”。NeurIPS（2024）。\n  [[论文](https:\u002F\u002Fopenreview.net\u002Fforum?id=bzGAELYOyL)] \n  [[代码]( )]\n\n- **DINTR:** 阮法、黎银、杰克逊·科森、阿尔珀·耶尔马兹、卢科阿。\u003Cbr \u002F>\n  “DINTR：基于扩散插值的跟踪”。NeurIPS（2024）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.10053)] \n  [[代码]( )]\n\n- **UAV3D:** 叶辉、桑德拉曼、季世豪。\u003Cbr \u002F>\n  “UAV3D：面向无人飞行器的大规模3D感知基准”。NeurIPS（2024）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.11125)] \n  [[代码](https:\u002F\u002Fhuiyegit.github.io\u002FUAV3D_Benchmark\u002F)]\n\n- **MemVLT:** 冯晓坤、李旭辰、胡世宇、张黛玲、吴美琪、陈晓棠、黄凯奇。\u003Cbr \u002F>\n  “MemVLT：基于自适应记忆的提示进行视觉-语言跟踪”。NeurIPS（2024）。\n  [[论文](https:\u002F\u002Fopenreview.net\u002Fforum?id=ZK1CZXKgG5)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FXiaokunFeng\u002FMemVLT)]\n\n- **CPDTrack:** 张黛玲、胡世宇、冯晓坤、李旭辰、吴美琪、黄凯奇。\u003Cbr \u002F>\n  “超越准确率：通过视觉搜索更像人类地进行跟踪”。NeurIPS（2024）。\n  [[论文](https:\u002F\u002Fopenreview.net\u002Fforum?id=LezAEImfoc)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FZhangDailing8\u002FCPDTrack)]\n\n### ECCV 2024\n\n- **Diff-Tracker:** 张正博、徐力、彭铎、侯赛因·拉赫马尼、刘俊。\u003Cbr \u002F>\n  “Diff-Tracker：文本到图像的扩散模型是无监督跟踪器。” ECCV（2024）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.08394)] \n  [[代码]( )]\n\n- **LoRAT:** 林丽婷、范恒、张志鹏、王耀伟、许勇、凌海斌。\u003Cbr \u002F>\n  “跟踪遇上LoRA：训练更快、模型更大、性能更强。” ECCV（2024）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.05231)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FLitingLin\u002FLoRAT)]\n\n- **VideoMamba:** 李坤昌、李新浩、王毅、何一楠、王亚莉、王利民、乔宇。\u003Cbr \u002F>\n  “VideoMamba：用于高效视频理解的状态空间模型。” ECCV（2024）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.06977)] \n  [[代码](https:\u002F\u002Fhuggingface.co\u002FOpenGVLab\u002FVideoMamba)]\n\n- **DINO-Tracker:** 纳雷克·图曼扬、阿萨夫·辛格、沙伊·巴贡、塔莉·德克尔。\u003Cbr \u002F>\n  “DINO-Tracker：驯服DINO以实现单视频中的自监督点跟踪。” ECCV（2024）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.14548v1)] \n  [[代码](https:\u002F\u002Fdino-tracker.github.io\u002F)]\n\n- **DecoMotion:** 李锐、刘东。\u003Cbr \u002F>\n  “分解提升对所有场景中所有目标的跟踪能力。” ECCV（2024）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.06531)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fqianduoduolr\u002FDecoMotion)]\n\n- **Elysium:** 王涵、王艳杰、叶永杰、聂宇翔、黄灿。\u003Cbr \u002F>\n  “Elysium：通过多模态大语言模型探索视频中的物体级感知。” ECCV（2024）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.02049)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FHon-Wong\u002FElysium)]\n  \n- **HVTrack:** 吴桥、孙坤、安培、马蒂厄·萨尔茨曼、张燕宁、杨佳琪。\u003Cbr \u002F>\n  “高时间变化下的点云中3D单目标跟踪。” ECCV（2024）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.02049)] \n  [[代码]( )]\n\n- **AADN:** 吴哲伟、于瑞龙、刘启和、程淑英、邱士林、周世杰。\u003Cbr \u002F>\n  “利用辅助对抗防御网络提升跟踪鲁棒性。” ECCV（2024）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.17976)] \n  [[代码](https:\u002F\u002Fgithub.com\u002F)]\n\n  \n### CVPR 2024\n\n- **MASA:** 李思远、柯磊、马丁·丹内尔扬、路易吉·皮奇内利、马蒂亚·塞古、卢克·范古尔、费舍尔·余。\u003Cbr \u002F>\n  “通过分割一切来匹配一切。” CVPR（2024）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.04221)] \n  [[代码](https:\u002F\u002Fmatchinganything.github.io\u002F)]\n  \n- **OneTracker:** 洪凌翼、严士林、张仁睿、李万云、周信宇、郭品雪、蒋凯勋、程怡婷、李景伦、陈兆宇、张文强。\u003Cbr \u002F>\n  “OneTracker：用基础模型和高效微调统一视觉目标跟踪。” CVPR（2024）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.09634)] \n  [[代码](https:\u002F\u002F)]\n\n- **ARTrackV2:** 白义凡、赵泽阳、龚一鸿、魏星。\u003Cbr \u002F>\n  “ARTrackV2：引导自回归跟踪器该看哪里以及如何描述。” CVPR（2024）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.17133)] \n  [[代码](https:\u002F\u002Fartrackv2.github.io\u002F)]\n\n- **DiffusionTrack:** 谢飞、王仲道、马超。\u003Cbr \u002F>\n  “DiffusionTrack：用于视觉目标跟踪的点集扩散模型。” CVPR（2024）。\n  [[论文](https:\u002F\u002Farxiv.org\u002F)] \n  [[代码](https:\u002F\u002F)]\n\n- **RTracker:** 黄玉清、李欣、周子坤、王耀伟、何振宇、杨明轩。\u003Cbr \u002F>\n  “RTracker：通过PN树状结构记忆实现可恢复跟踪。” CVPR（2024）。\n  [[论文](https:\u002F\u002Farxiv.org\u002F)] \n  [[代码](https:\u002F\u002F)]\n\n- **NetTrack:** 郑光泽、林世杰、左浩波、傅昌宏、潘嘉。\u003Cbr \u002F>\n  “NetTrack：用网状结构追踪高度动态的目标。” CVPR（2024）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.11186)] \n  [[代码](https:\u002F\u002Fgeorge-zhuang.github.io\u002Fnettrack\u002F)]\n\n- **Un-Track:** 吴宗伟、郑继来、任向轩、弗洛林-亚历山德鲁·瓦斯卢伊阿努、马超、丹达·帕尼·保德尔、卢克·范古尔、拉杜·蒂莫夫特。\u003Cbr \u002F>\n  “适用于视频目标跟踪的单模型与任意模态方法。” CVPR（2024）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.15851)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FZongwei97\u002FUnTrack)]\n\n- **HIPTrack:** 蔡文睿、刘庆杰、王云洪。\u003Cbr \u002F>\n  “HIPTrack：基于历史提示的视觉跟踪。” CVPR（2024）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.02072)] \n  [[代码](https:\u002F\u002Fxxx)]\n\n- **AQATrack:** 谢金霞、钟彬能、莫志义、张盛平、石良涛、宋书祥、季荣荣。\u003Cbr \u002F>\n  “基于时空Transformer的自回归查询用于适应性跟踪。” CVPR（2024）。\n  [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FXie_Autoregressive_Queries_for_Adaptive_Tracking_with_Spatio-Temporal_Transformers_CVPR_2024_paper.html)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FGXNU-ZhongLab\u002FAQATrack)]\n\n- **MMA:** 杨凌霄、张如元、王延晨、谢晓华。\u003Cbr \u002F>\n  “MMA：用于视觉-语言模型的多模态适配器。” CVPR（2024）。\n  [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fhtml\u002FYang_MMA_Multi-Modal_Adapter_for_Vision-Language_Models_CVPR_2024_paper.html)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FZjjConan\u002FMulti-Modal-Adapter)]\n\n- **SDSTrack:** 侯晓军、邢家政、钱一杰、郭耀伟、辛硕、陈俊豪、唐凯、王梦梦、蒋正凯、刘亮、刘勇。\u003Cbr \u002F>\n  “SDSTrack：用于多模态视觉目标跟踪的自蒸馏对称适配器学习。” CVPR（2024）。\n  [[论文](https:\u002F\u002Farxiv.org\u002F)] \n  [[代码](https:\u002F\u002F)]\n\n- **HDETrack:** 王晓、王绍、唐传明、朱琳、江博、田永红、唐进。\u003Cbr \u002F>\n  “基于事件流的视觉目标跟踪：一个高分辨率基准数据集及一种新型基线。” CVPR（2024）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.14611)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FEvent-AHU\u002FEventVOT_Benchmark)]\n\n- **CAI:** 邵燕燕、何淑婷、叶琪、冯宇超、罗文翰、陈继明。\u003Cbr \u002F>\n  “面向自然语言跟踪的语言与视觉参考的上下文感知融合。” CVPR（2024）。\n  [[论文](https:\u002F\u002Farxiv.org\u002F)] \n  [[代码](https:\u002F\u002F)]\n\n- **ResampleTrack:** 任旭宏、陈建朗、曹悦、薛万利、郭青、马磊、赵建军、陈申勇。\u003Cbr \u002F>\n  “ResampleTrack：用于对抗性鲁棒视觉跟踪的在线重采样。” CVPR（2024）。\n  [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024W\u002FAdvML\u002Fhtml\u002FRen_ResampleTrack_Online_Resampling_for_Adversarially_Robust_Visual_Tracking_CVPRW_2024_paper.html)] \n  [[代码]( )]\n\n### WACV 2024\n\n- **ContrasTR:** Pierre-François De Plaen, Nicola Marinello, Marc Proesmans, Tinne Tuytelaars, Luc Van Gool。\u003Cbr \u002F>\n  “基于Transformer的多目标跟踪对比学习”。WACV（2024）。\n  [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FWACV2024\u002Fpapers\u002FDe_Plaen_Contrastive_Learning_for_Multi-Object_Tracking_With_Transformers_WACV_2024_paper.pdf)] \n  [[代码]()]\n\n- **LaGOT:** Christoph Mayer, Martin Danelljan, Ming-Hsuan Yang, Vittorio Ferrari, Luc Van Gool, Alina Kuznetsova。\u003Cbr \u002F>\n  “超越单目标跟踪：是时候同时跟踪多个通用目标了”。WACV（2024）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.11920)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fvisionml\u002Fpytracking)]\n\n- **SMAT:** Goutam Yelluru Gopal, Maria A. Amer。\u003Cbr \u002F>\n  “用于高效目标跟踪的可分离自注意力与混合注意力Transformer”。WACV（2024）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.03979)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fgoutamyg\u002FSMAT)]\n  \n- **DATr:** Jie Zhao, Johan Edstedt, Michael Felsberg, Dong Wang, Huchuan Lu。\u003Cbr \u002F>\n  “利用数据增强的力量进行基于Transformer的跟踪”。WACV（2024）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.08264)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fzj5559\u002FDATr)]\n  \n### AAAI 2024\n\n- **GMMT:** Zhangyong Tang, Tianyang Xu, Xuefeng Zhu, Xiao-Jun Wu, Josef Kittler。\u003Cbr \u002F>\n  “基于生成式融合机制的多模态跟踪”。AAAI（2024）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.01728)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FZhangyong-Tang\u002FGMMT)]\n\n- **ODTrack:** Yaozong Zheng, Bineng Zhong, Qihua Liang, Zhiyi Mo, Shengping Zhang, Xianxian Li。\u003Cbr \u002F>\n  “ODTrack：用于视觉跟踪的在线密集时序标记学习”。AAAI（2024）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.01686)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FGXNU-ZhongLab\u002FODTrack)]\n\n - **EVPTrack:** Liangtao Shi, Bineng Zhong, Qihua Liang, Ning Li, Shengping Zhang, Xianxian Li。\u003Cbr \u002F>\n  “用于视觉目标跟踪的显式视觉提示”。AAAI（2024）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.03142)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FGXNU-ZhongLab\u002FEVPTrack)] \n  \n- **BAT:** Bing Cao, Junliang Guo, Pengfei Zhu, Qinghua Hu。\u003Cbr \u002F>\n  “用于多模态跟踪的双向适配器”。AAAI（2024）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.10611)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FSparkTempest\u002FBAT)]\n\n- **TATrack:** Hongyu Wang, Xiaotao Liu, Yifan Li, Meng Sun, Dian Yuan, Jing Liu。\u003Cbr \u002F>\n  “带有模态提示的时序自适应RGBT跟踪”。AAAI（2024）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.01244)] \n  [[代码]()]\n  \n- **Hybrid-SORT:** Mingzhan Yang, Guangxin Han, Bin Yan, Wenhua Zhang, Jinqing Qi, Huchuan Lu, Dong Wang。\u003Cbr \u002F>\n  “Hybrid-SORT：弱线索对在线多目标跟踪同样重要”。AAAI（2024）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.00783)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fymzis69\u002FHybirdSORT)]\n\n\n### ArXiv 2024\n\n- **SeqTrack3D:** Yu Lin, Zhiheng Li, Yubo Cui, Zheng Fang。\u003Cbr \u002F>\n  “SeqTrack3D：探索序列信息以实现鲁棒的3D点云跟踪”。ICRA（2024）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.16249)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Faron-lin\u002Fseqtrack3d)]\n  \n- **VAT:** Guangtong Zhang, Qihua Liang, Zhiyi Mo, Ning Li, Bineng Zhong。\u003Cbr \u002F>\n  “用于RGBD跟踪的视觉适应”。ICASSP（2024）。\n  [[论文](https:\u002F\u002Farxiv.org )] \n  [[代码](https:\u002F\u002Fgithub.com\u002F )]\n\n- **UVLTrack:** Yinchao Ma, Yuyang Tang, Wenfei Yang, Tianzhu Zhang, Jinpeng Zhang, Mengxue Kang。\u003Cbr \u002F>\n  “通过对比学习统一视觉与视觉-语言跟踪”。ArXiv（2024）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.11228)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FOpenSpaceAI\u002FUVLTrack)]\n\n- **SuperSBT:** Fei Xie, Wankou Yang, Chunyu Wang, Lei Chu, Yue Cao, Chao Ma, Wenjun Zeng。\u003Cbr \u002F>\n  “嵌入相关性的Transformer跟踪：单分支框架”。ArXiv（2024）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.12743)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fphiphiphi31\u002FSBT)]\n  \n\n### NeurIPS 2023\n\n- **MixFormerV2:** Yutao Cui, Tianhui Song, Gangshan Wu, Limin Wang。\u003Cbr \u002F>\n  “MixFormerV2：高效的全Transformer跟踪”。NeurIPS（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.15896)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FMCG-NJU\u002FMixFormerV2)]\n  \n- **ZoomTrack:** Yutong Kou, Jin Gao, Bing Li, Gang Wang, Weiming Hu, Yizheng Wang, Liang Li。\u003Cbr \u002F>\n  “ZoomTrack：面向目标的非均匀缩放以实现高效视觉跟踪”。NeurIPS（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.10071)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FKou-99\u002FZoomTrack)]\n\n- **Type-to-Track:** Pha Nguyen, Kha Gia Quach, Kris Kitani, Khoa Luu。\u003Cbr \u002F>\n  “Type-to-Track：通过提示驱动的跟踪检索任意目标”。NeurIPS（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.13495)] \n  [[代码](https:\u002F\u002Fuark-cviu.github.io\u002FType-to-Track)]\n\n- **MGIT:** Shiyu Hu, Dailin Zhang, Meiqi Wu, Xiaokun Feng, Xuchen Li, Xin Zhao, Kaiqi Huang。\u003Cbr \u002F>\n  “多模态全局实例跟踪基准（MGIT）：在复杂时空及因果关系中更好地定位目标”。NeurIPS（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002Fxxxxx.xx)] \n  [[代码](http:\u002F\u002Fvideocube.aitestunion.com\u002F)]\n\n### ICCV 2023\n\n- **VTDNet:** Thomas E. Huang、刘一凡、卢克·范古尔、费舍尔·余。\u003Cbr \u002F>\n  “视频任务十项全能：统一自动驾驶中的图像与视频任务。” ICCV（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.04422)] \n  [[代码](https:\u002F\u002Fwww.vis.xyz\u002Fpub\u002Fvtd)]\n  \n- **HiT:** 康奔、陈鑫、王栋、彭厚文、陆旭川。\u003Cbr \u002F>\n  “探索用于高效视觉跟踪的轻量级层次化视觉Transformer。” ICCV（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.06904)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fkangben258\u002FHiT)]\n\n- **ROMTrack:** 蔡毅东、刘杰、唐杰、吴刚山。\u003Cbr \u002F>\n  “面向视觉跟踪的鲁棒目标建模。” ICCV（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.05140)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fdawnyc\u002FROMTrack)]\n\n- **F-BDMTrack:** 杨大伟、何建峰、马银超、于千金、张天柱。\u003Cbr \u002F>\n  “用于视觉目标跟踪的前景-背景分布建模Transformer。” ICCV（2023）。\n  [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FYang_Foreground-Background_Distribution_Modeling_Transformer_for_Visual_Object_Tracking_ICCV_2023_paper.pdf)] \n  [[代码]()]\n  \n- **MITS:** 徐元友、杨宗欣、杨毅。\u003Cbr \u002F>\n  “整合边界框与掩码：用于统一视觉跟踪与分割的多目标框架。” ICCV（2023）。\n  [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FXu_Integrating_Boxes_and_Masks_A_Multi-Object_Framework_for_Unified_Visual_ICCV_2023_paper.pdf)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fyoxu515\u002FMITS)]\n\n- **Aba-ViTrack:** 李水旺、杨向阳、曾丹、王旭成。\u003Cbr \u002F>\n  “适用于实时无人机跟踪的自适应且背景感知型视觉Transformer。” ICCV（2023）。\n  [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FLi_Adaptive_and_Background-Aware_Vision_Transformer_for_Real-Time_UAV_Tracking_ICCV_2023_paper.pdf)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fxyyang317\u002FAba-ViTrack)]\n  \n- **Omnimotion:** 王倩倩、Yen-Yu Chang、蔡若瑾、李正奇、巴拉特·哈里哈兰、亚历山大·霍林斯基、诺亚·斯内夫利。\u003Cbr \u002F>\n  “一次追踪所有地方的所有事物。” ICCV（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.05422)] \n  [[代码](https:\u002F\u002Fomnimotion.github.io\u002F)]\n  \n- **DEVA:** 郑浩基、吴世宇、布莱恩·普赖斯、亚历山大·施温格、李俊英。\u003Cbr \u002F>\n  “通过解耦视频分割追踪任何物体。” ICCV（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.03903)] \n  [[代码](https:\u002F\u002Fhkchengrex.github.io\u002FTracking-Anything-with-DEVA)]\n\n- **CiteTracker:** 李欣、黄玉清、何振宇、王耀威、陆旭川、杨明轩。\u003Cbr \u002F>\n  “CiteTracker：结合图像与文本的视觉跟踪方法。” ICCV（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.11322)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fxinli\u002Fcitetracker)]\n\n- **DecoupleTNL:** 马丁、吴向乾。\u003Cbr \u002F>\n  “基于自然语言描述并结合长短时上下文解耦的跟踪方法。” ICCV（2023）。\n  [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FMa_Tracking_by_Natural_Language_Specification_with_Long_Short-term_Context_Decoupling_ICCV_2023_paper.pdf)] \n  [[代码]()]\n  \n- **PVT++:** 李博文、黄子渊、叶俊杰、李一鸣、塞巴斯蒂安·舍雷尔、赵航、傅昌宏。\u003Cbr \u002F>\n  “PVT++：一个简单、端到端且考虑延迟的视觉跟踪框架。” ICCV（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.11629)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FJaraxxus-Me\u002FPVT_pp)]\n\n- **SyncTrack:** 马特丽、王梦梦、肖继民、吴慧峰、刘勇。\u003Cbr \u002F>\n  “同步特征提取与匹配：用于3D目标跟踪的单分支框架。” ICCV（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.12549)] \n  [[代码](xxxxx)]\n  \n- **360VOT:** 黄华健、许寅哲、陈颖舒、杨赛基。\u003Cbr \u002F>\n  “360VOT：一个新的全方位视觉目标跟踪基准数据集。” ICCV（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.14630)] \n  [[代码](https:\u002F\u002F360vot.hkustvgd.com\u002F)]\n\n- **PlanarTrack:** 刘欣然、刘晓琼、易子若、周鑫、黎青、张立博、黄燕、杨庆、范恒。\u003Cbr \u002F>\n  “PlanarTrack：一个大规模且具有挑战性的平面目标跟踪基准。” ICCV（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.07625)] \n  [[代码](https:\u002F\u002Fhengfan2010.github.io\u002Fprojects\u002FPlanarTrack\u002F)]\n\n### CVPR 2023\n\n- **X-Decoder:** Xueyan Zou, Zi-Yi Dou, Jianwei Yang, Zhe Gan, Linjie Li, Chunyuan Li, Xiyang Dai, Harkirat Behl, Jianfeng Wang, Lu Yuan, Nanyun Peng, Lijuan Wang, Yong Jae Lee, Jianfeng Gao。\u003Cbr \u002F>\n  “面向像素、图像和语言的通用解码”。CVPR（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.11270)] \n  [[代码](https:\u002F\u002Fx-decoder-vl.github.io\u002F)]\n  \n- **UNINEXT:** Bin Yan, Yi Jiang, Jiannan Wu, Dong Wang, Ping Luo, Zuhuan Yuan, Huchuan Lu。\u003Cbr \u002F>\n  “作为目标发现与检索的通用实例感知”。CVPR（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.06674)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FMasterBin-IIAU\u002FUNINEXT)]\n  \n- **OmniTracker:** Junke Wang, Dongdong Chen, Zuxuan Wu, Chong Luo, Xiyang Dai, Lu Yuan, Yu-Gang Jiang。\u003Cbr \u002F>\n  “OmniTracker：通过检测式跟踪统一目标跟踪”。CVPR（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.12079)] \n  [[代码](https:\u002F\u002Fgithub.com\u002F)]\n  \n- **SUSHI:** Orcun Cetintas, Guillem Brasó, Laura Leal-Taixé。\u003Cbr \u002F>\n  “利用图层次结构统一短期与长期跟踪”。CVPR（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.03038)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fdvl-tum\u002FSUSHI)]\n  \n- **DropMAE:** Qiangqiang Wu, Tianyu Yang, Ziquan Liu, Baoyuan Wu, Ying Shan, Antoni B. Chan。\u003Cbr \u002F>\n  “DropMAE：用于跟踪任务的空间注意力丢弃掩码自编码器”。CVPR（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.00571)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fjimmy-dq\u002FDropMAE)]\n  \n- **VideoTrack:** Fei Xie, Lei Chu, Jiahao Li, Yan Lu, Chao Ma。\u003Cbr \u002F>\n  “VideoTrack：通过视频Transformer学习目标跟踪”。CVPR（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002Fx)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fphiphiphi31\u002FVideoTrack)]\n  \n- **SwinV2:** Zhenda Xie, Zigang Geng, Jingcheng Hu, Zheng Zhang, Han Hu, Yue Cao。\u003Cbr \u002F>\n  “揭示掩码图像建模的深层秘密”。CVPR（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.13543)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FSwinTransformer\u002FMIM-Depth-Estimation)]\n  \n- **ViPT:** Jiawen Zhu, Simiao Lai, Xin Chen, Dong Wang, Huchuan Lu。\u003Cbr \u002F>\n  “视觉提示多模态跟踪”。CVPR（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.10826)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fjiawen-zhu\u002FViPT)]\n  \n - **JointNLT:** Li Zhou, Zikun Zhou, Kaige Mao, Zhenyu He。\u003Cbr \u002F>\n  “结合自然语言描述的联合视觉定位与跟踪”。CVPR（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.12027)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Flizhou-cs\u002FJointNLT)]\n  \n - **ARKitTrack:** Haojie Zhao, Junsong Chen, Lijun Wang, Huchuan Lu。\u003Cbr \u002F>\n  “ARKitTrack：一种基于移动RGB-D数据的新型多样化跟踪数据集”。CVPR（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.13885)] \n  [[代码](https:\u002F\u002Farkittrack.github.io\u002F)]\n  \n - **GRM:** Shenyuan Gao, Chunluan Zhou, Jun Zhang。\u003Cbr \u002F>\n  “用于Transformer跟踪的广义关系建模”。CVPR（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2303.16580v1.pdf)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FLittle-Podi\u002FGRM)]\n  \n - **ARTrack:** Xing Wei, Yifan Bai, Yongchao Zheng, Dahu Shi, Yihong Gong。\u003Cbr \u002F>\n  “自回归视觉跟踪”。CVPR（2023）。\n  [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FWei_Autoregressive_Visual_Tracking_CVPR_2023_paper.html)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FMIV-XJTU\u002FARTrack)]\n  \n - **MAT:** Haojie Zhao, Dong Wang, Huchuan Lu。\u003Cbr \u002F>\n  “基于掩码外观迁移的视觉目标跟踪表征学习”。CVPR（2023）。\n  [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FZhao_Representation_Learning_for_Visual_Object_Tracking_by_Masked_Appearance_Transfer_CVPR_2023_paper.html)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fdifhnp\u002FMAT)]\n  \n - **EMT:** Jinyu Yang, Shang Gao, Zhe Li, Feng Zheng, Aleš Leonardis。\u003Cbr \u002F>\n  “资源高效的RGBD空中目标跟踪”。CVPR（2023）。\n  [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FYang_Resource-Efficient_RGBD_Aerial_Tracking_CVPR_2023_paper.html)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fyjybuaa\u002FRGBDAerialTracking)]\n  \n - **TBSI:** Tianrui Hui, Zizheng Xun, Fengguang Peng, Junshi Huang, Xiaoming Wei, Xiaolin Wei, Jiao Dai, Jizhong Han, Si Liu。\u003Cbr \u002F>\n  “通过模板桥接搜索区域交互实现RGB-T目标跟踪”。CVPR（2023）。\n  [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fhtml\u002FHui_Bridging_Search_Region_Interaction_With_Template_for_RGB-T_Tracking_CVPR_2023_paper.html)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FRyanHTR\u002FTBSI)]\n  \n - **VisTracker:** Xianghui Xie, Bharat Lal Bhatnagar, Gerard Pons-Moll。\u003Cbr \u002F>\n  “基于单目RGB相机的可见性感知人机交互跟踪”。CVPR（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.16479v1)] \n  [[代码](https:\u002F\u002Fvirtualhumans.mpi-inf.mpg.de\u002FVisTracker\u002F)]\n  \n - **OVTrack:** Siyuan Li, Tobias Fischer, Lei Ke, Henghui Ding, Martin Danelljan, Fisher Yu。\u003Cbr \u002F>\n  “OVTrack：开放词汇多目标跟踪”。CVPR（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.08408)] \n  [[代码](https:\u002F\u002Fwww.vis.xyz\u002Fpub\u002Fovtrack\u002F)]\n  \n - **SeqTrack:** Xin Chen, Houwen Peng, Dong Wang, Huchuan Lu, Han Hu。\u003Cbr \u002F>\n  “SeqTrack：用于视觉目标跟踪的序列到序列学习”。CVPR（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.14394)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002FVideoX)]\n  \n - **ImageBind:** Rohit Girdhar, Alaaeldin El-Nouby, Zhuang Liu, Mannat Singh, Kalyan Vasudev Alwala, Armand Joulin, Ishan Misra。\u003Cbr \u002F>\n  “IMAGEBIND：一个嵌入空间，将一切联结起来”。CVPR（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.05665)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002FImageBind)]\n  \n - **TCOW:** Basile Van Hoorick, Pavel Tokmakov, Simon Stent, Jie Li, Carl Vondrick。\u003Cbr \u002F>\n  “在野外环境中穿越容器与遮挡物进行目标跟踪”。CVPR（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.03052)] \n  [[代码](https:\u002F\u002Ftcow.cs.columbia.edu\u002F)]\n\n### ArXiv 2023\n\n- **UTrack:** 高杰、钟彬能、陈燕。\u003Cbr \u002F>\n  “利用目标线索实现无歧义的目标跟踪”。ACM MM（2023）。\n  [[论文](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3581783.3612240)] \n  [[代码]()]\n\n- **UPVPT:** 张广通、梁启华、李宁、莫志毅、钟彬能。\u003Cbr \u002F>\n  “通过统一预训练-微调与视觉提示调优实现鲁棒跟踪”。ACM MM亚洲（2023）。\n  [[论文](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3595916.3626410)] \n  [[代码]()]\n\n- **TAO-Amodal:** 谢承延、塔拉莎·库拉纳、阿查尔·戴夫、德瓦·拉马南。\u003Cbr \u002F>\n  “非视域下任意目标的跟踪”。ArXiv（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.12433)] \n  [[代码](https:\u002F\u002Ftao-amodal.github.io\u002F)]\n\n- **HQTrack:** 朱家文、陈振宇、郝泽奇、常世杰、张璐、王东、陆虎川、罗斌、何俊彦、兰金鹏、陈翰源、李晨阳。\u003Cbr \u002F>\n  “高质量地跟踪任何目标”。ArXiv（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.13974)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fjiawen-zhu\u002FHQTrack)]\n\n- **MMTrack:** 郑耀宗、钟彬能、梁启华、李国荣、季荣荣、李仙仙。\u003Cbr \u002F>\n  “面向视觉-语言联合跟踪的统一标记学习”。TCSVT（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.14103)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FAzong-HQU\u002FMMTrack)]\n\n- **OVLM:** 张焕龙、王景超、张建伟、张天柱、钟彬能。\u003Cbr \u002F>\n  “用于目标跟踪的单流视觉-语言记忆网络”。TMM（2023）。\n  [[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F10149530)] \n  [[代码]( )]\n\n- **All-in-One:** 张春辉、孙欣、刘莉、杨一茜、刘琼、周曦、王艳峰。\u003Cbr \u002F>\n  “一体式：探索基于多模态对齐的统一视觉-语言跟踪”。ACM MM（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.03373)] \n  [[代码]( )]\n  \n- **MPLT:** 罗洋、郭锡庆、冯慧、敖磊。\u003Cbr \u002F>\n  “基于多模态互惠提示学习的RGB-T跟踪”。ArXiv（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.16386)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FHusterYoung\u002FMPLT)]\n  \n- **DCPT:** 朱家文、唐华义、程志奇、何俊彦、罗斌、邱世豪、李圣明、陆虎川。\u003Cbr \u002F>\n  “DCPT：基于黑暗线索提示的夜间无人机跟踪”。ArXiv（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.10491)] \n  [[代码](https:\u002F\u002Fxxx)]\n\n- **SRT:** 刘天鹏、李静、吴佳、张乐飞、常军、万俊、连乐志。\u003Cbr \u002F>\n  “基于显著性区域变换器的跟踪”。TIP（2023）。\n  [[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F10359476)] \n  [[代码](https:\u002F\u002Fgithub.xxxxx)]\n\n- **TATrans:** 赖普健、张美丽、程功、李盛阳、黄宪凯、韩俊伟。\u003Cbr \u002F>\n  “用于卫星视频目标跟踪的感知目标Transformer”。TGRS（2023）。\n  [[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10342836)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Flaybebe\u002FTATrans_SVOT)]\n\n- **STRtrack:** 赵绍川、徐天阳、吴晓军、约瑟夫·基特勒。\u003Cbr \u002F>\n  “一种结合空间-通道Transformer和抖动抑制的时空鲁棒跟踪器”。IJCV（2023）。\n  [[论文](https:\u002F\u002Flink.springer.com\u002Farticle\u002F10.1007\u002Fs11263-023-01902-x)] \n  [[代码](https:\u002F\u002Fxxx)]\n\n- **CoTracker:** 尼基塔·卡拉耶夫、伊格纳西奥·罗科、本杰明·格雷厄姆、娜塔莉娅·内韦罗娃、安德烈亚·韦达尔迪、克里斯蒂安·鲁普雷希特。\u003Cbr \u002F>\n  “CoTracker：协同跟踪更有效”。ArXiv（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.07635)] \n  [[代码](https:\u002F\u002Fco-tracker.github.io\u002F)]\n  \n- **LiteTrack:** 魏青茂、曾碧、刘建奇、何力、曾国田。\u003Cbr \u002F>\n  “LiteTrack：采用异步特征提取进行层剪枝，实现轻量高效视觉跟踪”。ArXiv（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.09249)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FTsingWei\u002FLiteTrack)]\n  \n- **LightFC:** 李云峰、王博、李叶、刘卓妍、吴雪怡。\u003Cbr \u002F>\n  “轻量级全卷积孪生跟踪器”。ArXiv（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.05392)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FLiYunfengLYF\u002FLightFC)]\n\n- **DETRrack:** 魏青茂、曾碧、曾国田。\u003Cbr \u002F>\n  “基于可变形Transformer的视觉跟踪高效训练”。ArXiv（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.02676)] \n  [[代码](hxx)]\n\n- **JN:** 魏青茂、曾碧、曾国田。\u003Cbr \u002F>\n  “面向视觉跟踪中使用负样本的高效训练”。ArXiv（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.02903)] \n  [[代码](hxx)]\n\n- **COHA:** 朱志宇、侯俊辉、吴达鹏。\u003Cbr \u002F>\n  “面向RGB-事件Transformer跟踪器的跨模态正交高秩增强”。ArXiv（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.04129)] \n  [[代码](https:\u002Fxx)]\n  \n- **SparseTrack:** 刘泽林、王兴刚、王成、刘文宇、白翔。\u003Cbr \u002F>\n  “SparseTrack：基于伪深度进行场景分解的多目标跟踪”。ArXiv（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.05238)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fhustvl\u002FSparseTrack)]\n    \n- **TransSOT:** 贾纳尼·桑加维尔、塔尼卡萨拉姆·科库尔、阿米尔塔林甘·拉马南、苏巴·费尔南多。\u003Cbr \u002F>\n  “单目标跟踪中的Transformer：一项实验性综述”。ArXiv（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.11867)] \n  [[代码]()]\n  \n- **ProFormer:** 朱亚斌、李成龙、王小、唐进、黄志祥。\u003Cbr \u002F>\n  “基于动态引导学习的渐进式融合Transformer实现RGBT跟踪”。ArXiv（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.14778)] \n  [[代码]()]\n  \n- **SOTVerse:** 胡诗雨、赵欣、黄凯琪。\u003Cbr \u002F>\n  “SOTVerse：单目标跟踪的用户自定义任务空间”。IJCV（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.07414)] \n  [[代码](http:\u002F\u002Fmetaverse.aitestunion.com\u002Fsotverse)]\n\n- **TSMTrack:** 唐传明、胡钦涛、周高凡、姚金珍、张建林、黄永梅、叶其祥。\u003Cbr \u002F>\n  “用于高性能视觉目标跟踪的Transformer子块匹配”。TITS（2023）。\n  [[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F10101686)] \n  [[代码](https:\u002Fxx)]\n\n- **TADS:** 李鑫、裴文杰、王耀威、何振宇、陆虎川、杨明轩。\u003Cbr \u002F>\n  “基于目标感知数据合成的自监督跟踪”。TNNLS（2023）。\n  [[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F10004981)] \n  [[代码]()]\n  \n\n### IJCAI 2023\n\n- **OSP2B:** 聂嘉浩、何志伟、杨宇翔、鲍正一、高明宇、张静。\u003Cbr \u002F>\n  “OSP2B：用于3D孪生跟踪的单阶段点到框网络”。IJCAI（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.11584)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FHaozheQi\u002FP2B)]\n  \n  \n### WACV 2023\n\n- **MVT:** 戈塔姆·耶卢鲁·戈帕尔、玛丽亚·A·阿默。\u003Cbr \u002F>\n  “基于移动视觉Transformer的视觉目标跟踪”。BMVC（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.05829)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fgoutamyg\u002FMVT)]\n  \n- **E.T.Track:** 菲利普·布拉特、梅内劳斯·卡纳基斯、马丁·丹内尔扬、卢克·范古尔。\u003Cbr \u002F>\n  “基于示例Transformer的高效视觉跟踪”。WACV（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.09686)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fpblatter\u002Fettrack)]\n\n### AAAI 2023\n\n- **CTTrack:** 宋子凯、罗润、于俊青、陈依萍、杨伟。\u003Cbr \u002F>\n  “基于相关掩码建模的紧凑型Transformer跟踪器”。AAAI（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.10938)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FHUSTDML\u002FCTTrack)]\n  \n- **TATrack:** 何凯杰、张灿龙、谢胜、李志新、王志文。\u003Cbr \u002F>\n  “基于长期上下文注意力的目标感知跟踪”。AAAI（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.13840)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fhekaijie123\u002FTATrack)]\n  \n- **RGBD1K:** 朱学峰、徐天阳、唐章勇、吴祖成、刘浩东、杨晓、吴小军、约瑟夫·基特勒。\u003Cbr \u002F>\n  “RGBD1K：一个用于RGB-D目标跟踪的大规模数据集及基准测试”。AAAI（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.09787)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fxuefeng-zhu5\u002FRGBD1K)]\n\n- **GdaTFT:** 梁云、李巧巧、龙富敏。\u003Cbr \u002F>\n  “全局扩张注意力与目标聚焦网络用于鲁棒跟踪”。AAAI（2023）。\n  [[论文](https:\u002F\u002Funderline.io\u002Flecture\u002F69278-global-dilated-attention-and-target-focusing-network-for-robust-tracking)] \n  [[代码](https:\u002F\u002Fgithub.com\u002F)]\n  \n- **GLT-T:** 聂嘉豪、何志伟、杨宇翔、高明宇、张静。\u003Cbr \u002F>\n  “GLT-T：用于点云中3D单目标跟踪的全局-局部Transformer投票机制”。AAAI（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.10927)] \n  [[扩展版](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.00242)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fhaooozi\u002FGLT-T)]\n  \n- **RSPT:** 钟方伟、毕晓、张宇迪、张伟、王义周。\u003Cbr \u002F>\n  “RSPT：重建周围环境并预测轨迹以实现可泛化的主动目标跟踪”。AAAI（2023）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.03623)] \n  [[代码](https:\u002F\u002Fsites.google.com\u002Fview\u002Faot-rspt)]\n  \n### NeurIPS 2022\n\n- **SwinTrack:** 林丽婷、范恒、许勇、凌海斌。\u003Cbr \u002F>\n  “SwinTrack：一种简单而强大的Transformer跟踪基线”。NeurIPS（2022）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.00995)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FLitingLin\u002FSwinTrack)]\n  \n- **VLTrack:** 郭明哲、张志鹏、范恒、景丽萍。\u003Cbr \u002F>\n  “将更多注意力转向视觉-语言跟踪”。NeurIPS（2022）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.01076)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FJudasDie\u002FSOTS)]\n  \n- **GKB:** 朱志宇、侯俊辉、吕先强。\u003Cbr \u002F>\n  “基于图嵌入的关键事件回溯学习，用于事件云中的目标跟踪”。NeurIPS（2022）。\n  [[论文](https:\u002F\u002Fnips.cc\u002FConferences\u002F2022\u002FSchedule?showEvent=54651)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fxxxx)]\n  \n- **TAP-Vid:** 卡尔·多尔施、安库什·古普塔、拉里萨·马尔凯耶娃、卢卡斯·斯迈拉、尤苏夫·艾塔尔、安德鲁·齐瑟曼、杨毅。\u003Cbr \u002F>\n  “TAP-Vid：一个用于跟踪视频中任意点的基准测试”。NeurIPS（2022）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.03726)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fdeepmind\u002Ftapnet)]\n\n  \n### ECCV 2022\n\n- **OSTrack:** 叶博涛、常洪、马炳鹏、单世刚。\u003Cbr \u002F>\n  “联合特征学习与关系建模的跟踪：一种单流框架”。ECCV（2022）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.11991)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fbotaoye\u002FOSTrack)]\n  \n- **Unicorn:** 严彬、蒋毅、孙培泽、王栋、袁泽寰、罗平、陆虎川。\u003Cbr \u002F>\n  “Unicorn：迈向目标跟踪的大统一”。ECCV（2022）口头报告。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.07078)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FMasterBin-IIAU\u002FUnicorn)]\n  \n- **SimTrack:** 陈博宇、李佩霞、白磊、乔雷、沈秋红、李波、甘伟浩、吴伟、欧阳万利。\u003Cbr \u002F>\n  “骨干网络就是你所需要的：一种用于视觉目标跟踪的简化架构”。ECCV（2022）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.05328)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FLPXTT\u002FSimTrack)]\n  \n- **CIA:** 皮志雄、万伟涛、孙冲、高昌欣、桑农、李晨。\u003Cbr \u002F>\n  “用于视觉跟踪的层次化特征嵌入”。ECCV（2022）。\n  [[论文](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2022\u002Fpapers_ECCV\u002Fhtml\u002F4400_ECCV_2022_paper.php)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fzxgravity\u002FCIA)]\n  \n- **RTS:** 马蒂厄·保罗、马丁·丹内尔扬、克里斯托夫·迈耶、卢克·范·戈尔。\u003Cbr \u002F>\n  “基于分割的鲁棒视觉跟踪”。ECCV（2022）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.11191)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fvisionml\u002Fpytracking)]\n  \n- **AiATrack:** 高申远、周春鸾、马超、王兴刚、袁俊松。\u003Cbr \u002F>\n  “AiATrack：用于Transformer视觉跟踪的注意力机制”。ECCV（2022）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.09603)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FLittle-Podi\u002FAiATrack)]\n\n- **SLTtrack:** 金珉智、李承宽、奥贞淑、韩宝亨、赵民洙。\u003Cbr \u002F>\n  “迈向序列级训练的视觉跟踪”。ECCV（2022）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.05810)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fbyminji\u002FSLTtrack)]\n  \n- **FEAR:** 瓦西里·博尔苏克、罗曼·维伊、奥列斯特·库平、泰季亚娜·马尔蒂纽克、伊戈尔·克拉舍尼、吉日·马塔斯。\u003Cbr \u002F>\n  “FEAR：快速、高效、准确且鲁棒的视觉跟踪器”。ECCV（2022）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2112.07957.pdf)] \n  [[代码](https:\u002F\u002Fxxxxxxx)]\n  \n- **PersonPath22:** 帅兵、阿莱桑德罗·贝加莫、乌塔·布赫勒、安德鲁·伯内沙维、阿莉莎·博登、约瑟夫·提格。\u003Cbr \u002F>\n  “大规模真实世界多人跟踪”。ECCV（2022）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.02175)] \n  [[代码](https:\u002F\u002Famazon-science.github.io\u002Ftracking-dataset\u002Fpersonpath22.html)]\n  \n- **STNet:** 胡磊、王灵鹏、唐玲华、兰凯浩、谢进、杨健。\u003Cbr \u002F>\n  “用于点云上单目标跟踪的3D暹罗Transformer网络”。ECCV（2022）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.11995)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Ffpthink\u002FSTNet)]\n  \n- **P3AFormer:** 赵泽林、吴泽、庄月清、李博勋、贾佳娅。\u003Cbr \u002F>\n  “将目标视为像素级分布进行跟踪”。ECCV（2022）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.05518)] \n  [[代码](https:\u002F\u002Fsjtuytc.github.io\u002Fzelin_pages\u002Fp3aformer.html)]\n  \n- **TETer:** 李思源、马丁·丹内尔扬、丁恒辉、托马斯·E·黄、费舍尔·余。\u003Cbr \u002F>\n  “在野外跟踪所有事物”。ECCV（2022）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.12978)] \n  [[代码](http:\u002F\u002Fvis.xyz\u002Fpub\u002Ftet)]\n  \n- **ByteTrack:** 张一夫、孙培泽、蒋毅、于东东、袁泽寰、罗平、刘文宇、王兴刚。\u003Cbr \u002F>\n  “ByteTrack：通过关联每个检测框进行多目标跟踪”。ECCV（2022）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2110.06864v2.pdf)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fifzhang\u002FByteTrack)]\n\n- **MOTR:** 曾方高、董斌、张元、王天财、张向宇、魏义臣。\u003Cbr \u002F>\n  “MOTR：基于Transformer的端到端多目标跟踪”。ECCV（2022）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2105.03247)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fmegvii-research\u002FMOTR)]\n  \n- **MTracker:** 张一夫、王春雨、王兴刚、曾文俊、刘文宇。\u003Cbr \u002F>\n  “基于边缘推理的鲁棒多目标跟踪”。ECCV（2022）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.03727)] \n  [[代码](https:\u002F\u002Fxxxxxxx)]\n\n### CVPR 2022\n\n- **MixFormer:** 崔宇涛、程江、王利民、吴刚。\u003Cbr \u002F>\n  “MixFormer：基于迭代混合注意力的端到端目标跟踪”。CVPR（2022）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.11082)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FMCG-NJU\u002FMixFormer)]\n  \n- **OWTB:** 刘洋、伊迪尔·埃森·祖尔菲卡尔、乔纳森·吕滕、阿查尔·戴夫、德瓦·拉马南、巴斯蒂安·莱贝、阿廖沙·奥谢普、劳拉·莱阿尔-泰克塞。\u003Cbr \u002F>\n  “开启开放世界目标跟踪”。CVPR（2022）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.11221)] \n  [[代码](https:\u002F\u002Fopenworldtracking.github.io\u002F)]\n  \n- **UTT:** 马帆、肖正浩、朱林超、范浩奇、徐一乐、杨毅、严志成。\u003Cbr \u002F>\n  “用于目标跟踪的统一Transformer跟踪器”。CVPR（2022）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.15175)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FFlowerfan\u002FTrackron)]\n  \n- **CSWinTT:** 宋子凯、于俊青、陈依萍、杨伟。\u003Cbr \u002F>\n  “基于循环移位窗口注意力的Transformer目标跟踪”。CVPR（2022）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.03806)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FSkyeSong38\u002FCSWinTT)]\n  \n- **ToMP:** 克里斯托夫·迈耶、马丁·丹内尔扬、古塔姆·巴特、马蒂厄·保罗、丹达·帕尼·保德尔、费舍尔·余、卢克·范·古尔。\u003Cbr \u002F>\n  “用于目标跟踪的模型预测变换”。CVPR（2022）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.11192)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fvisionml\u002Fpytracking)]\n  \n- **TCTrack:** 曹子昂、黄子元、潘亮、张世伟、刘子威、傅昌宏。\u003Cbr \u002F>\n  “TCTrack：面向航空目标跟踪的时序上下文”。CVPR（2022）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.01885)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fvision4robotics\u002FTCTrack)]\n  \n- **SBT:** 谢飞、王春雨、王广亭、曹岳、杨万库、曾文俊。\u003Cbr \u002F>\n  “相关性感知的深度目标跟踪”。CVPR（2022）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.01666)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fphiphiphi31\u002FSuperSBT)]\n  \n- **AdaRS:** 李一豪、俞军、蔡中鹏、潘宇文。\u003Cbr \u002F>\n  “基于自然语言的跨模态目标检索跟踪”。CVPR（2022）。\n  [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022W\u002FODRUM\u002Fhtml\u002FLi_Cross-Modal_Target_Retrieval_for_Tracking_by_Natural_Language_CVPRW_2022_paper.html)] \n  [[代码](xxxx)]\n  \n- **STNet:** 张继庆、董博、张海伟、丁建川、费利克斯·海德、尹宝才、杨欣。\u003Cbr \u002F>\n  “用于事件驱动单目标跟踪的脉冲Transformer”。CVPR（2022）。\n  [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fhtml\u002FZhang_Spiking_Transformers_for_Event-Based_Single_Object_Tracking_CVPR_2022_paper.html)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FJee-King\u002FCVPR2022_STNet)]\n  \n- **VTUAV:** 张鹏宇、赵杰、王东、陆虎川、阮翔。\u003Cbr \u002F>\n  “可见光-热红外无人机目标跟踪：大规模基准与新基线”。CVPR（2022）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.04120)] \n  [[代码](https:\u002F\u002Fzhang-pengyu.github.io\u002FDUT-VTUAV\u002F)]\n  \n- **UAVMOT:** 刘帅、李鑫、陆虎川、何友。\u003Cbr \u002F>\n  “多目标跟踪与移动无人机的结合”。CVPR（2022）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002Fxxxx.xxxx)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FLiuShuaiyr\u002FUAVMOT)]\n  \n- **GTR:** 周兴义、殷天伟、弗拉德伦·科尔图恩、菲利普·克雷亨布尔。\u003Cbr \u002F>\n  “全局跟踪Transformer”。CVPR（2022）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.13250)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fxingyizhou\u002FGTR)]\n  \n- **GTELT:** 周子坤、陈健秋、裴文杰、毛凯歌、王洪鹏、何振宇。\u003Cbr \u002F>\n  “基于局部跟踪器集成的全局跟踪”。CVPR（2022）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.16092)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FZikunZhou\u002FGTELT)]\n  \n- **RBO:** 唐峰、凌强。\u003Cbr \u002F>\n  “基于排序的孪生视觉目标跟踪”。CVPR（2022）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2205.11761.pdf)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fsansanfree\u002FRBO)]\n  \n- **ULAST:** 沈秋红、乔磊、郭金阳、李佩霞、李鑫、李波、冯伟涛、甘伟浩、吴伟、欧阳万里。\u003Cbr \u002F>\n  “无监督学习的精确孪生目标跟踪”。CVPR（2022）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.01475)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FFlorinShum\u002FULAST)]\n  \n- **UDAT:** 叶俊杰、傅昌宏、郑光泽、丹达·帕尼·保德尔、陈光。\u003Cbr \u002F>\n  “用于夜间航空目标跟踪的无监督域自适应”。CVPR（2022）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.10541)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fvision4robotics\u002FUDAT)]\n  \n- **M2Track:** 郑朝达、闫旭、张海明、王宝源、程圣辉、崔树光、李震。\u003Cbr \u002F>\n  “超越3D孪生目标跟踪：面向点云中3D单目标跟踪的运动中心范式”。CVPR（2022）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.01730)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FGhostish\u002FOpen3DSOT)]\n  \n\n### IJCAI 2022\n\n- **InBN:** 郭明哲、张志鹏、范恒、景丽萍、吕怡琳、李冰、胡卫明。\u003Cbr \u002F>\n  “通过信息交互学习目标感知表征的视觉目标跟踪”。IJCAI（2022）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2201.02526)] \n  [[代码](https:\u002F\u002Fxxxxxxx)]\n  \n- **SparseTT:** 付志宏、付泽华、刘清杰、付泽华、王云洪。\u003Cbr \u002F>\n  “SparseTT：基于稀疏Transformer的视觉目标跟踪”。IJCAI（2022）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.03776)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Ffzh0917\u002FSparseTT)]\n  \n- **HybTransT:** 郑一彩、金敏智、朴恩赫、韩宝炯。\u003Cbr \u002F>\n  “在线混合轻量级表征学习及其在视觉目标跟踪中的应用”。IJCAI（2022）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.11179)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Ffzh0917\u002FSparseTT)]\n  \n  \n### MICCAI 2022\n\n- **TLT:** 唐文、康汉、张浩悦、于鹏欣、科里·W·阿诺德、张荣国。\u003Cbr \u002F>\n  “Transformer病变追踪器”。MICCAI（2022）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.06252)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FTangWen920812\u002FTLT)]\n  \n  \n### ArXiv 2022\n\n- **ProTrack:** 杨锦宇、李哲、郑峰、阿莱什·莱昂纳迪斯、宋京宽。\u003Cbr \u002F>\n  “用于多模态目标跟踪的提示工程”。ACM MM（2022）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.14571)] \n  [[代码](https:\u002F\u002F)]\n  \n- **GATransT:** 王立波、陈思、王振、王大涵、朱顺志。\u003Cbr \u002F>\n  “用于鲁棒视觉目标跟踪的图注意力Transformer网络”。ICONIP（2022）。\n  [[论文](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-981-99-1639-9_14)] \n  [[代码]()]\n\n- **SiamTDN:** 梁延杰、赵鹏辉、郝一飞、王汉子。\u003Cbr \u002F>\n  “用于鲁棒视觉跟踪的孪生模板扩散网络”。ICME（2022年）。\n  [[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9859929)] \n  [[代码]()]\n  \n- **TAT:** 蓝凯豪、姜浩博、谢进。\u003Cbr \u002F>\n  “时序感知的孪生跟踪器：融合时序上下文用于3D目标跟踪”。ACCV（2022年）。\n  [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FACCV2022\u002Fhtml\u002FLan_Temporal-aware_Siamese_Tracker_Integrate_Temporal_Context_for_3D_Object_Tracking_ACCV_2022_paper.html)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Ftqsdyy\u002FTAT)]\n  \n - **COESOT:** 唐川明、王肖、黄菊、姜博、朱林、张建林、王耀伟、田永宏。\u003Cbr \u002F>\n  “重访基于颜色-事件的跟踪：统一的网络、数据集和度量标准”。ArXiv（2022年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.11010)] \n  [[代码](COESOT)]\n  \n- **WATB:** 王发胜、曹平、李福、王星、何兵、孙富明。\u003Cbr \u002F>\n  “WATB：野生动物跟踪基准”。IJCV（2022年）。\n  [[论文](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002Fs11263-022-01732-3.pdf?pdf=button)] \n  [[代码](https:\u002F\u002Fw-1995.github.io\u002F)]\n  \n- **UAV2UAV:** 王勇、黄子荣、罗伯特·拉加尼耶、张焕龙、丁璐。\u003Cbr \u002F>\n  “无人机对无人机跟踪基准”。KBS（2023年）。\n  [[论文](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS095070512201293X)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fhapless19\u002FUAV2UAV-dataset)]\n  \n- **UOT100:** K. Panetta、L. Kezebou、V. Oludare 和 S. S. Agaian。\u003Cbr \u002F>\n  “全面的水下目标跟踪基准数据集及基于GAN的水下图像增强”。IEEE JOE（2022年）。\n  [[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9499961)] \n  [[代码](https:\u002F\u002Fwww.kaggle.com\u002Fdatasets\u002Flandrykezebou\u002Fuot100-underwater-object-tracking-dataset)]\n  \n- **NeighborTrack:** 陈宇希、王建尧、杨承云、张鸿硕、林友隆、庄咏瑜、廖宏远。\u003Cbr \u002F>\n  “NeighborTrack：通过与邻近轨迹片段的二部匹配提升单目标跟踪性能”。ArXiv（2022年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2211.06663.pdf)] \n  [[代码](此处为空)]\n  \n- **MTTSiam:** Ali Sekhavati、李元淑。\u003Cbr \u002F>\n  “用于长期目标跟踪的多模板时序孪生网络”。ArXiv（2022年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.13812)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FAliGreen0\u002FMTTSiam)]\n  \n- **PruningInTracking:** Saksham Aggarwal、Taneesh Gupta、Pawan Kumar Sahu、Arnav Chavan、Rishabh Tiwari、Dilip K. Prasad、Deepak K. Gupta。\u003Cbr \u002F>\n  “通过网络剪枝设计轻量级目标跟踪器：使用CNN还是Transformer？”。ArXiv（2022年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.13769)] \n  [[代码](此处为空)]\n  \n- **ProContEXT:** 兰金鹏、程志奇、何俊彦、李晨阳、罗斌、鲍旭、项望萌、耿义峰、谢宣松。\u003Cbr \u002F>\n  “ProContEXT：探索渐进式上下文变换器用于跟踪”。ArXiv（2022年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.15511)] \n  [[代码](https:\u002F\u002Fdrive.google.com\u002Fdrive\u002Ffolders\u002F18kHdBNEwvbk8S4-mwHaI-mw5w6cK-pyY?usp=sharing)]\n  \n- **TSFMO:** 张哲文、吴富良、邱宇明、梁景东、李水旺。\u003Cbr \u002F>\n  “小而快速移动目标的跟踪：一个基准”。ArXiv（2022年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2209.04284)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FCodeOfGithub\u002FS-KeepTrack)]\n  \n- **SFTransT:** 唐川明、王肖、白元超、吴哲、张建林、黄永梅。\u003Cbr \u002F>\n  “用于视觉目标跟踪的空间-频率变换器学习”。ArXiv（2022年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.08829)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FTchuanm\u002FSFTransT.git)]\n  \n- **DMTracker:** 高尚、杨金宇、李哲、郑峰、Aleš Leonardis、宋京宽。\u003Cbr \u002F>\n  “用于RGBD跟踪的双融合模态感知表征学习”。ArXiv（2022年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.03055)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FShangGaoG\u002FDMTracker)]\n  \n- **AVisT:** Mubashir Noman、Wafa Al Ghallabi、Daniya Najiha、Christoph Mayer、Akshay Dudhane、Martin Danelljan、Hisham Cholakkal、Salman Khan、Luc Van Gool、Fahad Shahbaz Khan。\u003Cbr \u002F>\n  “AVisT：恶劣可见条件下视觉目标跟踪的基准”。ArXiv（2022年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.06888)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fvisionml\u002Fpytracking)]\n\n- **RGBDReview:** 杨金宇、李哲、闫松、郑峰、阿莱什·莱昂纳迪斯、约尼-克里斯蒂安·凯马莱宁、邵凌。\u003Cbr \u002F>\n  “RGBD目标跟踪：深度综述”。ArXiv（2022年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.14134)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fmemoryunreal\u002FRGBD-tracking-review)]\n  \n- **TOT\u002FMKDNet:** 朱亚斌、李成龙、刘瑶、王潇、唐进、罗彬、黄志翔。\u003Cbr \u002F>\n  “微型目标跟踪：大规模数据集与基准”。ArXiv（2022年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.05659)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fmmic-lcl\u002FDatasets-and-benchmark-code)]\n  \n- **WebUAV-3M:** 张春辉、黄冠杰、刘莉、黄珊、杨一楠、张宇轩、万翔、葛世明。\u003Cbr \u002F>\n  “WebUAV-3M：揭示百万级深度无人机跟踪能力的基准”。ArXiv（2022年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2201.07425)] \n  [[代码](https:\u002F\u002Fgithub.com\u002F983632847\u002FWebUAV-3M)]\n  \n- **SiamTracking4UAV:** 傅昌宏、陆坤翰、郑光泽、叶俊杰、曹子昂、李博文。\u003Cbr \u002F>\n  “面向无人机的孪生目标跟踪：综述与全面分析”。ArXiv（2022年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.04281)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fvision4robotics\u002FSiameseTracking4UAV)]\n  \n- **SOTSurvey:** 扎赫拉·索莱曼尼塔勒布、穆罕默德·阿里·凯万拉德。\u003Cbr \u002F>\n  “单目标跟踪：方法、数据集与评估指标综述”。ArXiv（2022年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2201.13066)] \n  \n- **SOTRearch:** 韩瑞泽、冯伟、郭清、胡庆华。\u003Cbr \u002F>\n  “单目标跟踪研究：综述”。ArXiv（2022年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.11410)] \n  \n- **VOTSurvey:** 陈飞、王晓东、赵云翔、吕绍和、牛鑫。\u003Cbr \u002F>\n  “视觉目标跟踪：综述”。CVIU（2022年）。\n  [[论文](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS1077314222001011?dgcid=author)] \n    \n- **HCAT:** 陈欣、王栋、李东东、卢虎川。\u003Cbr \u002F>\n  “基于层次交叉注意力Transformer的高效视觉跟踪”。ArXiv（2022年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.13537)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fchenxin-dlut\u002FHCAT)]\n  \n- **TransT-M:** 陈欣、颜斌、朱嘉文、王栋、卢虎川。\u003Cbr \u002F>\n  “高性能Transformer跟踪”。ArXiv（2022年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.13533)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fchenxin-dlut\u002FTransT-M)]\n  \n- **RGBDT:** 杨金宇、李哲、闫松、郑峰、阿莱什·莱昂纳迪斯、约尼-克里斯蒂安·凯马莱宁、邵凌。\u003Cbr \u002F>\n  “RGBD目标跟踪：深度综述”。ArXiv（2022年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.14134)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fmemoryunreal\u002FRGBD-tracking-review)]\n  \n- **DST:** 崔耀、王广辉、张力。\u003Cbr \u002F>\n  “为子空间跟踪辩护：用于视觉跟踪的正交嵌入”。ArXiv（2022年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.07927)] \n  [[代码](xxxxxxx)]\n  \n- **DUT-Anti-UAV:** 赵杰、张景书、李东东、王栋。\u003Cbr \u002F>\n  “基于视觉的反无人机检测与跟踪”。TITS（2022年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.10851)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fwangdongdut\u002FDUT-Anti-UAV)]\n  \n- **CoCoLoT:** 马泰奥·邓霍费尔、克里斯蒂安·米凯洛尼。\u003Cbr \u002F>\n  “CoCoLoT：在长期视觉跟踪中结合互补跟踪器”。ICPR（2022年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.04261)] \n  [[代码](xxxxxxx)]\n  \n- **EUSA:** 刘啸、陈兆宇、李伟、朱继伟、王家锋、张文强、甘仲学。\u003Cbr \u002F>\n  “用于视觉目标跟踪的高效通用洗牌攻击”。ICASSP（2022年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.06898)] \n  [[代码](xxxxxxx)]\n  \n- **ITB:** 李欣、刘乔、裴文杰、沈秋红、王耀威、卢虎川、杨明轩。\u003Cbr \u002F>\n  “一个信息丰富的跟踪基准”。ArXiv（2021年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.06467)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FXinLi-zn\u002FInformative-tracking-benchmark)]\n  \n- **VisEvent:** 王晓、李佳宁、朱琳、张志鹏、陈哲、李欣、王耀威、田永洪、吴峰。\u003Cbr \u002F>\n  “VisEvent：通过帧流与事件流协作实现可靠的目标跟踪”。ArXiv（2021年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2108.05015)] \n  [[代码](https:\u002F\u002Fsites.google.com\u002Fview\u002Fviseventtrack\u002F)]\n  \n- **TrTr:** 赵莫居、冈田圭、稻叶雅之。\u003Cbr \u002F>\n  “TrTr：基于Transformer的视觉跟踪”。ArXiv（2021年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2105.03817.pdf)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Ftongtybj\u002FTrTr)]\n\n- **TS-RCN:** 张宁、刘金根、王科、曾丹、梅涛。\u003Cbr \u002F>\n  “基于双流残差卷积网络的鲁棒视觉目标跟踪”。ArXiv（2020年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2005.06536.pdf)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fxxxxx\u002Fxxxx)]\n  \n- **FCOT:** 崔宇涛、蒋成、王利民、吴刚山。\u003Cbr \u002F>\n  “全卷积在线跟踪”。ArXiv（2020年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2004.07109)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FMCG-NJU\u002FFCOT)]\n  \n\n\n### AAAI 2022\n\n- **HDN:** 战新锐、刘悦然、朱建科、李阳。\u003Cbr \u002F>\n  “用于平面目标跟踪的单应性分解网络”。AAAI（2022年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2112.07909.pdf)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fzhanxinrui\u002FHDN)]\n\n- **MArMOT:** 李成龙、朱天浩、刘磊、司晓楠、范子林、翟苏兰。\u003Cbr \u002F>\n  “跨模态目标跟踪：模态感知表示与统一基准”。AAAI（2022年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.04264)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fxxxxx\u002FMArMOT)]\n\n- **APFNet:** 肖云、杨梦梦、李成龙、刘磊、唐进。\u003Cbr \u002F>\n  “基于属性的渐进式融合网络，用于RGBT跟踪”。AAAI（2022年）。\n  [[论文](https:\u002F\u002Fgithub.com\u002Fyangmengmeng1997\u002FAPFNet\u002Ftree\u002Fmain\u002FPaper)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fyangmengmeng1997\u002FAPFNet)]\n\n- **TAV:** 塔哈尔·阿卢什、热罗姆·朗、弗洛里安·伊格尔。\u003Cbr \u002F>\n  “基于赞成投票的真实跟踪：规模很重要”。AAAI（2022年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.04387)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fzhanxinrui\u002FHDN)]\n  \n  \n### ICLR 2022\n\n- **FSBA:** 李一鸣、钟浩翔、马兴军、江勇、夏树涛。\u003Cbr \u002F>\n  “针对视觉目标跟踪的少样本后门攻击”。ICLR（2022年）。\n  [[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=qSV5CuSaK_a)] \n  [[代码](https:\u002F\u002Fwww.dropbox.com\u002Fs\u002Fnfg7en8azc1cvz3\u002Fcodes_FSBA_ICLR22.zip?dl=0)]\n\n### ICRA 2022\n\n- **Ad2Attack:** Fu Changhong, Li Sihang, Yuan Xinnan, Ye Junjie, Cao Ziang, Ding Fangqiang。\u003Cbr \u002F>\n  “Ad2Attack：面向实时无人机跟踪的自适应对抗攻击”。ICRA（2022）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.01516)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fvision4robotics\u002FAd2Attack)]\n \n- **SCT:** Ye Junjie，Fu Changhong，Cao Ziang，An Shan，Zheng Guangze，Li Bowen。\u003Cbr \u002F>\n  “追踪器遇上黑夜：一种用于无人机跟踪的Transformer增强模型”。ICRA\u002FRAL（2022）。\n  [[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9696362)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fvision4robotics\u002FSCT)]\n\n- **SiamX:** Huang Huajian，Yeung Sai-Kit。\u003Cbr \u002F>\n  “SiamX：一种基于跨层级特征相关性和自适应跟踪方案的高效长期跟踪器”。ICRA（2022）。\n  [[论文](https:\u002F\u002Fhuajianup.github.io\u002Fresearch\u002FSiamX\u002FSiamX_ICRA2022_final.pdf)] \n  [[代码](https:\u002F\u002Fhuajianup.github.io\u002Fresearch\u002FSiamX\u002F)]\n \n \n### WACV 2022\n\n- **SiamTPN:** Xing Daitao，Evangeliou Nikolaos，Tsoukalas Athanasios，Tzes Anthony。\u003Cbr \u002F>\n  “用于实时无人机跟踪的孪生Transformer金字塔网络”。WACV（2022）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2110.08822.pdf)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FRISC-NYUAD\u002FSiamTPNTracker)]\n  \n### ICCV 2021\n\n- **STARK:** Yan Bin，Peng Houwen，Fu Jianlong，Wang Dong，Lu Huchuan。\u003Cbr \u002F>\n  “用于视觉跟踪的时空Transformer学习模型”。ICCV（2021）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2103.17154.pdf)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fresearchmm\u002FStark)]\n  \n- **AutoMatch:** Zhang Zhipeng，Liu Yihao，Wang Xiao，Li Bing，Hu Weiming。\u003Cbr \u002F>\n  “学会匹配：用于视觉跟踪的自动匹配网络设计”。ICCV（2021）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2108.00803.pdf)]\n  [[代码](https:\u002F\u002Fgithub.com\u002FJudasDie\u002FSOTS)]\n  \n- **DDT:** Yu Bin，Tang Ming，Zheng Linyu，Zhu Guibo，Wang Jinqiao。\u003Cbr \u002F>\n  “基于Transformer的高性能判别式跟踪方法”。ICCV（2021）。\n  [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FYu_High-Performance_Discriminative_Tracking_With_Transformers_ICCV_2021_paper.pdf)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fxxxx\u002Fxxxx)]\n  \n- **HiFT:** Cao Ziang，Fu Changhong，Ye Junjie，Li Bowen，Li Yiming。\u003Cbr \u002F>\n  “用于空中跟踪的层次化特征Transformer模型”。ICCV（2021）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2108.00202.pdf)]\n  [[代码](https:\u002F\u002Fgithub.com\u002Fvision4robotics\u002FHiFT)]\n  \n- **DualTFR:** Xie Fei，Wang Chunyu，Wang Guangting，Yang Wankou，Zeng Wenjun。\u003Cbr \u002F>\n  “通过双分支全Transformer网络学习跟踪表示”。ICCVW（2021）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.02571)]\n  [[代码](https:\u002F\u002Fgithub.com\u002Fphiphiphi31\u002FDualTFR)]\n  \n- **DMB:** Xie Fei，Yang Wankou，Zhang Kaihua，Liu Bo，Xue Wanli，Zuo Wangmeng。\u003Cbr \u002F> \n  “用于高性能视觉跟踪的时空外观记忆网络学习模型”。ICCVW（2021）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2009.09669.pdf)]\n  [[代码](https:\u002F\u002Fgithub.com\u002Fphiphiphi31\u002FDMB)]\n\n- **KeepTrack:** Mayer Christoph，Danelljan Martin，Paudel Danda Pani，Van Gool Luc。\u003Cbr \u002F>\n  “通过学习目标候选关联来跟踪不应被跟踪的对象”。ICCV（2021）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.16556)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fvisionml\u002Fpytracking)]\n\n- **SAOT:** Zhou Zikun，Pei Wenjie，Li Xin，Wang Hongpeng，Zheng Feng，He Zhenyu。\u003Cbr \u002F>\n  “基于显著性关联的目标跟踪方法”。ICCV（2021）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2108.03637.pdf)]\n  [[代码](https:\u002F\u002Fgithub.com\u002FZikunZhou\u002FSAOT)]\n \n- **MLVSNet:** Wang Zhoutao，Xie Qian，Lai Yu-Kun，Wu Jing，Long Kun，Wang Jun。\u003Cbr \u002F>\n  “MLVSNet：用于3D视觉跟踪的多级投票孪生网络”。ICCV（2021）。\n  [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FWang_MLVSNet_Multi-Level_Voting_Siamese_Network_for_3D_Visual_Tracking_ICCV_2021_paper.pdf)]\n  [[代码](https:\u002F\u002Fgithub.com\u002FCodeWZT\u002FMLVSNet)]\n  \n - **EFTrack:** Zhang Jiqing，Yang Xin，Fu Yingkai，Wei Xiaopeng，Yin Baocai，Dong Bo。\u003Cbr \u002F>\n  “通过联合利用帧域和事件域进行目标跟踪”。ICCV（2021）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2109.09052)]\n  [[代码](https:\u002F\u002Fgithub.com\u002FJee-King\u002FICCV2021_Event_Frame_Tracking)]\n  \n - **Box2Mask:** Zhao Bin，Bhat Goutam，Danelljan Martin，Van Gool Luc，Timofte Radu。\u003Cbr \u002F>\n  “通过挖掘视频中的时空一致性从边界框生成掩码”。ICCV（2021）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2101.02196)]\n  [[代码](https:\u002F\u002Fgithub.com\u002Fvisionml\u002Fpytracking)]\n  \n- **DepthTrack:** Yan Song，Yang Jinyu，Käpylä Jani，Zheng Feng，Leonardis Aleš，Kämäräinen Joni-Kristian。\u003Cbr \u002F>\n  “DepthTrack：揭示RGBD跟踪的强大能力”。ICCV（2021）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2108.13962)]\n  [[代码](https:\u002F\u002Fgithub.com\u002Fxiaozai\u002FDeT)]\n  \n- **USOT:** Zheng Jilai，Ma Chao，Peng Houwen，Yang Xiaokang。\u003Cbr \u002F>\n  “从未标注视频中学习目标跟踪方法”。ICCV（2021）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2108.12711)]\n  [[代码](https:\u002F\u002Fgithub.com\u002FVISION-SJTU\u002FUSOT)]\n  \n- **TOTB:** Fan Heng，Akhilesha Miththanthaya Halady，Harshit，Rajan Siranjiv Ramana，Liu Xiaoqiong，Zou Zhilin，Lin Yuewei，Ling Haibin。\u003Cbr \u002F>\n  “透明物体跟踪基准测试”。ICCV（2021）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2011.10875)]\n  [[代码](https:\u002F\u002Fhengfan2010.github.io\u002Fprojects\u002FTOTB\u002F)]\n  \n- **TREK-150:** Dunnhofer Matteo，Furnari Antonino，Farinella Giovanni Maria，Micheloni Christian。\u003Cbr \u002F>\n  “第一人称视角对目标跟踪是否具有挑战性？”。ICCVW（2021）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2108.13665)]\n  [[代码](https:\u002F\u002Fmachinelearning.uniud.it\u002Fdatasets\u002Ftrek150\u002F)]\n  [[工具包](https:\u002F\u002Fgithub.com\u002Fmatteo-dunnhofer\u002FTREK-150-toolkit)]\n  \n- **VASR:** Dai Kenan，Zhao Jie，Wang Lijun，Wang Dong，Li Jianhua，Lu Huchuan，Qian Xuesheng，Yang Xiaoyun。\u003Cbr \u002F>\n  “通过选择与精修进行视觉跟踪的视频标注方法”。ICCV（2021）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2108.03821.pdf)]\n  [[代码](https:\u002F\u002Fgithub.com\u002FDaikenan\u002FVASR)]\n  \n- **BAT:** Zheng Chaoda，Yan Xu，Gao Jiantao，Zhao Weibing，Zhang Wei，Li Zhen，Cui Shuguang。\u003Cbr \u002F>\n  “面向点云上单目标跟踪的盒感知特征增强方法”。ICCV（2021）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2108.04728.pdf)]\n  [[代码](https:\u002F\u002Fgithub.com\u002FGhostish\u002FBAT)]\n  \n- **ABA:** Guo Qing，Cheng Ziyi，Juefei-Xu Felix，Ma Lei，Xie Xiaofei，Liu Yang，Zhao Jianjun。\u003Cbr \u002F>\n  “学习以对抗方式模糊视觉目标跟踪”。ICCV（2021）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2107.12085)]\n  [[代码](https:\u002F\u002Fgithub.com\u002Ftsingqguo\u002FABA)]\n\n### CVPR 2021\n\n- **TransT:** 陈鑫、闫斌、朱嘉文、王栋、杨晓云、陆虎川。\u003Cbr \u002F>\n  “Transformer跟踪”。CVPR（2021）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.15436)]\n  [[代码](https:\u002F\u002Fgithub.com\u002Fchenxin-dlut\u002FTransT)]\n  \n- **Alpha-Refine:** 闫斌、张新宇、王栋、陆虎川、杨晓云。\u003Cbr \u002F>\n  “Alpha-Refine：通过精确的边界框估计提升跟踪性能”。CVPR（2021）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1911.12836.pdf)]\n  [[代码](https:\u002F\u002Fgithub.com\u002FMasterBin-IIAU\u002FAlphaRefine)]\n  \n- **LightTrack:** 闫斌、彭厚文、吴侃、王栋、傅建龙、陆虎川。\u003Cbr \u002F>\n  “LightTrack：通过一次性架构搜索寻找用于目标跟踪的轻量级神经网络”。CVPR（2021）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.14545)]\n  [[代码](https:\u002F\u002Fgithub.com\u002Fcvpr-2021\u002Flighttrack)]\n  \n- **TrTrack:** 王宁、周文刚、王杰、李厚强。\u003Cbr \u002F>\n  “Transformer遇见跟踪器：利用时间上下文实现鲁棒的视觉跟踪”。CVPR（2021）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2103.11681.pdf)]\n  [[代码](https:\u002F\u002Fgithub.com\u002F594422814\u002FTransformerTrack)]\n  \n- **STMTrack:** 傅志宏、刘庆杰、傅泽华、王云洪。\u003Cbr \u002F>\n  “STMTrack：基于时空记忆网络的无模板视觉跟踪”。CVPR（2021）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.00324)]\n  [[代码](https:\u002F\u002Fgithub.com\u002Ffzh0917\u002FSTMTrack)]\n  \n- **SiamGAT:** 郭东燕、邵艳艳、崔颖、王振华、张丽燕、沈春华。\u003Cbr \u002F>\n  “图注意力跟踪”。CVPR（2021）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2011.11204)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fohhhyeahhh\u002FSiamGAT)]\n  \n- **SiamACM:** 韩文成、董兴平、法哈德·沙赫巴兹·汗、邵玲、沈建兵。\u003Cbr \u002F>\n  “在孪生跟踪器中学习融合非对称特征图”。CVPR（2021）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2012.02776.pdf)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fwencheng256\u002FSiamBAN-ACM)]\n  \n- **PST:** 南君熙、许美兰、吴承旭、李俊英、金善柱。\u003Cbr \u002F>\n  “多边形点集跟踪”。CVPR（2021）。\n  [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fpapers\u002FNam_Polygonal_Point_Set_Tracking_CVPR_2021_paper.pdf)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FPST)]\n  \n- **PUL:** 吴强强、万佳、安东尼·B·陈。\u003Cbr \u002F>\n  “面向视觉目标跟踪的渐进式无监督学习”。CVPR（2021）。\n  [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fpapers\u002FWu_Progressive_Unsupervised_Learning_for_Visual_Object_Tracking_CVPR_2021_paper.pdf)]\n  [[代码](https:\u002F\u002Fgithub.com\u002FPUL)]\n  \n- **CapsuleRRT:** 马丁、吴向谦。\u003Cbr \u002F>\n  “CapsuleRRT：基于胶囊的关系感知回归跟踪”。CVPR（2021）。\n  [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fpapers\u002FMa_CapsuleRRT_Relationships-Aware_Regression_Tracking_via_Capsules_CVPR_2021_paper.pdf)]\n  [[代码](https:\u002F\u002Fgithub.com\u002FCapsuleRRT)]\n  \n- **Semi-Track:** 傅洋、刘思飞、伊克巴尔、德梅洛、施浩辉、考茨。\u003Cbr \u002F>\n  “无需视频标注即可学习实例跟踪”。CVPR（2021）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2104.00287.pdf)] \n  [[代码](https:\u002F\u002Foasisyang.github.io\u002Fprojects\u002Fsemi-track\u002Findex.html)]\n\n- **RE-Siam:** 戈普塔、阿利亚、加夫斯。\u003Cbr \u002F>\n  “用于跟踪的旋转等变孪生网络”。CVPR（2021）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2012.13078)]\n  [[代码](https:\u002F\u002Fgithub.com\u002Fdkgupta90\u002Fre-siamnet)]\n  \n- **SiamNLP:** 冯琪、阿布拉夫斯基、白钦勋、斯克拉罗夫。\u003Cbr \u002F>\n  “孪生自然语言跟踪器：利用孪生跟踪器进行自然语言描述的跟踪”。CVPR（2021）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1912.02048v2)]\n  [[代码](https:\u002F\u002Fgithub.com\u002Ffredfung007\u002Fsnlt)]\n  \n- **LangTrackBenchmark:** 王晓、舒秀军、张志鹏、江波、王耀伟、田永红、吴峰。\u003Cbr \u002F>\n  “借助自然语言实现更灵活、更精准的目标跟踪：算法与基准测试”。CVPR（2021）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2103.16746.pdf)]\n  [[代码](https:\u002F\u002Fsites.google.com\u002Fview\u002Flangtrackbenchmark\u002F)]\n  \n- **DroneCrowd:** 文隆银、杜大伟、朱鹏飞、胡清华、王启龙、薄立峰、吕思伟。\u003Cbr \u002F>\n  “人群中的检测、跟踪与计数与无人机相结合：一项基准测试”。CVPR（2021）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2105.02440.pdf)]\n  [[代码](https:\u002F\u002Fgithub.com\u002FVisDrone\u002FDroneCrowd)]\n  \n- **DMTrack:** 张子凯、钟彬能、张盛平、唐振军、刘欣、张兆祥。\u003Cbr \u002F>\n  “基于动态卷积和多目标跟踪理念的干扰物感知快速跟踪”。CVPR（2021）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.12041)]\n  [[代码](https:\u002F\u002Fgithub.com\u002Fhqucv\u002Fdmtrack)]\n  \n- **LF-Siam:** 程思远、钟彬能、李国荣、刘欣、唐振军、李仙仙、王静。\u003Cbr \u002F>\n  “学习过滤：用于鲁棒跟踪的孪生关系网络”。CVPR（2021）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.00829)]\n  [[代码](https:\u002F\u002Fgithub.com\u002Fhqucv\u002Fsiamrn)]\n  \n- **IoU攻击:** 贾帅、宋一冰、马超、杨晓康。\u003Cbr \u002F>\n  “IoU攻击：面向视觉目标跟踪的时间一致性黑盒对抗攻击”。CVPR（2021）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.14938)]\n  [[代码](https:\u002F\u002Fgithub.com\u002FVISION-SJTU\u002FIoUattack)]\n  \n- **MeanShift++:** 张珍妮、蒋海因里希。\u003Cbr \u002F>\n  “MeanShift++：极快的模式搜索及其在分割和目标跟踪中的应用”。CVPR（2021）。\n  [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fpapers\u002FJang_MeanShift_Extremely_Fast_Mode-Seeking_With_Applications_to_Segmentation_and_Object_CVPR_2021_paper.pdf)]\n  [[代码](https:\u002F\u002Fgithub.com\u002FMeanShift++)]\n  \n  \n### IROS 2021\n\n- **CRACT:** 樊恒、凌海宾。\u003Cbr \u002F>\n  “CRACT：级联回归-对齐-分类以实现鲁棒的视觉跟踪”。IROS（2020）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2011.12483)] \n\n- **SiamAPN++:** 曹子昂、付昌鸿、叶俊杰、李博文、李一鸣。\u003Cbr \u002F>\n  “SiamAPN++：用于实时无人机跟踪的孪生注意力聚合网络”。IROS（2021）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2106.08816.pdf)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fvision4robotics\u002FSiamAPN)]\n\n- **DarkLighter:** 叶俊杰、付昌鸿、郑光泽、曹子昂、李博文。\u003Cbr \u002F>\n  “DarkLighter：为无人机跟踪点亮黑暗”。IROS（2021）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2107.14389.pdf)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fvision4robotics\u002FDarkLighter)]\n  \n- **PTT:** 山家尧、周思凡、方正、崔宇博。\u003Cbr \u002F>\n  “PTT：用于点云中单目标3D跟踪的点-跟踪-Transformer模块”。IROS（2021）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2108.06455)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fshanjiayao\u002FPTT)]\n\n### NeurIPS 2021\n\n- **PathTrack:** 德鲁·林斯利、吉里克·马利克、金俊京、拉克什米·纳拉西曼·戈文达拉詹、恩尼奥·明戈拉、托马斯·塞雷。\u003Cbr \u002F>\n  “人类与机器中的无重识别跟踪”。NeurIPS（2021年）。\n  [[论文](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2021\u002Fhash\u002Fa2557a7b2e94197ff767970b67041697-Abstract.html)] \n  [[代码](http:\u002F\u002Fbit.ly\u002FInTcircuit)]\n  \n- **UniTrack:** 王中道、赵恒爽、李亚丽、王圣锦、菲利普·托尔、卢卡·贝尔蒂内托。\u003Cbr \u002F>\n  “不同的跟踪任务是否需要不同的外观模型？”NeurIPS（2021年）。\n  [[论文](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2021\u002Fhash\u002F06997f04a7db92466a2baa6ebc8b872d-Abstract.html)] \n  [[代码](https:\u002F\u002Fzhongdao.github.io\u002FUniTrack\u002F)]\n\n  \n### WACV 2021\n\n- **MART:** 范恒、凌海斌。\u003Cbr \u002F>\n  “MART：用于鲁棒视觉跟踪的运动感知循环神经网络”。WACV（2021年）。\n  [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FWACV2021\u002Fpapers\u002FFan_MART_Motion-Aware_Recurrent_Neural_Network_for_Robust_Visual_Tracking_WACV_2021_paper.pdf)] \n  [[代码](https:\u002F\u002Fhengfan2010.github.io\u002Fprojects\u002FMART\u002FMART.htm)]\n  \n- **SiamSE:** 伊万·索斯诺维克、阿特姆·莫斯卡列夫、阿诺德·斯梅尔德斯。\u003Cbr \u002F>\n  “尺度等变性提升暹罗跟踪性能”。WACV（2021年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2007.09115.pdf)] \n  [[代码](https:\u002F\u002Fgithub.com\u002FISosnovik\u002FSiamSE)]\n  \n- **TracKlinic:** 范恒、杨帆、楚鹏、林岳伟、袁琳、凌海斌。\u003Cbr \u002F>\n  “TracKlinic：视觉跟踪中挑战因素的诊断”。WACV（2021年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1911.07959)]\n  [[代码](https:\u002F\u002Fhengfan2010.github.io\u002Fprojects\u002FTracKlinic\u002FTracKlinic.htm。)]\n  \n  \n### AAAI 2021\n\n- **MUG:** 周立军、安托万·勒丹、胡钦涛、刘婷、张建林、马里乌斯·克洛夫特。\u003Cbr \u002F>\n  “模型不确定性引导视觉目标跟踪”。AAAI（2021年）。\n  [[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F16473)] \n  \n- **UPA:** 丁莉、王永伟、袁凯文、蒋敏阳、王平、黄华、Z.简·王。\u003Cbr \u002F>\n  “面向单目标跟踪的通用物理攻击研究”。AAAI（2021年）。\n  [[论文](https:\u002F\u002Fwww.aaai.org\u002FAAAI21Papers\u002FAAAI-2606.DingL.pdf)]\n\n- **PACNet:** 张大伟、郑仲龙、贾日恒、李明璐。\u003Cbr \u002F>\n  “基于层次化深度强化学习的视觉跟踪”。AAAI（2021年）。\n  [[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F16443)] \n  \n- **MSANet:** 陈学松、傅灿淼、郑峰、赵勇、李洪生、罗平、齐国俊。\u003Cbr \u002F>\n  “用于视觉目标跟踪的统一多场景攻击网络”。AAAI（2021年）。\n  [[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F16195)]\n  \n\n### 其他 2021\n\n- **SiamAPN:** 傅昌宏、曹子昂、李一鸣、叶俊杰、冯晨。\u003Cbr \u002F>\n  “基于高效暹罗锚框提议网络的机载实时空中跟踪”。IEEE TGRS（2021年）。\n  [[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9477413)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fvision4robotics\u002FSiamAPN)]\n  \n- **CCR:** 葛世明、张春辉、李世坤、曾丹、陶大成。\u003Cbr \u002F>\n  “用于鲁棒深度跟踪的级联相关精炼方法”。IEEE TNNLS（2021年）。\n  [[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9069312)] \n  [[代码](https:\u002F\u002Fgithub.com\u002F983632847\u002FCCR)]\n  \n- **CHASE:** 赛义德·莫杰塔巴·马尔瓦斯蒂-扎德、贾瓦德·哈加尼、李诚、侯赛因·加内伊-亚赫丹、绍赫雷·卡塞伊。\u003Cbr \u002F>\n  “CHASE：基于细胞级可微神经架构搜索的鲁棒视觉跟踪”。BMVC（2021年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2107.03463)]\n\n### ECCV 2020\n\n- **Ocean:** 张志鹏、彭厚文、傅建龙、李冰、胡伟明。\u003Cbr \u002F>\n  “Ocean：基于目标感知的无锚框跟踪。” ECCV（2020）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2006.10721.pdf)]\n  [[代码](https:\u002F\u002Fgithub.com\u002Fresearchmm\u002FTracKit)]\n  \n- **KYS:** 戈塔姆·巴特、马丁·丹内尔扬、卢克·范·古尔、拉杜·蒂莫夫特。\u003Cbr \u002F>\n  “了解你的周围环境：利用场景信息进行目标跟踪。” ECCV（2020）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2003.11014v1.pdf)]\n  [[代码](https:\u002F\u002Fgithub.com\u002Fvisionml\u002Fpytracking)]\n  \n- **PGNet:** 廖炳炎、王晨晔、王雅云、王耀农、尹军。\u003Cbr \u002F>\n  “PG-Net：用于视觉跟踪的像素到全局匹配网络。” ECCV（2020）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2003.11014)]\n  \n- **STN:** 刘源、李若腾、程宇、罗比·T·谭、隋秀宝。\u003Cbr \u002F>\n  “基于时空网络进行未来位置预测的目标跟踪。” ECCV（2020）。\n  [[论文](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2020\u002Fpapers_ECCV\u002Fpapers\u002F123670001.pdf)]\n  \n- **RPT:** 马子昂、王林远、张海涛、陆伟、尹军。\u003Cbr \u002F>\n  “RPT：用于孪生视觉跟踪的点集表示学习。” ECCVW（2020）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2008.03467)]\n  [[代码](https:\u002F\u002Fgithub.com\u002Fzhanght021\u002FRPT)]\n  \n- **CenterTrack:** 周兴义、弗拉德伦·科尔顿、菲利普·克拉亨布尔。\u003Cbr \u002F>\n  “将目标作为点进行跟踪。” ECCV（2020）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2004.01177)]\n  [[代码](https:\u002F\u002Fgithub.com\u002Fxingyizhou\u002FCenterTrack)]\n  \n- **PointTracker:** 徐振波、张伟、谭晓、杨威、黄欢、温士磊、丁二瑞、黄柳生。\u003Cbr \u002F>\n  “以点为单位进行分割，实现高效的在线多目标跟踪与分割。” ECCV（2020）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2007.01550)]\n  [[代码](https:\u002F\u002Fgithub.com\u002FdetectRecog\u002FPointTrack)]\n  \n- **DCFST:** 郑林宇、唐明、陈莹莹、王金桥、陆汉青。\u003Cbr \u002F>\n  “基于判别模型跟踪的特征嵌入学习。” ECCV（2020）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1906.10414)]\n  [[代码](https:\u002F\u002Fgithub.com\u002FnoneUmbrella\u002FDCFST)]\n  \n- **CLNet:** 董星平、沈坚兵、邵凌、法提赫·波里克利。\u003Cbr \u002F>\n  “CLNet：一种紧凑的潜在网络，用于快速调整孪生跟踪器。” ECCV（2020）。\n  [[论文](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2020\u002Fpapers_ECCV\u002Fpapers\u002F123650375.pdf)]\n  [[代码](https:\u002F\u002Fgithub.com\u002Fxingpingdong\u002FCLNet-tracking)]\n  \n- **RTAA:** 贾帅、马超、宋一兵、杨晓康。\u003Cbr \u002F>\n  “对抗攻击下的鲁棒跟踪。” ECCV（2020）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2007.09919)]\n  [[代码](https:\u002F\u002Fgithub.com\u002Fjoshuajss\u002FRTAA)]\n  \n- **EAA:** 梁思远、魏星星、姚思远、曹晓春。\u003Cbr \u002F>\n  “用于视觉目标跟踪的有效对抗攻击。” ECCV（2020）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2008.00217)]\n\n- **SPARK:** 郭庆、谢小飞、费利克斯·朱菲-徐、马雷、李忠国、薛万力、冯伟、刘洋。\u003Cbr \u002F>\n  “SPARK：面向视觉跟踪的空间感知在线增量攻击。” ECCV（2020）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1910.08681.pdf)]\n  \n- **CAT:** 李承龙、刘磊、陆安东、季青、唐锦。\u003Cbr \u002F>\n  “挑战感知的RGBT跟踪。” ECCV（2020）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2007.13143)]\n\n- **JDE:** 王中道、郑亮、刘一轩、王圣进。\u003Cbr \u002F>\n  “迈向实时多目标跟踪。” ECCV（2020）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1909.12605v1.pdf)]\n  [[代码](https:\u002F\u002Fgitee.com\u002Fmat026\u002FTowards-Realtime-MOT)]\n  \n- **Chained-Tracker:** 彭金龙、王长安、万方斌、吴阳、王亚彪、邰英、王成杰、李吉林、黄飞跃、付延伟。\u003Cbr \u002F>\n  “Chained-Tracker：将配对注意力回归结果串联起来，实现端到端的多目标检测与跟踪联合任务。” ECCV（2020）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2007.14557.pdf)]\n  [[代码](https:\u002F\u002Fgithub.com\u002Fpjl1995\u002FCTracker)]\n  \n- **TAO:** 阿查尔·戴夫、塔拉莎·库拉纳、帕维尔·托克马科夫、科黛莉娅·施密德、德瓦·拉马南。\u003Cbr \u002F>\n  “TAO：一个大规模的基准数据集，用于跟踪任何目标。” ECCV（2020）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2005.10356)]\n  [[代码](http:\u002F\u002Ftaodataset.org\u002F)]\n\n### CVPR2020\n\n* **MAML:** 王广亭、罗冲、孙晓燕、熊志伟、曾文俊。\u003Cbr \u002F>\n  “基于实例检测的跟踪：一种元学习方法。” CVPR（2020年 **口头报告**）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2004.00830v1.pdf)]\n\n* **Siam R-CNN:** 保罗·沃伊特兰德、乔纳森·吕滕、菲利普·H.S. 托尔、巴斯蒂安·莱贝。\u003Cbr \u002F>\n  “Siam R-CNN：通过重新检测进行视觉跟踪。” CVPR（2020年）。\n  [[BoLTVOS](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1904.04552.pdf)] \n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1911.12836.pdf)] \n  [[代码](https:\u002F\u002Fwww.vision.rwth-aachen.de\u002Fpage\u002Fsiamrcnn)]\n\n* **D3S:** 阿兰·卢克齐奇、季里·马塔斯、马泰伊·克里斯坦。\u003Cbr \u002F>\n  “D3S——一种判别式单次分割跟踪器。” CVPR（2020年）。\n  [[论文](http:\u002F\u002Farxiv.org\u002Fpdf\u002F1911.08862v2.pdf)]\n  [[代码](https:\u002F\u002Fgithub.com\u002Falanlukezic\u002Fd3s)]\n\n* **PrDiMP:** 马丁·丹内尔扬、卢克·范·古尔、拉杜·蒂莫夫特。\u003Cbr \u002F>\n  “用于视觉跟踪的概率回归。” CVPR（2020年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2003.12565v1.pdf)]\n  [[代码](https:\u002F\u002Fgithub.com\u002Fvisionml\u002Fpytracking)]\n\n* **ROAM:** 杨天宇、徐鹏飞、胡润波、柴华、安东尼·B·陈。\u003Cbr \u002F>\n  “ROAM：递归优化的跟踪模型。” CVPR（2020年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1907.12006v3.pdf)]\n  [[代码](https:\u002F\u002Fgithub.com\u002Fskyoung\u002FROAM)]\n\n* **AutoTrack:** 李一鸣、傅昌宏、丁方强、黄子渊、陆耿。\u003Cbr \u002F>\n  “AutoTrack：面向无人机的高性能视觉跟踪，采用自动时空正则化。” CVPR（2020年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2003.12949.pdf)]\n  [[代码](https:\u002F\u002Fgithub.com\u002Fvision4robotics\u002FAutoTrack)]\n\n* **SiamBAN:** 陈泽度、钟彬能、李国荣、张盛平、季荣荣。\u003Cbr \u002F>\n  “用于视觉跟踪的孪生框自适应网络。” CVPR（2020年）。\n  [[论文](http:\u002F\u002Farxiv.org\u002Fpdf\u002F1911.08862v2.pdf)]\n  [[代码](https:\u002F\u002Fgithub.com\u002Fhqucv\u002Fsiamban)]\n\n* **SiamCAR:** 郭东艳、王军、崔颖、王振华、陈圣勇。\u003Cbr \u002F>\n  “SiamCAR：用于视觉跟踪的孪生全卷积分类与回归。” CVPR（2020年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1911.07241)]\n  [[代码](https:\u002F\u002Fgithub.com\u002Fohhhyeahhh\u002FSiamCAR)]\n\n* **SiamAttn:** 于跃臣、熊义磊、黄伟林、马修·R·斯科特。\u003Cbr \u002F>\n  “用于视觉目标跟踪的可变形孪生注意力网络。” CVPR（2020年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2004.06711v1.pdf)]\n\n* **CSA:** 颜斌、王栋、陆虎川、杨晓云。\u003Cbr \u002F>\n  “冷却-收缩攻击：用难以察觉的噪声使跟踪器失效。” CVPR（2020年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2003.09595)]\n  [[代码](https:\u002F\u002Fgithub.com\u002FMasterBin-IIAU\u002FCSA)]\n\n* **LTMU:** 戴克楠、张云华、王栋、李建华、陆虎川、杨晓云。\u003Cbr \u002F>\n  “基于元更新器的高性能长期跟踪。” CVPR（2020年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2004.00305)]\n  [[代码](https:\u002F\u002Fgithub.com\u002FDaikenan\u002FLTMU)]\n\n* **MAST:** 赖子航、陆艾丽卡、谢维迪。\u003Cbr \u002F>\n  “MAST：一种记忆增强的自监督跟踪器。” CVPR（2020年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2002.07793)]\n  [[代码](https:\u002F\u002Fgithub.com\u002Fzlai0\u002FMAST)]\n\n* **CGACD:** 杜飞、刘鹏、赵伟、唐向龙。\u003Cbr \u002F>\n  “基于相关性引导注意的角点检测视觉跟踪。” CVPR（2020年）。\n  [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2020\u002Fpapers\u002FDu_Correlation-Guided_Attention_for_Corner_Detection_Based_Visual_Tracking_CVPR_2020_paper.pdf)]\n  [[代码](https:\u002F\u002Fgithub.com\u002Ffeiaxyt\u002FCGACD)]\n\n### IJCAI 2020\n\n- **TLPG-Tracker:** 李思远、张智、刘子宇、安娜·王、邱玲珑、杜峰。\u003Cbr \u002F>\n  “TLPG-Tracker：视觉跟踪中目标定位与候选框生成的联合学习。” IJCAI（2020年）。\n  [[论文](https:\u002F\u002Fwww.ijcai.org\u002FProceedings\u002F2020\u002F99)]\n\n- **E3SN:** 兰萌、张一鹏、许启宁、张雷飞。\u003Cbr \u002F>\n  “E3SN：用于视频目标分割的高效端到端孪生网络。” IJCAI（2020年）。\n  [[论文](https:\u002F\u002Fwww.ijcai.org\u002FProceedings\u002F2020\u002F98)]\n\n### AAAI 2020\n\n- **SiamFC++:** 徐银达、王泽宇、李佐欣、袁叶、于刚。\u003Cbr \u002F>\n  “SiamFC++：基于目标估计指导的鲁棒且精确的视觉跟踪。” AAAI（2020年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1911.06188v4.pdf)]\n  [[代码](https:\u002F\u002Fgithub.com\u002FMegviiDetection\u002Fvideo_analyst)]\n\n- **DROL:** 周景浩、王鹏、孙浩洋。\u003Cbr \u002F>\n  “用于孪生视觉跟踪的判别式与鲁棒在线学习。” AAAI（2020年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1909.02959)]\n  [[代码](https:\u002F\u002Fgithub.com\u002Fshallowtoil\u002FDROL)]\n\n- **POST:** 王宁、周文刚、齐国军、李厚强。\u003Cbr \u002F>\n  “POST：基于策略的切换跟踪。” AAAI（2020年）。\n  [[论文](https:\u002F\u002Fojs.aaai.org\u002F\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F6899)]\n\n- **SPS:** 胡钦涛、周立军、王晓晓、毛耀、张建林、叶其祥。\u003Cbr \u002F>\n  “SPSTracker：通过响应图的次峰值抑制实现鲁棒目标跟踪。” AAAI（2020年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1912.00597.pdf)]\n  [[代码](https:\u002F\u002Fwww.ctolib.com\u002Fhttps:\u002F\u002Fgithub.com\u002FTrackerLB\u002FSPSTracker)]\n\n- **RPOT:** 杨一凡、李国荣、戚元凯、黄庆明。\u003Cbr \u002F>\n  “释放在线训练的力量，实现鲁棒视觉跟踪。” AAAI（2020年）。\n  [[论文](https:\u002F\u002Fojs.aaai.org\u002F\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F6956)]\n\n- **MetaRTT:** 郑一彩、柳基贤、卢贤宇、赵敏洙、韩宝亨。\u003Cbr \u002F>\n  “通过元学习实现实时目标跟踪：高效的模型适应与一次性的通道剪枝。” AAAI（2020年）。\n  [[论文](https:\u002F\u002Fojs.aaai.org\u002F\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F6779)]\n\n- **GlobalTrack:** 黄良华、赵欣、黄凯奇。\u003Cbr \u002F>\n  “GlobalTrack：一种简单而强大的长期跟踪基线。” AAAI（2020年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1912.08531)]\n  [[代码](https:\u002F\u002Fgithub.com\u002Fhuanglianghua\u002FGlobalTrack)]\n\n### 其他 2020\n\n* **VTT:** 边天凌、华阳、宋涛、薛振贵、马如辉、尼尔·罗伯逊、关海兵。\u003Cbr \u002F>\n  “VTT：基于Transformer的长期视觉跟踪。” ICPR 2020。 \n  [[论文](https:\u002F\u002Fpure.qub.ac.uk\u002Fen\u002Fpublications\u002Fvtt-long-term-visual-tracking-with-transformers)]\n  [[代码](https:\u002F\u002Fgithub.com\u002FVisualTrackingVLL)]\n\n* **COMET:** 赛义德·莫杰塔巴·马尔瓦斯蒂-扎德、贾瓦德·哈加尼、侯赛因·加内伊-亚赫丹、绍赫雷·卡塞伊以及李成。\u003Cbr \u002F>\n  “COMET：上下文感知的IoU引导网络，用于小目标跟踪。” ACCV 2020。 \n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2006.02597.pdf)]\n  [[代码](https:\u002F\u002Fgithub.com\u002FVisualTrackingVLL)]\n\n* **SiamKPN:** 李强、秦泽奎、张文博、郑文。\u003Cbr \u002F>\n  “用于视觉目标跟踪的孪生关键点预测网络。” ArXiv 2020。 \n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2006.04078)]\n  [[代码](https:\u002F\u002Fgithub.com\u002FZekuiQin\u002FSiamKPN)]\n\n* **SiamCAN:** 周文章、温龙寅、张立波、杜大伟、罗铁坚、吴延军。\u003Cbr \u002F>\n  “SiamMan：用于视觉跟踪的运动感知孪生网络。” TIP 2020。 \n  [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1912.05515v2)]\n  [[新论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F1912.05515v2)]\n  [[代码](https:\u002F\u002Fisrc.iscas.ac.cn\u002Fgitlab\u002Fresearch\u002Fsiamcan)]\n\n### ICCV 2019\n\n* **DiMP:** Goutam Bhat、Martin Danelljan、Luc Van Gool、Radu Timofte。\u003Cbr \u002F>\n  “用于跟踪的判别模型预测学习”。ICCV（2019年 **口头报告**）。\n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fpapers\u002FBhat_Learning_Discriminative_Model_Prediction_for_Tracking_ICCV_2019_paper.pdf)]\n  [[代码](https:\u002F\u002Fgithub.com\u002Fvisionml\u002Fpytracking)]\n\n* **GradNet:** Peixia Li、Boyu Chen、Wanli Ouyang、Dong Wang、Xiaoyun Yang、Huchuan Lu。\u003Cbr \u002F>\n  “GradNet：面向视觉目标跟踪的梯度引导网络”。ICCV（2019年 **口头报告**）。\n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fpapers\u002FLi_GradNet_Gradient-Guided_Network_for_Visual_Object_Tracking_ICCV_2019_paper.pdf)]\n  [[代码](https:\u002F\u002Fgithub.com\u002FLPXTT\u002FGradNet-Tensorflow)]\n\n* **MLT:** Janghoon Choi、Junseok Kwon、Kyoung Mu Lee。\u003Cbr \u002F>\n  “用于实时目标感知视觉跟踪的深度元学习”。ICCV（2019年）。\n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fpapers\u002FChoi_Deep_Meta_Learning_for_Real-Time_Target-Aware_Visual_Tracking_ICCV_2019_paper.pdf)]\n\n* **SPLT:** Bin Yan、Haojie Zhao、Dong Wang、Huchuan Lu、Xiaoyun Yang。\u003Cbr \u002F>\n  “‘略读—细看’跟踪：一种用于实时且鲁棒的长期跟踪框架”。ICCV（2019年）。\n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fpapers\u002FYan_Skimming-Perusal_Tracking_A_Framework_for_Real-Time_and_Robust_Long-Term_Tracking_ICCV_2019_paper.pdf)]\n  [[代码](https:\u002F\u002Fgithub.com\u002Fiiau-tracker\u002FSPLT)]\n\n* **ARCF:** Ziyuan Huang、Changhong Fu、Yiming Li、Fuling Lin、Peng Lu。\u003Cbr \u002F>\n  “用于实时无人机跟踪的畸变抑制相关滤波器学习”。ICCV（2019年）。\n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fpapers\u002FHuang_Learning_Aberrance_Repressed_Correlation_Filters_for_Real-Time_UAV_Tracking_ICCV_2019_paper.pdf)]\n  [[代码](https:\u002F\u002Fgithub.com\u002Fvision4robotics\u002FARCF-tracker)]\n\n* **BGDT:** Lianghua Huang、Xin Zhao、Kaiqi Huang。\u003Cbr \u002F>\n  “弥合检测与跟踪之间的鸿沟：一种统一方法”。ICCV（2019年）。\n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fpapers\u002FHuang_Bridging_the_Gap_Between_Detection_and_Tracking_A_Unified_Approach_ICCV_2019_paper.pdf)]\n\n* **PAT:** Rey Reza Wiyatno、Anqi Xu。\u003Cbr \u002F>\n  “能够欺骗视觉目标跟踪的物理对抗纹理”。ICCV（2019年）。\n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fpapers\u002FWiyatno_Physical_Adversarial_Textures_That_Fool_Visual_Object_Tracking_ICCV_2019_paper.pdf)]\n\n* **GFS-DCF:** Tianyang Xu、Zhen-Hua Feng、Xiao-Jun Wu、Josef Kittler。\u003Cbr \u002F>\n  “联合分组特征选择与判别滤波器学习，用于鲁棒的视觉目标跟踪”。ICCV（2019年）。\n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fpapers\u002FXu_Joint_Group_Feature_Selection_and_Discriminative_Filter_Learning_for_Robust_ICCV_2019_paper.pdf)]\n  [[代码](https:\u002F\u002Fgithub.com\u002FXU-TIANYANG\u002FGFS-DCF)]\n\n* **CDTB:** Alan Lukežič、Ugur Kart、Jani Käpylä、Ahmed Durmush、Joni-Kristian Kämäräinen、Jiří Matas、Matej Kristan。\u003Cbr \u002F>\n  “CDTB：彩色与深度视觉目标跟踪数据集及基准测试”。ICCV（2019年）。\n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fpapers\u002FLukezic_CDTB_A_Color_and_Depth_Visual_Object_Tracking_Dataset_and_ICCV_2019_paper.pdf)]\n\n* **fdKCF:** Linyu Zheng、Ming Tang、Yingying Chen、Jinqiao Wang、Hanqing Lu。\u003Cbr \u002F>\n  “无边界效应的快速deepKCF”。ICCV（2019年）。\n  [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fpapers\u002FZheng_Fast-deepKCF_Without_Boundary_Effect_ICCV_2019_paper.pdf)]\n\n* **VOT2019:** Kristan、Matej 等。\u003Cbr \u002F>\n  “第七届视觉目标跟踪VOT2019挑战赛结果”。ICCV研讨会（2019年）。\n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCVW_2019\u002Fpapers\u002FVOT\u002FKristan_The_Seventh_Visual_Object_Tracking_VOT2019_Challenge_Results_ICCVW_2019_paper.pdf)]\n\n### CVPR2019\n\n* **SiamMask:** 王强、张立、卢卡·贝尔蒂内托、胡伟明、菲利普·H·S·托尔。\u003Cbr \u002F>\n  “快速在线目标跟踪与分割：一种统一方法。” CVPR（2019）。 \n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1812.05050.pdf)]\n  [[项目](http:\u002F\u002Fwww.robots.ox.ac.uk\u002F~qwang\u002FSiamMask\u002F)]\n  [[代码](https:\u002F\u002Fgithub.com\u002Ffoolwood\u002FSiamMask)]\n\n* **SiamRPN++:** 李博、吴伟、王强、张方毅、邢俊亮、闫俊杰。\u003Cbr \u002F>\n  “SiamRPN++：基于超深度网络的孪生视觉跟踪演进。” CVPR（2019 **口头报告**）。 \n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FLi_SiamRPN_Evolution_of_Siamese_Visual_Tracking_With_Very_Deep_Networks_CVPR_2019_paper.pdf)]\n  [[项目](http:\u002F\u002Fbo-li.info\u002FSiamRPN++\u002F)]\n\n* **ATOM:** 马丁·丹内尔扬、古塔姆·巴特、法哈德·沙赫巴兹·汗、迈克尔·费尔斯贝格。\u003Cbr \u002F>\n  “ATOM：通过最大化重叠实现精确跟踪。” CVPR（2019 **口头报告**）。 \n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FDanelljan_ATOM_Accurate_Tracking_by_Overlap_Maximization_CVPR_2019_paper.pdf)]\n  [[代码](https:\u002F\u002Fgithub.com\u002Fvisionml\u002Fpytracking)]\n\n* **SiamDW:** 张志鹏、彭厚文。\u003Cbr \u002F>\n  “用于实时视觉跟踪的更深更宽的孪生网络。” CVPR（2019 **口头报告**）。 \n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FZhang_Deeper_and_Wider_Siamese_Networks_for_Real-Time_Visual_Tracking_CVPR_2019_paper.pdf)]\n  [[代码](https:\u002F\u002Fgithub.com\u002Fresearchmm\u002FSiamDW)]\n\n* **GCT:** 高俊宇、张天柱、徐长胜。\u003Cbr \u002F>\n  “图卷积跟踪。” CVPR（2019 **口头报告**）。\n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FGao_Graph_Convolutional_Tracking_CVPR_2019_paper.pdf)]\n  [[代码](https:\u002F\u002Fgithub.com\u002Fresearchmm\u002FSiamDW)]\n\n* **ASRCF:** 戴可楠、王栋、陆虎川、孙冲、李建华。\u003Cbr \u002F>\n  “基于自适应空间正则化相关滤波器的视觉跟踪。” CVPR（2019 **口头报告**）。\n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FDai_Visual_Tracking_via_Adaptive_Spatially-Regularized_Correlation_Filters_CVPR_2019_paper.pdf)]\n  [[代码](https:\u002F\u002Fgithub.com\u002FDaikenan\u002FASRCF)]\n\n* **UDT:** 王宁、宋义兵、马超、周文刚、刘伟、李厚强。\u003Cbr \u002F>\n  “无监督深度跟踪。” CVPR（2019）。 \n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1904.01828.pdf)]\n  [[代码](https:\u002F\u002Fgithub.com\u002F594422814\u002FUDT)]\n\n* **TADT:** 李欣、马超、吴宝元、何振宇、杨明轩。\u003Cbr \u002F>\n  “目标感知深度跟踪。” CVPR（2019）。 \n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1904.01772.pdf)]\n  [[项目](https:\u002F\u002Fxinli-zn.github.io\u002FTADT-project-page\u002F)]\n  [[代码](https:\u002F\u002Fgithub.com\u002FXinLi-zn\u002FTADT)]\n\n* **C-RPN:** 范恒、凌海斌。\u003Cbr \u002F>\n  “用于实时视觉跟踪的孪生级联区域建议网络。” CVPR（2019）。 \n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FFan_Siamese_Cascaded_Region_Proposal_Networks_for_Real-Time_Visual_Tracking_CVPR_2019_paper.pdf)]\n\n* **SPM:** 王广亭、罗冲、熊志伟、曾文俊。\u003Cbr \u002F>\n  “SPM-Tracker：用于实时视觉目标跟踪的串并联匹配。” CVPR（2019）。 \n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FWang_SPM-Tracker_Series-Parallel_Matching_for_Real-Time_Visual_Object_Tracking_CVPR_2019_paper.pdf)]\n\n* **OTR:** 乌古尔·卡尔特、艾伦·卢凯齐奇、马泰伊·克里斯坦、乔尼-克里斯蒂安·卡马拉伊宁、季里·马塔斯。\u003Cbr \u002F>\n  “基于视点特定判别相关滤波器的重建式目标跟踪。” CVPR（2019）。 \n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FKart_Object_Tracking_by_Reconstruction_With_View-Specific_Discriminative_Correlation_Filters_CVPR_2019_paper.pdf)]\n  [[代码](https:\u002F\u002Fgithub.com\u002Fugurkart\u002FOTR)]\n\n* **RPCF:** 孙宇轩、孙冲、王栋、陆虎川、何友。\u003Cbr \u002F>\n  “用于视觉跟踪的ROI池化相关滤波器。” CVPR（2019）。\n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FSun_ROI_Pooled_Correlation_Filters_for_Visual_Tracking_CVPR_2019_paper.pdf)]\n\n* **LaSOT:** 范恒、林丽婷、杨帆、楚鹏、邓戈、于思嘉、白和鑫、徐勇、廖春元、凌海斌。\u003Cbr \u002F>\n  “LaSOT：大规模单目标跟踪的高质量基准数据集。” CVPR（2019）。 \n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1809.07845.pdf)]\n  [[项目](https:\u002F\u002Fcis.temple.edu\u002Flasot\u002F)]\n\n### AAAI2019\n\n* **LDES:** 李阳、朱建科、史蒂文·C·H·霍伊、宋文杰、王哲峰、刘汉唐。\u003Cbr \u002F>\n  “用于视觉目标跟踪的相似变换鲁棒估计。” AAAI（2019）。 \n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1712.05231.pdf)]\n  [[代码](https:\u002F\u002Fgithub.com\u002Fihpdep\u002FLDES)] \n  \n* **ANT:** 齐元凯、张盛平、张卫刚、苏莉、黄庆明、杨明轩。\u003Cbr \u002F>\n  “学习属性特定表示以进行视觉跟踪。” AAAI（2019）。 \n  [[论文](https:\u002F\u002Ffaculty.ucmerced.edu\u002Fmhyang\u002Fpapers\u002Faaai2019_tracking.pdf)]\n  \n* **Re2EMA:** 黄江雷、周文刚。\u003Cbr \u002F>\n  “Re2EMA：用于目标模型更新的正则化与重新初始化指数移动平均。” AAAI（2019）。 \n  [[论文](https:\u002F\u002Fojs.aaai.org\u002F\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F4862)]\n\n### NIPS2018\n\n* **DAT:** 普石、宋义兵、马超、张洪刚、杨明轩。\u003Cbr \u002F>\n  “通过互惠学习的深度注意力跟踪。” NIPS（2018）。 \n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1810.03851.pdf)] \n  [[项目](https:\u002F\u002Fybsong00.github.io\u002Fnips18_tracking\u002Findex)] \n  [[代码](https:\u002F\u002Fgithub.com\u002Fshipubupt\u002FNIPS2018)]\n\n### ECCV2018\n\n* **UPDT:** Goutam Bhat、Joakim Johnander、Martin Danelljan、Fahad Shahbaz Khan、Michael Felsberg。\u003Cbr \u002F>\n  “揭秘深度跟踪的力量”。ECCV（2018年）。\n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fpapers\u002FGoutam_Bhat_Unveiling_the_Power_ECCV_2018_paper.pdf)]  \n\n* **DaSiamRPN:** Zheng Zhu、Qiang Wang、Bo Li、Wu Wei、Junjie Yan、Weiming Hu。\u003Cbr \u002F>\n  “用于视觉目标跟踪的干扰物感知孪生网络”。ECCV（2018年）。\n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fpapers\u002FZheng_Zhu_Distractor-aware_Siamese_Networks_ECCV_2018_paper.pdf)]\n  [[GitHub](https:\u002F\u002Fgithub.com\u002Ffoolwood\u002FDaSiamRPN)]\n  \n* **SiamMCF:** Henrique Morimitsu。\u003Cbr \u002F>\n  “基于孪生网络的多上下文特征在视觉目标跟踪中的应用”。ECCV（2018年）。\n  [[论文](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007%2F978-3-030-11009-3_6.pdf)]\n  [[GitHub](https:\u002F\u002Fgithub.com\u002Fhmorimitsu\u002Fsiam-mcf)]\n\n* **SACF:** Mengdan Zhang、Qiang Wang、Junliang Xing、Jin Gao、Peixi Peng、Weiming Hu、Steve Maybank。\u003Cbr \u002F>\n  “基于空间对齐相关滤波器网络的视觉跟踪”。ECCV（2018年）。\n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fpapers\u002Fmengdan_zhang_Visual_Tracking_via_ECCV_2018_paper.pdf)]\n\n* **RTINet:** Yingjie Yao、Xiaohe Wu、Lei Zhang、Shiguang Shan、Wangmeng Zuo。\u003Cbr \u002F>\n  “基于相关滤波器的联合表示与截断推理学习用于目标跟踪”。ECCV（2018年）。\n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fpapers\u002FYingjie_Yao_Joint_Representation_and_ECCV_2018_paper.pdf)]\n\n* **Meta-Tracker:** Eunbyung Park、Alexander C. Berg。\u003Cbr \u002F>\n  “Meta-Tracker：快速且鲁棒的视觉目标跟踪器在线自适应方法”。\n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fpapers\u002FEunbyung_Park_Meta-Tracker_Fast_and_ECCV_2018_paper.pdf)]\n  [[GitHub](https:\u002F\u002Fgithub.com\u002Fsilverbottlep\u002Fmeta_trackers)]\n\n* **DSLT:** Xiankai Lu、Chao Ma*、Bingbing Ni、Xiaokang Yang、Ian Reid、Ming-Hsuan Yang。\u003Cbr \u002F>\n  “带有收缩损失的深度回归跟踪”。ECCV（2018年）。\n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fpapers\u002FXiankai_Lu_Deep_Regression_Tracking_ECCV_2018_paper.pdf)]\n  [[GitHub](https:\u002F\u002Fgithub.com\u002Fchaoma99\u002FDSLT)]\n\n* **DRL-IS:** Liangliang Ren、Xin Yuan、Jiwen Lu、Ming Yang、Jie Zhou。\u003Cbr \u002F>\n  “用于视觉跟踪的迭代偏移深度强化学习”。ECCV（2018年）。\n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fpapers\u002FLiangliang_Ren_Deep_Reinforcement_Learning_ECCV_2018_paper.pdf)]\n\n* **RT-MDNet:** Ilchae Jung、Jeany Son、Mooyeol Baek、Bohyung Han。\u003Cbr \u002F>\n  “实时MDNet”。ECCV（2018年）。\n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fpapers\u002FIlchae_Jung_Real-Time_MDNet_ECCV_2018_paper.pdf)]\n\n* **ACT:** Boyu Chen、Dong Wang、Peixia Li、Huchuan Lu。\u003Cbr \u002F>\n  “实时‘演员—评论家’跟踪”。ECCV（2018年）。\n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fpapers\u002FBoyu_Chen_Real-time_Actor-Critic_Tracking_ECCV_2018_paper.pdf)]\n  [[GitHub](https:\u002F\u002Fgithub.com\u002Fbychen515\u002FACT)]\n\n* **StructSiam:** Yunhua Zhang、Lijun Wang、Dong Wang、Mengyang Feng、Huchuan Lu、Jinqing Qi。\u003Cbr \u002F>\n  “用于实时视觉跟踪的结构化孪生网络”。ECCV（2018年）。\n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fpapers\u002FYunhua_Zhang_Structured_Siamese_Network_ECCV_2018_paper.pdf)]\n\n* **MemTrack:** Tianyu Yang、Antoni B. Chan。\u003Cbr \u002F>\n  “用于目标跟踪的动态记忆网络学习”。ECCV（2018年）。\n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fpapers\u002FTianyu_Yang_Learning_Dynamic_Memory_ECCV_2018_paper.pdf)]\n\n* **SiamFC-tri:** Xingping Dong、Jianbing Shen。\u003Cbr \u002F>\n  “孪生网络中用于目标跟踪的三元组损失”。ECCV（2018年）。\n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fpapers\u002FXingping_Dong_Triplet_Loss_with_ECCV_2018_paper.pdf)]\n  [[GitHub](https:\u002F\u002Fgithub.com\u002Fshenjianbing\u002FTripletTracking)]\n\n* **OxUvA长期数据集+基准测试:** Jack Valmadre、Luca Bertinetto、João F. Henriques、Ran Tao、Andrea Vedaldi、Arnold Smeulders、Philip Torr、Efstratios Gavves。\u003Cbr \u002F>\n  “野外长期跟踪：一项基准测试”。ECCV（2018年）。\n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fpapers\u002FEfstratios_Gavves_Long-term_Tracking_in_ECCV_2018_paper.pdf)]\n  [[项目](https:\u002F\u002Foxuva.github.io\u002Flong-term-tracking-benchmark\u002F)]\n\n* **TrackingNet:** Matthias Müller、Adel Bibi、Silvio Giancola、Salman Al-Subaihi、Bernard Ghanem。\u003Cbr \u002F>\n  “TrackingNet：一个大规模的野外目标跟踪数据集与基准测试”。ECCV（2018年）。\n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fpapers\u002FMatthias_Muller_TrackingNet_A_Large-Scale_ECCV_2018_paper.pdf)] \n  [[项目](http:\u002F\u002Ftracking-net.org\u002F)]\n\n### CVPR2018\n\n* **VITAL:** Yibing Song, Chao Ma, Xiaohe Wu, Lijun Gong, Linchao Bao, Wangmeng Zuo, Chunhua Shen, Rynson Lau, and Ming-Hsuan Yang。\n  “VITAL：基于对抗学习的视觉跟踪。” CVPR（2018年 **Spotlight**）。 \n  [[项目](https:\u002F\u002Fybsong00.github.io\u002Fcvpr18_tracking\u002Findex)]\n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fpapers\u002FSong_VITAL_VIsual_Tracking_CVPR_2018_paper.pdf)]\n  [[GitHub](https:\u002F\u002Fgithub.com\u002Fybsong00\u002FVital_release)]\n\n* **LSART:** Chong Sun, Dong Wang, Huchuan Lu, Ming-Hsuan Yang。\n  “用于视觉跟踪的空间感知回归学习。” CVPR（2018年 **Spotlight**）。 \n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fpapers\u002FSun_Learning_Spatial-Aware_Regressions_CVPR_2018_paper.pdf)]\n\n* **SiamRPN:** Bo Li, Wei Wu, Zheng Zhu, Junjie Yan。\n  “基于暹罗区域建议网络的高性能视觉跟踪。” CVPR（2018年 **Spotlight**）。 \n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fpapers\u002FLi_High_Performance_Visual_CVPR_2018_paper.pdf)]\n\n* **TRACA:** Jongwon Choi, Hyung Jin Chang, Tobias Fischer, Sangdoo Yun, Kyuewang Lee, Jiyeoup Jeong, Yiannis Demiris, Jin Young Choi。\n  “面向高速视觉跟踪的上下文感知深度特征压缩。” CVPR（2018年）。 \n  [[项目](https:\u002F\u002Fsites.google.com\u002Fsite\u002Fjwchoivision\u002F)]\n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fpapers\u002FChoi_Context-Aware_Deep_Feature_CVPR_2018_paper.pdf)]\n\n* **RASNet:** Qiang Wang, Zhu Teng, Junliang Xing, Jin Gao, Weiming Hu, Stephen Maybank。\n  “学习注意力机制：基于残差注意力暹罗网络的高性能在线视觉跟踪。” CVPR 2018。 \n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fpapers\u002FWang_Learning_Attentions_Residual_CVPR_2018_paper.pdf)]\n\n* **SA-Siam:** Anfeng He, Chong Luo, Xinmei Tian, Wenjun Zeng。\n  “一种用于实时目标跟踪的双重暹罗网络。” CVPR（2018年）。 \n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fpapers\u002FHe_A_Twofold_Siamese_CVPR_2018_paper.pdf)]\n\n* **STRCF:** Feng Li, Cheng Tian, Wangmeng Zuo, Lei Zhang, Ming-Hsuan Yang。\n  “用于视觉跟踪的空间-时间正则化相关滤波器学习。” CVPR（2018年）。 \n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fpapers\u002FLi_Learning_Spatial-Temporal_Regularized_CVPR_2018_paper.pdf)]\n  [[GitHub](https:\u002F\u002Fgithub.com\u002Flifeng9472\u002FSTRCF)]\n\n* **FlowTrack:** Zheng Zhu, Wei Wu, Wei Zou, Junjie Yan。\n  “基于时空注意力的端到端流相关跟踪。” CVPR（2018年）。 \n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fpapers\u002FZhu_End-to-End_Flow_Correlation_CVPR_2018_paper.pdf)]\n\n* **DEDT:** Kourosh Meshgi, Shigeyuki Oba, Shin Ishii。\n  “用于判别式协同跟踪的高效多样化集成方法。” CVPR（2018年）。 \n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fpapers\u002FMeshgi_Efficient_Diverse_Ensemble_CVPR_2018_paper.pdf)]\n\n* **SINT++:** Xiao Wang, Chenglong Li, Bin Luo, Jin Tang。\n  “SINT++：通过对抗性正样本生成实现鲁棒视觉跟踪。” CVPR（2018年）。\n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fpapers\u002FWang_SINT_Robust_Visual_CVPR_2018_paper.pdf)]\n\n* **DRT:** Chong Sun, Dong Wang, Huchuan Lu, Ming-Hsuan Yang。\n  “基于联合判别与可靠性学习的相关跟踪。” CVPR（2018年）。 \n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fpapers\u002FSun_Correlation_Tracking_via_CVPR_2018_paper.pdf)]\n\n* **MCCT:** Ning Wang, Wengang Zhou, Qi Tian, Richang Hong, Meng Wang, Houqiang Li。\n  “用于鲁棒视觉跟踪的多线索相关滤波器。” CVPR（2018年）。 \n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fpapers\u002FWang_Multi-Cue_Correlation_Filters_CVPR_2018_paper.pdf)]\n  [[GitHub](https:\u002F\u002Fgithub.com\u002F594422814\u002FMCCT)]\n\n* **MKCF:** Ming Tang, Bin Yu, Fan Zhang, Jinqiao Wang。\n  “基于多核相关滤波器的高速跟踪。” CVPR（2018年）。\n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fpapers\u002FTang_High-Speed_Tracking_With_CVPR_2018_paper.pdf)]\n\n* **HP:** Xingping Dong, Jianbing Shen, Wenguan Wang, Yu, Liu, Ling Shao, and Fatih Porikli。\n  “基于连续深度Q学习的跟踪超参数优化。” CVPR（2018年）。\n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fpapers\u002FDong_Hyperparameter_Optimization_for_CVPR_2018_paper.pdf)]\n\n### NIPS2017\n\n* **HART:** Adam R. Kosiorek, Alex Bewley, Ingmar Posner。\n  “层次化注意力循环跟踪。” NIPS（2017年）。 \n  [[论文](https:\u002F\u002Fpapers.nips.cc\u002Fpaper\u002F6898-hierarchical-attentive-recurrent-tracking.pdf)]\n  [[GitHub](https:\u002F\u002Fgithub.com\u002Fakosiorek\u002Fhart)]\n\n### ICCV2017\n\n* **CREST:** Yibing Song, Chao Ma, Lijun Gong, Jiawei Zhang, Rynson Lau, Ming-Hsuan Yang。  \n  “CREST：用于视觉跟踪的卷积残差学习”。ICCV（2017年 **Spotlight**）。  \n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fpapers\u002FSong_CREST_Convolutional_Residual_ICCV_2017_paper.pdf)]  \n  [[项目](http:\u002F\u002Fwww.cs.cityu.edu.hk\u002F~yibisong\u002Ficcv17\u002Findex.html)]  \n  [[GitHub](https:\u002F\u002Fgithub.com\u002Fybsong00\u002FCREST-Release)]\n\n* **EAST:** Chen Huang, Simon Lucey, Deva Ramanan。  \n  “利用深度特征级联学习自适应跟踪策略”。ICCV（2017年 **Spotlight**）。  \n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fpapers\u002FHuang_Learning_Policies_for_ICCV_2017_paper.pdf)]  \n  [[补充材料](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fsupplemental\u002FHuang_Learning_Policies_for_ICCV_2017_supplemental.zip)]\n\n* **PTAV:** Heng Fan 和 Haibin Ling。  \n  “并行跟踪与验证：一种实时高精度视觉跟踪框架”。ICCV（2017年）。  \n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fpapers\u002FFan_Parallel_Tracking_and_ICCV_2017_paper.pdf)]  \n  [[补充材料](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fsupplemental\u002FFan_Parallel_Tracking_and_ICCV_2017_supplemental.pdf)]  \n  [[项目](http:\u002F\u002Fwww.dabi.temple.edu\u002F~hbling\u002Fcode\u002FPTAV\u002Fptav.htm)]  \n  [[代码](http:\u002F\u002Fwww.dabi.temple.edu\u002F~hbling\u002Fcode\u002FPTAV\u002Fserial_ptav_v1.zip)]\n\n* **BACF:** Hamed Kiani Galoogahi, Ashton Fagg, Simon Lucey。  \n  “用于视觉跟踪的背景感知相关滤波器学习”。ICCV（2017年）。  \n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fpapers\u002FGaloogahi_Learning_Background-Aware_Correlation_ICCV_2017_paper.pdf)]  \n  [[补充材料](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fsupplemental\u002FGaloogahi_Learning_Background-Aware_Correlation_ICCV_2017_supplemental.pdf)]  \n  [[代码](http:\u002F\u002Fwww.hamedkiani.com\u002Fuploads\u002F5\u002F1\u002F8\u002F8\u002F51882963\u002Fbacf_toupload.zip)]  \n  [[项目](http:\u002F\u002Fwww.hamedkiani.com\u002Fbacf.html)]\n\n* **TSN:** Zhu Teng, Junliang Xing, Qiang Wang, Congyan Lang, Songhe Feng 和 Yi Jin。  \n  “基于时空深度网络的鲁棒目标跟踪”。ICCV（2017年）。  \n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fpapers\u002FTeng_Robust_Object_Tracking_ICCV_2017_paper.pdf)]\n\n* **p-tracker:** James Supančič, III；Deva Ramanan。  \n  “将跟踪视为在线决策：利用强化学习从流式视频中学习策略”。ICCV（2017年）。  \n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fpapers\u002FSupancic_Tracking_as_Online_ICCV_2017_paper.pdf)]  \n  [[补充材料](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fsupplemental\u002FSupancic_Tracking_as_Online_ICCV_2017_supplemental.pdf)]\n\n* **DSiam:** Qing Guo；Wei Feng；Ce Zhou；Rui Huang；Liang Wan；Song Wang。  \n  “用于视觉目标跟踪的动态暹罗网络学习”。ICCV（2017年）。  \n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fpapers\u002FGuo_Learning_Dynamic_Siamese_ICCV_2017_paper.pdf)]  \n  [[GitHub](https:\u002F\u002Fgithub.com\u002Ftsingqguo\u002FDSiam)]\n\n* **SP-KCF:** Xin Sun；Ngai-Man Cheung；Hongxun Yao；Yiluan Guo。  \n  “基于形状保持KCF和水平集的可变形块非刚性目标跟踪”。ICCV（2017年）。  \n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fpapers\u002FSun_Non-Rigid_Object_Tracking_ICCV_2017_paper.pdf)]\n\n* **UCT:** Zheng Zhu, Guan Huang, Wei Zou, Dalong Du, Chang Huang。  \n  “UCT：用于实时视觉跟踪的统一卷积网络学习”。ICCV研讨会（2017年）。  \n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017_workshops\u002Fpapers\u002Fw28\u002FZhu_UCT_Learning_Unified_ICCV_2017_paper.pdf)]\n\n* Tobias Bottger, Patrick Follmann。  \n  “使用像素级分割评估跟踪器性能的优势”。ICCV研讨会（2017年）。  \n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017_workshops\u002Fpapers\u002Fw28\u002FBottger_The_Benefits_of_ICCV_2017_paper.pdf)]\n\n* **CFWCR:** Zhiqun He, Yingruo Fan, Junfei Zhuang, Yuan Dong, HongLiang Bai。  \n  “具有加权卷积响应的相关滤波器”。ICCV研讨会（2017年）。  \n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017_workshops\u002Fpapers\u002Fw28\u002FHe_Correlation_Filters_With_ICCV_2017_paper.pdf)]  \n  [[GitHub](https:\u002F\u002Fgithub.com\u002Fhe010103\u002FCFWCR)]\n\n* **IBCCF:** Feng Li, Yingjie Yao, Peihua Li, David Zhang, Wangmeng Zuo, Ming-Hsuan Yang。  \n  “结合边界与中心相关滤波器以应对宽高比变化的视觉跟踪”。ICCV研讨会（2017年）。  \n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017_workshops\u002Fpapers\u002Fw28\u002FLi_Integrating_Boundary_and_ICCV_2017_paper.pdf)]  \n  [[GitHub](https:\u002F\u002Fgithub.com\u002Flifeng9472\u002FIBCCF)]\n\n* **RFL:** Tianyu Yang, Antoni B. Chan。  \n  “用于视觉跟踪的循环滤波器学习”。ICCV研讨会（2017年）。  \n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017_workshops\u002Fpapers\u002Fw28\u002FYang_Recurrent_Filter_Learning_ICCV_2017_paper.pdf)]\n\n### CVPR2017\n\n* **ECO:** Martin Danelljan, Goutam Bhat, Fahad Shahbaz Khan, Michael Felsberg。  \n  “ECO：用于目标跟踪的高效卷积算子”。CVPR（2017）。  \n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fpapers\u002FDanelljan_ECO_Efficient_Convolution_CVPR_2017_paper.pdf)]  \n  [[补充材料](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fsupplemental\u002FDanelljan_ECO_Efficient_Convolution_2017_CVPR_supplemental.pdf)]  \n  [[项目主页](http:\u002F\u002Fwww.cvl.isy.liu.se\u002Fresearch\u002Fobjrec\u002Fvisualtracking\u002Fecotrack\u002Findex.html)]  \n  [[GitHub](https:\u002F\u002Fgithub.com\u002Fmartin-danelljan\u002FECO)]\n\n* **CFNet:** Jack Valmadre, Luca Bertinetto, João F. Henriques, Andrea Vedaldi, Philip H. S. Torr。  \n  “基于相关滤波器跟踪的端到端表示学习”。CVPR（2017）。  \n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fpapers\u002FValmadre_End-To-End_Representation_Learning_CVPR_2017_paper.pdf)]  \n  [[补充材料](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fsupplemental\u002FValmadre_End-To-End_Representation_Learning_2017_CVPR_supplemental.pdf)]  \n  [[项目主页](http:\u002F\u002Fwww.robots.ox.ac.uk\u002F~luca\u002Fcfnet.html)]  \n  [[GitHub](https:\u002F\u002Fgithub.com\u002Fbertinetto\u002Fcfnet)]\n\n* **CACF:** Matthias Mueller, Neil Smith, Bernard Ghanem。  \n  “上下文感知的相关滤波器跟踪”。CVPR（2017，口头报告）。  \n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fpapers\u002FMueller_Context-Aware_Correlation_Filter_CVPR_2017_paper.pdf)]  \n  [[补充材料](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fsupplemental\u002FMueller_Context-Aware_Correlation_Filter_2017_CVPR_supplemental.zip)]  \n  [[项目主页](https:\u002F\u002Fivul.kaust.edu.sa\u002FPages\u002Fpub-ca-cf-tracking.aspx)]  \n  [[代码](https:\u002F\u002Fgithub.com\u002Fthias15\u002FContext-Aware-CF-Tracking)]\n\n* **RaF:** Le Zhang, Jagannadan Varadarajan, Ponnuthurai Nagaratnam Suganthan, Narendra Ahuja 和 Pierre Moulin。  \n  “使用斜向随机森林的鲁棒视觉跟踪”。CVPR（2017）。  \n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fpapers\u002FZhang_Robust_Visual_Tracking_CVPR_2017_paper.pdf)]  \n  [[补充材料](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fsupplemental\u002FZhang_Robust_Visual_Tracking_2017_CVPR_supplemental.pdf)]  \n  [[项目主页](https:\u002F\u002Fsites.google.com\u002Fsite\u002Fzhangleuestc\u002Fincremental-oblique-random-forest)]  \n  [[代码](https:\u002F\u002Fgithub.com\u002FZhangLeUestc\u002FIncremental-Oblique-Random-Forest)]\n\n* **MCPF:** Tianzhu Zhang, Changsheng Xu, Ming-Hsuan Yang。  \n  “用于鲁棒目标跟踪的多任务相关粒子滤波器”。CVPR（2017）。  \n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fpapers\u002FZhang_Multi-Task_Correlation_Particle_CVPR_2017_paper.pdf)]  \n  [[项目主页](http:\u002F\u002Fnlpr-web.ia.ac.cn\u002Fmmc\u002Fhomepage\u002Ftzzhang\u002Fmcpf.html)]  \n  [[代码](http:\u002F\u002Fnlpr-web.ia.ac.cn\u002Fmmc\u002Fhomepage\u002Ftzzhang\u002Fmcpf.html)]\n\n* **ACFN:** Jongwon Choi, Hyung Jin Chang, Sangdoo Yun, Tobias Fischer, Yiannis Demiris 和 Jin Young Choi。  \n  “用于自适应视觉跟踪的注意力相关滤波网络”。CVPR（2017）。  \n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fpapers\u002FChoi_Attentional_Correlation_Filter_CVPR_2017_paper.pdf)]  \n  [[补充材料](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fsupplemental\u002FChoi_Attentional_Correlation_Filter_2017_CVPR_supplemental.pdf)]  \n  [[项目主页](https:\u002F\u002Fsites.google.com\u002Fsite\u002Fjwchoivision\u002Fhome\u002Facfn-1)]  \n  [[测试代码](https:\u002F\u002Fdrive.google.com\u002Ffile\u002Fd\u002F0B0ZkG8zaRQoLQUswbW9qSWFaU0U\u002Fview?usp=drive_web)]  \n  [[训练代码](https:\u002F\u002Fdrive.google.com\u002Ffile\u002Fd\u002F0B0ZkG8zaRQoLZVVranBnbHlydnM\u002Fview?usp=drive_web)]\n\n* **LMCF:** Mengmeng Wang, Yong Liu, Zeyi Huang。  \n  “基于循环特征图的大间隔目标跟踪”。CVPR（2017）。  \n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fpapers\u002FWang_Large_Margin_Object_CVPR_2017_paper.pdf)]  \n  [[知乎](https:\u002F\u002Fzhuanlan.zhihu.com\u002Fp\u002F25761718)]\n\n* **ADNet:** Sangdoo Yun, Jongwon Choi, Youngjoon Yoo, Kimin Yun, Jin Young Choi。  \n  “基于深度强化学习的视觉跟踪动作决策网络”。CVPR（2017，Spotlight）。  \n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fpapers\u002FYun_Action-Decision_Networks_for_CVPR_2017_paper.pdf)]  \n  [[补充材料](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fsupplemental\u002FYun_Action-Decision_Networks_for_2017_CVPR_supplemental.pdf)]  \n  [[项目主页](https:\u002F\u002Fsites.google.com\u002Fview\u002Fcvpr2017-adnet)]\n\n* **CSR-DCF:** Alan Lukežič, Tomáš Vojíř, Luka Čehovin, Jiří Matas, Matej Kristan。  \n  “具有通道和空间可靠性的判别相关滤波器”。CVPR（2017）。  \n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fpapers\u002FLukezic_Discriminative_Correlation_Filter_CVPR_2017_paper.pdf)]  \n  [[补充材料](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fsupplemental\u002FLukezic_Discriminative_Correlation_Filter_2017_CVPR_supplemental.pdf)]  \n  [[代码](https:\u002F\u002Fgithub.com\u002Falanlukezic\u002Fcsr-dcf)]\n\n* **BranchOut:** Bohyung Han, Jack Sim, Hartwig Adam。  \n  “BranchOut：基于卷积神经网络的在线集成跟踪正则化方法”。CVPR（2017）。  \n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fpapers\u002FHan_BranchOut_Regularization_for_CVPR_2017_paper.pdf)]\n\n* **AMCT:** Donghun Yeo, Jeany Son, Bohyung Han, Joonhee Han。  \n  “基于马尔可夫链的超像素分割跟踪”。CVPR（2017）。  \n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fpapers\u002FYeo_Superpixel-Based_Tracking-By-Segmentation_Using_CVPR_2017_paper.pdf)]\n\n* **SANet:** Heng Fan, Haibin Ling。  \n  “SANet：面向视觉跟踪的结构感知网络”。CVPRW（2017）。  \n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1611.06878.pdf)]  \n  [[项目主页](http:\u002F\u002Fwww.dabi.temple.edu\u002F~hbling\u002Fcode\u002FSANet\u002FSANet.html)]  \n  [[代码](http:\u002F\u002Fwww.dabi.temple.edu\u002F~hbling\u002Fcode\u002FSANet\u002Fsanet_code.zip)]\n\n### ECCV2016\n\n* **SiameseFC:** Luca Bertinetto、Jack Valmadre、João F. Henriques、Andrea Vedaldi、Philip H.S. Torr。  \n  “用于目标跟踪的全卷积孪生网络”。ECCV研讨会（2016）。  \n  [[论文](http:\u002F\u002F120.52.73.78\u002Farxiv.org\u002Fpdf\u002F1606.09549v2.pdf)]  \n  [[项目](http:\u002F\u002Fwww.robots.ox.ac.uk\u002F~luca\u002Fsiamese-fc.html)]  \n  [[GitHub](https:\u002F\u002Fgithub.com\u002Fbertinetto\u002Fsiamese-fc)]\n\n* **GOTURN:** David Held、Sebastian Thrun、Silvio Savarese。  \n  “使用深度回归网络以100 FPS学习跟踪”。ECCV（2016）。  \n  [[论文](http:\u002F\u002Fdavheld.github.io\u002FGOTURN\u002FGOTURN.pdf)]  \n  [[项目](http:\u002F\u002Fdavheld.github.io\u002FGOTURN\u002FGOTURN.html)]  \n  [[GitHub](https:\u002F\u002Fgithub.com\u002Fdavheld\u002FGOTURN)]\n\n* **C-COT:** Martin Danelljan、Andreas Robinson、Fahad Khan、Michael Felsberg。  \n  “超越相关滤波器：学习用于视觉跟踪的连续卷积算子”。ECCV（2016）。  \n  [[论文](http:\u002F\u002Fwww.cvl.isy.liu.se\u002Fresearch\u002Fobjrec\u002Fvisualtracking\u002Fconttrack\u002FC-COT_ECCV16.pdf)]  \n  [[项目](http:\u002F\u002Fwww.cvl.isy.liu.se\u002Fresearch\u002Fobjrec\u002Fvisualtracking\u002Fconttrack\u002Findex.html)]  \n  [[GitHub](https:\u002F\u002Fgithub.com\u002Fmartin-danelljan\u002FContinuous-ConvOp)]\n\n* **CF+AT:** Adel Bibi、Matthias Mueller、Bernard Ghanem。  \n  “用于相关滤波器跟踪的目标响应自适应”。ECCV（2016）。  \n  [[论文](http:\u002F\u002Fwww.adelbibi.com\u002Fpapers\u002FECCV2016\u002FTarget_Adap.pdf)]  \n  [[项目](https:\u002F\u002Fivul.kaust.edu.sa\u002FPages\u002Fpub-target-response-adaptation.aspx)]  \n  [[GitHub](https:\u002F\u002Fgithub.com\u002Fadelbibi\u002FTarget-Response-Adaptation-for-Correlation-Filter-Tracking)]\n\n* Yao Sui、Ziming Zhang、Guanghui Wang、Yafei Tang、Li Zhang。  \n  “实时视觉跟踪：提升相关滤波器学习的鲁棒性”。ECCV（2016）。  \n  [[论文](http:\u002F\u002F120.52.73.78\u002Farxiv.org\u002Fpdf\u002F1608.08173.pdf)]\n\n* Yao Sui、Guanghui Wang、Yafei Tang、Li Zhang。  \n  “跟踪完成”。ECCV（2016）。  \n  [[论文](http:\u002F\u002F120.52.73.78\u002Farxiv.org\u002Fpdf\u002F1608.08171v1.pdf)]\n\n### CVPR2016\n\n* **MDNet:** Nam, Hyeonseob、Bohyung Han。  \n  “学习用于视觉跟踪的多领域卷积神经网络”。CVPR（2016）。  \n  [[论文](http:\u002F\u002Farxiv.org\u002Fpdf\u002F1510.07945v2.pdf)]  \n  [[VOT演示文稿](http:\u002F\u002Fvotchallenge.net\u002Fvot2015\u002Fdownload\u002Fpresentation_Hyeonseob.pdf)]  \n  [[项目](http:\u002F\u002Fcvlab.postech.ac.kr\u002Fresearch\u002Fmdnet\u002F)]  \n  [[GitHub](https:\u002F\u002Fgithub.com\u002FHyeonseobNam\u002FMDNet)]\n\n* **SINT:** Ran Tao、Efstratios Gavves、Arnold W.M. Smeulders。  \n  “用于跟踪的孪生实例搜索”。CVPR（2016）。  \n  [[论文](https:\u002F\u002Fstaff.science.uva.nl\u002Fr.tao\u002Fpub\u002FTaoCVPR2016.pdf)]  \n  [[项目](https:\u002F\u002Fstaff.fnwi.uva.nl\u002Fr.tao\u002Fprojects\u002FSINT\u002FSINT_proj.html)]\n\n* **SCT:** Jongwon Choi、Hyung Jin Chang、Jiyeoup Jeong、Yiannis Demiris、Jin Young Choi。  \n  “利用注意力调制的分解与融合进行视觉跟踪”。CVPR（2016）。  \n  [[论文](http:\u002F\u002Fwww.cv-foundation.org\u002Fopenaccess\u002Fcontent_cvpr_2016\u002Fpapers\u002FChoi_Visual_Tracking_Using_CVPR_2016_paper.pdf)]  \n  [[项目](https:\u002F\u002Fsites.google.com\u002Fsite\u002Fjwchoivision\u002Fhome\u002Fsct)]\n\n* **STCT:** Lijun Wang、Wanli Ouyang、Xiaogang Wang、Huchuan Lu。  \n  “STCT：用于视觉跟踪的序列式训练卷积网络”。CVPR（2016）。  \n  [[论文](http:\u002F\u002Fwww.ee.cuhk.edu.hk\u002F~wlouyang\u002FPapers\u002FWangLJ_CVPR16.pdf)]  \n  [[GitHub](https:\u002F\u002Fgithub.com\u002Fscott89\u002FSTCT)]\n\n* **SRDCFdecon:** Martin Danelljan、Gustav Häger、Fahad Khan、Michael Felsberg。  \n  “训练集的自适应去污：判别式视觉跟踪的统一公式”。CVPR（2016）。  \n  [[论文](https:\u002F\u002Fwww.cvl.isy.liu.se\u002Fresearch\u002Fobjrec\u002Fvisualtracking\u002Fdecontrack\u002FAdaptiveDecon_CVPR16.pdf)]  \n  [[项目](https:\u002F\u002Fwww.cvl.isy.liu.se\u002Fresearch\u002Fobjrec\u002Fvisualtracking\u002Fdecontrack\u002Findex.html)]\n\n* **HDT:** Yuankai Qi、Shengping Zhang、Lei Qin、Hongxun Yao、Qingming Huang、Jongwoo Lim、Ming-Hsuan Yang。  \n  “有保障的深度跟踪”。CVPR（2016）。  \n  [[论文](http:\u002F\u002Ffaculty.ucmerced.edu\u002Fmhyang\u002Fpapers\u002Fcvpr16_hedge_tracking.pdf)]  \n  [[项目](https:\u002F\u002Fsites.google.com\u002Fsite\u002Fyuankiqi\u002Fhdt\u002F)]\n\n* **Staple:** Luca Bertinetto、Jack Valmadre、Stuart Golodetz、Ondrej Miksik、Philip H.S. Torr。  \n  “Staple：用于实时跟踪的互补学习者”。CVPR（2016）。  \n  [[论文](http:\u002F\u002F120.52.73.75\u002Farxiv.org\u002Fpdf\u002F1512.01355v2.pdf)]  \n  [[项目](http:\u002F\u002Fwww.robots.ox.ac.uk\u002F~luca\u002Fstaple.html)]  \n  [[GitHub](https:\u002F\u002Fgithub.com\u002Fbertinetto\u002Fstaple)]\n\n* **EBT:** Gao Zhu、Fatih Porikli、Hongdong Li。  \n  “超越局部搜索：通过实例特定提案在任何地方跟踪目标”。CVPR（2016）。  \n  [[论文](http:\u002F\u002Fwww.cv-foundation.org\u002Fopenaccess\u002Fcontent_cvpr_2016\u002Fpapers\u002FZhu_Beyond_Local_Search_CVPR_2016_paper.pdf)]  \n  [[可执行文件](http:\u002F\u002Fwww.votchallenge.net\u002Fvot2016\u002Fdownload\u002F02_EBT.zip)]\n\n* **DLSSVM:** Jifeng Ning、Jimei Yang、Shaojie Jiang、Lei Zhang、Ming-Hsuan Yang。  \n  “基于双线性结构化SVM和显式特征映射的目标跟踪”。CVPR（2016）。  \n  [[论文](http:\u002F\u002Fwww4.comp.polyu.edu.hk\u002F~cslzhang\u002Fpaper\u002Fcvpr16\u002FDLSSVM.pdf)]  \n  [[代码](http:\u002F\u002Fwww4.comp.polyu.edu.hk\u002F~cslzhang\u002Fcode\u002FDLSSVM_CVPR.zip)]  \n  [[项目](http:\u002F\u002Fwww4.comp.polyu.edu.hk\u002F~cslzhang\u002FDLSSVM\u002FDLSSVM.htm)]\n\n### NIPS2016\n* **Learnet:** Luca Bertinetto、João F. Henriques、Jack Valmadre、Philip H. S. Torr、Andrea Vedaldi。  \n  “学习前馈式一次性学习器”。NIPS（2016）。  \n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1606.05233v1.pdf)]\n\n### ICCV2015\n\n* **FCNT:** 王利军、欧阳婉丽、王小刚和陆虎川。  \n  “基于全卷积网络的视觉跟踪”。ICCV（2015）。  \n  [[论文](http:\u002F\u002F202.118.75.4\u002Flu\u002FPaper\u002FICCV2015\u002Ficcv15_lijun.pdf)]  \n  [[项目](http:\u002F\u002Fscott89.github.io\u002FFCNT\u002F)]  \n  [[GitHub](https:\u002F\u002Fgithub.com\u002Fscott89\u002FFCNT)]\n\n* **SRDCF:** 马丁·丹内尔扬、古斯塔夫·黑格、法哈德·汗、迈克尔·费尔斯贝格。  \n  “学习空间正则化的相关滤波器用于视觉跟踪”。ICCV（2015）。  \n  [[论文](https:\u002F\u002Fwww.cvl.isy.liu.se\u002Fresearch\u002Fobjrec\u002Fvisualtracking\u002Fregvistrack\u002FSRDCF_ICCV15.pdf)]  \n  [[项目](https:\u002F\u002Fwww.cvl.isy.liu.se\u002Fresearch\u002Fobjrec\u002Fvisualtracking\u002Fregvistrack\u002F)]\n\n* **CF2:** 马超、黄家彬、杨晓康和杨明轩。  \n  “用于视觉跟踪的层次化卷积特征”。ICCV（2015）  \n  [[论文](http:\u002F\u002Ffaculty.ucmerced.edu\u002Fmhyang\u002Fpapers\u002Ficcv15_tracking.pdf)]  \n  [[项目](https:\u002F\u002Fsites.google.com\u002Fsite\u002Fjbhuang0604\u002Fpublications\u002Fcf2)]  \n  [[GitHub](https:\u002F\u002Fgithub.com\u002Fjbhuang0604\u002FCF2)]\n\n* 王乃延、施建平、叶迪勇和贾佳亚。  \n  “理解和诊断视觉跟踪系统”。ICCV（2015）。  \n  [[论文](http:\u002F\u002Fwinsty.net\u002Fpapers\u002Fdiagnose.pdf)]  \n  [[项目](http:\u002F\u002Fwinsty.net\u002Ftracker_diagnose.html)]  \n  [[代码](http:\u002F\u002Fwinsty.net\u002Fdiagnose\u002Fdiagnose_code.zip)]\n\n* **DeepSRDCF:** 马丁·丹内尔扬、古斯塔夫·黑格、法哈德·汗、迈克尔·费尔斯贝格。  \n  “基于相关滤波器的视觉跟踪中的卷积特征”。ICCV研讨会（2015）。  \n  [[论文](https:\u002F\u002Fwww.cvl.isy.liu.se\u002Fresearch\u002Fobjrec\u002Fvisualtracking\u002Fregvistrack\u002FConvDCF_ICCV15_VOTworkshop.pdf)]  \n  [[项目](https:\u002F\u002Fwww.cvl.isy.liu.se\u002Fresearch\u002Fobjrec\u002Fvisualtracking\u002Fregvistrack\u002F)]\n\n* **RAJSSC:** 张梦丹、邢俊亮、高进、石新初、王强和胡伟明。  \n  “联合尺度-空间相关跟踪与自适应旋转估计”。ICCV研讨会（2015）。  \n  [[论文](http:\u002F\u002Fwww.cv-foundation.org\u002F\u002Fopenaccess\u002Fcontent_iccv_2015_workshops\u002Fw14\u002Fpapers\u002FZhang_Joint_Scale-Spatial_Correlation_ICCV_2015_paper.pdf)]  \n  [[海报](http:\u002F\u002Fwww.votchallenge.net\u002Fvot2015\u002Fdownload\u002Fposter_Mengdan_Zhang.pdf)]\n\n### CVPR2015\n\n* **MUSTer:** 洪志斌、陈哲、王超辉、梅雪、达尼尔·普罗霍罗夫和陶大成。  \n  “多存储跟踪器（MUSTer）：受认知心理学启发的目标跟踪方法”。CVPR（2015）。  \n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2015\u002Fpapers\u002FHong_MUlti-Store_Tracker_MUSTer_2015_CVPR_paper.pdf)]  \n  [[项目](https:\u002F\u002Fsites.google.com\u002Fsite\u002Fmultistoretrackermuster\u002F)]\n\n* **LCT:** 马超、杨晓康、张崇阳和杨明轩。  \n  “长期相关跟踪”。CVPR（2015）。  \n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2015\u002Fpapers\u002FMa_Long-Term_Correlation_Tracking_2015_CVPR_paper.pdf)]  \n  [[项目](https:\u002F\u002Fsites.google.com\u002Fsite\u002Fchaoma99\u002Fcvpr15_tracking)]  \n  [[GitHub](https:\u002F\u002Fgithub.com\u002Fchaoma99\u002Flct-tracker)]\n\n* **DAT:** 霍斯特·波塞格尔、托马斯·毛特纳和霍斯特·比绍夫。  \n  “为基于颜色的无模型跟踪辩护”。CVPR（2015）。  \n  [[论文](https:\u002F\u002Flrs.icg.tugraz.at\u002Fpubs\u002Fpossegger_cvpr15.pdf)]  \n  [[项目](https:\u002F\u002Fwww.tugraz.at\u002Finstitute\u002Ficg\u002Fresearch\u002Fteam-bischof\u002Flrs\u002Fdownloads\u002Fdat)]  \n  [[代码](https:\u002F\u002Flrs.icg.tugraz.at\u002Fdownloads\u002Fdat-v1.0.zip)]\n\n* **RPT:** 李洋、朱建科和史蒂文·C.H. 霍伊。  \n  “可靠补丁跟踪器：通过利用可靠补丁实现鲁棒视觉跟踪”。CVPR（2015）。  \n  [[论文](https:\u002F\u002Fgithub.com\u002Fihpdep\u002Fihpdep.github.io\u002Fraw\u002Fmaster\u002Fpapers\u002Fcvpr15_rpt.pdf)]  \n  [[GitHub](https:\u002F\u002Fgithub.com\u002Fihpdep\u002Frpt)]\n\n### ICML2015\n\n* **CNN-SVM:** 洪承勋、刘宅根、郭秀河和韩宝亨。  \n  “基于卷积神经网络学习判别性显著图的在线跟踪”。ICML（2015）。  \n  [[论文](http:\u002F\u002F120.52.73.80\u002Farxiv.org\u002Fpdf\u002F1502.06796.pdf)]  \n  [[项目](http:\u002F\u002Fcvlab.postech.ac.kr\u002Fresearch\u002FCNN_SVM\u002F)]\n\n### BMVC2014\n\n* **DSST:** 马丁·丹内尔扬、古斯塔夫·黑格、法哈德·沙赫巴兹·汗和迈克尔·费尔斯贝格。  \n  “用于鲁棒视觉跟踪的精确尺度估计”。BMVC（2014）。  \n  [[论文](http:\u002F\u002Fwww.cvl.isy.liu.se\u002Fresearch\u002Fobjrec\u002Fvisualtracking\u002Fscalvistrack\u002FScaleTracking_BMVC14.pdf)]  \n  [[PAMI](http:\u002F\u002Fwww.cvl.isy.liu.se\u002Fen\u002Fresearch\u002Fobjrec\u002Fvisualtracking\u002Fscalvistrack\u002FDSST_TPAMI.pdf)]  \n  [[项目](http:\u002F\u002Fwww.cvl.isy.liu.se\u002Fen\u002Fresearch\u002Fobjrec\u002Fvisualtracking\u002Fscalvistrack\u002Findex.html)]\n\n### ECCV2014\n\n* **MEEM:** 张建明、马树高和斯坦·斯克拉罗夫。  \n  “MEEM：基于熵最小化的多专家鲁棒跟踪”。ECCV（2014）。  \n  [[论文](http:\u002F\u002Fcs-people.bu.edu\u002Fjmzhang\u002FMEEM\u002FMEEM-eccv-preprint.pdf)]  \n  [[项目](http:\u002F\u002Fcs-people.bu.edu\u002Fjmzhang\u002FMEEM\u002FMEEM.html)]\n\n* **TGPR:** 高进、凌海宾、胡伟明和邢俊亮。  \n  “基于高斯过程回归的迁移学习视觉跟踪”。ECCV（2014）。  \n  [[论文](http:\u002F\u002Fwww.dabi.temple.edu\u002F~hbling\u002Fpublication\u002Ftgpr-eccv14.pdf)]  \n  [[项目](http:\u002F\u002Fwww.dabi.temple.edu\u002F~hbling\u002Fcode\u002FTGPR.htm)]\n\n* **STC:** 张凯华、张磊、杨明轩和戴维·张。  \n  “基于时空上下文学习的快速跟踪”。ECCV（2014）。  \n  [[论文](http:\u002F\u002Farxiv.org\u002Fpdf\u002F1311.1939v1.pdf)]  \n  [[项目](http:\u002F\u002Fwww4.comp.polyu.edu.hk\u002F~cslzhang\u002FSTC\u002FSTC.htm)]\n\n* **SAMF:** 李洋和朱建科。  \n  “具有特征融合的尺度自适应核相关滤波器跟踪器”。ECCV研讨会（2014）。  \n  [[论文](http:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007%2F978-3-319-16181-5_18.pdf)]  \n  [[GitHub](https:\u002F\u002Fgithub.com\u002Fihpdep\u002Fsamf)]\n\n### NIPS2013\n\n* **DLT:** 王乃岩和叶迪雄。\n  “为视觉跟踪学习深度紧凑图像表示”。NIPS（2013年）。\n  [[论文](http:\u002F\u002Fwinsty.net\u002Fpapers\u002Fdlt.pdf)]\n  [[项目](http:\u002F\u002Fwinsty.net\u002Fdlt.html)]\n  [[代码](http:\u002F\u002Fwinsty.net\u002Fdlt\u002FDLTcode.zip)]\n\n ### PAMI、IJCV 和 TIP\n\n* **MCPF:** 张天柱、徐常胜、杨明轩。\n    “用于视觉跟踪的多任务相关性粒子滤波器学习”。TPAMI（2017年）。\n      [[论文]]\n      [[项目](http:\u002F\u002Fnlpr-web.ia.ac.cn\u002Fmmc\u002Fhomepage\u002Ftzzhang\u002Flmcpf.html)]\n      [[代码](http:\u002F\u002Fnlpr-web.ia.ac.cn\u002Fmmc\u002Fhomepage\u002Ftzzhang\u002FProject_Tianzhu\u002Fzhang_mcpf\u002FSource_Code\u002FSource_Code.zip)] \n\n* **RSST:** 张天柱、徐常胜、杨明轩。\n  “鲁棒结构稀疏跟踪”。TPAMI（2017年）。\n  [[论文]]\n  [[项目](http:\u002F\u002Fnlpr-web.ia.ac.cn\u002Fmmc\u002Fhomepage\u002Ftzzhang\u002Frsst.html)]\n  [[代码](http:\u002F\u002Fnlpr-web.ia.ac.cn\u002Fmmc\u002Fhomepage\u002Ftzzhang\u002FProject_Tianzhu\u002Fzhang_RSST\u002FRSSTDeep\u002FRSSTDeep_Code.zip)] \n\n* **fDSST:** 马丁·丹内尔扬、古斯塔夫·海格、法哈德·汗、迈克尔·费尔斯贝格。\n  “判别尺度空间跟踪”。TPAMI（2017年）。\n  [[论文](http:\u002F\u002Fwww.cvl.isy.liu.se\u002Fresearch\u002Fobjrec\u002Fvisualtracking\u002Fscalvistrack\u002FDSST_TPAMI.pdf)]\n  [[项目](http:\u002F\u002Fwww.cvl.isy.liu.se\u002Fresearch\u002Fobjrec\u002Fvisualtracking\u002Fscalvistrack\u002Findex.html)]\n  [[代码](http:\u002F\u002Fwww.cvl.isy.liu.se\u002Fresearch\u002Fobjrec\u002Fvisualtracking\u002Fscalvistrack\u002FfDSST_code.zip)] \n\n* **KCF:** 若昂·F·恩里克什、鲁伊·卡塞罗、佩德罗·马丁斯、若热·巴蒂斯塔。\n  “基于核相关滤波器的高速目标跟踪”。TPAMI（2015年）。\n  [[论文](http:\u002F\u002Fwww.robots.ox.ac.uk\u002F~joao\u002Fpublications\u002Fhenriques_tpami2015.pdf)]\n  [[项目](http:\u002F\u002Fwww.robots.ox.ac.uk\u002F~joao\u002Fcirculant\u002F)]\n\n* **CLRST:** 张天柱、刘思、纳伦德拉·阿胡贾、杨明轩、伯纳德·加内姆。\n  “通过一致低秩稀疏学习实现鲁棒视觉跟踪”。IJCV（2015年）。\n  [[论文](http:\u002F\u002Fnlpr-web.ia.ac.cn\u002Fmmc\u002Fhomepage\u002Ftzzhang\u002Ftianzhu%20zhang_files\u002FJournal%20Articles\u002FIJCV15_zhang_Low-Rank%20Sparse%20Learning.pdf)]\n  [[项目](http:\u002F\u002Fnlpr-web.ia.ac.cn\u002Fmmc\u002Fhomepage\u002Ftzzhang\u002FProject_Tianzhu\u002Fzhang_IJCV14\u002FRobust%20Visual%20Tracking%20Via%20Consistent%20Low-Rank%20Sparse.html)]\n  [[代码](http:\u002F\u002Fnlpr-web.ia.ac.cn\u002Fmmc\u002Fhomepage\u002Ftzzhang\u002FProject_Tianzhu\u002Fzhang_IJCV14\u002Fmaterial\u002FLRT_Code.zip)]\n\n* **DNT:** 池志珍、李洪洋、陆虎川、杨明轩。\n  “用于视觉跟踪的双深度网络”。TIP（2017年）。\n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1612.06053v1.pdf)]\n\n* **DRT:** 高俊宇、张天柱、杨晓山、徐常胜。\n  “深度相对跟踪”。TIP（2017年）。\n  [[论文](http:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F7828108\u002F)]\n\n* **BIT:** 蔡博伦、许向民、邢晓芬、贾奎、缪杰、陶大成。\n  “BIT：生物启发式跟踪器”。TIP（2016年）。\n  [[论文](http:\u002F\u002Fcaibolun.github.io\u002Fpapers\u002FBIT_TIP.pdf)]\n  [[项目](http:\u002F\u002Fcaibolun.github.io\u002FBIT\u002Findex.html)]\n  [[GitHub](https:\u002F\u002Fgithub.com\u002Fcaibolun\u002FBIT)]\n\n* **CNT:** 张凯华、刘庆山、吴毅、杨明轩。\n  “无需训练的卷积网络实现鲁棒视觉跟踪”。TIP（2016年）。\n  [[论文](http:\u002F\u002Fkaihuazhang.net\u002FCNT.pdf)]\n  [[代码](http:\u002F\u002Fkaihuazhang.net\u002FCNT_matlab.rar)]\n\n## 基准数据集\n\n* **LaSOT:** 范恒、林丽婷、杨帆、楚鹏、邓戈、于思佳、白鹤欣、徐勇、廖春元、凌海斌。\u003Cbr \u002F>\n  “LaSOT：大规模单目标跟踪的高质量基准数据集”。CVPR（2019年）。\u003Cbr \u002F>\n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1809.07845.pdf)]\u003Cbr \u002F>\n  [[项目](https:\u002F\u002Fcis.temple.edu\u002Flasot\u002F)]\n\n* **OxUvA 长期数据集+基准**：杰克·瓦尔马德雷、卢卡·贝尔蒂内托、若昂·F·恩里克斯、陶然、安德烈亚·韦达尔迪、阿诺德·斯梅尔德斯、菲利普·托尔、埃夫斯特拉提奥斯·加维斯。\u003Cbr \u002F>\n  “野外长期跟踪：一个基准数据集”。ECCV（2018年）。\u003Cbr \u002F>\n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1803.09502.pdf)]\u003Cbr \u002F>\n  [[项目](https:\u002F\u002Foxuva.github.io\u002Flong-term-tracking-benchmark\u002F)]\n\n* **TrackingNet:** 马蒂亚斯·穆勒、阿德尔·比比、西尔维奥·詹科拉、萨尔曼·阿尔-苏拜希、伯纳德·加内姆。\u003Cbr \u002F>\n  “TrackingNet：野外目标跟踪的大规模数据集与基准”。ECCV（2018年）。\u003Cbr \u002F>\n  [[项目](https:\u002F\u002Fsilviogiancola.github.io\u002Fpublication\u002F2018-03-trackingnet\u002Fdetails\u002F)]\u003Cbr \u002F>\n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1803.10794.pdf)]\n\n* **UAVDT:** 杜大伟、齐元凯、于洪洋、杨一芳、段凯文、李国荣、张卫刚、魏海；黄清明、田琦。\u003Cbr \u002F>\n  “无人机基准：目标检测与跟踪”。ECCV（2018年）。\u003Cbr \u002F>\n  [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1804.00518.pdf)]\n\n* **Dataset-AMP:** 卢卡·切霍文·扎伊茨；艾伦·卢克日奇；阿莱什·莱昂纳迪斯；马泰伊·克里斯坦。\u003Cbr \u002F>\n  “超越标准基准：视觉目标跟踪中性能评估的参数化方法”。ICCV（2017年）。\u003Cbr \u002F>\n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fpapers\u002FZajc_Beyond_Standard_Benchmarks_ICCV_2017_paper.pdf)]\n\n* **Dataset-Nfs:** 哈迈德·基亚尼·加卢加希、阿什顿·法格、陈黄、德瓦·拉马南和西蒙·卢西。\u003Cbr \u002F>\n  “速度需求：用于更高帧率目标跟踪的基准数据集”。ICCV（2017年）。\u003Cbr \u002F>\n  [[论文](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fpapers\u002FGaloogahi_Need_for_Speed_ICCV_2017_paper.pdf)]\u003Cbr \u002F>\n  [[补充材料](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fsupplemental\u002FGaloogahi_Need_for_Speed_ICCV_2017_supplemental.pdf)]\u003Cbr \u002F>\n  [[项目](http:\u002F\u002Fci2cv.net\u002Fnfs\u002Findex.html)]\n\n* **Dataset-DTB70:** 李思义、叶定言。\u003Cbr \u002F>\n  “无人机视觉目标跟踪：基准数据集与新型运动模型”。AAAI（2017年）。\u003Cbr \u002F>\n  [[论文](http:\u002F\u002Faaai.org\u002Focs\u002Findex.php\u002FAAAI\u002FAAAI17\u002Fpaper\u002Fview\u002F14338\u002F14292)]\u003Cbr \u002F>\n  [[项目](https:\u002F\u002Fgithub.com\u002Fflyers\u002Fdrone-tracking)]\u003Cbr \u002F>\n  [[数据集](https:\u002F\u002Fwww.dropbox.com\u002Fs\u002Fs1fj99s2six4lrs\u002FDTB70.tar.gz?dl=0)]\n\n* **Dataset-UAV123:** 马蒂亚斯·穆勒、尼尔·史密斯和伯纳德·加内姆。\u003Cbr \u002F>\n  “无人机跟踪的基准数据集与模拟器”。ECCV（2016年）。\u003Cbr \u002F>\n  [[论文](https:\u002F\u002Fivul.kaust.edu.sa\u002FDocuments\u002FPublications\u002F2016\u002FA%20Benchmark%20and%20Simulator%20for%20UAV%20Tracking.pdf)]\u003Cbr \u002F>\n  [[项目](https:\u002F\u002Fivul.kaust.edu.sa\u002FPages\u002Fpub-benchmark-simulator-uav.aspx)]\u003Cbr \u002F>\n  [[数据集](https:\u002F\u002Fivul.kaust.edu.sa\u002FPages\u002FDataset-UAV123.aspx)]\n\n* **Dataset-TColor-128:** 梁鹏鹏、埃里克·布拉施、凌海斌。\u003Cbr \u002F>\n  “为视觉跟踪编码颜色信息：算法与基准数据集”。TIP（2015年）。\u003Cbr \u002F>\n  [[论文](http:\u002F\u002Fwww.dabi.temple.edu\u002F~hbling\u002Fpublication\u002FTColor-128.pdf)]\u003Cbr \u002F>\n  [[项目](http:\u002F\u002Fwww.dabi.temple.edu\u002F~hbling\u002Fdata\u002FTColor-128\u002FTColor-128.html)]\u003Cbr \u002F>\n  [[数据集](http:\u002F\u002Fwww.dabi.temple.edu\u002F~hbling\u002Fdata\u002FTColor-128\u002FTemple-color-128.zip)]\n\n* **Dataset-NUS-PRO:** 李安娜、林敏、吴毅、杨明轩和颜水成。\u003Cbr \u002F>\n  “NUS-PRO：一个新的视觉跟踪挑战”。PAMI（2015年）。\u003Cbr \u002F>\n  [[论文](http:\u002F\u002Ffaculty.ucmerced.edu\u002Fmhyang\u002Fpapers\u002Fpami15_nus_pro.pdf)]\u003Cbr \u002F>\n  [[项目](https:\u002F\u002Fsites.google.com\u002Fsite\u002Fli00annan\u002Fnus-pro)]\u003Cbr \u002F>\n  [[360度数据](https:\u002F\u002Fd9fca6.lc.yunpan.cn\u002Flk\u002FcqKIc6DU3t2eJ)(密码：bf28)]\u003Cbr \u002F>\n  [[百度云数据](https:\u002F\u002Fpan.baidu.com\u002Fs\u002F1pJHvbSn#list\u002Fpath=%2F)]\u003Cbr \u002F>\n  [[360度视图](https:\u002F\u002F6aa275.lc.yunpan.cn\u002Flk\u002FcqK479PfzDrPX)(密码：515a)]\u003Cbr \u002F>\n  [[百度云视图](https:\u002F\u002Fpan.baidu.com\u002Fs\u002F1hqKXcuK)]\u003Cbr \u002F>\n\n* **Dataset-PTB:** 宋舒然和肖建雄。\u003Cbr \u002F>\n  “利用RGBD相机重访目标跟踪：统一的基准与基线”。ICCV（2013年）。\u003Cbr \u002F>\n  [[论文](http:\u002F\u002Fvision.princeton.edu\u002Fprojects\u002F2013\u002Ftracking\u002Fpaper.pdf)]\u003Cbr \u002F>\n  [[项目](http:\u002F\u002Ftracking.cs.princeton.edu\u002F)]\u003Cbr \u002F>\n  [[5个验证集](http:\u002F\u002Ftracking.cs.princeton.edu\u002FValidationSet.zip)]\u003Cbr \u002F>\n  [[95个评估集](http:\u002F\u002Ftracking.cs.princeton.edu\u002FEvaluationSet.tgz)]\n\n* **Dataset-ALOV300+:** 阿诺德·W·M·斯梅尔德斯、朱登明、丽塔·库奇阿拉、西蒙娜·卡尔德拉拉、阿夫辛·德赫甘、穆巴拉克·沙赫。\u003Cbr \u002F>\n  “视觉跟踪：一项实验性综述”。PAMI（2014年）。\u003Cbr \u002F>\n  [[论文](http:\u002F\u002Fcrcv.ucf.edu\u002Fpapers\u002FTracking_Survey.pdf)]\u003Cbr \u002F>\n  [[项目](http:\u002F\u002Fimagelab.ing.unimore.it\u002Fdsm\u002F)]\u003Cbr \u002F>\n  [镜像链接：ALOV300++ 数据集](http:\u002F\u002Fcrcv.ucf.edu\u002Fpeople\u002Fphd_students\u002Fafshin\u002FALOV300\u002FFrames.zip)\u003Cbr \u002F>\n  [镜像链接：ALOV300++ 真值标注](http:\u002F\u002Fcrcv.ucf.edu\u002Fpeople\u002Fphd_students\u002Fafshin\u002FALOV300\u002FGT.zip)\n\n* **OTB2013:** 吴毅、林宗宇和杨明轩。\u003Cbr \u002F>\n  “在线目标跟踪：一个基准数据集”。CVPR（2013年）。\u003Cbr \u002F>\n  [[论文](http:\u002F\u002Ffaculty.ucmerced.edu\u002Fmhyang\u002Fpapers\u002Fcvpr13_benchmark.pdf)]\n\n* **OTB2015:** 吴毅、林宗宇和杨明轩。\u003Cbr \u002F>\n  “目标跟踪基准数据集”。TPAMI（2015年）。\u003Cbr \u002F>\n  [[论文](http:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=7001050&tag=1)]\u003Cbr \u002F>\n  [[项目](http:\u002F\u002Fcvlab.hanyang.ac.kr\u002Ftracker_benchmark\u002Findex.html)]\n\n* **Dataset-VOT:**\n  **[[项目](http:\u002F\u002Fwww.votchallenge.net\u002F)]**\n\n**[[VOT13_paper_ICCV](http:\u002F\u002Fwww.votchallenge.net\u002Fvot2013\u002FDownload\u002Fvot_2013_paper.pdf)]2013年视觉目标跟踪挑战赛结果**\n\n**[[VOT14_paper_ECCV](http:\u002F\u002Fwww.votchallenge.net\u002Fvot2014\u002Fdownload\u002Fvot_2014_paper.pdf)]2014年视觉目标跟踪挑战赛结果**\n\n**[[VOT15_paper_ICCV](http:\u002F\u002Fwww.votchallenge.net\u002Fvot2015\u002Fdownload\u002Fvot_2015_paper.pdf)]2015年视觉目标跟踪挑战赛结果**\n\n**[[VOT16_paper_ECCV](http:\u002F\u002Fwww.votchallenge.net\u002Fvot2016\u002Fdownload\u002Fvot_2016_paper.pdf)]2016年视觉目标跟踪挑战赛结果**\n\n**[[VOT17_paper_ICCV](http:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017_workshops\u002Fpapers\u002Fw28\u002FKristan_The_Visual_Object_ICCV_2017_paper.pdf)]2017年视觉目标跟踪挑战赛结果**\n\n## 杰出研究者与团队\n在视觉跟踪领域发表了三篇以上对本领域产生重大影响论文、且目前仍在该领域活跃的杰出视觉跟踪研究者。（名单不分先后。）\n\n* [杨明轩](http:\u002F\u002Ffaculty.ucmerced.edu\u002Fmhyang\u002F)\n* [凌海斌](http:\u002F\u002Fwww.dabi.temple.edu\u002F~hbling\u002F)\n* [陆虎川](http:\u002F\u002Fice.dlut.edu.cn\u002Flu\u002F)\n* [李宏东](http:\u002F\u002Fusers.cecs.anu.edu.au\u002F~hongdong\u002F)\n* [张磊](http:\u002F\u002Fwww4.comp.polyu.edu.hk\u002F~cslzhang\u002F)\n* [马泰伊·克里斯坦](http:\u002F\u002Fwww.vicos.si\u002FPeople\u002FMatejk)\n* [若昂·F·恩里克斯](http:\u002F\u002Fwww.robots.ox.ac.uk\u002F~joao\u002F)\n* [马丁·丹内尔扬](http:\u002F\u002Fusers.isy.liu.se\u002Fcvl\u002Fmarda26\u002F)\n* [张凯华](http:\u002F\u002Fkaihuazhang.net\u002F)\n* [哈迈德·基亚尼](http:\u002F\u002Fwww.hamedkiani.com\u002F)\n* [卢卡·贝尔蒂内托](http:\u002F\u002Fwww.robots.ox.ac.uk\u002F~luca\u002Findex.html)\n* [张天柱](http:\u002F\u002Fnlpr-web.ia.ac.cn\u002Fmmc\u002Fhomepage\u002Ftzzhang\u002Findex.html)\n* [马超](https:\u002F\u002Fwww.chaoma.info\u002F)\n* [宋一兵](https:\u002F\u002Fybsong00.github.io\u002F)\n* [王栋](http:\u002F\u002Fwww.escience.cn\u002Fpeople\u002Fwangdongdut\u002Findex.html)\n* [**托尔视觉组**](http:\u002F\u002Fwww.robots.ox.ac.uk\u002F~tvg\u002Fpeople.php)\n* [**浦项工科大学计算机视觉实验室**](http:\u002F\u002Fcvlab.postech.ac.kr\u002Flab\u002Findex.php)","# Visual-Tracking-Development 快速上手指南\n\n> **注意**：`Visual-Tracking-Development` 是一个视觉跟踪领域的**论文与代码资源汇总仓库**，而非单一的算法模型。本指南将指导你如何获取该资源列表，并演示如何快速运行列表中推荐的一个经典跟踪项目（以 SAM-Track 为例）。\n\n## 环境准备\n\n在开始之前，请确保你的开发环境满足以下基本要求：\n\n*   **操作系统**：Linux (推荐 Ubuntu 20.04\u002F22.04) 或 macOS。Windows 用户建议使用 WSL2。\n*   **Python 版本**：Python 3.8 或更高版本。\n*   **GPU 支持**：建议配备 NVIDIA GPU 并安装对应的 CUDA 驱动（大多数深度学习跟踪模型需要 GPU 加速）。\n*   **前置依赖**：\n    *   Git\n    *   pip 或 conda (推荐使用 conda 管理环境)\n    *   PyTorch (需根据具体子项目版本要求安装)\n\n## 安装步骤\n\n由于本仓库是资源索引，你首先需要克隆该仓库以获取最新的论文列表和对应的项目链接，然后选择感兴趣的具体项目进行安装。\n\n### 1. 克隆资源仓库\n```bash\ngit clone https:\u002F\u002Fgithub.com\u002Fdavidzhang0915\u002FVisual-Tracking-Development.git\ncd Visual-Tracking-Development\n```\n*(注：如果访问 GitHub 较慢，可使用国内镜像加速，例如通过 `git clone https:\u002F\u002Fghproxy.com\u002Fhttps:\u002F\u002Fgithub.com\u002Fdavidzhang0915\u002FVisual-Tracking-Development.git`)*\n\n### 2. 选择并安装具体项目\n浏览仓库中的 `README.md` 找到你感兴趣的项目（例如 **SAM-Track**），进入其官方代码仓库进行安装。以下以 **SAM-Track** 为例：\n\n```bash\n# 克隆具体项目代码\ngit clone https:\u002F\u002Fgithub.com\u002Fz-x-yang\u002FSegment-and-Track-Anything.git\ncd Segment-and-Track-Anything\n\n# 创建虚拟环境 (推荐)\nconda create -n samtrack python=3.9\nconda activate samtrack\n\n# 安装 PyTorch (请根据你的 CUDA 版本前往 pytorch.org 获取具体命令，以下为通用示例)\npip install torch torchvision torchaudio --index-url https:\u002F\u002Fdownload.pytorch.org\u002Fwhl\u002Fcu118\n\n# 安装项目依赖\npip install -r requirements.txt\n```\n\n## 基本使用\n\n安装完成后，你可以参考具体项目的文档进行推理。以下是基于 **SAM-Track** 的最简使用示例，用于对视频中的物体进行分割与跟踪。\n\n### 运行推理示例\n\n假设你有一个输入视频 `input.mp4`，并希望跟踪第一帧中指定的目标：\n\n```bash\npython inference.py \\\n    --video_path .\u002Fassets\u002Finput.mp4 \\\n    --output_dir .\u002Fresults \\\n    --box_prompt \"[x1, y1, x2, y2]\" \n```\n\n*   `--box_prompt`: 替换为实际的目标检测框坐标（例如 `[200, 150, 400, 350]`）。\n*   如果是交互式点击提示，请使用 `--point_prompt` 参数。\n\n### 查看结果\n运行结束后，生成的跟踪结果视频和掩码序列将保存在 `.\u002Fresults` 目录中。你可以使用播放器查看生成的视频文件，或使用 Python\u002FOpenCV 进一步处理掩码数据。\n\n---\n**提示**：该仓库持续更新 AAAI、ICCV、NeurIPS 等顶会的最新论文。如需复现其他列表中的模型（如 `LoRATv2`, `FARTrack` 等），请重复“安装步骤”中的第 2 步，前往对应论文的官方代码仓库进行操作。","某安防监控团队正在开发一套智能视频分析系统，需要从长达数小时的监控录像中持续锁定并追踪特定嫌疑人的移动轨迹。\n\n### 没有 Visual-Tracking-Development 时\n- **算法选型迷茫**：面对海量的跟踪论文（如判别滤波器、孪生网络等），开发人员难以快速甄别哪些是业界公认的前沿方案，导致技术调研耗时数周。\n- **复现成本高昂**：缺乏统一的代码基准和权威综述，团队需从零复现不同算法，常因细节缺失导致模型无法收敛或性能不达预期。\n- **场景适应性差**：传统方法在目标被遮挡或快速运动时极易丢失目标，且难以结合最新的分割大模型（如 SAM）提升鲁棒性。\n- **评估标准缺失**：缺少系统的评估视角，无法科学对比不同模型在特定数据集上的表现，导致最终上线的模型效果不稳定。\n\n### 使用 Visual-Tracking-Development 后\n- **技术路线清晰**：直接参考项目整理的 VOTSurvey 和 DL4VT 等权威综述，团队迅速锁定了基于 Siamese 网络和最新 SAM 结合的几种高潜力架构。\n- **研发效率倍增**：利用项目中汇总的 TAM、SAM-Track 等开源代码链接，开发人员可直接复用成熟模块，将算法验证周期从数周缩短至几天。\n- **追踪精度跃升**：通过引入项目中推荐的 Motion-Aware Memory 等先进机制，系统在人员交叉遮挡和剧烈运动场景下的目标丢失率降低了 60%。\n- **评估体系完善**：依据 VOTBook 提供的评估框架，团队建立了标准化的测试流程，确保模型迭代方向始终指向实际业务指标的提升。\n\nVisual-Tracking-Development 通过整合前沿论文与优质代码资源，将视觉跟踪领域的探索从“大海捞针”转变为“按图索骥”，极大加速了高性能监控系统的落地进程。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FDavidZhangdw_Visual-Tracking-Development_624d6566.png","DavidZhangdw","David Zhang","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002FDavidZhangdw_cefb8e4a.jpg",null,"Zhejiang Normal University","https:\u002F\u002Fgithub.com\u002FDavidZhangdw",[79],{"name":80,"color":81,"percentage":82},"Python","#3572A5",100,563,63,"2026-04-04T20:09:18",5,"","未说明",{"notes":90,"python":88,"dependencies":91},"该 README 文件主要是一个视觉跟踪领域的论文和开源项目列表（包括 VOTSurvey, SAMURAI, SAM, AAAI\u002FICCV\u002FNeurIPS 等会议的最新论文），并非一个具体的可执行软件工具。因此，文中未包含任何关于操作系统、GPU、内存、Python 版本或依赖库的具体安装和运行环境需求。用户若需运行列表中提到的具体算法（如 SAMURAI, LoRATv2 等），需前往各项目对应的独立代码仓库查看其特定的环境配置说明。",[],[93,14],"其他",[95,96,97],"deep-learning","tracking","benchmark","2026-03-27T02:49:30.150509","2026-04-06T19:56:41.756350",[],[]]