[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-Psychic-DL--Awesome-Traffic-Agent-Trajectory-Prediction":3,"tool-Psychic-DL--Awesome-Traffic-Agent-Trajectory-Prediction":64},[4,17,27,35,43,56],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":16},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,3,"2026-04-05T11:01:52",[13,14,15],"开发框架","图像","Agent","ready",{"id":18,"name":19,"github_repo":20,"description_zh":21,"stars":22,"difficulty_score":23,"last_commit_at":24,"category_tags":25,"status":16},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",138956,2,"2026-04-05T11:33:21",[13,15,26],"语言模型",{"id":28,"name":29,"github_repo":30,"description_zh":31,"stars":32,"difficulty_score":23,"last_commit_at":33,"category_tags":34,"status":16},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 
图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",107662,"2026-04-03T11:11:01",[13,14,15],{"id":36,"name":37,"github_repo":38,"description_zh":39,"stars":40,"difficulty_score":23,"last_commit_at":41,"category_tags":42,"status":16},3704,"NextChat","ChatGPTNextWeb\u002FNextChat","NextChat 是一款轻量且极速的 AI 助手，旨在为用户提供流畅、跨平台的大模型交互体验。它完美解决了用户在多设备间切换时难以保持对话连续性，以及面对众多 AI 模型不知如何统一管理的痛点。无论是日常办公、学习辅助还是创意激发，NextChat 都能让用户随时随地通过网页、iOS、Android、Windows、MacOS 或 Linux 端无缝接入智能服务。\n\n这款工具非常适合普通用户、学生、职场人士以及需要私有化部署的企业团队使用。对于开发者而言，它也提供了便捷的自托管方案，支持一键部署到 Vercel 或 Zeabur 等平台。\n\nNextChat 的核心亮点在于其广泛的模型兼容性，原生支持 Claude、DeepSeek、GPT-4 及 Gemini Pro 等主流大模型，让用户在一个界面即可自由切换不同 AI 能力。此外，它还率先支持 MCP（Model Context Protocol）协议，增强了上下文处理能力。针对企业用户，NextChat 提供专业版解决方案，具备品牌定制、细粒度权限控制、内部知识库整合及安全审计等功能，满足公司对数据隐私和个性化管理的高标准要求。",87618,"2026-04-05T07:20:52",[13,26],{"id":44,"name":45,"github_repo":46,"description_zh":47,"stars":48,"difficulty_score":23,"last_commit_at":49,"category_tags":50,"status":16},2268,"ML-For-Beginners","microsoft\u002FML-For-Beginners","ML-For-Beginners 是由微软推出的一套系统化机器学习入门课程，旨在帮助零基础用户轻松掌握经典机器学习知识。这套课程将学习路径规划为 12 周，包含 26 节精炼课程和 52 道配套测验，内容涵盖从基础概念到实际应用的完整流程，有效解决了初学者面对庞大知识体系时无从下手、缺乏结构化指导的痛点。\n\n无论是希望转型的开发者、需要补充算法背景的研究人员，还是对人工智能充满好奇的普通爱好者，都能从中受益。课程不仅提供了清晰的理论讲解，还强调动手实践，让用户在循序渐进中建立扎实的技能基础。其独特的亮点在于强大的多语言支持，通过自动化机制提供了包括简体中文在内的 50 多种语言版本，极大地降低了全球不同背景用户的学习门槛。此外，项目采用开源协作模式，社区活跃且内容持续更新，确保学习者能获取前沿且准确的技术资讯。如果你正寻找一条清晰、友好且专业的机器学习入门之路，ML-For-Beginners 
将是理想的起点。",84991,"2026-04-05T10:45:23",[14,51,52,53,15,54,26,13,55],"数据工具","视频","插件","其他","音频",{"id":57,"name":58,"github_repo":59,"description_zh":60,"stars":61,"difficulty_score":10,"last_commit_at":62,"category_tags":63,"status":16},3128,"ragflow","infiniflow\u002Fragflow","RAGFlow 是一款领先的开源检索增强生成（RAG）引擎，旨在为大语言模型构建更精准、可靠的上下文层。它巧妙地将前沿的 RAG 技术与智能体（Agent）能力相结合，不仅支持从各类文档中高效提取知识，还能让模型基于这些知识进行逻辑推理和任务执行。\n\n在大模型应用中，幻觉问题和知识滞后是常见痛点。RAGFlow 通过深度解析复杂文档结构（如表格、图表及混合排版），显著提升了信息检索的准确度，从而有效减少模型“胡编乱造”的现象，确保回答既有据可依又具备时效性。其内置的智能体机制更进一步，使系统不仅能回答问题，还能自主规划步骤解决复杂问题。\n\n这款工具特别适合开发者、企业技术团队以及 AI 研究人员使用。无论是希望快速搭建私有知识库问答系统，还是致力于探索大模型在垂直领域落地的创新者，都能从中受益。RAGFlow 提供了可视化的工作流编排界面和灵活的 API 接口，既降低了非算法背景用户的上手门槛，也满足了专业开发者对系统深度定制的需求。作为基于 Apache 2.0 协议开源的项目，它正成为连接通用大模型与行业专有知识之间的重要桥梁。",77062,"2026-04-04T04:44:48",[15,14,13,26,54],{"id":65,"github_repo":66,"name":67,"description_en":68,"description_zh":69,"ai_summary_zh":69,"readme_en":70,"readme_zh":71,"quickstart_zh":72,"use_case_zh":73,"hero_image_url":74,"owner_login":75,"owner_name":76,"owner_avatar_url":77,"owner_bio":78,"owner_company":79,"owner_location":80,"owner_email":81,"owner_twitter":82,"owner_website":83,"owner_url":83,"languages":82,"stars":84,"forks":85,"last_commit_at":86,"license":87,"difficulty_score":88,"env_os":89,"env_gpu":89,"env_ram":89,"env_deps":90,"category_tags":93,"github_topics":94,"view_count":10,"oss_zip_url":82,"oss_zip_packed_at":82,"status":16,"created_at":102,"updated_at":103,"faqs":104,"releases":115},695,"Psychic-DL\u002FAwesome-Traffic-Agent-Trajectory-Prediction","Awesome-Traffic-Agent-Trajectory-Prediction","This is a list of papers related to traffic agent trajectory prediction. 
","Awesome-Traffic-Agent-Trajectory-Prediction 是一个专注于交通智能体轨迹预测领域的开源资源聚合库。它系统地整理了该方向下的学术论文、公开数据集及核心代码实现，涵盖从传统统计方法到深度学习的前沿方案。\n\n针对自动驾驶与智慧交通研究中文献分散、检索效率低的问题，该项目提供了清晰的时间线与分类导航，帮助用户快速锁定关键资料。无论是高校师生、科研人员还是从事自动驾驶算法开发的工程师，都能在此找到有价值的学习素材与项目参考。\n\n资源持续更新至 2024 年 11 月，不仅收录了 2018 年前的经典文献，还重点追踪了近年的顶会成果，甚至涉及去噪扩散概率模型（DDPM）等新兴技术方向。维护团队鼓励社区贡献，欢迎通过 Pull Request 补充新资源或加入交流群组。对于希望深入理解多智能体交互机制的研究者而言，这是一份结构清晰、内容详实的进阶指南。","# Awesome-Traffic-Agent-Trajectory-Prediction\n![Version](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FVersion-1.0-ff69b4.svg) ![LastUpdated](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FLastUpdated-2024.11-lightgrey.svg) ![Topic](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FTopic-trajectory--prediction-yellow.svg?logo=github) ![Awesome](https:\u002F\u002Fawesome.re\u002Fbadge.svg) ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F-C++-00599C?style=flat-square&logo=cplusplus&logoColor=FFFFFF) ![Language](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F-Python-F37626?style=flat-square&logo=python&logoColor=FFFFFF) ![Framework](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F-Pytorch-EE4C2C?style=flat-square&logo=pytorch&logoColor=FFFFFF) 
![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F-ChatGPT-412991?style=flat-square&logo=openai&logoColor=FFFFFF)\n\n\n![image](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FPsychic-DL_Awesome-Traffic-Agent-Trajectory-Prediction_readme_9b524df57ef8.png)\n![image](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FPsychic-DL_Awesome-Traffic-Agent-Trajectory-Prediction_readme_1ab81fe9ac60.png)\n![image](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FPsychic-DL_Awesome-Traffic-Agent-Trajectory-Prediction_readme_5ed6ec4a272b.png)\n![image](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FPsychic-DL_Awesome-Traffic-Agent-Trajectory-Prediction_readme_9be25c5c8e99.png)\n![image](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FPsychic-DL_Awesome-Traffic-Agent-Trajectory-Prediction_readme_b45257e45fd3.png)\n![image](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FPsychic-DL_Awesome-Traffic-Agent-Trajectory-Prediction_readme_dc42b54658b4.png)\n\n# 🤝 Contributions\n\nThis is a list of the latest research materials (datasets, papers, and codes) related to traffic agent trajectory prediction. Continuously updated, welcome to pay attention!\n\n**Maintainers: Chaoneng Li (Lanzhou Jiaotong University)           Emails: xdchaonengli@163.com**\n\nPlease feel free to pull requests to add new resources or send emails to us for questions, discussions, and collaborations. **We would like to connect more students, teachers, and bigwigs in the field of multi-agent trajectory prediction, and if you would like to do the same, you can add me on WeChat (CN15691969157). 
Let's create the Trajectory Prediction Community Group together!**\n\n# 🧐 Citation\nPlease consider citing our papers if this repository accelerates your research:\n```\n@article{11222824,\n  author={Li, Chaoneng and Wang, Xiaolong and Zhao, Shuxu and Wang, Xiaohu and Ye, Ze},\n  journal={IEEE Transactions on Vehicular Technology}, \n  title={DiffMATP: Interaction-Aware Multi-Agent Trajectory Prediction via Denoising Diffusion Models}, \n  year={2025},\n  pages={1-14},\n  doi={10.1109\u002FTVT.2025.3627215}}\n@inproceedings{li2022fidelity,\n  title={Fidelity Evaluation of Virtual Traffic Based on Anomalous Trajectory Detection},\n  author={Li, Chaoneng and Chao, Qianwen and Feng, Guanwen and Wang, Qiongyan and Liu, Pengfei and Li, Yunan and Miao, Qiguang},\n  booktitle={2022 IEEE\u002FRSJ International Conference on Intelligent Robots and Systems (IROS)},\n  pages={8157--8164},\n  year={2022},\n  organization={IEEE}\n}\n@article{li2024difftad,\n  title={DiffTAD: Denoising diffusion probabilistic models for vehicle trajectory anomaly detection},\n  author={Li, Chaoneng and Feng, Guanwen and Li, Yunan and Liu, Ruyi and Miao, Qiguang and Chang, Liang},\n  journal={Knowledge-Based Systems},\n  volume={286},\n  pages={111387},\n  year={2024},\n  publisher={Elsevier}\n}\n```\n******\n\n# 📜 Table of Contents\n\n\u003C!-- TOC depthFrom:1 depthTo:6 withLinks:1 updateOnSave:1 orderedList:0 -->\n- [Awesome-Traffic-Agent-Trajectory-Prediction](#awesome-traffic-agent-trajectory-prediction)\n- [🤝 Contributions](#-contributions)\n- [🧐 Citation](#-citation)\n- [📜 Table of Contents](#-table-of-contents)\n- [📚 Traditional Methods](#-traditional-methods)\n- [📚 2018 and Before Conference and Journal Papers](#-2018-and-before-conference-and-journal-papers)\n  - [Conference Papers](#conference-papers)\n  - [Journal Papers](#journal-papers)\n  - [Others](#others)\n- [📚 2019 Conference and Journal Papers](#-2019-conference-and-journal-papers)\n  - [Conference Papers 
2019](#conference-papers-2019)\n  - [Journal Papers 2019](#journal-papers-2019)\n  - [Others 2019](#others-2019)\n- [📚 2020 Conference and Journal Papers](#-2020-conference-and-journal-papers)\n  - [Conference Papers 2020](#conference-papers-2020)\n  - [Journal Papers 2020](#journal-papers-2020)\n  - [Others 2020](#others-2020)\n- [📚 2021 Conference and Journal Papers](#-2021-conference-and-journal-papers)\n  - [Conference Papers 2021](#conference-papers-2021)\n  - [Journal Papers 2021](#journal-papers-2021)\n  - [Others 2021](#others-2021)\n- [📚 2022 Conference and Journal Papers](#-2022-conference-and-journal-papers)\n  - [Conference Papers 2022](#conference-papers-2022)\n  - [Journal Papers 2022](#journal-papers-2022)\n  - [Others 2022](#others-2022)\n- [📚 2023 Conference and Journal Papers](#-2023-conference-and-journal-papers)\n  - [Conference Papers 2023](#conference-papers-2023)\n  - [Journal Papers 2023](#journal-papers-2023)\n  - [Others 2023](#others-2023)\n- [📚 2024 Conference and Journal Papers](#-2024-conference-and-journal-papers)\n  - [Conference Papers 2024](#conference-papers-2024)\n  - [Journal Papers 2024](#journal-papers-2024)\n  - [Others 2024](#others-2024)\n- [📚 2025 Conference and Journal Papers](#-2025-conference-and-journal-papers)\n  - [Conference Papers 2025](#conference-papers-2025)\n  - [Journal Papers 2025](#journal-papers-2025)\n  - [Others 2025](#others-2025)\n- [📚 2026 Conference and Journal Papers](#-2026-conference-and-journal-papers)\n  - [Conference Papers 2026](#conference-papers-2026)\n  - [Journal Papers 2026](#journal-papers-2026)\n  - [Others 2026](#others-2026)\n- [📚 Related Review Papers](#-related-review-papers)\n- [📚 Datasets](#-datasets)\n  - [Reviews about Datasets](#reviews-about-datasets)\n  - [Vehicles Publicly Available Datasets](#vehicles-publicly-available-datasets)\n  - [Pedestrians Publicly Available Datasets](#pedestrians-publicly-available-datasets)\n  - [Others Agents Datasets](#others-agents-datasets)\n   
 - [Aircraft](#aircraft)\n    - [Ship](#ship)\n    - [Hurricane and Animal](#hurricane-and-animal)\n- [🌹 Acknowledgments](#-acknowledgments)\n- [🌟 Star History](#-star-history)\n\n******\n\n# 📚 Traditional Methods\n* Social force model for pedestrian dynamics, Physical review E 1995. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002Fcond-mat\u002F9805244.pdf?ref=https:\u002F\u002Fgithubhelp.com)]\n* Simulating dynamical features of escape panic, Nature 2000. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002Fcond-mat\u002F0009448.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fobisargoni\u002FrepastInterSim)]\n* Congested traffic states in empirical observations and microscopic simulations, Physical review E 2000. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002Fcond-mat\u002F0002177.pdf)]\n* A methodology for automated trajectory prediction analysis, AIAA Guidance, Navigation, and Control Conference and Exhibit 2004. [[paper](http:\u002F\u002Fciteseerx.ist.psu.edu\u002Fviewdoc\u002Fdownload?doi=10.1.1.76.2942&rep=rep1&type=pdf)]\n* Continuum crowds, ACM Transactions on Graphics (TOG 2006). [[paper](https:\u002F\u002Fwww.khoury.neu.edu\u002Fhome\u002Fscooper\u002Findex_files\u002Fpub\u002Ftreuille2006continuum.pdf)]\n* New Algorithms for Aircraft Intent Inference and Trajectory Prediction, Journal of guidance, control, and dynamics 2007. [[paper](https:\u002F\u002Fsci-hub.hkvisa.net\u002F10.2514\u002F1.26750)]\n* Reciprocal Velocity Obstacles for Real-Time Multi-Agent Navigation, ICRA 2008. [[paper](http:\u002F\u002Fciteseerx.ist.psu.edu\u002Fviewdoc\u002Fdownload?doi=10.1.1.161.9395&rep=rep1&type=pdf)]\n* You’ll Never Walk Alone: Modeling Social Behavior for Multi-target Tracking, ICCV 2009. 
[[paper](http:\u002F\u002Fvision.cse.psu.edu\u002Fcourses\u002FTracking\u002Fvlpr12\u002FPellegriniNeverWalkAlone.pdf)]\n* Real time trajectory prediction for collision risk estimation between vehicles, International Conference on Intelligent Computer Communication and Processing 2009. [[paper](https:\u002F\u002Fhal.inria.fr\u002Finria-00438624\u002Fdocument)]\n* People Tracking with Human Motion Predictions from Social Forces, ICRA 2010. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=5509779)]\n* Unfreezing the robot: Navigation in dense, interacting crowds, IROS 2010. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=5654369)]\n* Who are you with and where are you going?, CVPR 2011. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=5995468)]\n* Social force model with explicit collision prediction, Europhysics Letters 2011. [[paper](https:\u002F\u002Fiopscience.iop.org\u002Farticle\u002F10.1209\u002F0295-5075\u002F93\u002F68005\u002Fpdf)]\n* A Machine Learning Approach to Trajectory Prediction, AIAA Guidance, Navigation, and Control (GNC) Conference 2013. [[paper](https:\u002F\u002Fsci-hub.hkvisa.net\u002F10.2514\u002F6.2013-4782)]\n* Cyclist Social Force Model at Unsignalized Intersections With Heterogeneous Traffic, IEEE Transactions on Industrial Informatics 2016. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=7536132)]\n* Walking Ahead: The Headed Social Force Model, PLoS ONE 2017. [[paper](https:\u002F\u002Fjournals.plos.org\u002Fplosone\u002Farticle\u002Ffile?id=10.1371\u002Fjournal.pone.0169734&type=printable)]\n* AutoRVO: Local Navigation with Dynamic Constraints in Dense Heterogeneous Traffic, arXiv preprint arXiv:1804.02915, 2018. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1804.02915.pdf)]\n* Social force models for pedestrian traffic – state of the art, Transport reviews 2018. 
[[paper](https:\u002F\u002Fwww.researchgate.net\u002Fprofile\u002FXu-Chen-67\u002Fpublication\u002F320872442_Social_force_models_for_pedestrian_traffic_-_state_of_the_art\u002Flinks\u002F5bce680b4585152b144eac39\u002FSocial-force-models-for-pedestrian-traffic-state-of-the-art.pdf)]\n\n\n# 📚 2018 and Before Conference and Journal Papers\n## Conference Papers\n* Social GAN: Socially Acceptable Trajectories with Generative Adversarial Networks, CVPR 2018. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fpapers\u002FGupta_Social_GAN_Socially_CVPR_2018_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fagrimgupta92\u002Fsgan)]\n* Encoding Crowd Interaction with Deep Neural Network for Pedestrian Trajectory Prediction, CVPR 2018. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fpapers\u002FXu_Encoding_Crowd_Interaction_CVPR_2018_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fsvip-lab\u002FCIDNN)]\n* Fast and Furious: Real Time End-to-End 3D Detection, Tracking and Motion Forecasting with a Single Convolutional Net, CVPR 2018. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fpapers\u002FLuo_Fast_and_Furious_CVPR_2018_paper.pdf)]\n* MX-LSTM: Mixing Tracklets and Vislets to Jointly Forecast Trajectories and Head Poses, CVPR 2018. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fpapers\u002FHasan_MX-LSTM_Mixing_Tracklets_CVPR_2018_paper.pdf)]\n* Long-Term On-Board Prediction of People in Traffic Scenes under Uncertainty, CVPR 2018. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fpapers\u002FBhattacharyya_Long-Term_On-Board_Prediction_CVPR_2018_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fapratimbhattacharyya18\u002Fonboard_long_term_prediction)]\n* R2P2: A ReparameteRized Pushforward Policy for Diverse, Precise Generative Path Forecasting, ECCV 2018. 
[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fpapers\u002FNicholas_Rhinehart_R2P2_A_ReparameteRized_ECCV_2018_paper.pdf)]\n* Where Will They Go? Predicting Fine-Grained Adversarial Multi-Agent Motion using Conditional Variational Autoencoders, ECCV 2018. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fpapers\u002FPanna_Felsen_Where_Will_They_ECCV_2018_paper.pdf)]\n* Generating Comfortable, Safe and Comprehensible Trajectories for Automated Vehicles in Mixed Traffic, International Conference on Intelligent Transportation Systems (ITSC 2018). [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8569658)]\n* Set-Based Prediction of Pedestrians in Urban Environments Considering Formalized Traffic Rules, ITSC 2018. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8569434)]\n* Intention-aware Long Horizon Trajectory Prediction of Surrounding Vehicles using Dual LSTM Networks, ITSC 2018. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8569595)]\n* Social Attention: Modeling Attention in Human Crowds, ICRA 2018. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8460504)]\n* A Data-driven Model for Interaction-Aware Pedestrian Motion Prediction in Object Cluttered Environments, ICRA 2018. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8461157)]\n* Multimodal Probabilistic Model-Based Planning for Human-Robot Interaction, ICRA 2018. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8460766)] [[code](https:\u002F\u002Fgithub.com\u002FStanfordASL\u002FTrafficWeavingCVAE)]\n* GD-GAN: Generative Adversarial Networks for Trajectory Prediction and Group Detection in Crowds, ACCV 2018. 
[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1812.07667.pdf)]\n* Multi-Modal Trajectory Prediction of Surrounding Vehicles with Maneuver based LSTMs, IEEE Intelligent Vehicles Symposium (IV 2018). [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8500493)]\n* Sequence-to-Sequence Prediction of Vehicle Trajectory via LSTM Encoder-Decoder Architecture, IV 2018. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8500658)]\n* Predicting Trajectories of Vehicles Using Large-Scale Motion Priors, IV 2018. [[paper](http:\u002F\u002Fmssuraj.com\u002Fpublications\u002F2018_IV_0596.pdf)]\n* Road Infrastructure Indicators for Trajectory Prediction, IV 2018. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8500678)]\n* Mixed Traffic Trajectory Prediction Using LSTM–Based Models in Shared Space, Annual International Conference on Geographic Information Science 2018. [[paper](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-319-78208-9_16.pdf)]\n* SS-LSTM: A Hierarchical LSTM Model for Pedestrian Trajectory Prediction, WACV 2018. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8354239)] [[code](https:\u002F\u002Fgithub.com\u002Fxuehaouwa\u002FSS-LSTM)]\n* “Seeing is Believing”: Pedestrian Trajectory Forecasting Using Visual Frustum of Attention, WACV 2018. [[paper](http:\u002F\u002Firtizahasan.com\u002FWACV_2018_Seeing_is_believing.pdf)]\n* Tracking by Prediction: A Deep Generative Model for Mutli-person Localisation and Tracking, WACV 2018. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8354232)]\n* Context-Aware Trajectory Prediction, International Conference on Pattern Recognition (ICPR 2018). 
[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8545447)]\n* Transferable Pedestrian Motion Prediction Models at Intersections, IROS 2018. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8593783)]\n* Generative Modeling of Multimodal Multi-Human Behavior, IROS 2018. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8594393)] [[code](https:\u002F\u002Fgithub.com\u002FStanfordASL\u002FNHumanModeling)]\n* Building Prior Knowledge: A Markov Based Pedestrian Prediction Model Using Urban Environmental Data, International Conference on Control, Automation, Robotics and Vision (ICARCV 2018). [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8581368)]\n* Cyclist Trajectory Prediction Using Bidirectional Recurrent Neural Networks, Australasian Joint Conference on Artificial Intelligence 2018. [[paper](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-030-03991-2_28.pdf)]\n* Attention Is All You Need, NIPS 2017. [[paper](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2017\u002Ffile\u002F3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf)]\n* Bi-Prediction: Pedestrian Trajectory Prediction Based on Bidirectional LSTM Classification, International Conference on Digital Image Computing: Techniques and Applications (DICTA 2017). [[paper](https:\u002F\u002Fwww.researchgate.net\u002Fprofile\u002FDu-Huynh-2\u002Fpublication\u002F322001876_Bi-Prediction_Pedestrian_Trajectory_Prediction_Based_on_Bidirectional_LSTM_Classification\u002Flinks\u002F5c03cef4a6fdcc1b8d5029bb\u002FBi-Prediction-Pedestrian-Trajectory-Prediction-Based-on-Bidirectional-LSTM-Classification.pdf)]\n* Probabilistic Vehicle Trajectory Prediction over Occupancy Grid Map via Recurrent Neural Network, ITSC 2017. 
[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8317943)]\n* Natural Vision Based Method for Predicting Pedestrian Behaviour in Urban Environments, ITSC 2017. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8317848)]\n* How good is my prediction? Finding a similarity measure for trajectory prediction evaluation, ITSC 2017. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8317825)]\n* An LSTM network for highway trajectory prediction, ITSC 2017. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8317913)]\n* DESIRE: Distant Future Prediction in Dynamic Scenes with Interacting Agents, CVPR 2017. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fpapers\u002FLee_DESIRE_Distant_Future_CVPR_2017_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Ftdavchev\u002FDESIRE)]\n* Forecasting Interactive Dynamics of Pedestrians with Fictitious Play, CVPR 2017. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fpapers\u002FMa_Forecasting_Interactive_Dynamics_CVPR_2017_paper.pdf)]\n* Forecast the Plausible Paths in Crowd Scenes, IJCAI 2017. [[paper](https:\u002F\u002Fwww.ijcai.org\u002Fproceedings\u002F2017\u002F0386.pdf)]\n* What will Happen Next? Forecasting Player Moves in Sports Videos, ICCV 2017. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fpapers\u002FFelsen_What_Will_Happen_ICCV_2017_paper.pdf)]\n* Using road topology to improve cyclist path prediction, IV 2017. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=7995734)]\n* Short-term 4D Trajectory Prediction Using Machine Learning Methods, Proc. SID 2017. 
[[paper](https:\u002F\u002Fwww.sesarju.eu\u002Fsites\u002Fdefault\u002Ffiles\u002Fdocuments\u002Fsid\u002F2017\u002FSIDs_2017_paper_11.pdf)]\n* Generating Long-term Trajectories Using Deep Hierarchical Networks, NIPS 2016. [[paper](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2016\u002Ffile\u002Ffe8c15fed5f808006ce95eddb7366e35-Paper.pdf)]\n* Learning Social Etiquette: Human Trajectory Understanding In Crowded Scenes, ECCV 2016. [[paper](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-319-46484-8_33.pdf)]\n* Knowledge Transfer for Scene-Specific Motion Prediction, ECCV 2016. [[paper](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-319-46448-0_42.pdf)]\n* Structural-RNN: Deep Learning on Spatio-Temporal Graphs, CVPR 2016. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2016\u002Fpapers\u002FJain_Structural-RNN_Deep_Learning_CVPR_2016_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fasheshjain399\u002FRNNexp)]\n* Visual Path Prediction in Complex Scenes with Crowded Moving Objects, CVPR 2016. [[paper](https:\u002F\u002Fwww.cv-foundation.org\u002Fopenaccess\u002Fcontent_cvpr_2016\u002Fpapers\u002FYoo_Visual_Path_Prediction_CVPR_2016_paper.pdf)]\n* Social LSTM: Human Trajectory Prediction in Crowded Spaces, CVPR 2016. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2016\u002Fpapers\u002FAlahi_Social_LSTM_Human_CVPR_2016_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fquancore\u002Fsocial-lstm)]\n* Comparison and Evaluation of Pedestrian Motion Models for Vehicle Safety Systems, ITSC 2016. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=7795912)]\n* Intent-aware long-term prediction of pedestrian motion, ICRA 2016. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=7487409)]\n* Novel planning-based algorithms for human motion prediction, ICRA 2016. 
[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=7487505)]\n* GLMP-realtime pedestrian path prediction using global and local movement patterns, ICRA 2016. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=7487768)]\n* Augmented Dictionary Learning for Motion Prediction, ICRA 2016. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=7487407&tag=1)]\n* Predicting Future Agent Motions for Dynamic Environments, International Conference on Machine Learning and Applications (ICMLA 2016). [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=7838128)]\n* Trajectory prediction of cyclists using a physical model and an artificial neural network, IV 2016. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=7535484)]\n* STF-RNN: Space Time Features-based Recurrent Neural Network for predicting people next location, IEEE Symposium Series on Computational Intelligence (SSCI 2016). [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=7849919)]\n* Trajectory analysis and prediction for improved pedestrian safety: Integrated framework and evaluations, IV 2015. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=7225707)]\n* Bayesian intention inference for trajectory prediction with an unknown goal destination, IROS 2015. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=7354203)]\n* Unsupervised robot learning to predict person motion, ICRA 2015. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=7139254)]\n* A Controlled Interactive Multiple Model Filter for Combined Pedestrian Intention Recognition and Path Prediction, ITSC 2015. 
[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=7313129)]\n* Socially-aware Large-scale Crowd Forecasting, CVPR 2014. [[paper](https:\u002F\u002Fwww.cv-foundation.org\u002Fopenaccess\u002Fcontent_cvpr_2014\u002Fpapers\u002FAlahi_Socially-aware_Large-scale_Crowd_2014_CVPR_paper.pdf)]\n* Patch to the Future: Unsupervised Visual Prediction, CVPR 2014. [[paper](https:\u002F\u002Fwww.cv-foundation.org\u002Fopenaccess\u002Fcontent_cvpr_2014\u002Fpapers\u002FWalker_Patch_to_the_2014_CVPR_paper.pdf)]\n* Online maneuver recognition and multimodal trajectory prediction for intersection assistance using non-parametric regression, IV 2014. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=6856480)]\n* Pedestrian Path Prediction using Body Language Traits, IV 2014. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=6856498)]\n* Behavior estimation for a complete framework for human motion prediction in crowded environments, ICRA 2014. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=6907734)]\n* Learning to predict trajectories of cooperatively navigating agents, ICRA 2014. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=6907442)]\n* Pedestrian's Trajectory Forecast in Public Traffic with Artificial Neural Networks, International Conference on Pattern Recognition (ICPR 2014). [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=6977417)]\n* Context-Based Pedestrian Path Prediction, ECCV 2014. [[paper](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-319-10599-4_40.pdf)]\n* Bayesian, Maneuver-Based, Long-Term Trajectory Prediction and Criticality Assessment for Driver Assistance Systems, ITSC 2014. 
[[paper](https:\u002F\u002Fwww.researchgate.net\u002Fprofile\u002FMatthias-Schreier\u002Fpublication\u002F266954831_Bayesian_Maneuver-Based_Long-Term_Trajectory_Prediction_and_Criticality_Assessment_for_Driver_Assistance_Systems\u002Flinks\u002F543fb6250cf2be1758cf3c39\u002FBayesian-Maneuver-Based-Long-Term-Trajectory-Prediction-and-Criticality-Assessment-for-Driver-Assistance-Systems.pdf)]\n* Trajectory generator for autonomous vehicles in urban environments, ICRA 2013. [[paper](https:\u002F\u002Fhal.inria.fr\u002Ffile\u002Findex\u002Fdocid\u002F789760\u002Ffilename\u002FICRA_Perez_et_al_2360.pdf)]\n* Vehicle trajectory prediction based on motion model and maneuver recognition, IROS 2013. [[paper](https:\u002F\u002Fhal.archives-ouvertes.fr\u002Fhal-00881100\u002Fdocument)]\n* Predictive maneuver evaluation for enhancement of Car-to-X mobility data, IV 2012. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=6232217)]\n* Probabilistic trajectory prediction with Gaussian mixture models, IV 2012. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=6232277)]\n* Exploiting map information for driver intention estimation at road intersections, IV 2011. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=5940452)]\n* Trajectory Prediction: Learning to Map Situations to Robot Trajectories, ICML 2009. [[paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F1553374.1553433)]\n* Monte Carlo based Threat Assessment: Analysis and Improvements, IV 2007. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=4290120)]\n* Gaussian Processes in Machine Learning, Summer school on machine learning 2003. 
[[paper](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-540-28650-9_4.pdf)]\n\n## Journal Papers\n* Soft + Hardwired Attention: An LSTM Framework for Human Trajectory Prediction and Abnormal Event Detection, Neural networks 2018. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1702.05552.pdf?ref=https:\u002F\u002Fgithubhelp.com)]\n* Long-term path prediction in urban scenarios using circular distributions, Image and Vision Computing 2018. [[paper](https:\u002F\u002Freader.elsevier.com\u002Freader\u002Fsd\u002Fpii\u002FS0262885617301853?token=DAD7B9F10835E05341405E75C5AB9F8F114FE99410544AD2BB4EFAA23BFC99D63EA8811C4A8C4F679593A61D0D3E35B6&originRegion=eu-west-1&originCreation=20220509082210)]\n* An Efficient Algorithm for Optimal Trajectory Generation for Heterogeneous Multi-Agent Systems in Non-Convex Environments, RAL 2018. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8260912)]\n* Network-Wide Vehicle Trajectory Prediction in Urban Traffic Networks using Deep Learning, Transportation Research Record 2018. [[paper](https:\u002F\u002Fwww.researchgate.net\u002Fprofile\u002FSeongjin-Choi-2\u002Fpublication\u002F327524033_Network-Wide_Vehicle_Trajectory_Prediction_in_Urban_Traffic_Networks_using_Deep_Learning\u002Flinks\u002F5e3a123e458515072d8015d2\u002FNetwork-Wide-Vehicle-Trajectory-Prediction-in-Urban-Traffic-Networks-using-Deep-Learning.pdf)]\n* Intent Prediction of Pedestrians via Motion Trajectories Using Stacked Recurrent Neural Networks, IEEE Transactions on Intelligent Vehicles 2018. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8481390)]\n* How Would Surround Vehicles Move? A Unified Framework for Maneuver Classification and Motion Prediction, IEEE Transactions on Intelligent Vehicles 2018. 
[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F8286935)]\n* Pedestrian Path, Pose, and Intention Prediction Through Gaussian Process Dynamical Models and Pedestrian Activity Recognition, TITS 2018. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8370119)]\n* Dictionary-based Fidelity Measure for Virtual Traffic, TVCG 2018. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8481568)]\n* Realistic Data-Driven Traffic Flow Animation Using Texture Synthesis, TVCG 2017. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=7814314)]\n* Vehicle Trajectory Prediction by Integrating Physics- and Maneuver-Based Approaches Using Interactive Multiple Models, IEEE Transactions on Industrial Electronics 2017. [[paper](https:\u002F\u002Fwww.researchgate.net\u002Fprofile\u002FJianqiang-Wang\u002Fpublication\u002F321738692_Vehicle_Trajectory_Prediction_by_Integrating_Physics-_and_Maneuver-Based_Approaches_Using_Interactive_Multiple_Models\u002Flinks\u002F5fcde8c445851568d1469e52\u002FVehicle-Trajectory-Prediction-by-Integrating-Physics-and-Maneuver-Based-Approaches-Using-Interactive-Multiple-Models.pdf)]\n* Real-Time Certified Probabilistic Pedestrian Forecasting, RAL 2017. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=7959047)]\n* Deep Learning Driven Visual Path Prediction from a Single Image, TIP 2016. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=7576681)]\n* Age and Group-driven Pedestrian Behaviour: from Observations to Simulations, Collective Dynamics 2016. [[paper](https:\u002F\u002Fcollective-dynamics.eu\u002Findex.php\u002Fcod\u002Farticle\u002Fview\u002FA3\u002F5)]\n* An Integrated Approach to Maneuver-Based Trajectory Prediction and Criticality Assessment in Arbitrary Road Environments, TITS 2016. 
[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=7412746)]\n* Trajectory Data and Flow Characteristics of Mixed Traffic, Transportation Research Record 2015. [[paper](https:\u002F\u002Fwww.researchgate.net\u002Fprofile\u002FGowri-Asaithambi\u002Fpublication\u002F284708700_Trajectory_Data_and_Flow_Characteristics_of_Mixed_Traffic\u002Flinks\u002F5710718008ae68dc79097605\u002FTrajectory-Data-and-Flow-Characteristics-of-Mixed-Traffic.pdf)]\n* Predicting and recognizing human interactions in public spaces, Journal of Real-Time Image Processing 2015. [[paper](https:\u002F\u002Ffabiopoiesi.github.io\u002Ffiles\u002Fpapers\u002Fjournals\u002F2014_JRTIP_PredictingRecognizingInteractionsPublic_Poiesi_Cavallaro.pdf)]\n* Learning Collective Crowd Behaviors with Dynamic Pedestrian-Agents, International Journal of Computer Vision 2015. [[paper](https:\u002F\u002Fdspace.mit.edu\u002Fbitstream\u002Fhandle\u002F1721.1\u002F103360\u002F11263_2014_735_ReferencePDF.pdf?sequence=1&isAllowed=y)]\n* Real-Time Predictive Modeling and Robust Avoidance of Pedestrians with Uncertain, Changing Intentions, Algorithmic Foundations of Robotics XI 2015. [[paper](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-319-16595-0_10.pdf)]\n* BRVO: Predicting pedestrian trajectories using velocity-space reasoning, International Journal of Robotics Research 2015. [[paper](https:\u002F\u002Fwww.cs.cityu.edu.hk\u002F~rynson\u002Fpapers\u002Fijrr15.pdf)]\n* Learning intentions for improved human motion prediction, Robotics and Autonomous Systems 2014. [[paper](https:\u002F\u002Fwww.techunited.nl\u002Fmedia\u002Fimages\u002FKwalificatie%20materiaal%202014\u002FElfring_2014.pdf)]\n* A Self-Adaptive Parameter Selection Trajectory Prediction Approach via Hidden Markov Models, TITS 2014. 
[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=6918501)]\n* TraPlan: An Effective Three-in-One Trajectory-Prediction Model in Transportation Networks, TITS 2014. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=6899589)]\n* Will the Pedestrian Cross? A Study on Pedestrian Path Prediction, TITS 2013. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=6632960)]\n* Mobile Agent Trajectory Prediction Using Bayesian Nonparametric Reachability Trees, Infotech@Aerospace 2011. [[paper](https:\u002F\u002Fdspace.mit.edu\u002Fbitstream\u002Fhandle\u002F1721.1\u002F114899\u002FAoude_Infotech11.pdf?sequence=1&isAllowed=y)]\n* Gaussian Process Dynamical Models for Human Motion, TPAMI 2008. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=4359316)]\n* A new approach to linear filtering and prediction problems, Journal of Basic Engineering 1960. [[paper](http:\u002F\u002F160.78.24.2\u002FPublic\u002FKalman\u002FKalman1960.pdf)]\n\n## Others\n* An Evaluation of Trajectory Prediction Approaches and Notes on the TrajNet Benchmark. arXiv preprint arXiv:1805.07663, 2018. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1805.07663.pdf)] [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCVW_2018\u002Fpapers\u002F11131\u002FBecker_RED_A_simple_but_effective_Baseline_Predictor_for_the_TrajNet_ECCVW_2018_paper.pdf)]\n* Scene-LSTM: A Model for Human Trajectory Prediction, arXiv preprint arXiv:1808.04018, 2018. [[paper](https:\u002F\u002Farxiv.org\u002Fftp\u002Farxiv\u002Fpapers\u002F1808\u002F1808.04018.pdf)]\n* Convolutional Social Pooling for Vehicle Trajectory Prediction, CVPR Workshops 2018. 
[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018_workshops\u002Fpapers\u002Fw29\u002FDeo_Convolutional_Social_Pooling_CVPR_2018_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fnachiket92\u002Fconv-social-pooling)]\n* Convolutional Neural Network for Trajectory Prediction, ECCV Workshops 2018. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCVW_2018\u002Fpapers\u002F11131\u002FNikhil_Convolutional_Neural_Network_for_Trajectory_Prediction_ECCVW_2018_paper.pdf)]\n* Group LSTM: Group Trajectory Prediction in Crowded Scenarios, ECCV Workshops 2018. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCVW_2018\u002Fpapers\u002F11131\u002FBisagno_Group_LSTM_Group_Trajectory_Prediction_in_Crowded_Scenarios_ECCVW_2018_paper.pdf)]\n* Are they going to cross? a benchmark dataset and baseline for pedestrian crosswalk behavior, ICCV Workshops 2017. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017_workshops\u002Fpapers\u002Fw3\u002FRasouli_Are_They_Going_ICCV_2017_paper.pdf)] [[website](https:\u002F\u002Fdata.nvision2.eecs.yorku.ca\u002FJAAD_dataset\u002F)]\n* Human Trajectory Prediction using Spatially aware Deep Attention Models, arXiv preprint arXiv:1705.09436, 2017. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1705.09436.pdf)]\n* Modeling Spatial-Temporal Dynamics of Human Movements for Predicting Future Trajectories, AAAI Workshops 2015. [[paper](https:\u002F\u002Fwww.diva-portal.org\u002Fsmash\u002Fget\u002Fdiva2:808848\u002FFULLTEXT01.pdf)]\n\n# 📚 2019 Conference and Journal Papers\n## Conference Papers 2019\n* MultiPath: Multiple Probabilistic Anchor Trajectory Hypotheses for Behavior Prediction, Conference on Robot Learning (CoRL 2019). [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1910.05449.pdf)]\n* Generating Multi-Agent Trajectories using Programmatic Weak Supervision, ICLR 2019. 
[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1803.07612.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fezhan94\u002Fmultiagent-programmatic-supervision)]\n* Stochastic Prediction of Multi-Agent Interactions from Partial Observations, ICLR 2019. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1902.09641.pdf)]\n* TrafficPredict: Trajectory Prediction for Heterogeneous Traffic-Agents, AAAI 2019. [[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F4569\u002F4447)] [[code](https:\u002F\u002Fgithub.com\u002Fhuang-xx\u002FTrafficPredict)]\n* Data-Driven Crowd Simulation with Generative Adversarial Networks, International Conference on Computer Animation and Social Agents (CASA 2019). [[paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3328756.3328769)] [[code](https:\u002F\u002Fgithub.com\u002Famiryanj\u002FcrowdGAN)]\n* RobustTP: End-to-End Trajectory Prediction for Heterogeneous Road-Agents in Dense Traffic with Noisy Sensor Inputs, ACM Computer Science in Cars Symposium (CSCS 2019). [[paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3359999.3360495)] [[code](https:\u002F\u002Fgithub.com\u002Frohanchandra30\u002FTrackNPred)]\n* Which Way Are You Going? Imitative Decision Learning for Path Forecasting in Dynamic Scenes, CVPR 2019. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FLi_Which_Way_Are_You_Going_Imitative_Decision_Learning_for_Path_CVPR_2019_paper.pdf)]\n* Multi-Agent Tensor Fusion for Contextual Trajectory Prediction, CVPR 2019. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FZhao_Multi-Agent_Tensor_Fusion_for_Contextual_Trajectory_Prediction_CVPR_2019_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FprogrammingLearner\u002FMATF-architecture-details)]\n* Peeking into the Future: Predicting Future Person Activities and Locations in Videos, CVPR 2019. 
[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FLiang_Peeking_Into_the_Future_Predicting_Future_Person_Activities_and_Locations_CVPR_2019_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fgoogle\u002Fnext-prediction)] [[website](https:\u002F\u002Fnext.cs.cmu.edu\u002F)]\n* SoPhie: An Attentive GAN for Predicting Paths Compliant to Social and Physical Constraints, CVPR 2019. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FSadeghian_SoPhie_An_Attentive_GAN_for_Predicting_Paths_Compliant_to_Social_CVPR_2019_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fcoolsunxu\u002Fsophie)]\n* SR-LSTM: State Refinement for LSTM towards Pedestrian Trajectory Prediction, CVPR 2019. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FZhang_SR-LSTM_State_Refinement_for_LSTM_Towards_Pedestrian_Trajectory_Prediction_CVPR_2019_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fzhangpur\u002FSR-LSTM)]\n* TraPHic: Trajectory Prediction in Dense and Heterogeneous Traffic Using Weighted Interactions, CVPR 2019. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FChandra_TraPHic_Trajectory_Prediction_in_Dense_and_Heterogeneous_Traffic_Using_Weighted_CVPR_2019_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FBenMSK\u002Ftrajectory_prediction_TraPHic)]\n* Overcoming Limitations of Mixture Density Networks: A Sampling and Fitting Framework for Multimodal Future Prediction, CVPR 2019. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FMakansi_Overcoming_Limitations_of_Mixture_Density_Networks_A_Sampling_and_Fitting_CVPR_2019_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Flmb-freiburg\u002FMultimodal-Future-Prediction)]\n* Argoverse: 3D Tracking and Forecasting with Rich Maps, CVPR 2019. 
[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FChang_Argoverse_3D_Tracking_and_Forecasting_With_Rich_Maps_CVPR_2019_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fargoai\u002Fargoverse-api)]\n* Diverse Generation for Multi-agent Sports Games, CVPR 2019. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FYeh_Diverse_Generation_for_Multi-Agent_Sports_Games_CVPR_2019_paper.pdf)]\n* Looking to Relations for Future Trajectory Forecast, ICCV 2019. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fpapers\u002FChoi_Looking_to_Relations_for_Future_Trajectory_Forecast_ICCV_2019_paper.pdf)]\n* Analyzing the Variety Loss in the Context of Probabilistic Trajectory Prediction, ICCV 2019. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fpapers\u002FThiede_Analyzing_the_Variety_Loss_in_the_Context_of_Probabilistic_Trajectory_ICCV_2019_paper.pdf)]\n* The Trajectron: Probabilistic Multi-Agent Trajectory Modeling With Dynamic Spatiotemporal Graphs, ICCV 2019. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fpapers\u002FIvanovic_The_Trajectron_Probabilistic_Multi-Agent_Trajectory_Modeling_With_Dynamic_Spatiotemporal_Graphs_ICCV_2019_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FStanfordASL\u002FTrajectron)]\n* Joint Prediction for Kinematic Trajectories in Vehicle-Pedestrian-Mixed Scenes, ICCV 2019. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fpapers\u002FBi_Joint_Prediction_for_Kinematic_Trajectories_in_Vehicle-Pedestrian-Mixed_Scenes_ICCV_2019_paper.pdf)]\n* STGAT: Modeling Spatial-Temporal Interactions for Human Trajectory Prediction, ICCV 2019. 
[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fpapers\u002FHuang_STGAT_Modeling_Spatial-Temporal_Interactions_for_Human_Trajectory_Prediction_ICCV_2019_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fhuang-xx\u002FSTGAT)]\n* PIE: A Large-Scale Dataset and Models for Pedestrian Intention Estimation and Trajectory Prediction, ICCV 2019. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fpapers\u002FRasouli_PIE_A_Large-Scale_Dataset_and_Models_for_Pedestrian_Intention_Estimation_ICCV_2019_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Faras62\u002FPIEPredict)]\n* A Multi-Vehicle Trajectories Generator to Simulate Vehicle-to-Vehicle Encountering Scenarios, ICRA 2019. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8793776)]\n* Multimodal Trajectory Predictions for Autonomous Driving using Deep Convolutional Networks, ICRA 2019. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8793868)] [[code](https:\u002F\u002Fgithub.com\u002Fdaeheepark\u002FPathPredictNusc)]\n* Force-based Heterogeneous Traffic Simulation for Autonomous Vehicle Testing, ICRA 2019. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8794430)]\n* Interaction-aware Multi-agent Tracking and Probabilistic Behavior Prediction via Adversarial Learning, ICRA 2019. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8793661)]\n* StarNet: Pedestrian Trajectory Prediction using Deep Neural Network in Star Topology, IROS 2019. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8967811)]\n* Deep Predictive Autonomous Driving Using Multi-Agent Joint Trajectory Prediction and Traffic Rules, IROS 2019. 
[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8967708)]\n* Conditional Generative Neural System for Probabilistic Trajectory Prediction, IROS 2019. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8967822)]\n* Jointly Learnable Behavior and Trajectory Planning for Self-Driving Vehicles, IROS 2019. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8967615)]\n* INFER: INtermediate representations for FuturE pRediction, IROS 2019. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8968553)] [[code](https:\u002F\u002Fgithub.com\u002Ftalsperre\u002FINFER)] [[website](https:\u002F\u002Ftalsperre.github.io\u002FINFER\u002F)]\n* Stochastic Sampling Simulation for Pedestrian Trajectory Prediction, IROS 2019. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8967857)]\n* Long-term Prediction of Motion Trajectories Using Path Homology Clusters, IROS 2019. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8968125)]\n* Social-BiGAT: Multimodal Trajectory Forecasting using Bicycle-GAN and Graph Attention Networks, NIPS 2019. [[paper](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2019\u002Ffile\u002Fd09bf41544a3365a46c9077ebb5e35c3-Paper.pdf)]\n* Multiple Futures Prediction, NIPS 2019. [[paper](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2019\u002Ffile\u002F86a1fa88adb5c33bd7a68ac2f9f3f96b-Paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fapple\u002Fml-multiple-futures-prediction)]\n* Trajectory Prediction by Coupling Scene-LSTM with Human Movement LSTM, International Symposium on Visual Computing (ISVC 2019). 
[[paper](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-030-33720-9_19.pdf)]\n* Pedestrian Trajectory Prediction Using a Social Pyramid, Pacific Rim International Conference on Artificial Intelligence (PRICAI 2019). [[paper](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-030-29911-8_34.pdf)]\n* Situation-Aware Pedestrian Trajectory Prediction with Spatio-Temporal Attention Model, Computer Vision Winter Workshop (CVWW 2019). [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1902.05437.pdf)]\n* Location-Velocity Attention for Pedestrian Trajectory Prediction, WACV 2019. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8659060)]\n* Coordination and trajectory prediction for vehicle interactions via bayesian generative modeling, IV 2019. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8813821)]\n* Wasserstein Generative Learning with Kinematic Constraints for Probabilistic Interactive Driving Behavior Prediction, IV 2019. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8813783)]\n* AGen: Adaptable Generative Prediction Networks for Autonomous Driving, IV 2019. [[paper](http:\u002F\u002Fwww.cs.cmu.edu\u002F~cliu6\u002Ffiles\u002Fiv19-1.pdf)]\n* Vehicle Trajectory Prediction at Intersections using Interaction based Generative Adversarial Networks, ITSC 2019. 
[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8916927)] [[paper](https:\u002F\u002Fwww.researchgate.net\u002Fprofile\u002FDebaditya-Roy-2\u002Fpublication\u002F337629029_Vehicle_Trajectory_Prediction_at_Intersections_using_Interaction_based_Generative_Adversarial_Networks\u002Flinks\u002F5de5e6224585159aa45cc76c\u002FVehicle-Trajectory-Prediction-at-Intersections-using-Interaction-based-Generative-Adversarial-Networks.pdf)]\n* GRIP: Graph-based Interaction-aware Trajectory Prediction, Intelligent Transportation Systems Conference (ITSC 2019). [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8917228)] [[code](https:\u002F\u002Fgithub.com\u002Fxincoder\u002FGRIP)]\n* GRIP++: Enhanced Graph-based Interaction-aware Trajectory Prediction for Autonomous Driving, arXiv preprint arXiv:1907.07792, 2019. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1907.07792.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fxincoder\u002FGRIP)]\n* Pose Based Trajectory Forecast of Vulnerable Road Users, IEEE Symposium Series on Computational Intelligence (SSCI 2019). [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9003023)]\n* Path Predictions using Object Attributes and Semantic Environment, International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications (VISIGRAPP 2019). [[paper](https:\u002F\u002Fpdfs.semanticscholar.org\u002F1d36\u002F88ae8738335f6452147de3c2f33bcfbd81b3.pdf)]\n* Probabilistic Path Planning using Obstacle Trajectory Prediction, CoDS-COMAD 2019. [[paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3297001.3297006)]\n* Human Trajectory Prediction using Adversarial Loss, Proceedings of the 19th Swiss Transport Research Conference 2019. 
[[paper](https:\u002F\u002Fwww.strc.ch\u002F2019\u002FKothari_Alahi.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fvita-epfl\u002FAdversarialLoss-SGAN)]\n\n## Journal Papers 2019\n* A Scalable Framework for Trajectory Prediction, TITS. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8658195)]\n* Contextual Recurrent Predictive Model for Long-Term Intent Prediction of Vulnerable Road Users, TITS. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8766889&tag=1)]\n* Interactive Trajectory Prediction of Surrounding Road Users for Autonomous Driving Using Structural-LSTM Network, TITS. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8848853)]\n* A Deep Learning-Based Framework for Intersectional Traffic Simulation and Editing, TVCG. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8600335)]\n* Heter-Sim: Heterogeneous Multi-Agent Systems Simulation by Interactive Data-Driven Optimization, TVCG. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8865441)]\n* AADS: Augmented Autonomous Driving Simulation using Data-driven Algorithms, SCIENCE ROBOTICS. [[paper](https:\u002F\u002Farxiv.org\u002Fftp\u002Farxiv\u002Fpapers\u002F1901\u002F1901.07849.pdf)]\n* Learning Generative Socially Aware Models of Pedestrian Motion, RAL. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8760356)]\n* Pedestrian Trajectory Prediction in Extremely Crowded Scenarios, Sensors. [[paper](https:\u002F\u002Fwww.mdpi.com\u002F1424-8220\u002F19\u002F5\u002F1223\u002Fpdf)]\n* Human trajectory prediction in crowded scene using social-affinity Long Short-Term Memory, PR. 
[[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0031320319301712)]\n\n## Others 2019\n* Joint Interaction and Trajectory Prediction for Autonomous Driving using Graph Neural Networks, arXiv preprint arXiv:1912.07882, 2019. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1912.07882.pdf)]\n* Learning to Infer Relations for Future Trajectory Forecast, CVPR Workshops 2019. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPRW_2019\u002Fpapers\u002FPrecognition\u002FChoi_Learning_to_Infer_Relations_for_Future_Trajectory_Forecast_CVPRW_2019_paper.pdf)]\n* Social Ways: Learning Multi-Modal Distributions of Pedestrian Trajectories, CVPR Workshops 2019. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPRW_2019\u002Fpapers\u002FPrecognition\u002FAmirian_Social_Ways_Learning_Multi-Modal_Distributions_of_Pedestrian_Trajectories_With_GANs_CVPRW_2019_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fcrowdbotp\u002Fsocialways)]\n* Social and Scene-Aware Trajectory Prediction in Crowded Spaces, ICCV Workshops 2019. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1909.08840.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FOghma\u002Fsns-lstm\u002F)]\n* Probabilistic Trajectory Prediction for Autonomous Vehicles with Attentive Recurrent Neural Process, arXiv preprint arXiv:1910.08102, 2019. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1910.08102.pdf)]\n* Stochastic Trajectory Prediction with Social Graph Network, arXiv preprint arXiv:1907.10233, 2019. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1907.10233.pdf)]\n\n# 📚 2020 Conference and Journal Papers\n## Conference Papers 2020\n* Spatio-Temporal Graph Transformer Networks for Pedestrian Trajectory Prediction, ECCV 2020. 
[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2005.08514.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FMajiker\u002FSTAR)]\n* AutoTrajectory: Label-Free Trajectory Extraction and Prediction from Videos Using Dynamic Points, ECCV 2020. [[paper](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-030-58601-0_38.pdf)]\n* PiP: Planning-Informed Trajectory Prediction for Autonomous Driving, ECCV 2020. [[paper](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-030-58589-1_36.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FHaoran-SONG\u002FPiP-Planning-informed-Prediction)]\n* SMART: Simultaneous Multi-Agent Recurrent Trajectory Prediction, ECCV 2020. [[paper](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-030-58583-9_28.pdf)]\n* Trajectron++: Dynamically-Feasible Trajectory Forecasting with Heterogeneous Data, ECCV 2020. [[paper](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-030-58523-5_40.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FStanfordASL\u002FTrajectron-plus-plus)]\n* SimAug: Learning Robust Representations from Simulation for Trajectory Prediction, ECCV 2020. [[paper](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-030-58601-0_17.pdf)] [[code](https:\u002F\u002Fnext.cs.cmu.edu\u002Fsimaug\u002F)]\n* Diverse and Admissible Trajectory Forecasting Through Multimodal Context Understanding, ECCV 2020. [[paper](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-030-58621-8_17.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fkami93\u002FCMU-DATF)]\n* It Is Not the Journey But the Destination: Endpoint Conditioned Trajectory Prediction, ECCV 2020. 
[[paper](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-030-58536-5_45.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FHarshayuGirase\u002FHuman-Path-Prediction)]\n* How Can I See My Future? FvTraj: Using First-Person View for Pedestrian Trajectory Prediction, ECCV 2020. [[paper](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-030-58571-6_34.pdf)]\n* Dynamic and Static Context-Aware LSTM for Multi-agent Motion Prediction, ECCV 2020. [[paper](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-030-58589-1_33.pdf)]\n* Learning Lane Graph Representations for Motion Forecasting, ECCV 2020. [[paper](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-030-58536-5_32.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fuber-research\u002FLaneGCN)]\n* Implicit Latent Variable Model for Scene-Consistent Motion Forecasting, ECCV 2020. [[paper](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-030-58592-1_37.pdf)]\n* Testing the Safety of Self-driving Vehicles by Simulating Perception and Prediction, ECCV 2020. [[paper](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-030-58574-7_19.pdf)]\n* Perceive, Predict, and Plan: Safe Motion Planning Through Interpretable Semantic Representations, ECCV 2020. [[paper](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-030-58592-1_25.pdf)]\n* Transformer Networks for Trajectory Forecasting, International Conference on Pattern Recognition (ICPR 2020). [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9412190)] [[code](https:\u002F\u002Fgithub.com\u002FFGiuliari\u002FTrajectory-Transformer)]\n* DAG-Net: Double Attentive Graph Neural Network for Trajectory Forecasting, ICPR 2020. 
[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9412114)] [[code](https:\u002F\u002Fgithub.com\u002Falexmonti19\u002Fdagnet)]\n* TNT: Target-driveN Trajectory Prediction, Conference on Robot Learning (CoRL 2020). [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2008.08294.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FHenry1iu\u002FTNT-Trajectory-Predition)]\n* Social-VRNN: One-Shot Multi-modal Trajectory Prediction for Interacting Pedestrians, CoRL 2020. [[paper](https:\u002F\u002Fautonomousrobots.nl\u002Fdocs\u002F20-Brito-CoRL.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Ftud-amr\u002Fsocial_vrnn)]\n* Kernel Trajectory Maps for Multi-Modal Probabilistic Motion Prediction, CoRL 2020. [[paper](http:\u002F\u002Fproceedings.mlr.press\u002Fv100\u002Fzhi20a\u002Fzhi20a.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fwzhi\u002FKernelTrajectoryMaps)]\n* MATS: An Interpretable Trajectory Forecasting Representation for Planning and Control, CoRL 2020. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2009.07517)] [[code](https:\u002F\u002Fgithub.com\u002FStanfordASL\u002FMATS)]\n* An Attention-Based Interaction-Aware Spatio-Temporal Graph Neural Network for Trajectory Prediction, International Conference on Neural Information Processing (ICONIP 2020). [[paper](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-030-63823-8_5.pdf)]\n* OpenTraj: Assessing Prediction Complexity in Human Trajectories Datasets, ACCV 2020. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FACCV2020\u002Fpapers\u002FAmirian_OpenTraj_Assessing_Prediction_Complexity_in_Human_Trajectories_Datasets_ACCV_2020_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fcrowdbotp\u002FOpenTraj)]\n* Goal-GAN: Multimodal Trajectory Prediction Based on Goal Position Estimation, ACCV 2020. 
[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2010.01114.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fdendorferpatrick\u002FGoalGAN)]\n* Semantic Synthesis of Pedestrian Locomotion, ACCV 2020. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FACCV2020\u002Fpapers\u002FPriisalu_Semantic_Synthesis_of_Pedestrian_Locomotion_ACCV_2020_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FMariaPriisalu\u002Fspl)]\n* EvolveGraph: Multi-Agent Trajectory Prediction with Dynamic Relational Reasoning, NIPS 2020. [[paper](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2020\u002Ffile\u002Fe4d8163c7a068b65a64c89bd745ec360-Paper.pdf)] [[website](https:\u002F\u002Fjiachenli94.github.io\u002Fpublications\u002FEvolvegraph\u002F)]\n* Multi-agent Trajectory Prediction with Fuzzy Query Attention, NIPS 2020. [[paper](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2020\u002Ffile\u002Ffe87435d12ef7642af67d9bc82a8b3cd-Paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fnitinkamra1992\u002FFQA)]\n* Spatio-Temporal Graph Structure Learning for Traffic Forecasting, AAAI 2020. [[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F5470\u002F5326)]\n* GMAN: A Graph Multi-Attention Network for Traffic Prediction, AAAI 2020. [[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F5477\u002F5333)] [[code](https:\u002F\u002Fgithub.com\u002Fzhengchuanpan\u002FGMAN)]\n* CF-LSTM: Cascaded Feature-Based Long Short-Term Networks for Predicting Pedestrian Trajectory, AAAI 2020. [[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F6943\u002F6797)]\n* OMuLeT: Online Multi-Lead Time Location Prediction for Hurricane Trajectory Forecasting, AAAI 2020. [[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F5444\u002F5300)]\n* Multimodal Interaction-Aware Trajectory Prediction in Crowded Space, AAAI 2020. 
[[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F6874\u002F6728)]\n* STINet: Spatio-Temporal-Interactive Network for Pedestrian Detection and Trajectory Prediction, CVPR 2020. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2020\u002Fpapers\u002FZhang_STINet_Spatio-Temporal-Interactive_Network_for_Pedestrian_Detection_and_Trajectory_Prediction_CVPR_2020_paper.pdf)]\n* CoverNet: Multimodal Behavior Prediction using Trajectory Sets, CVPR 2020. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2020\u002Fpapers\u002FPhan-Minh_CoverNet_Multimodal_Behavior_Prediction_Using_Trajectory_Sets_CVPR_2020_paper.pdf)]\n* TPNet: Trajectory Proposal Network for Motion Prediction, CVPR 2020. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2020\u002Fpapers\u002FFang_TPNet_Trajectory_Proposal_Network_for_Motion_Prediction_CVPR_2020_paper.pdf)]\n* Reciprocal Learning Networks for Human Trajectory Prediction, CVPR 2020. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2020\u002Fpapers\u002FSun_Reciprocal_Learning_Networks_for_Human_Trajectory_Prediction_CVPR_2020_paper.pdf)]\n* MANTRA: Memory Augmented Networks for Multiple Trajectory Prediction, CVPR 2020. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2020\u002Fpapers\u002FMarchetti_MANTRA_Memory_Augmented_Networks_for_Multiple_Trajectory_Prediction_CVPR_2020_paper.pdf)]\n* Recursive Social Behavior Graph for Trajectory Prediction, CVPR 2020. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2020\u002Fpapers\u002FSun_Recursive_Social_Behavior_Graph_for_Trajectory_Prediction_CVPR_2020_paper.pdf)]\n* The Garden of Forking Paths: Towards Multi-Future Trajectory Prediction, CVPR 2020. 
[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2020\u002Fpapers\u002FLiang_The_Garden_of_Forking_Paths_Towards_Multi-Future_Trajectory_Prediction_CVPR_2020_paper.pdf)] [[code](https:\u002F\u002Fnext.cs.cmu.edu\u002Fmultiverse\u002F)]\n* Social-STGCNN: A Social Spatio-Temporal Graph Convolutional Neural Network for Human Trajectory Prediction, CVPR 2020. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2020\u002Fpapers\u002FMohamed_Social-STGCNN_A_Social_Spatio-Temporal_Graph_Convolutional_Neural_Network_for_Human_CVPR_2020_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fabduallahmohamed\u002FSocial-STGCNN)]\n* VectorNet: Encoding HD Maps and Agent Dynamics from Vectorized Representation, CVPR 2020. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2020\u002Fpapers\u002FGao_VectorNet_Encoding_HD_Maps_and_Agent_Dynamics_From_Vectorized_Representation_CVPR_2020_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FDQSSSSS\u002FVectorNet)]\n* Imitative Non-Autoregressive Modeling for Trajectory Forecasting and Imputation, CVPR 2020. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2020\u002Fpapers\u002FQi_Imitative_Non-Autoregressive_Modeling_for_Trajectory_Forecasting_and_Imputation_CVPR_2020_paper.pdf)]\n* Collaborative Motion Prediction via Neural Motion Message Passing, CVPR 2020. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2020\u002Fpapers\u002FHu_Collaborative_Motion_Prediction_via_Neural_Motion_Message_Passing_CVPR_2020_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FPhyllisH\u002FNMMP)]\n* UST: Unifying Spatio-Temporal Context for Trajectory Prediction in Autonomous Driving, IROS 2020. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9340943)]\n* Interaction-Aware Trajectory Prediction of Connected Vehicles using CNN-LSTM Networks, Annual Conference of the IEEE Industrial Electronics Society (IECON 2020). 
[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9255162)]\n* GISNet:Graph-Based Information Sharing Network For Vehicle Trajectory Prediction, International Joint Conference on Neural Networks (IJCNN 2020). [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9206770)]\n* Disentangling Human Dynamics for Pedestrian Locomotion Forecasting with Noisy Supervision, WACV 2020. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_WACV_2020\u002Fpapers\u002FMangalam_Disentangling_Human_Dynamics_for_Pedestrian_Locomotion_Forecasting_with_Noisy_Supervision_WACV_2020_paper.pdf)] [[website](https:\u002F\u002Fkarttikeya.github.io\u002Fpublication\u002Fplf\u002F)]\n* Deep Imitative Models for Flexible Inference, Planning, and Control, ICLR 2020. [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=Skl4mRNYDr)] [[code](https:\u002F\u002Fgithub.com\u002Fnrhine1\u002Fdeep_imitative_models)] [[website](https:\u002F\u002Fsites.google.com\u002Fview\u002Fimitative-models)]\n* Diverse Trajectory Forecasting with Determinantal Point Processes, ICLR 2020. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1907.04967.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FGruntrexpewrus\u002FTrajectoryFor-and-DPP)]\n* Trajectory Prediction in Heterogeneous Environment via Attended Ecology Embedding, ACM International Conference on Multimedia 2020. [[paper](http:\u002F\u002Fbasiclab.lab.nycu.edu.tw\u002Fassets\u002FAEE-GAN_MM2020.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FEgo2Eco\u002FAEE-GAN)]\n* Multiple Trajectory Prediction with Deep Temporal and Spatial Convolutional Neural Networks, IROS 2020. [[paper](http:\u002F\u002Fras.papercept.net\u002Fimages\u002Ftemp\u002FIROS\u002Ffiles\u002F1081.pdf)]\n* Probabilistic Multi-modal Trajectory Prediction with Lane Attention for Autonomous Vehicles, IROS 2020. 
[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F9341034\u002F)]\n* Lane-Attention: Predicting Vehicles’ Moving Trajectories by Learning Their Attention Over Lanes, IROS 2020. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1909.13377.pdf)]\n* Interaction-aware Kalman Neural Networks for Trajectory Prediction, IEEE Intelligent Vehicles Symposium (IV 2020). [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1902.10928.pdf)]\n* Multi-Head Attention for Multi-Modal Joint Vehicle Motion Forecasting, ICRA 2020. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9197340)]\n\n## Journal Papers 2020\n* TrajVAE: A Variational AutoEncoder model for trajectory generation, Neurocomputing. [[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0925231220312017)]\n* Social-Aware Pedestrian Trajectory Prediction via States Refinement LSTM, TPAMI. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9261113)]\n* Forecasting Trajectory and Behavior of Road-Agents Using Spectral Clustering in Graph-LSTMs, IEEE Robotics and Automation Letters. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9126166)]\n* Attention Based Vehicle Trajectory Prediction, IEEE Transactions on Intelligent Vehicles. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9084255)]\n* AC-VRNN: Attentive Conditional-VRNN for multi-future trajectory prediction, Computer Vision and Image Understanding. [[paper](https:\u002F\u002Freader.elsevier.com\u002Freader\u002Fsd\u002Fpii\u002FS1077314221000898?token=F06466B50D3AE170EC14D460C1AFE91DFE5D61047357252C808857A2BBD4FE4CF2FF3076AD391F842F155CAD2B102C5F&originRegion=eu-west-1&originCreation=20220421024623)]\n* PoPPL: Pedestrian Trajectory Prediction by LSTM With Automatic Route Class Clustering, TNNLS. 
[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9031707)]\n* Real Time Trajectory Prediction Using Deep Conditional Generative Models, IEEE Robotics and Automation Letters. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8957482)]\n* Scene Compliant Trajectory Forecast with Agent-Centric Spatio-Temporal Grids, RAL. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9000540)]\n* What the Constant Velocity Model Can Teach Us About Pedestrian Motion Prediction, RAL. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1903.07933.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fcschoeller\u002Fconstant_velocity_pedestrian_motion)]\n* Multimodal Deep Generative Models for Trajectory Prediction: A Conditional Variational Autoencoder Approach, RAL. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9286482)]\n* Deep Context Maps: Agent Trajectory Prediction using Location-specific Latent Maps, RAL. [[paper](http:\u002F\u002Fras.papercept.net\u002Fimages\u002Ftemp\u002FIROS\u002Ffiles\u002F2532.pdf)]\n* Learning Structured Representations of Spatial and Interactive Dynamics for Trajectory Prediction in Crowded Scenes, RAL. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9309332)] [[code](https:\u002F\u002Fgithub.com\u002Ftdavchev\u002Fstructured-trajectory-prediction), [code](https:\u002F\u002Fgithub.com\u002Ftdavchev\u002FStochastic-Futures-Prediction)]\n* Probabilistic Crowd GAN: Multimodal Pedestrian Trajectory Prediction Using a Graph Vehicle-Pedestrian Attention Network, RAL. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9123560)]\n* Multimodal Interaction-aware Motion Prediction for Autonomous Street Crossing, International Journal of Robotics Research. 
[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1808.06887)]\n* Pedestrian Trajectory Prediction Based on Deep Convolutional LSTM Network, TITS. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9043898)] [[code](https:\u002F\u002Fgithub.com\u002FParadiseCK\u002FDeepConvLstmNet)]\n* Multi-Vehicle Collaborative Learning for Trajectory Prediction With Spatio-Temporal Tensor Fusion, TITS. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9151374)]\n* Multiple Trajectory Prediction of Moving Agents with Memory Augmented Networks, TPAMI. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9138768)]\n* Regularizing Neural Networks for Future Trajectory Prediction via Inverse Reinforcement Learning Framework, IET Computer Vision. [[paper](https:\u002F\u002Fietresearch.onlinelibrary.wiley.com\u002Fdoi\u002Fepdf\u002F10.1049\u002Fiet-cvi.2019.0546)] [[code](https:\u002F\u002Fgithub.com\u002Fd1024choi\u002Ftraj-pred-irl)]\n* Motion trajectory prediction based on a CNN-LSTM sequential model, Science China Information Sciences. [[paper](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002Fs11432-019-2761-y.pdf)]\n\n## Others 2020\n* Scene Gated Social Graph: Pedestrian Trajectory Prediction Based on Dynamic Social Graphs and Scene Constraints, arXiv preprint arXiv:2010.05507, 2020. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2010.05507.pdf)]\n* Robust Trajectory Forecasting for Multiple Intelligent Agents in Dynamic Scene, arXiv preprint arXiv:2005.13133, 2020. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2005.13133.pdf)]\n* Map-Adaptive Goal-Based Trajectory Prediction, arXiv preprint arXiv:2009.04450, 2020. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2009.04450.pdf)]\n* A Spatial-Temporal Attentive Network with Spatial Continuity for Trajectory Prediction, arXiv preprint arXiv:2003.06107, 2020. 
[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2003.06107v1.pdf)]\n* Trajformer: Trajectory Prediction with Local Self-Attentive Contexts for Autonomous Driving, arXiv preprint arXiv:2011.14910, 2020. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2011.14910.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FManojbhat09\u002FTrajformer)]\n* TPPO: A Novel Trajectory Predictor with Pseudo Oracle, arXiv preprint arXiv:2002.01852, 2020. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2002.01852.pdf)]\n* Vehicle Trajectory Prediction by Transfer Learning of Semi-Supervised Models, arXiv preprint arXiv:2007.06781, 2020. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2007.06781.pdf)]\n* Social-WaGDAT: Interaction-aware Trajectory Prediction via Wasserstein Graph Double-Attention Network, arXiv preprint arXiv:2002.06241, 2020. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2002.06241.pdf)]\n* Trajectory Forecasts in Unknown Environments Conditioned on Grid-Based Plans, arXiv preprint arXiv:2001.00735, 2020. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2001.00735.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fnachiket92\u002FP2T)]\n* Multi-modal Trajectory Prediction for Autonomous Driving with Semantic Map and Dynamic Graph Attention Network, NIPS Workshops 2020. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2103.16273.pdf)]\n* Scene Gated Social Graph: Pedestrian Trajectory Prediction Based on Dynamic Social Graphs and Scene Constraints, arXiv preprint arXiv:2010.05507, 2020. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2010.05507v1.pdf)]\n* PathGAN: Local Path Planning with Attentive Generative Adversarial Networks, arXiv preprint arXiv:2007.03877, 2020. 
[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2007.03877.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fd1024choi\u002Fpathgan_pytorch)]\n\n# 📚 2021 Conference and Journal Papers\n## Conference Papers 2021\n* Collaborative Uncertainty in Multi-Agent Trajectory Forecasting, NIPS 2021. [[paper](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2021\u002Ffile\u002F31ca0ca71184bbdb3de7b20a51e88e90-Paper.pdf)]\n* GRIN: Generative Relation and Intention Network for Multi-agent Trajectory Prediction, NIPS 2021. [[paper](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2021\u002Ffile\u002Fe3670ce0c315396e4836d7024abcf3dd-Paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Flongyuanli\u002FGRIN_NeurIPS21)]\n* LibCity: An Open Library for Traffic Prediction, SIGSPATIAL 2021. [[paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3474717.3483923)] [[code](https:\u002F\u002Fgithub.com\u002FLibCity\u002FBigscity-LibCity)]\n* Predicting Vehicles Trajectories in Urban Scenarios with Transformer Networks and Augmented Information, IEEE Intelligent Vehicles Symposium (IV 2021). [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9575242)]\n* Social-STAGE: Spatio-Temporal Multi-Modal Future Trajectory Forecast, ICRA 2021. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2011.04853.pdf)]\n* AVGCN: Trajectory Prediction using Graph Convolutional Networks Guided by Human Attention, ICRA 2021. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2101.05682.pdf)]\n* Exploring Dynamic Context for Multi-path Trajectory Prediction, ICRA 2021. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9562034)] [[code](https:\u002F\u002Fgithub.com\u002Fwtliao\u002FDCENet)]\n* Pedestrian Trajectory Prediction using Context-Augmented Transformer Networks, ICRA 2021. 
[[paper](https:\u002F\u002Fwww.researchgate.net\u002Fpublication\u002F346614349_Pedestrian_Trajectory_Prediction_using_Context-Augmented_Transformer_Networks)] [[code](https:\u002F\u002Fgithub.com\u002FKhaledSaleh\u002FContext-Transformer-PedTraj)]\n* Spectral Temporal Graph Neural Network for Trajectory Prediction, ICRA 2021. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2106.02930.pdf)]\n* Congestion-aware Multi-agent Trajectory Prediction for Collision Avoidance, ICRA 2021. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9560994)] [[code](https:\u002F\u002Fgithub.com\u002Fxuxie1031\u002FCollisionFreeMultiAgentTrajectoryPrediciton)]\n* Anticipatory Navigation in Crowds by Probabilistic Prediction of Pedestrian Future Movements, ICRA 2021. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9561022)]\n* AgentFormer: Agent-Aware Transformers for Socio-Temporal Multi-Agent Forecasting, ICCV 2021. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FYuan_AgentFormer_Agent-Aware_Transformers_for_Socio-Temporal_Multi-Agent_Forecasting_ICCV_2021_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FKhrylx\u002FAgentFormer)] [[website](https:\u002F\u002Fye-yuan.com\u002Fagentformer\u002F)]\n* Likelihood-Based Diverse Sampling for Trajectory Forecasting, ICCV 2021. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FJason_Likelihood-Based_Diverse_Sampling_for_Trajectory_Forecasting_ICCV_2021_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FJasonMa2016\u002FLDS)]\n* MG-GAN: A Multi-Generator Model Preventing Out-of-Distribution Samples in Pedestrian Trajectory Prediction, ICCV 2021. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2108.09274.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fselflein\u002FMG-GAN)]\n* Spatial-Temporal Consistency Network for Low-Latency Trajectory Forecasting, ICCV 2021. 
[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FLi_Spatial-Temporal_Consistency_Network_for_Low-Latency_Trajectory_Forecasting_ICCV_2021_paper.pdf)]\n* Three Steps to Multimodal Trajectory Prediction: Modality Clustering, Classification and Synthesis, ICCV 2021. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FSun_Three_Steps_to_Multimodal_Trajectory_Prediction_Modality_Clustering_Classification_and_ICCV_2021_paper.pdf)]\n* From Goals, Waypoints & Paths To Long Term Human Trajectory Forecasting, ICCV 2021. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FMangalam_From_Goals_Waypoints__Paths_to_Long_Term_Human_Trajectory_ICCV_2021_paper.pdf)] [[code](https:\u002F\u002Fkarttikeya.github.io\u002Fpublication\u002Fynet\u002F)]\n* Where are you heading? Dynamic Trajectory Prediction with Expert Goal Examples, ICCV 2021. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FZhao_Where_Are_You_Heading_Dynamic_Trajectory_Prediction_With_Expert_Goal_ICCV_2021_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FJoeHEZHAO\u002Fexpert_traj)]\n* DenseTNT: End-to-end Trajectory Prediction from Dense Goal Sets, ICCV 2021. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FGu_DenseTNT_End-to-End_Trajectory_Prediction_From_Dense_Goal_Sets_ICCV_2021_paper.pdf)]\n* Safety-aware Motion Prediction with Unseen Vehicles for Autonomous Driving, ICCV 2021. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FRen_Safety-Aware_Motion_Prediction_With_Unseen_Vehicles_for_Autonomous_Driving_ICCV_2021_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fxrenaa\u002FSafety-Aware-Motion-Prediction)]\n* LOKI: Long Term and Key Intentions for Trajectory Prediction, ICCV 2021. 
[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FGirase_LOKI_Long_Term_and_Key_Intentions_for_Trajectory_Prediction_ICCV_2021_paper.pdf)] [[dataset](https:\u002F\u002Fusa.honda-ri.com\u002Floki)]\n* Human Trajectory Prediction via Counterfactual Analysis, ICCV 2021. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FChen_Human_Trajectory_Prediction_via_Counterfactual_Analysis_ICCV_2021_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FCHENGY12\u002FCausalHTP)]\n* Personalized Trajectory Prediction via Distribution Discrimination, ICCV 2021. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FChen_Personalized_Trajectory_Prediction_via_Distribution_Discrimination_ICCV_2021_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FCHENGY12\u002FDisDis)]\n* Unlimited Neighborhood Interaction for Heterogeneous Trajectory Prediction, ICCV 2021. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FZheng_Unlimited_Neighborhood_Interaction_for_Heterogeneous_Trajectory_Prediction_ICCV_2021_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fzhengfang1997\u002FUnlimited-Neighborhood-Interaction-for-Heterogeneous-Trajectory-Prediction)]\n* Social NCE: Contrastive Learning of Socially-aware Motion Representations, ICCV 2021. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FLiu_Social_NCE_Contrastive_Learning_of_Socially-Aware_Motion_Representations_ICCV_2021_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fvita-epfl\u002Fsocial-nce)]\n* RAIN: Reinforced Hybrid Attention Inference Network for Motion Forecasting, ICCV 2021. 
[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FLi_RAIN_Reinforced_Hybrid_Attention_Inference_Network_for_Motion_Forecasting_ICCV_2021_paper.pdf)]\n* Temporal Pyramid Network for Pedestrian Trajectory Prediction with Multi-Supervision, AAAI 2021. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2012.01884.pdf)]\n* SCAN: A Spatial Context Attentive Network for Joint Multi-Agent Intent Prediction, AAAI 2021. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2102.00109.pdf)]\n* Disentangled Multi-Relational Graph Convolutional Network for Pedestrian Trajectory Prediction, AAAI 2021. [[paper](https:\u002F\u002Fwww.aaai.org\u002FAAAI21Papers\u002FAAAI-1677.BaeI.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FInhwanBae\u002FDMRGCN)]\n* MotionRNN: A Flexible Model for Video Prediction with Spacetime-Varying Motions, CVPR 2021. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fpapers\u002FWu_MotionRNN_A_Flexible_Model_for_Video_Prediction_With_Spacetime-Varying_Motions_CVPR_2021_paper.pdf)]\n* Multimodal Motion Prediction with Stacked Transformers, CVPR 2021. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fpapers\u002FLiu_Multimodal_Motion_Prediction_With_Stacked_Transformers_CVPR_2021_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fdecisionforce\u002FmmTransformer)] [[website](https:\u002F\u002Fdecisionforce.github.io\u002FmmTransformer\u002F?utm_source=catalyzex.com)]\n* SGCN: Sparse Graph Convolution Network for Pedestrian Trajectory Prediction, CVPR 2021. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fpapers\u002FShi_SGCN_Sparse_Graph_Convolution_Network_for_Pedestrian_Trajectory_Prediction_CVPR_2021_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fshuaishiliu\u002FSGCN)]\n* LaPred: Lane-Aware Prediction of Multi-Modal Future Trajectories of Dynamic Agents, CVPR 2021. 
[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fpapers\u002FKim_LaPred_Lane-Aware_Prediction_of_Multi-Modal_Future_Trajectories_of_Dynamic_Agents_CVPR_2021_paper.pdf)]\n* Divide-and-Conquer for Lane-Aware Diverse Trajectory Prediction, CVPR 2021. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2104.08277.pdf)]\n* Euro-PVI: Pedestrian Vehicle Interactions in Dense Urban Centers, CVPR 2021. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fpapers\u002FBhattacharyya_Euro-PVI_Pedestrian_Vehicle_Interactions_in_Dense_Urban_Centers_CVPR_2021_paper.pdf)] [[dataset](https:\u002F\u002Fwww.mpi-inf.mpg.de\u002Fdepartments\u002Fcomputer-vision-and-machine-learning\u002Fresearch\u002Feuro-pvi-dataset)]\n* Trajectory Prediction with Latent Belief Energy-Based Model, CVPR 2021. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fpapers\u002FPang_Trajectory_Prediction_With_Latent_Belief_Energy-Based_Model_CVPR_2021_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fbpucla\u002Flbebm)]\n* Shared Cross-Modal Trajectory Prediction for Autonomous Driving, CVPR 2021. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fpapers\u002FChoi_Shared_Cross-Modal_Trajectory_Prediction_for_Autonomous_Driving_CVPR_2021_paper.pdf)]\n* Pedestrian and Ego-vehicle Trajectory Prediction from Monocular Camera, CVPR 2021. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fpapers\u002FNeumann_Pedestrian_and_Ego-Vehicle_Trajectory_Prediction_From_Monocular_Camera_CVPR_2021_paper.pdf)] [[code](https:\u002F\u002Fgitlab.com\u002FlukeN86\u002FpedFutureTracking)]\n* Interpretable Social Anchors for Human Trajectory Forecasting in Crowds, CVPR 2021. 
[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fpapers\u002FKothari_Interpretable_Social_Anchors_for_Human_Trajectory_Forecasting_in_Crowds_CVPR_2021_paper.pdf)]\n* Introvert: Human Trajectory Prediction via Conditional 3D Attention, CVPR 2021. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fpapers\u002FShafiee_Introvert_Human_Trajectory_Prediction_via_Conditional_3D_Attention_CVPR_2021_paper.pdf)]\n* MP3: A Unified Model to Map, Perceive, Predict and Plan, CVPR 2021. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2101.06806.pdf)]\n* TrafficSim: Learning to Simulate Realistic Multi-Agent Behaviors, CVPR 2021. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fpapers\u002FSuo_TrafficSim_Learning_To_Simulate_Realistic_Multi-Agent_Behaviors_CVPR_2021_paper.pdf)]\n* SceneGen: Learning to Generate Realistic Traffic Scenes, CVPR 2021. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fpapers\u002FTan_SceneGen_Learning_To_Generate_Realistic_Traffic_Scenes_CVPR_2021_paper.pdf)]\n* Multimodal Transformer Network for Pedestrian Trajectory Prediction, IJCAI 2021. [[paper](https:\u002F\u002Fwww.ijcai.org\u002Fproceedings\u002F2021\u002F0174.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fericyinyzy\u002FMTN_trajectory)]\n* Decoder Fusion RNN: Context and Interaction Aware Decoders for Trajectory Prediction, IROS 2021. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2108.05814.pdf)]\n* Joint Intention and Trajectory Prediction Based on Transformer, IROS 2021. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9636241)]\n* Maneuver-based Trajectory Prediction for Self-driving Cars Using Spatio-temporal Convolutional Networks, IROS 2021. 
[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9636875)]\n* Multiple Contextual Cues Integrated Trajectory Prediction for Autonomous Driving, IROS 2021. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9476975)]\n* MultiXNet: Multiclass Multistage Multimodal Motion Prediction, IEEE Intelligent Vehicles Symposium (IV 2021). [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9575718)]\n* Trajectory Prediction for Autonomous Driving based on Multi-Head Attention with Joint Agent-Map Representation, IEEE Intelligent Vehicles Symposium (IV 2021). [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9576054)]\n* Social-IWSTCNN: A Social Interaction-Weighted Spatio-Temporal Convolutional Neural Network for Pedestrian Trajectory Prediction in Urban Traffic Scenarios, IV 2021. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F9575958)]\n* Generating Scenarios with Diverse Pedestrian Behaviors for Autonomous Vehicle Testing, Conference on Robot Learning (CoRL 2021). [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=HTfApPeT4DZ)] [[code](https:\u002F\u002Fgithub.com\u002FMariaPriisalu\u002Fspl)]\n* Multimodal Trajectory Prediction Conditioned on Lane-Graph Traversals, CoRL 2021. [[paper](https:\u002F\u002Fproceedings.mlr.press\u002Fv164\u002Fdeo22a.html)] [[code](https:\u002F\u002Fgithub.com\u002Fnachiket92\u002FPGP)]\n* Learning to Predict Vehicle Trajectories with Model-based Planning, CoRL 2021. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2103.04027.pdf)]\n* Pose Based Trajectory Forecast of Vulnerable Road Users Using Recurrent Neural Networks, International Conference on Pattern Recognition (ICPR 2021). 
[[paper](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-030-68763-2_5.pdf)]\n* GraphTCN: Spatio-Temporal Interaction Modeling for Human Trajectory Prediction, WACV 2021. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FWACV2021\u002Fpapers\u002FWang_GraphTCN_Spatio-Temporal_Interaction_Modeling_for_Human_Trajectory_Prediction_WACV_2021_paper.pdf)]\n* Goal-driven Long-Term Trajectory Prediction, WACV 2021. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FWACV2021\u002Fpapers\u002FTran_Goal-Driven_Long-Term_Trajectory_Prediction_WACV_2021_paper.pdf)]\n* Multimodal Trajectory Predictions for Autonomous Driving without a Detailed Prior Map, WACV 2021. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FWACV2021\u002Fpapers\u002FKawasaki_Multimodal_Trajectory_Predictions_for_Autonomous_Driving_Without_a_Detailed_Prior_WACV_2021_paper.pdf)]\n* Self-Growing Spatial Graph Network for Context-Aware Pedestrian Trajectory Prediction, IEEE International Conference on Image Processing (ICIP 2021). [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2012.06320v2.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fserenetech90\u002FAOL_ovsc)]\n* S2TNet: Spatio-Temporal Transformer Networks for Trajectory Prediction in Autonomous Driving, Asian Conference on Machine Learning 2021. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2206.10902.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fchenghuang66\u002Fs2tnet)]\n* Trajectory Prediction using Equivariant Continuous Convolution, ICLR 2021. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2010.11344.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FRose-STL-Lab\u002FECCO)]\n* TridentNet: A Conditional Generative Model for Dynamic Trajectory Generation, International Conference on Intelligent Autonomous Systems 2021. 
[[paper](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-030-95892-3_31#Abs1)]\n* HOME: Heatmap Output for future Motion Estimation, ITSC 2021. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2105.10968.pdf)]\n* Graph and Recurrent Neural Network-based Vehicle Trajectory Prediction For Highway Driving, ITSC 2021. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F9564929)]\n* SCSG Attention: A Self-Centered Star Graph with Attention for Pedestrian Trajectory Prediction, International Conference on Database Systems for Advanced Applications (DASFAA 2021). [[paper](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-030-73194-6_29.pdf)]\n* Leveraging Trajectory Prediction for Pedestrian Video Anomaly Detection, IEEE Symposium Series on Computational Intelligence (SSCI 2021). [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9660004)] [[code](https:\u002F\u002Fgithub.com\u002Fakanuasiegbu\u002FLeveraging-Trajectory-Prediction-for-Pedestrian-Video-Anomaly-Detection)]\n\n## Journal Papers 2021\n* Are socially-aware trajectory prediction models really socially-aware?, Transportation Research: Part C. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2108.10879.pdf), [paper](https:\u002F\u002Ficcv21-adv-workshop.github.io\u002Fshort_paper\u002Fs-attack-arow2021.pdf)] [[code](https:\u002F\u002Fs-attack.github.io\u002F)]\n* Injecting knowledge in data-driven vehicle trajectory predictors, Transportation Research: Part C. 
[[paper](https:\u002F\u002Freader.elsevier.com\u002Freader\u002Fsd\u002Fpii\u002FS0968090X21000425?token=F03D20769BFB255F56662C10348A81F3D07A42C6B4AB9BA19E3F7B2A5F1DA7D99B96B783616BDA86C12866AFCF4C5671&originRegion=eu-west-1&originCreation=20220506090622)] [[code](https:\u002F\u002Fgithub.com\u002Fvita-epfl\u002FRRB)]\n* Decoding pedestrian and automated vehicle interactions using immersive virtual reality and interpretable deep learning, Transportation Research: Part C. [[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0968090X2030855X)]\n* Human Trajectory Forecasting in Crowds: A Deep Learning Perspective,  IEEE Transactions on Intelligent Transportation Systems. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9408398)] [[code](https:\u002F\u002Fgithub.com\u002Fvita-epfl\u002Ftrajnetplusplusbaselines)]\n* NetTraj: A Network-Based Vehicle Trajectory Prediction Model With Directional Representation and Spatiotemporal Attention Mechanisms, TITS. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9629362)]\n* Spatio-Temporal Graph Dual-Attention Network for Multi-Agent Prediction and Tracking, TITS. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9491972)]\n* A Hierarchical Framework for Interactive Behaviour Prediction of Heterogeneous Traffic Participants Based on Graph Neural Network, TITS. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9468360&tag=1)]\n* TrajGAIL: Generating urban vehicle trajectories using generative adversarial imitation learning, Transportation Research Part C. 
[[paper](https:\u002F\u002Freader.elsevier.com\u002Freader\u002Fsd\u002Fpii\u002FS0968090X21001121?token=3DEACAF2AD919E99B3331E74F747B61A0EAC2741E79B6F99F4F806155EB394F163D74F2F83806358BBD65911E107EF01&originRegion=us-east-1&originCreation=20220416040814)] [[code](https:\u002F\u002Fgithub.com\u002Fbenchoi93\u002FTrajGAIL)]\n* Vehicle Trajectory Prediction Using Generative Adversarial Network With Temporal Logic Syntax Tree Features, IEEE ROBOTICS AND AUTOMATION LETTERS. [[paper](https:\u002F\u002Fwww.gilitschenski.org\u002Figor\u002Fpublications\u002F202104-ral-logic_gan\u002Fral21-logic_gan.pdf)]\n* Vehicle Trajectory Prediction Using LSTMs with Spatial-Temporal Attention Mechanisms, IEEE Intelligent Transportation Systems Magazine. [[paper](http:\u002F\u002Furdata.net\u002Ffiles\u002F2020_VTP.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fleilin-research\u002FVTP)]\n* Long Short-Term Memory-Based Human-Driven Vehicle Longitudinal Trajectory Prediction in a Connected and Autonomous Vehicle Environment, Transportation Research Record. [[paper](http:\u002F\u002Fsage.cnpereading.com\u002Fparagraph\u002Fdownload\u002F?doi=10.1177\u002F0361198121993471)]\n* Temporal Pyramid Network with Spatial-Temporal Attention for Pedestrian Trajectory Prediction, IEEE Transactions on Network Science and Engineering. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9373939)]\n* An efficient Spatial–Temporal model based on gated linear units for trajectory prediction, Neurocomputing. [[paper](https:\u002F\u002Freader.elsevier.com\u002Freader\u002Fsd\u002Fpii\u002FS0925231221018907?token=C894F657732BB6078B77AEC9BD3858338C1A7F1254CCC0BBC34ADA1421A95CF9A4F68BDCA8812457DE27FB37EEB8F198&originRegion=us-east-1&originCreation=20220420144432)]\n* SRAI-LSTM: A Social Relation Attention-based Interaction-aware LSTM for human trajectory prediction, Neurocomputing. 
[[paper](https:\u002F\u002Freader.elsevier.com\u002Freader\u002Fsd\u002Fpii\u002FS0925231221018014?token=BB22DAAC41E3BF453C326A9D72A0CC900C2DFFD0D8AE07B7DEED51C7F2250B9CB40CC89B6812CA20DBFA6A7EDD32AAD6&originRegion=us-east-1&originCreation=20220512100647)]\n* AST-GNN: An attention-based spatio-temporal graph neural network for Interaction-aware pedestrian trajectory prediction, Neurocomputing. [[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS092523122100388X)]\n* Multi-PPTP: Multiple Probabilistic Pedestrian Trajectory Prediction in the Complex Junction Scene, IEEE Transactions on Intelligent Transportation Systems. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9619864)]\n* A Novel Graph-Based Trajectory Predictor With Pseudo-Oracle, TNNLS. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9447207)]\n* Large Scale GPS Trajectory Generation Using Map Based on Two Stage GAN, Journal of Data Science. [[paper](https:\u002F\u002Fwww.jds-online.com\u002Ffiles\u002FJDS202001-08.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FXingruiWang\u002FTwo-Stage-Gan-in-trajectory-generation)]\n* Pose and Semantic Map Based Probabilistic Forecast of Vulnerable Road Users’ Trajectories, IEEE Transactions on Intelligent Vehicles. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9707640)]\n* STI-GAN: Multimodal Pedestrian Trajectory Prediction Using Spatiotemporal Interactions and a Generative Adversarial Network, IEEE Access. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?arnumber=9387292)]\n* Holistic LSTM for Pedestrian Trajectory Prediction, TIP. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9361440)]\n* Pedestrian trajectory prediction with convolutional neural networks, PR. 
[[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0031320321004325)]\n* LSTM based trajectory prediction model for cyclist utilizing multiple interactions with environment, PR. [[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0031320320306038)]\n* Human trajectory prediction and generation using LSTM models and GANs, PR. [[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS003132032100323X)]\n* Vehicle trajectory prediction and generation using LSTM models and GANs, Plos one. [[paper](https:\u002F\u002Fjournals.plos.org\u002Fplosone\u002Farticle?id=10.1371\u002Fjournal.pone.0253868)]\n* BiTraP: Bi-Directional Pedestrian Trajectory Prediction With Multi-Modal Goal Estimation, RAL. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9345445)] [[code](https:\u002F\u002Fgithub.com\u002Fumautobots\u002Fbidireaction-trajectory-prediction)]\n* A Kinematic Model for Trajectory Prediction in General Highway Scenarios, RAL. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9472993)] [[code](https:\u002F\u002Fgithub.com\u002Fumautobots\u002Fkinematic_highway)]\n* Trajectory Prediction in Autonomous Driving With a Lane Heading Auxiliary Loss, RAL. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9387075)]\n* Vehicle Trajectory Prediction Using Generative Adversarial Network With Temporal Logic Syntax Tree Features, RAL. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9366373)]\n* Tra2Tra: Trajectory-to-Trajectory Prediction With a Global Social Spatial-Temporal Attentive Neural Network, RAL. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9347678)]\n* Social graph convolutional LSTM for pedestrian trajectory prediction, IET Intelligent Transport Systems. 
[[paper](https:\u002F\u002Fietresearch.onlinelibrary.wiley.com\u002Fdoi\u002Fepdf\u002F10.1049\u002Fitr2.12033)]\n* HSTA: A Hierarchical Spatio-Temporal Attention Model for Trajectory Prediction, IEEE Transactions on Vehicular Technology (TVT). [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9548801)]\n* Environment-Attention Network for Vehicle Trajectory Prediction, TVT. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9534487)]\n* Where Are They Going? Predicting Human Behaviors in Crowded Scenes, ACM Transactions on Multimedia Computing, Communications, and Applications (TOMM). [[paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3449359)]\n* Multi-Agent Trajectory Prediction with Spatio-Temporal Sequence Fusion, IEEE Transactions on Multimedia (TMM). [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9580659)]\n\n## Others 2021\n* Trajectory Prediction using Generative Adversarial Network in Multi-Class Scenarios, arXiv preprint arXiv:2110.11401, 2021. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2110.11401.pdf)]\n* Spatial-Channel Transformer Network for Trajectory Prediction on the Traffic Scenes, arXiv preprint arXiv:2101.11472, 2021. [[paper](https:\u002F\u002Farxiv.org\u002Fftp\u002Farxiv\u002Fpapers\u002F2101\u002F2101.11472.pdf)]\n* Physically Feasible Vehicle Trajectory Prediction, arXiv preprint arXiv:2104.14679, 2021. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2104.14679.pdf)]\n* MSN: Multi-Style Network for Trajectory Prediction, arXiv preprint arXiv:2107.00932, 2021. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2107.00932.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FNorthOcean\u002FMSN)]\n* Rethinking Trajectory Forecasting Evaluation, arXiv preprint arXiv:2107.10297, 2021. 
[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2107.10297)]\n* Pedestrian Trajectory Prediction via Spatial Interaction Transformer Network, IEEE Intelligent Vehicles Symposium Workshops (IV Workshops 2021). [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2112.06624)]\n* Deep Social Force, arXiv preprint arXiv:2109.12081, 2021. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2109.12081)] [[code](https:\u002F\u002Fgithub.com\u002Fsvenkreiss\u002Fsocialforce)]\n\n# 📚 2022 Conference and Journal Papers\n## Conference Papers 2022\n* Social Interpretable Tree for Pedestrian Trajectory Prediction, AAAI 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2205.13296.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Flssiair\u002FSIT)]\n* Complementary Attention Gated Network for Pedestrian Trajectory Prediction, AAAI 2022. [[paper](https:\u002F\u002Fwww.aaai.org\u002FAAAI22Papers\u002FAAAI-1963.DuanJ.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FjinghaiD\u002FCAGN)]\n* Scene Transformer: A unified architecture for predicting future trajectories of multiple agents, ICLR 2022. [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=Wm3EA5OlHsG)]\n* You Mostly Walk Alone: Analyzing Feature Attribution in Trajectory Prediction, ICLR 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2110.05304.pdf)]\n* Latent Variable Sequential Set Transformers For Joint Multi-Agent Motion Prediction, ICLR 2022. [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=Dup_dDqkZC5)] [[code](https:\u002F\u002Ffgolemo.github.io\u002Fautobots\u002F)]\n* THOMAS: Trajectory Heatmap Output with learned Multi-Agent Sampling, ICLR 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2110.06607)]\n* Remember Intentions: Retrospective-Memory-based Trajectory Prediction, CVPR 2022. 
[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2203.11474.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FMediaBrain-SJTU\u002FMemoNet)]\n* STCrowd: A Multimodal Dataset for Pedestrian Perception in Crowded Scenes, CVPR 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2204.01026.pdf)] [[code](https:\u002F\u002Fgithub.com\u002F4DVLab\u002FSTCrowd.git)]\n* Vehicle trajectory prediction works, but not everywhere, CVPR 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2112.03909.pdf)] [[code](https:\u002F\u002Fs-attack.github.io\u002F)]\n* Stochastic Trajectory Prediction via Motion Indeterminacy Diffusion, CVPR 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2203.13777.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fgutianpei\u002FMID)]\n* Non-Probability Sampling Network for Stochastic Human Trajectory Prediction, CVPR 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2203.13471.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Finhwanbae\u002FNPSN)]\n* On Adversarial Robustness of Trajectory Prediction for Autonomous Vehicles, CVPR 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2201.05057.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fzqzqz\u002FAdvTrajectoryPrediction)]\n* Adaptive Trajectory Prediction via Transferable GNN, CVPR 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2203.05046.pdf)]\n* Towards Robust and Adaptive Motion Forecasting: A Causal Representation Perspective, CVPR 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2111.14820.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fvita-epfl\u002Fcausalmotion), [code](https:\u002F\u002Fgithub.com\u002Fsherwinbahmani\u002Fynet_adaptive)]\n* How many Observations are Enough? Knowledge Distillation for Trajectory Forecasting, CVPR 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2203.04781.pdf)]\n* Learning from All Vehicles, CVPR 2022. 
[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2203.11934.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fdotchen\u002FLAV)]\n* Forecasting from LiDAR via Future Object Detection, CVPR 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2203.16297.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fneeharperi\u002FFutureDet)]\n* End-to-End Trajectory Distribution Prediction Based on Occupancy Grid Maps, CVPR 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2203.16910.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FKguo-cs\u002FTDOR)]\n* M2I: From Factored Marginal Trajectory Prediction to Interactive Prediction, CVPR 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2202.11884.pdf)] [[code](https:\u002F\u002Ftsinghua-mars-lab.github.io\u002FM2I\u002F)]\n* GroupNet: Multiscale Hypergraph Neural Networks for Trajectory Prediction with Relational Reasoning, CVPR 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2204.08770.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FMediaBrain-SJTU\u002FGroupNet)]\n* Whose Track Is It Anyway? Improving Robustness to Tracking Errors with Affinity-Based Prediction, CVPR 2022. [[paper](https:\u002F\u002Fxinshuoweng.com\u002Fpapers\u002FAffinipred\u002Fcamera_ready.pdf)]\n* ScePT: Scene-consistent, Policy-based Trajectory Predictions for Planning, CVPR 2022. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FChen_ScePT_Scene-Consistent_Policy-Based_Trajectory_Predictions_for_Planning_CVPR_2022_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FNVlabs\u002FScePT)]\n* Graph-based Spatial Transformer with Memory Replay for Multi-future Pedestrian Trajectory Prediction, CVPR 2022. 
[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FLi_Graph-Based_Spatial_Transformer_With_Memory_Replay_for_Multi-Future_Pedestrian_Trajectory_CVPR_2022_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FJacobieee\u002FST-MR)]\n* MUSE-VAE: Multi-Scale VAE for Environment-Aware Long Term Trajectory Prediction, CVPR 2022. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FLee_MUSE-VAE_Multi-Scale_VAE_for_Environment-Aware_Long_Term_Trajectory_Prediction_CVPR_2022_paper.pdf)]\n* LTP: Lane-based Trajectory Prediction for Autonomous Driving, CVPR 2022. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FWang_LTP_Lane-Based_Trajectory_Prediction_for_Autonomous_Driving_CVPR_2022_paper.pdf)]\n* ATPFL: Automatic Trajectory Prediction Model Design under Federated Learning Framework, CVPR 2022. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FWang_ATPFL_Automatic_Trajectory_Prediction_Model_Design_Under_Federated_Learning_Framework_CVPR_2022_paper.pdf)]\n* Human Trajectory Prediction with Momentary Observation, CVPR 2022. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FSun_Human_Trajectory_Prediction_With_Momentary_Observation_CVPR_2022_paper.pdf)]\n* HiVT: Hierarchical Vector Transformer for Multi-Agent Motion Prediction, CVPR 2022. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FZhou_HiVT_Hierarchical_Vector_Transformer_for_Multi-Agent_Motion_Prediction_CVPR_2022_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FZikangZhou\u002FHiVT)]\n* Path-Aware Graph Attention for HD Maps in Motion Prediction, ICRA 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2202.13772.pdf)]\n* Trajectory Prediction with Linguistic Representations, ICRA 2022. 
[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9811928)]\n* Leveraging Smooth Attention Prior for Multi-Agent Trajectory Prediction, ICRA 2022. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9811718)] [[website](https:\u002F\u002Fsites.google.com\u002Fview\u002Fsmoothness-attention)]\n* KEMP: Keyframe-Based Hierarchical End-to-End Deep Model for Long-Term Trajectory Prediction, ICRA 2022. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9812337)]\n* Domain Generalization for Vision-based Driving Trajectory Generation, ICRA 2022. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9812070)] [[website](https:\u002F\u002Fsites.google.com\u002Fview\u002Fdg-traj-gen)]\n* A Deep Concept Graph Network for Interaction-Aware Trajectory Prediction, ICRA 2022. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9811567)]\n* Conditioned Human Trajectory Prediction using Iterative Attention Blocks, ICRA 2022. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9812404)]\n* StopNet: Scalable Trajectory and Occupancy Prediction for Urban Autonomous Driving, ICRA 2022. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9811830)]\n* Meta-path Analysis on Spatio-Temporal Graphs for Pedestrian Trajectory Prediction, ICRA 2022. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9811632)] [[website](https:\u002F\u002Fsites.google.com\u002Fillinois.edu\u002Fmesrnn\u002Fhome)]\n* Propagating State Uncertainty Through Trajectory Forecasting, ICRA 2022. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9811776)] [[code](https:\u002F\u002Fgithub.com\u002FStanfordASL\u002FPSU-TF)]\n* HYPER: Learned Hybrid Trajectory Prediction via Factored Inference and Adaptive Sampling, ICRA 2022. 
[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9812254)]\n* Grouptron: Dynamic Multi-Scale Graph Convolutional Networks for Group-Aware Dense Crowd Trajectory Forecasting, ICRA 2022. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9811585)]\n* Crossmodal Transformer Based Generative Framework for Pedestrian Trajectory Prediction, ICRA 2022. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9812226)]\n* Trajectory Prediction for Autonomous Driving with Topometric Map, ICRA 2022. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9811712)] [[code](https:\u002F\u002Fgithub.com\u002FJiaolong\u002Ftrajectory-prediction)]\n* CRAT-Pred: Vehicle Trajectory Prediction with Crystal Graph Convolutional Neural Networks and Multi-Head Self-Attention, ICRA 2022. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9811637)] [[code](https:\u002F\u002Fgithub.com\u002Fschmidt-ju\u002Fcrat-pred)]\n* MultiPath++: Efficient Information Fusion and Trajectory Aggregation for Behavior Prediction, ICRA 2022. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9812107)]\n* Multi-modal Motion Prediction with Transformer-based Neural Network for Autonomous Driving, ICRA 2022. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9812060\u002F)]\n* GOHOME: Graph-Oriented Heatmap Output for future Motion Estimation, ICRA 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2109.01827.pdf)]\n* TridentNetV2: Lightweight Graphical Global Plan Representations for Dynamic Trajectory Generation, ICRA 2022. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?arnumber=9811591)]\n* Heterogeneous-Agent Trajectory Forecasting Incorporating Class Uncertainty, IROS 2022. 
[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2104.12446.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FTRI-ML\u002FHAICU)] [[trajdata](https:\u002F\u002Fgithub.com\u002Fnvr-avg\u002Ftrajdata)]\n* Trajectory Prediction with Graph-based Dual-scale Context Fusion, IROS 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2111.01592.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FHKUST-Aerial-Robotics\u002FDSP)]\n* Learning Pedestrian Group Representations for Multi-modal Trajectory Prediction, ECCV 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2207.09953.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FInhwanBae\u002FGPGraph)]\n* Social-Implicit: Rethinking Trajectory Prediction Evaluation and The Effectiveness of Implicit Maximum Likelihood Estimation, ECCV 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2203.03057.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fabduallahmohamed\u002FSocial-Implicit)] [[website](https:\u002F\u002Fwww.abduallahmohamed.com\u002Fsocial-implicit-amdamv-adefde-demo)] \n* Hierarchical Latent Structure for Multi-Modal Vehicle Trajectory Forecasting, ECCV 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2207.04624.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fd1024choi\u002FHLSTrajForecast)]\n* SocialVAE: Human Trajectory Prediction using Timewise Latents, ECCV 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2203.08207.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fxupei0610\u002FSocialVAE)]\n* View Vertically: A Hierarchical Network for Trajectory Prediction via Fourier Spectrums, ECCV 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2110.07288.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fcocoon2wong\u002FVertical)]\n* Entry-Flipped Transformer for Inference and Prediction of Participant Behavior, ECCV 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2207.06235.pdf)]\n* D2-TPred: Discontinuous Dependency for Trajectory Prediction under Traffic Lights, ECCV 2022. 
[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2207.10398.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FVTP-TL\u002FD2-TPred)]\n* Human Trajectory Prediction via Neural Social Physics, ECCV 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2207.10435.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Frealcrane\u002FHuman-Trajectory-Prediction-via-Neural-Social-Physics)]\n* Social-SSL: Self-Supervised Cross-Sequence Representation Learning Based on Transformers for Multi-Agent Trajectory Prediction, ECCV 2022. [[paper](https:\u002F\u002Fbasiclab.lab.nycu.edu.tw\u002Fassets\u002FSocial-SSL.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FSigta678\u002FSocial-SSL)]\n* Aware of the History: Trajectory Forecasting with the Local Behavior Data, ECCV 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2207.09646.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FKay1794\u002FAware-of-the-history)]\n* Action-based Contrastive Learning for Trajectory Prediction, ECCV 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2207.08664.pdf)]\n* AdvDO: Realistic Adversarial Attacks for Trajectory Prediction, ECCV 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2209.08744.pdf)]\n* ST-P3: End-to-end Vision-based Autonomous Driving via Spatial-Temporal Feature Learning, ECCV 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2207.07601.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FOpenPerceptionX\u002FST-P3)]\n* Social ODE: Multi-Agent Trajectory Forecasting with Neural Ordinary Differential Equations, ECCV 2022. [[paper](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2022\u002Fpapers_ECCV\u002Fpapers\u002F136820211.pdf)]\n* Forecasting Human Trajectory from Scene History, NIPS 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2210.08732.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FMaKaRuiNah\u002FSHENet)]\n* Trajectory-guided Control Prediction for End-to-end Autonomous Driving: A Simple yet Strong Baseline, NIPS 2022. 
[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2206.08129)] [[code](https:\u002F\u002Fgithub.com\u002FOpenPerceptionX\u002FTCP)]\n* Motion Transformer with Global Intention Localization and Local Movement Refinement, NIPS 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2209.13508.pdf)] [[website](https:\u002F\u002Fvas.mpi-inf.mpg.de\u002Fmotion-transformer-with-global-intention-localization-and-local-movement-refinement\u002F)]\n* Interaction Modeling with Multiplex Attention, NIPS 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2208.10660.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Ffanyun-sun\u002FIMMA)]\n* Deep Interactive Motion Prediction and Planning: Playing Games with Motion Prediction Models, Conference on Learning for Dynamics and Control (L4DC). [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2204.02392.pdf)] [[website](https:\u002F\u002Fsites.google.com\u002Fview\u002Fdeep-interactive-predict-plan)]\n* Robust Trajectory Prediction against Adversarial Attacks, CoRL 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2208.00094.pdf)] [[code](https:\u002F\u002Frobustav.github.io\u002FRobustTraj\u002F)]\n* Planning with Diffusion for Flexible Behavior Synthesis, ICML 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.09991)] [[website](https:\u002F\u002Fdiffusion-planning.github.io\u002F)]\n* Synchronous Bi-Directional Pedestrian Trajectory Prediction with Error Compensation, ACCV 2022. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FACCV2022\u002Fpapers\u002FXie_Synchronous_Bi-Directional_Pedestrian_Trajectory_Prediction_with_Error_Compensation_ACCV_2022_paper.pdf)]\n* Model-Based Imitation Learning for Urban Driving, NIPS 2022. 
[[paper](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper_files\u002Fpaper\u002F2022\u002Ffile\u002F827cb489449ea216e4a257c47e407d18-Paper-Conference.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fwayveai\u002Fmile)]\n\n## Journal Papers 2022\n* AI-TP: Attention-based Interaction-aware Trajectory Prediction for Autonomous Driving, IEEE Transactions on Intelligent Vehicles. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9723649)] [[code](https:\u002F\u002Fgithub.com\u002FKP-Zhang\u002FAI-TP)]\n* MDST-DGCN: A Multilevel Dynamic Spatiotemporal Directed Graph Convolutional Network for Pedestrian Trajectory Prediction, Computational Intelligence and Neuroscience. [[paper](https:\u002F\u002Fdownloads.hindawi.com\u002Fjournals\u002Fcin\u002F2022\u002F4192367.pdf)]\n* Graph-Based Spatial-Temporal Convolutional Network for Vehicle Trajectory Prediction in Autonomous Driving, TITS. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9737058)]\n* Multi-Agent Trajectory Prediction with Heterogeneous Edge-Enhanced Graph Attention Network, TITS. [[paper](https:\u002F\u002Fdspace.lib.cranfield.ac.uk\u002Fbitstream\u002Fhandle\u002F1826\u002F17541\u002FMulti-agent_trajectory_prediction-2022.pdf?sequence=1&isAllowed=y)]\n* Fully Convolutional Encoder-Decoder With an Attention Mechanism for Practical Pedestrian Trajectory Prediction, TITS. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9768201)]\n* STGM: Vehicle Trajectory Prediction Based on Generative Model for Spatial-Temporal Features, TITS. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9743363)]\n* Trajectory Prediction for Autonomous Driving Using Spatial-Temporal Graph Attention Transformer, TITS. 
[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9768029)]\n* Intention-Aware Vehicle Trajectory Prediction Based on Spatial-Temporal Dynamic Attention Network for Internet of Vehicles, TITS. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9767719)] [[code](https:\u002F\u002Fxbchen82.github.io\u002Fresource\u002F)]\n* Trajectory Forecasting Based on Prior-Aware Directed Graph Convolutional Neural Network, TITS. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9686621&tag=1)]\n* DeepTrack: Lightweight Deep Learning for Vehicle Trajectory Prediction in Highways, TITS. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9770480)]\n* Interactive Trajectory Prediction Using a Driving Risk Map-Integrated Deep Learning Method for Surrounding Vehicles on Highways, TITS. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9745461&tag=1)]\n* Vehicle Trajectory Prediction in Connected Environments via Heterogeneous Context-Aware Graph Convolutional Networks, TITS. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9781338)]\n* Trajectory Prediction Neural Network and Model Interpretation Based on Temporal Pattern Attention, TITS. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9945660)]\n* Learning Sparse Interaction Graphs of Partially Detected Pedestrians for Trajectory Prediction, RAL. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9664278)] [[code](https:\u002F\u002Fgithub.com\u002Ftedhuang96\u002Fgst)]\n* GAMMA: A General Agent Motion Prediction Model for Autonomous Driving, RAL. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1906.01566.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FAdaCompNUS\u002Fgamma)]\n* Stepwise Goal-Driven Networks for Trajectory Prediction, RAL. 
[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2103.14107v3.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FChuhuaW\u002FSGNet.pytorch)]\n* GA-STT: Human Trajectory Prediction with Group Aware Spatial-Temporal Transformer, RAL. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9779572)]\n* Long-term 4D trajectory prediction using generative adversarial networks, Transportation Research Part C: Emerging Technologies. [[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0968090X22000031)]\n* A context-aware pedestrian trajectory prediction framework for automated vehicles, Transportation Research Part C: Emerging Technologies. [[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0968090X21004423)]\n* Explainable multimodal trajectory prediction using attention models, Transportation Research Part C: Emerging Technologies. [[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0968090X22002509)]\n* CSCNet: Contextual semantic consistency network for trajectory prediction in crowded spaces, PR. [[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0031320322000334)]\n* CSR: Cascade Conditional Variational AutoEncoder with Social-aware Regression for Pedestrian Trajectory Prediction, PR. [[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0031320322005106)]\n* Step Attention: Sequential Pedestrian Trajectory Prediction, IEEE Sensors Journal. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9732437)]\n* Vehicle Trajectory Prediction Method Coupled With Ego Vehicle Motion Trend Under Dual Attention Mechanism, IEEE Transactions on Instrumentation and Measurement. 
[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9749176)]\n* Spatio-temporal Interaction Aware and Trajectory Distribution Aware Graph Convolution Network for Pedestrian Multimodal Trajectory Prediction, IEEE Transactions on Instrumentation and Measurement. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9997233)]\n* Deep encoder–decoder-NN: A deep learning-based autonomous vehicle trajectory prediction and correction model, Physica A: Statistical Mechanics and its Applications. [[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0378437122000139)]\n* PTPGC: Pedestrian trajectory prediction by graph attention network with ConvLSTM, Robotics and Autonomous Systems. [[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0921889021002165)]\n* GCHGAT: pedestrian trajectory prediction using group constrained hierarchical graph attention networks, Applied Intelligence. [[paper](https:\u002F\u002Flink.springer.com\u002Farticle\u002F10.1007\u002Fs10489-021-02997-w)]\n* Vehicles Trajectory Prediction Using Recurrent VAE Network, IEEE Access. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9740177)] [[code](https:\u002F\u002Fgithub.com\u002Fmidemig\u002Ftraj_pred_vae)]\n* SEEM: A Sequence Entropy Energy-Based Model for Pedestrian Trajectory All-Then-One Prediction, TPAMI. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9699076)]\n* PTP-STGCN: Pedestrian Trajectory Prediction Based on a Spatio-temporal Graph Convolutional Neural Network, Applied Intelligence. [[paper](https:\u002F\u002Flink.springer.com\u002Farticle\u002F10.1007\u002Fs10489-022-03524-1)]\n* Trajectory distributions: A new description of movement for trajectory prediction, Computational Visual Media. 
[[paper](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002Fs41095-021-0236-6.pdf)]\n* Trajectory prediction for autonomous driving based on multiscale spatial-temporal graph, IET Intelligent Transport Systems. [[paper](https:\u002F\u002Fietresearch.onlinelibrary.wiley.com\u002Fdoi\u002Fpdfdirect\u002F10.1049\u002Fitr2.12265)]\n* Continual learning-based trajectory prediction with memory augmented networks, Knowledge-Based Systems. [[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0950705122011157)]\n* Atten-GAN: Pedestrian Trajectory Prediction with GAN Based on Attention Mechanism, Cognitive Computation. [[paper](https:\u002F\u002Flink.springer.com\u002Farticle\u002F10.1007\u002Fs12559-022-10029-z#Abs1)]\n* EvoSTGAT: Evolving spatiotemporal graph attention networks for pedestrian trajectory prediction, Neurocomputing. [[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0925231222003460?ref=pdf_download&fr=RR-2&rr=7da0ead45e800fcc)]\n\n## Others 2022\n* Raising context awareness in motion forecasting, CVPR Workshops 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2109.08048.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fvaleoai\u002FCAB)]\n* Goal-driven Self-Attentive Recurrent Networks for Trajectory Prediction, CVPR Workshops 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2204.11561.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fluigifilippochiara\u002FGoal-SAR)]\n* Importance Is in Your Attention: Agent Importance Prediction for Autonomous Driving, CVPR Workshops 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2204.09121.pdf)]\n* MPA: MultiPath++ Based Architecture for Motion Prediction, CVPR Workshops 2022. 
[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2206.10041.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fstepankonev\u002Fwaymo-motion-prediction-challenge-2022-multipath-plus-plus)]\n* TPAD: Identifying Effective Trajectory Predictions Under the Guidance of Trajectory Anomaly Detection Model, arXiv:2201.02941, 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2201.02941v1.pdf)]\n* Wayformer: Motion Forecasting via Simple & Efficient Attention Networks, arXiv preprint arXiv:2207.05844, 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2207.05844.pdf)]\n* PreTR: Spatio-Temporal Non-Autoregressive Trajectory Prediction Transformer, arXiv preprint arXiv:2203.09293, 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2203.09293.pdf)]\n* LatentFormer: Multi-Agent Transformer-Based Interaction Modeling and Trajectory Prediction, arXiv preprint arXiv:2203.01880, 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2203.01880.pdf)]\n* Diverse Multiple Trajectory Prediction Using a Two-stage Prediction Network Trained with Lane Loss, arXiv preprint arXiv:2206.08641, 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2206.08641.pdf)]\n* Semi-supervised Semantics-guided Adversarial Training for Trajectory Prediction, arXiv preprint arXiv:2205.14230, 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2205.14230.pdf)]\n* Heterogeneous Trajectory Forecasting via Risk and Scene Graph Learning, arXiv preprint arXiv:2211.00848, 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2211.00848.pdf)]\n* GATraj: A Graph- and Attention-based Multi-Agent Trajectory Prediction Model, arXiv preprint arXiv:2209.07857, 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2209.07857.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fmengmengliu1998\u002FGATraj)]\n* Dynamic-Group-Aware Networks for Multi-Agent Trajectory Prediction with Relational Reasoning, arXiv preprint arXiv:2206.13114, 2022. 
[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2206.13114.pdf)]\n* Collaborative Uncertainty Benefits Multi-Agent Multi-Modal Trajectory Forecasting, arXiv preprint arXiv:2207.05195, 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.05195)] [[code](https:\u002F\u002Fgithub.com\u002FMediaBrain-SJTU\u002FCollaborative-Uncertainty)]\n* Guided Conditional Diffusion for Controllable Traffic Simulation, arXiv preprint arXiv:2210.17366, 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2210.17366.pdf)] [[website](https:\u002F\u002Faiasd.github.io\u002Fctg.github.io\u002F)]\n* PhysDiff: Physics-Guided Human Motion Diffusion Model, arXiv preprint arXiv:2212.02500, 2022. [[paper](http:\u002F\u002Fxxx.itp.ac.cn\u002Fpdf\u002F2212.02500.pdf)]\n* Trajectory Forecasting on Temporal Graphs, arXiv preprint arXiv:2207.00255, 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2207.00255.pdf)] [[website](https:\u002F\u002Fkuis-ai.github.io\u002Fftgn\u002F)]\n\n# 📚 2023 Conference and Journal Papers\n## Conference Papers 2023\n* Human Joint Kinematics Diffusion-Refinement for Stochastic Motion Prediction, AAAI 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2210.05976.pdf)]\n* Multi-stream Representation Learning for Pedestrian Trajectory Prediction, AAAI 2023. [[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F25389)] [[code](https:\u002F\u002Fgithub.com\u002FYuxuanIAIR\u002FMSRL-master)]\n* Continuous Trajectory Generation Based on Two-Stage GAN, AAAI 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2301.07103.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FWenMellors\u002FTS-TrajGen)]\n* A Set of Control Points Conditioned Pedestrian Trajectory Prediction, AAAI 2023. 
[[paper](https:\u002F\u002Fassets.underline.io\u002Flecture\u002F67747\u002Fpaper\u002F82988b653861eb7a0d5cdc91c4b26f8c.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FInhwanBae\u002FGraphTERN)]\n* WSiP: Wave Superposition Inspired Pooling for Dynamic Interactions-Aware Trajectory Prediction, AAAI 2023. [[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F25592)] [[code](https:\u002F\u002Fgithub.com\u002FChopin0123\u002FWSiP)]\n* Leveraging Future Relationship Reasoning for Vehicle Trajectory Prediction, ICLR 2023. [[paper](https:\u002F\u002Fopenreview.net\u002Fforum?id=CGBCTp2M6lA)]\n* IPCC-TP: Utilizing Incremental Pearson Correlation Coefficient for Joint Multi-Agent Trajectory Prediction, CVPR 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2303.00575.pdf)]\n* FEND: A Future Enhanced Distribution-Aware Contrastive Learning Framework for Long-tail Trajectory Prediction, CVPR 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2303.16574.pdf)]\n* Trace and Pace: Controllable Pedestrian Animation via Guided Trajectory Diffusion, CVPR 2023. [[paper](https:\u002F\u002Fnv-tlabs.github.io\u002Ftrace-pace\u002Fdocs\u002Ftrace_and_pace.pdf)] [[website](https:\u002F\u002Fnv-tlabs.github.io\u002Ftrace-pace\u002F)]\n* FJMP: Factorized Joint Multi-Agent Motion Prediction over Learned Directed Acyclic Interaction Graphs, CVPR 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2211.16197.pdf)] [[website](https:\u002F\u002Frluke22.github.io\u002FFJMP\u002F)]\n* Leapfrog Diffusion Model for Stochastic Trajectory Prediction, CVPR 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2303.10895.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FMediaBrain-SJTU\u002FLED)]\n* ViP3D: End-to-end Visual Trajectory Prediction via 3D Agent Queries, CVPR 2023. 
[[paper](http:\u002F\u002Fxxx.itp.ac.cn\u002Fpdf\u002F2208.01582.pdf)] [[website](https:\u002F\u002Ftsinghua-mars-lab.github.io\u002FViP3D\u002F)]\n* EqMotion: Equivariant Multi-Agent Motion Prediction with Invariant Interaction Reasoning, CVPR 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2303.10876.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FMediaBrain-SJTU\u002FEqMotion)]\n* Uncovering the Missing Pattern: Unified Framework Towards Trajectory Imputation and Prediction, CVPR 2023. [[paper](http:\u002F\u002Fxxx.itp.ac.cn\u002Fpdf\u002F2303.16005.pdf)]\n* Unsupervised Sampling Promoting for Stochastic Human Trajectory Prediction, CVPR 2023. [[paper](https:\u002F\u002Fchengy12.github.io\u002Ffiles\u002FBosampler.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fviewsetting\u002FUnsupervised_sampling_promoting)]\n* Stimulus Verification is a Universal and Effective Sampler in Multi-modal Human Trajectory Prediction, CVPR 2023. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fpapers\u002FSun_Stimulus_Verification_Is_a_Universal_and_Effective_Sampler_in_Multi-Modal_CVPR_2023_paper.pdf)]\n* Query-Centric Trajectory Prediction, CVPR 2023. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fpapers\u002FZhou_Query-Centric_Trajectory_Prediction_CVPR_2023_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FZikangZhou\u002FQCNet)] [[QCNeXt](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.10508.pdf)]\n* Weakly Supervised Class-agnostic Motion Prediction for Autonomous Driving, CVPR 2023. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fpapers\u002FLi_Weakly_Supervised_Class-Agnostic_Motion_Prediction_for_Autonomous_Driving_CVPR_2023_paper.pdf)]\n* Decompose More and Aggregate Better: Two Closer Looks at Frequency Representation Learning for Human Motion Prediction, CVPR 2023. 
[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fpapers\u002FGao_Decompose_More_and_Aggregate_Better_Two_Closer_Looks_at_Frequency_CVPR_2023_paper.pdf)]\n* MotionDiffuser: Controllable Multi-Agent Motion Prediction using Diffusion, CVPR 2023. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fpapers\u002FJiang_MotionDiffuser_Controllable_Multi-Agent_Motion_Prediction_Using_Diffusion_CVPR_2023_paper.pdf)]\n* Planning-oriented Autonomous Driving, CVPR 2023. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fpapers\u002FHu_Planning-Oriented_Autonomous_Driving_CVPR_2023_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FOpenDriveLab\u002FUniAD)]\n* TrafficGen: Learning to Generate Diverse and Realistic Traffic Scenarios, ICRA 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2210.06609.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fmetadriverse\u002Ftrafficgen)]\n* GANet: Goal Area Network for Motion Forecasting, ICRA 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2209.09723.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fkingwmk\u002FGANet)]\n* TOFG: A Unified and Fine-Grained Environment Representation in Autonomous Driving, ICRA 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2305.20068.pdf)]\n* SSL-Lanes: Self-Supervised Learning for Motion Forecasting in Autonomous Driving, CoRL 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2206.14116.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FAutoVision-cloud\u002FSSL-Lanes)]\n* PowerBEV: A Powerful Yet Lightweight Framework for Instance Prediction in Bird’s-Eye View, IJCAI 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.10761.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FEdwardLeeLPZ\u002FPowerBEV)]\n* HumanMAC: Masked Motion Completion for Human Motion Prediction, ICCV 2023. 
[[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.03665)] [[code](https:\u002F\u002Fgithub.com\u002FLinghaoChan\u002FHumanMAC)]\n* BeLFusion: Latent Diffusion for Behavior-Driven Human Motion Prediction, ICCV 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.14304)] [[code](https:\u002F\u002Fgithub.com\u002FBarqueroGerman\u002FBeLFusion)]\n* EigenTrajectory: Low-Rank Descriptors for Multi-Modal Trajectory Forecasting, ICCV 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.09306)] [[code](https:\u002F\u002Fgithub.com\u002FInhwanBae\u002FEigenTrajectory)]\n* ADAPT: Efficient Multi-Agent Trajectory Prediction with Adaptation, ICCV 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2307.14187.pdf)] [[code](https:\u002F\u002Fkuis-ai.github.io\u002Fadapt\u002F)]\n* Evaluation of Differentially Constrained Motion Models for Graph-Based Trajectory Prediction, IV 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.05116)] [[code](https:\u002F\u002Fgithub.com\u002Fwestny\u002Fmtp-go)]\n* LimSim: A Long-term Interactive Multi-scenario Traffic Simulator, ITSC 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2307.06648.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FPJLab-ADG\u002FLimSim)]\n* V2X-Seq: A Large-Scale Sequential Dataset for Vehicle-Infrastructure Cooperative Perception and Forecasting, CVPR 2023. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fpapers\u002FYu_V2X-Seq_A_Large-Scale_Sequential_Dataset_for_Vehicle-Infrastructure_Cooperative_Perception_and_CVPR_2023_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FAIR-THU\u002FDAIR-V2X-Seq)]\n* INT2: Interactive Trajectory Prediction at Intersections, ICCV 2023. 
[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FYan_INT2_Interactive_Trajectory_Prediction_at_Intersections_ICCV_2023_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FAIR-DISCOVER\u002FINT2)]\n* Trajectory Unified Transformer for Pedestrian Trajectory Prediction, ICCV 2023. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FShi_Trajectory_Unified_Transformer_for_Pedestrian_Trajectory_Prediction_ICCV_2023_paper.pdf)]\n* Sparse Instance Conditioned Multimodal Trajectory Prediction, ICCV 2023. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FDong_Sparse_Instance_Conditioned_Multimodal_Trajectory_Prediction_ICCV_2023_paper.pdf)]\n* MotionLM: Multi-Agent Motion Forecasting as Language Modeling, ICCV 2023. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FSeff_MotionLM_Multi-Agent_Motion_Forecasting_as_Language_Modeling_ICCV_2023_paper.pdf)]\n* Fast Inference and Update of Probabilistic Density Estimation on Trajectory Prediction, ICCV 2023. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FMaeda_Fast_Inference_and_Update_of_Probabilistic_Density_Estimation_on_Trajectory_ICCV_2023_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fmeaten\u002FFlowChain-ICCV2023)]\n* ADAPT: Action-aware Driving Caption Transformer, ICRA 2023. [[paper](https:\u002F\u002Fbrowse.arxiv.org\u002Fpdf\u002F2302.00673.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fjxbbb\u002FADAPT)]\n* Scenario Diffusion: Controllable Driving Scenario Generation With Diffusion, NIPS 2023. [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=99MHSB98yZ)]\n* BCDiff: Bidirectional Consistent Diffusion for Instantaneous Trajectory Prediction, NIPS 2023. 
[[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=FOFJmR1oxt)]\n* Conditional Variational Inference for Multi-modal Trajectory Prediction with Latent Diffusion Prior, Pacific Rim International Conference on Artificial Intelligence (PRICAI 2023). [[paper](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-981-99-7019-3_2)]\n* Language-Guided Traffic Simulation via Scene-Level Diffusion, CoRL 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.06344.pdf)]\n* Language Conditioned Traffic Generation, CoRL 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2307.07947)] [[code](https:\u002F\u002Fariostgx.github.io\u002Flctgen\u002F)]\n* LightSim: Neural Lighting Simulation for Urban Scenes, NIPS 2023. [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=mcx8IGneYw)] [[website](https:\u002F\u002Fwaabi.ai\u002Flightsim\u002F)]\n* What Truly Matters in Trajectory Prediction for Autonomous Driving? NIPS 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.15136.pdf)] [[code](https:\u002F\u002Fwhatmatters23.github.io\u002F)]\n\n## Journal Papers 2023\n* MVHGN: Multi-View Adaptive Hierarchical Spatial Graph Convolution Network Based Trajectory Prediction for Heterogeneous Traffic-Agents, TITS. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10056303)]\n* Adaptive and Simultaneous Trajectory Prediction for Heterogeneous Agents via Transferable Hierarchical Transformer Network, TITS. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10149109)]\n* SSAGCN: Social Soft Attention Graph Convolution Network for Pedestrian Trajectory Prediction, TNNLS. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10063206)] [[code](https:\u002F\u002Fgithub.com\u002FWW-Tong\u002Fssagcn_for_path_prediction)]\n* Disentangling Crowd Interactions for Pedestrians Trajectory Prediction, RAL. 
[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10083225)]\n* VNAGT: Variational Non-Autoregressive Graph Transformer Network for Multi-Agent Trajectory Prediction, IEEE Transactions on Vehicular Technology. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10121688)]\n* Spatial-Temporal-Spectral LSTM: A Transferable Model for Pedestrian Trajectory Prediction, TIV. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10149368)]\n* Holistic Transformer: A Joint Neural Network for Trajectory Prediction and Decision-Making of Autonomous Vehicles, PR. [[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0031320323002935)]\n* Tri-HGNN: Learning triple policies fused hierarchical graph neural networks for pedestrian trajectory prediction, PR. [[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0031320323004703)]\n* Multimodal Vehicular Trajectory Prediction With Inverse Reinforcement Learning and Risk Aversion at Urban Unsignalized Intersections, TITS. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10164651)]\n* Trajectory prediction for autonomous driving based on multiscale spatial‐temporal graph, IET Intelligent Transport Systems. [[paper](https:\u002F\u002Fietresearch.onlinelibrary.wiley.com\u002Fdoi\u002Fpdfdirect\u002F10.1049\u002Fitr2.12265)]\n* Social Self-Attention Generative Adversarial Networks for Human Trajectory Prediction, IEEE Transactions on Artificial Intelligence. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10197467)]\n* CSIR: Cascaded Sliding CVAEs With Iterative Socially-Aware Rethinking for Trajectory Prediction, TITS. 
[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10215313)]\n* Multimodal Manoeuvre and Trajectory Prediction for Automated Driving on Highways Using Transformer Networks, RAL. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10207845)]\n* A physics-informed Transformer model for vehicle trajectory prediction on highways, Transportation Research Part C: Emerging Technologies. [[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0968090X23002619)] [[code](https:\u002F\u002Fgithub.com\u002FGengmaosi\u002FPIT-IDM)]\n* MacFormer: Map-Agent Coupled Transformer for Real-time and Robust Trajectory Prediction, RAL. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2308.10280.pdf)]\n* MRGTraj: A Novel Non-Autoregressive Approach for Human Trajectory Prediction, TCSVT. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10226250)] [[code](https:\u002F\u002Fgithub.com\u002Fwisionpeng\u002FMRGTraj)]\n* Planning-inspired Hierarchical Trajectory Prediction via Lateral-Longitudinal Decomposition for Autonomous Driving, TIV. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10226224)]\n* A multi-modal vehicle trajectory prediction framework via conditional diffusion model: A coarse-to-fine approach, KBS. [[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0950705123007402)]\n* Modality Exploration, Retrieval and Adaptation for Trajectory Prediction, TPAMI. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10254381)]\n* MFAN: Mixing Feature Attention Network for Trajectory Prediction, PR. [[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0031320323006957#abs0001)]\n* IE-GAN: a data-driven crowd simulation method via generative adversarial networks, Multimedia Tools and Applications. 
[[paper](https:\u002F\u002Flink.springer.com\u002Farticle\u002F10.1007\u002Fs11042-023-17346-x)]\n* Trajectory Distribution Aware Graph Convolutional Network for Trajectory Prediction Considering Spatio-temporal Interactions and Scene Information, TKDE. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10309163)]\n* Map-free Trajectory Prediction in Traffic with Multi-level Spatial-temporal Modeling, TIV. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10356823)]\n* STIGCN: Spatial-Temporal Interaction-aware Graph Convolution Network for Pedestrian Trajectory Prediction, The Journal of Supercomputing. [[paper](https:\u002F\u002Flink.springer.com\u002Farticle\u002F10.1007\u002Fs11227-023-05850-8)] [[code](https:\u002F\u002Fgithub.com\u002FChenwangxing\u002FSTIGCN_master)]\n* Stochastic Non-Autoregressive Transformer-Based Multi-Modal Pedestrian Trajectory Prediction for Intelligent Vehicles, TITS. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10367756)] [[code](https:\u002F\u002Fgithub.com\u002Fxbchen82\u002FSNARTF)]\n* Trajectory Prediction for Autonomous Driving Based on Structural Informer Method, IEEE Transactions on Automation Science and Engineering. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10364872)]\n* MTP-GO: Graph-Based Probabilistic Multi-Agent Trajectory Prediction with Neural ODEs, IEEE Transactions on Intelligent Vehicles. [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.00735)] [[code](https:\u002F\u002Fgithub.com\u002Fwestny\u002Fmtp-go)]\n\n## Others 2023\n* Traj-MAE: Masked Autoencoders for Trajectory Prediction, arXiv preprint arXiv:2303.06697, 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2303.06697.pdf)]\n* Uncertainty-Aware Pedestrian Trajectory Prediction via Distributional Diffusion, arXiv preprint arXiv:2303.08367, 2023. 
[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2303.08367.pdf)]\n* DiffTraj: Generating GPS Trajectory with Diffusion Probabilistic Model, arXiv preprint arXiv:2304.11582, 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2304.11582.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FYasoz\u002FDiffTraj)]\n* Multiverse Transformer: 1st Place Solution for Waymo Open Sim Agents Challenge 2023, CVPR 2023 Workshop on Autonomous Driving. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.11868.pdf)] [[website](https:\u002F\u002Fmultiverse-transformer.github.io\u002Fsim-agents\u002F)]\n* Joint-Multipath++ for Simulation Agents: 2nd Place Solution for Waymo Open Sim Agents Challenge 2023, CVPR 2023 Workshop on Autonomous Driving. [[paper](https:\u002F\u002Fstorage.googleapis.com\u002Fwaymo-uploads\u002Ffiles\u002Fresearch\u002F2023%20Technical%20Reports\u002FSA_hm_jointMP.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fwangwenxi-handsome\u002FJoint-Multipathpp)]\n* MTR++: Multi-Agent Motion Prediction with Symmetric Scene Modeling and Guided Intention Querying, 1st Place Solution for Waymo Open Motion Prediction Challenge 2023, CVPR 2023 Workshop on Autonomous Driving. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.17770.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fsshaoshuai\u002FMTR)]\n* GameFormer: Game-theoretic Modeling and Learning of Transformer-based Interactive Prediction and Planning for Autonomous Driving, arXiv preprint arXiv:2303.05760, 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2303.05760.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FMCZhi\u002FGameFormer)] [[website](https:\u002F\u002Fmczhi.github.io\u002FGameFormer\u002F)]\n* GameFormer Planner: A Learning-enabled Interactive Prediction and Planning Framework for Autonomous Vehicles, the nuPlan Planning Challenge at the CVPR 2023 End-to-End Autonomous Driving Workshop. 
[[paper](https:\u002F\u002Fopendrivelab.com\u002Fe2ead\u002FAD23Challenge\u002FTrack_4_AID.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FMCZhi\u002FGameFormer-Planner\u002F)]\n* trajdata: A Unified Interface to Multiple Human Trajectory Datasets, arXiv preprint arXiv:2307.13924, 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2307.13924.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FNVlabs\u002Ftrajdata)]\n* Graph-Based Interaction-Aware Multimodal 2D Vehicle Trajectory Prediction using Diffusion Graph Convolutional Networks, arXiv preprint arXiv:2309.01981, 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2309.01981.pdf)]\n* EquiDiff: A Conditional Equivariant Diffusion Model For Trajectory Prediction, arXiv preprint arXiv:2308.06564, 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2308.06564.pdf)]\n* DICE: Diverse Diffusion Model with Scoring for Trajectory Prediction, arXiv preprint arXiv:2310.14570, 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.14570.pdf)]\n* Pedestrian Trajectory Prediction Using Dynamics-based Deep Learning, arXiv preprint arXiv:2309.09021, 2023. [[paper](https:\u002F\u002Fbrowse.arxiv.org\u002Fpdf\u002F2309.09021.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fsydney-machine-learning\u002Fpedestrianpathprediction)]\n* VT-Former: A Transformer-based Vehicle Trajectory Prediction Approach For Intelligent Highway Transportation Systems, arXiv preprint arXiv:2311.06623, 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.06623.pdf)]\n* Learning Cooperative Trajectory Representations for Motion Forecasting, arXiv preprint arXiv:2311.00371, 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.00371.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FAIR-THU\u002FV2X-Graph)]\n* Social-Transmotion: Promptable Human Trajectory Prediction, arXiv preprint arXiv:2312.16168, 2023. 
[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.16168.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fvita-epfl\u002Fsocial-transmotion)]\n* RealGen: Retrieval Augmented Generation for Controllable Traffic Scenarios, arXiv preprint arXiv:2312.13303, 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.13303.pdf)] [[code](https:\u002F\u002Frealgen.github.io\u002F)]\n* SceneDM: Scene-level Multi-agent Trajectory Generation with Consistent Diffusion Models, arXiv preprint arXiv:2311.15736, 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.15736.pdf)] [[website](https:\u002F\u002Falperen-hub.github.io\u002FSceneDM\u002F)]\n* DriveDreamer: Towards Real-world-driven World Models for Autonomous Driving, arXiv preprint arXiv:2309.09777, 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2309.09777.pdf)] [[website](https:\u002F\u002Fdrivedreamer.github.io\u002F)]\n* Language Prompt for Autonomous Driving, arXiv preprint arXiv:2309.04379, 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2309.04379.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fwudongming97\u002FPrompt4Driving)]\n* GAIA-1: A Generative World Model for Autonomous Driving, arXiv preprint arXiv:2309.17080, 2023. [[paper](https:\u002F\u002Fbrowse.arxiv.org\u002Fpdf\u002F2309.17080.pdf)] [[website](https:\u002F\u002Fwayve.ai\u002Fthinking\u002Fscaling-gaia-1\u002F)]\n* LanguageMPC: Large Language Models as Decision Makers for Autonomous Driving, arXiv preprint arXiv:2310.03026, 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.03026.pdf)] [[website](https:\u002F\u002Fsites.google.com\u002Fview\u002Fllm-mpc)]\n* DriveGPT4: Interpretable End-to-end Autonomous Driving via Large Language Model, arXiv preprint arXiv:2310.01412, 2023. 
[[paper](https:\u002F\u002Fbrowse.arxiv.org\u002Fpdf\u002F2310.01412.pdf)] [[website](https:\u002F\u002Ftonyxuqaq.github.io\u002Fprojects\u002FDriveGPT4\u002F)]\n* Drive Like a Human: Rethinking Autonomous Driving with Large Language Models, arXiv preprint arXiv:2307.07162, 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2307.07162.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FPJLab-ADG\u002FDriveLikeAHuman)]\n* DiLu: A Knowledge-Driven Approach to Autonomous Driving with Large Language Models, arXiv preprint arXiv:2309.16292, 2023. [[paper](https:\u002F\u002Fbrowse.arxiv.org\u002Fpdf\u002F2309.16292.pdf)] [[website](https:\u002F\u002Fpjlab-adg.github.io\u002FDiLu\u002F)]\n* DrivingDiffusion: Layout-Guided multi-view driving scene video generation with latent diffusion model, arXiv preprint arXiv:2310.07771, 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.07771.pdf)] [[website](https:\u002F\u002Fdrivingdiffusion.github.io\u002F)]\n* Driving with LLMs: Fusing Object-Level Vector Modality for Explainable Autonomous Driving, arXiv preprint arXiv:2310.01957, 2023. [[paper](https:\u002F\u002Fbrowse.arxiv.org\u002Fpdf\u002F2310.01957.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fwayveai\u002FDriving-with-LLMs)]\n* WEDGE: A Multi-Weather Autonomous Driving Dataset Built From Generative Vision-Language Models, CVPR Workshops 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2305.07528.pdf)] [[website](https:\u002F\u002Finfernolia.github.io\u002FWEDGE)]\n* BEVGPT: Generative Pre-trained Large Model for Autonomous Driving Prediction, Decision-Making, and Planning, arXiv preprint arXiv:2310.10357, 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.10357.pdf)]\n* Diffusion World Models, ICLR 2024 Conference Submission, 2023. [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=bAXmvOLtjA)]\n* Waymax: An Accelerated, Data-Driven Simulator for Large-Scale Autonomous Driving Research, arXiv preprint arXiv:2310.08710, 2023. 
[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.08710.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fwaymo-research\u002Fwaymax)] [[website](https:\u002F\u002Fwaymo.com\u002Fintl\u002Fzh-cn\u002Fresearch\u002Fwaymax\u002F)]\n* MagicDrive: Street View Generation with Diverse 3D Geometry Control, arXiv preprint arXiv:2310.02601, 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.02601.pdf)] [[website](https:\u002F\u002Fgaoruiyuan.com\u002Fmagicdrive\u002F)]\n* GPT-Driver: Learning to Drive with GPT, arXiv preprint arXiv:2310.01415, 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.01415.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FPointsCoder\u002FGPT-Driver)]\n* Can you text what is happening? Integrating pre-trained language encoders into trajectory prediction models for autonomous driving, arXiv preprint arXiv:2309.05282, 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2309.05282.pdf)]\n* HiLM-D: Towards High-Resolution Understanding in Multimodal Large Language Models for Autonomous Driving, arXiv preprint arXiv:2309.05186, 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2309.05186.pdf)]\n* A Language Agent for Autonomous Driving, arXiv preprint arXiv:2311.10813, 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.10813.pdf)] [[website](https:\u002F\u002Fusc-gvl.github.io\u002FAgent-Driver\u002F)]\n* ADriver-I: A General World Model for Autonomous Driving, arXiv preprint arXiv:2311.13549, 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.13549.pdf)]\n* LLM4Drive: A Survey of Large Language Models for Autonomous Driving, arXiv preprint arXiv:2311.01043, 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.01043.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FThinklab-SJTU\u002FAwesome-LLM4AD)]\n* Vision Language Models in Autonomous Driving and Intelligent Transportation Systems, arXiv preprint arXiv:2310.14414, 2023. 
[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.14414.pdf)]\n* On the Road with GPT-4V(ision): Early Explorations of Visual-Language Model on Autonomous Driving, arXiv preprint arXiv:2311.05332, 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.05332.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FPJLab-ADG\u002FGPT4V-AD-Exploration)]\n* Driving into the Future: Multiview Visual Forecasting and Planning with World Model for Autonomous Driving, arXiv preprint arXiv:2311.17918, 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.17918.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FBraveGroup\u002FDrive-WM)] [[website](https:\u002F\u002Fdrive-wm.github.io\u002F)]\n* A Survey on Multimodal Large Language Models for Autonomous Driving, arXiv preprint arXiv:2311.12320, 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.12320.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FIrohXu\u002FAwesome-Multimodal-LLM-Autonomous-Driving)]\n* Panacea: Panoramic and Controllable Video Generation for Autonomous Driving, arXiv preprint arXiv:2311.16813, 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.16813.pdf)] [[website](https:\u002F\u002Fpanacea-ad.github.io\u002F)] [[code](https:\u002F\u002Fgithub.com\u002Fwenyuqing\u002Fpanacea)]\n* LMDrive: Closed-Loop End-to-End Driving with Large Language Models, arXiv preprint arXiv:2312.07488, 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.07488.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fopendilab\u002FLMDrive)]\n* DriveMLM: Aligning Multi-Modal Large Language Models with Behavioral Planning States for Autonomous Driving, arXiv preprint arXiv:2312.09245, 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.09245.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FOpenGVLab\u002FDriveMLM)]\n* Language Models, Agent Models, and World Models: The LAW for Machine Reasoning and Planning, arXiv preprint arXiv:2312.05230, NIPS Tutorial 2023. 
[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.05230.pdf)] [[website](https:\u002F\u002Fsites.google.com\u002Fview\u002Fneurips2023law)]\n* Dolphins: Multimodal Language Model for Driving, arXiv preprint arXiv:2312.00438, 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.00438.pdf)] [[website](https:\u002F\u002Fvlm-driver.github.io\u002F)]\n* DriveLM: Driving with Graph Visual Question Answering, arXiv preprint arXiv:2312.14150, 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.14150.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FOpenDriveLab\u002FDriveLM)] [[website](https:\u002F\u002Fopendrivelab.github.io\u002FDriveLM)]\n* LingoQA: Video Question Answering for Autonomous Driving, arXiv preprint arXiv:2312.14115, 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.14115.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fwayveai\u002FLingoQA)]\n* ViFiT: Reconstructing Vision Trajectories from IMU and Wi-Fi Fine Time Measurements, MobiCom ISACom Workshop 2023. [[paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3615984.3616503)] [[code](https:\u002F\u002Fgithub.com\u002Fbryanbocao\u002Fvifit)]\n\n\n# 📚 2024 Conference and Journal Papers\n## Conference Papers 2024\n* BAT: Behavior-Aware Human-Like Trajectory Prediction for Autonomous Driving, AAAI 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.06371.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FPetrichor625\u002FBATraj-Behavior-aware-Model)]\n* NuScenes-QA: A Multi-modal Visual Question Answering Benchmark for Autonomous Driving Scenario, AAAI 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2305.14836.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fqiantianwen\u002FNuScenes-QA)]\n* SocialCVAE: Predicting Pedestrian Trajectory via Interaction Conditioned Latents, AAAI 2024. 
[[paper](http:\u002F\u002Fwww.cad.zju.edu.cn\u002Fhome\u002Fjin\u002FAAAI20242\u002FSocialCVAE.pdf)] [[code](http:\u002F\u002Fwww.cad.zju.edu.cn\u002Fhome\u002Fjin\u002FAAAI20242\u002FSocialCVAE.htm)]\n* Improving Transferability for Cross-domain Trajectory Prediction via Neural Stochastic Differential Equation, AAAI 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.15906)] [[code](https:\u002F\u002Fgithub.com\u002Fdaeheepark\u002FTrajSDE)]\n* Can Language Beat Numerical Regression? Language-Based Multimodal Trajectory Prediction, CVPR 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.18447)] [[code](https:\u002F\u002Fgithub.com\u002FInhwanBae\u002FLMTrajectory)]\n* SingularTrajectory: Universal Trajectory Predictor using Diffusion Model, CVPR 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.18452)] [[code](https:\u002F\u002Fgithub.com\u002FInhwanBae\u002FSingularTrajectory)]\n* Producing and Leveraging Online Map Uncertainty in Trajectory Prediction, CVPR 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.16439.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Falfredgu001324\u002FMapUncertaintyPrediction)]\n* HPNet: Dynamic Trajectory Forecasting with Historical Prediction Attention, CVPR 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2404.06351.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FXiaolongTang23\u002FHPNet)]\n* Adapting to Length Shift: FlexiLength Network for Trajectory Prediction, CVPR 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2404.00742.pdf)]\n* T4P: Test-Time Training of Trajectory Prediction via Masked Autoencoder and Actor-specific Token Memory, CVPR 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.10052.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fdaeheepark\u002FT4P)]\n* SocialCircle: Learning the Angle-based Social Interaction Representation for Pedestrian Trajectory Prediction, CVPR 2024. 
[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.05370.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fcocoon2wong\u002FSocialCircle)]\n* Adversarial Backdoor Attack by Naturalistic Data Poisoning on Trajectory Prediction in Autonomous Driving, CVPR 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.15755.pdf)]\n* CaDeT: a Causal Disentanglement Approach for Robust Trajectory Prediction in Autonomous Driving, CVPR 2024.\n* Higher-order Relational Reasoning for Pedestrian Trajectory Prediction, CVPR 2024.\n* Density-Adaptive Model Based on Motif Matrix for Multi-Agent Trajectory Prediction, CVPR 2024.\n* OOSTraj: Out-of-Sight Trajectory Prediction With Vision-Positioning Denoising, CVPR 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2404.02227.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FHai-chao-Zhang\u002FOOSTraj)]\n* SmartRefine: A Scenario-Adaptive Refinement Framework for Efficient Motion Prediction, CVPR 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.11492)] [[code](https:\u002F\u002Fgithub.com\u002Fopendilab\u002FSmartRefine)]\n* MFTraj: Map-Free, Behavior-Driven Trajectory Prediction for Autonomous Driving, IJCAI 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.01266)]\n* Characterized Diffusion and Spatial-Temporal Interaction Network for Trajectory Prediction in Autonomous Driving, IJCAI 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.02145)]\n* A Cognitive-Driven Trajectory Prediction Model for Autonomous Driving in Mixed Autonomy Environment, IJCAI 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2404.17520)]\n* Physics-Informed Trajectory Prediction for Autonomous Driving under Missing Observation, IJCAI 2024. [[paper](https:\u002F\u002Fpapers.ssrn.com\u002Fsol3\u002Fpapers.cfm?abstract_id=4809575 )]\n* Exploring Large Language Models for Trajectory Prediction: A Technical Perspective, ACM\u002FIEEE International Conference on Human-Robot Interaction (HRI 2024). 
[[paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3610978.3640625)]\n* SpectrumNet: Spectrum-Based Trajectory Encode Neural Network for Pedestrian Trajectory Prediction, IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP 2024). [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10446706)]\n* MapFlow: Multi-Agent Pedestrian Trajectory Prediction Using Normalizing Flow, ICASSP 2024. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10448062)]\n* Promptable Closed-loop Traffic Simulation, CoRL 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2409.05863)] [[code](https:\u002F\u002Fariostgx.github.io\u002FProSim\u002F)]\n* TrajCLIP: Pedestrian Trajectory Prediction Method Using Contrastive Learning and Idempotent Networks, NIPS 2024. [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=fUBFy8tb3z)]\n* Pedestrian Trajectory Prediction with Missing Data: Datasets, Imputation, and Benchmarking, NIPS 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.00174)] [[code](https:\u002F\u002Fgithub.com\u002FPranav-chib\u002FTrajImpute)]\n* LaKD: Length-agnostic Knowledge Distillation for Trajectory Prediction with Any Length Observations, NIPS 2024. [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf\u002F7cf0348cc3747c46278bb98d27d152a16c5722d3.pdf)]\n* Drones Help Drones: A Collaborative Framework for Multi-Drone Object Trajectory Prediction and Beyond, NIPS 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.14674)] [[code](https:\u002F\u002Fgithub.com\u002FWangzcBruce\u002FDHD)]\n* MGF: Mixed Gaussian Flow for Diverse Trajectory Prediction, NIPS 2024. [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=muYhNDlxWc)] [[code](https:\u002F\u002Fgithub.com\u002Fmulplue\u002FMGF)]\n* Reasoning Multi-Agent Behavioral Topology for Interactive Autonomous Driving, NIPS 2024. 
[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2409.18031)] [[code](https:\u002F\u002Fgithub.com\u002FOpenDriveLab\u002FBeTop)]\n* SMART: Scalable Multi-agent Real-time Simulation via Next-token Prediction, NIPS 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.15677)] [[code](https:\u002F\u002Fgithub.com\u002Frainmaker22\u002FSMART)]\n* MART: MultiscAle Relational Transformer Networks for Multi-agent Trajectory Prediction, ECCV 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.21635)] [[code](https:\u002F\u002Fgithub.com\u002Fgist-ailab\u002FMART)]\n* Optimizing Diffusion Models for Joint Trajectory Prediction and Controllable Generation, ECCV 2024. [[paper](https:\u002F\u002Fyixiaowang7.github.io\u002FOptTrajDiff_Page\u002Fstatic\u002Fpdfs\u002Fpaper.pdf)] [[code](https:\u002F\u002Fyixiaowang7.github.io\u002FOptTrajDiff_Page\u002F)]\n* Progressive Pretext Task Learning for Human Trajectory Prediction, ECCV 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.11588)] [[code](https:\u002F\u002Fgithub.com\u002FiSEE-Laboratory\u002FPPT)]\n* Reliable Probabilistic Human Trajectory Prediction for Autonomous Applications, ECCV 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.06905)] [[code](https:\u002F\u002Fgithub.com\u002Fkav-institute\u002Fmdn_trajectory_forecasting)]\n* DySeT: a Dynamic Masked Self-distillation Approach for Robust Trajectory Prediction, ECCV 2024. [[paper](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2024\u002Fpapers_ECCV\u002Fpapers\u002F00414.pdf)]\n* Adaptive Human Trajectory Prediction via Latent Corridors, ECCV 2024. [[paper](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2024\u002Fpapers_ECCV\u002Fpapers\u002F05542.pdf)]\n* CRITERIA: a New Benchmarking Paradigm for Evaluating Trajectory Prediction Models for Autonomous Driving, ICRA 2024. 
[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.07794)] [[code](https:\u002F\u002Fgithub.com\u002Fhuawei-noah\u002FSMARTS\u002Ftree\u002FCRITERIA-latest\u002Fpapers\u002FCRITERIA)]\n* FIMP: Future Interaction Modeling for Multi-Agent Motion Prediction, ICRA 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2401.16189)]\n* Pedestrian Trajectory Prediction Using Dynamics-based Deep Learning, ICRA 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2309.09021)] [[code](https:\u002F\u002Fgithub.com\u002Fsydney-machine-learning\u002Fpedestrianpathprediction)]\n* Scene Informer: Anchor-based Occlusion Inference and Trajectory Prediction in Partially Observable Environments, ICRA 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2309.13893)] [[code](https:\u002F\u002Fgithub.com\u002Fsisl\u002FSceneInformer)]\n* Human Observation-Inspired Trajectory Prediction for Autonomous Driving in Mixed-Autonomy Traffic Environments, ICRA 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.04318)]\n* Neural Interaction Energy for Multi-Agent Trajectory Prediction, ACM MM 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2404.16579)]\n## Journal Papers 2024\n* SMEMO: Social Memory for Trajectory Forecasting, TPAMI. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2203.12446.pdf)]\n* A Cognitive-Based Trajectory Prediction Approach for Autonomous Driving, TIV. [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.19251)]\n* EMSIN: Enhanced Multi-Stream Interaction Network for Vehicle Trajectory Prediction, IEEE Transactions on Fuzzy Systems. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10418557)]\n* Social Force Embedded Mixed Graph Convolutional Network for Multi-class Trajectory Prediction, TIV. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10415371)]\n* Context-Aware Timewise VAEs for Real-Time Vehicle Trajectory Prediction, RAL. 
[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2302.10873.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fxupei0610\u002FContextVAE)]\n* Learning Autoencoder Diffusion Models of Pedestrian Group Relationships for Multimodal Trajectory Prediction, IEEE Transactions on Instrumentation and Measurement (TIM). [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10466609)]\n* DSTCNN: Deformable Spatial-Temporal Convolutional Neural Network for Pedestrian Trajectory Prediction, Information Sciences. [[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0020025524003682)]\n* Heterogeneous graph social pooling for interaction-aware vehicle trajectory prediction, Transportation Research Part E: Logistics and Transportation Review. [[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fabs\u002Fpii\u002FS1366554524003399)]\n* VTSIM: Attention-Based Recurrent Neural Network for Intersection Vehicle Trajectory Simulation, Computer Animation and Virtual Worlds. [[paper](https:\u002F\u002Fonlinelibrary.wiley.com\u002Fdoi\u002Fepdf\u002F10.1002\u002Fcav.2298?saml_referrer)]\n* Context-Aware Timewise VAEs for Real-Time Vehicle Trajectory Prediction, RAL. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2302.10873)] [[code](https:\u002F\u002Fgithub.com\u002Fxupei0610\u002FContextVAE)]\n* MacFormer: Map-Agent Coupled Transformer for Real-time and Robust Trajectory Prediction, RAL. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2308.10280)]\n* Simulating human mobility with a trajectory generation framework based on diffusion model, International Journal of Geographical Information Science. 
[[paper](https:\u002F\u002Fwww.researchgate.net\u002Fprofile\u002FChen-Chu-17\u002Fpublication\u002F378022332_Simulating_human_mobility_with_a_trajectory_generation_framework_based_on_diffusion_model\u002Flinks\u002F65cc2e5c790074549783cbf7\u002FSimulating-human-mobility-with-a-trajectory-generation-framework-based-on-diffusion-model.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fchuchen2017\u002FTrajGDM)]\n\n## Others 2024\n* Controllable Safety-Critical Closed-loop Traffic Simulation via Guided Diffusion, arXiv preprint arXiv:2401.00391, 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2401.00391.pdf)] [[website](https:\u002F\u002Fsafe-sim.github.io\u002F)]\n* Forging Vision Foundation Models for Autonomous Driving: Challenges, Methodologies, and Opportunities, arXiv preprint arXiv:2401.08045, 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2401.08045.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fzhanghm1995\u002FForge_VFM4AD)]\n* Intention-aware Denoising Diffusion Model for Trajectory Prediction, arXiv preprint arXiv:2403.09190, 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.09190.pdf)]\n* LG-Traj: LLM Guided Pedestrian Trajectory Prediction, arXiv preprint arXiv:2403.08032, 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.08032.pdf)]\n* Traj-LLM: A New Exploration for Empowering Trajectory Prediction with Pre-trained Large Language Models, arXiv preprint arXiv:2405.04909, 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.04909)]\n* UniTraj: A Unified Framework for Scalable Vehicle Trajectory Prediction, arXiv preprint arXiv:2403.15098, 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.15098.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fvita-epfl\u002FUniTraj)]\n* Versatile Scene-Consistent Traffic Scenario Generation as Optimization with Diffusion, arXiv preprint arXiv:2404.02524, 2024. 
[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2404.02524.pdf)] [[code](https:\u002F\u002Fsites.google.com\u002Fview\u002Fversatile-behavior-diffusion)]\n* ControlTraj: Controllable Trajectory Generation with Topology-Constrained Diffusion Model, arXiv preprint arXiv:2404.15380, 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2404.15380)]\n* Diffusion-Based Environment-Aware Trajectory Prediction, arXiv preprint arXiv:2403.11643, 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.11643)]\n* A Preprocessing and Evaluation Toolbox for Trajectory Prediction Research on the Drone Datasets, arXiv preprint arXiv:2405.00604, 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.00604)] [[code](https:\u002F\u002Fgithub.com\u002Fwestny\u002Fdronalize)]\n* BehaviorGPT: Smart Agent Simulation for Autonomous Driving with Next-Patch Prediction, arXiv preprint arXiv:2405.17372, 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.17372)]\n* Vista: A Generalizable Driving World Model with High Fidelity and Versatile Controllability, arXiv preprint arXiv:2405.17398, 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.17398)] [[code](https:\u002F\u002Fgithub.com\u002FOpenDriveLab\u002FVista)]\n* UrbanGPT: Spatio-Temporal Large Language Models, arXiv preprint arXiv:2403.00813, 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.00813)] [[code](https:\u002F\u002Fgithub.com\u002FHKUDS\u002FUrbanGPT)]\n* Continuously Learning, Adapting, and, Improving: A Dual-Process Approach to Autonomous Driving, arXiv preprint arXiv:2405.15324, 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.15324)] [[code](https:\u002F\u002Fgithub.com\u002FPJLab-ADG\u002FLeapAD)]\n* DriveVLM: The Convergence of Autonomous Driving and Large Vision-Language Models, arXiv preprint arXiv:2402.12289, 2024. 
[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.12289)] [[website](https:\u002F\u002Ftsinghua-mars-lab.github.io\u002FDriveVLM\u002F)]\n* NAVSIM: Data-Driven Non-Reactive Autonomous Vehicle Simulation and Benchmarking, arXiv preprint arXiv:2406.15349, 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.15349)] [[code](https:\u002F\u002Fgithub.com\u002Fautonomousvision\u002Fnavsim)] [[supplementary](https:\u002F\u002Fdanieldauner.github.io\u002Fassets\u002Fpdf\u002FDauner2024NIPS_supplementary.pdf)]\n* SimGen: Simulator-conditioned Driving Scene Generation, arXiv preprint arXiv:2406.09386, 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.09386)] [[code](https:\u002F\u002Fmetadriverse.github.io\u002Fsimgen\u002F)]\n* GenAD: Generative End-to-End Autonomous Driving, arXiv preprint arXiv:2402.11502, 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.11502)] [[code](https:\u002F\u002Fgithub.com\u002Fwzzheng\u002FGenAD)]\n* LCSim: A Large-Scale Controllable Traffic Simulator, arXiv preprint arXiv:2406.19781, 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.19781)] [[code](https:\u002F\u002Fgithub.com\u002Ftsinghua-fib-lab\u002FLCSim)]\n* Strada-LLM: Graph LLM for traffic prediction, arXiv preprint arXiv:2410.20856, 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.20856)]\n* MADiff: Motion-Aware Mamba Diffusion Models for Hand Trajectory Prediction on Egocentric Videos, arXiv preprint arXiv:2409.02638, 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2409.02638)] [[code](https:\u002F\u002Firmvlab.github.io\u002Fmadiff.github.io\u002F)]\n* Gen-Drive: Enhancing Diffusion Generative Driving Policies with Reward Modeling and Reinforcement Learning Fine-tuning, arXiv preprint arXiv:2410.05582, 2024. 
[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.05582)] [[code](https:\u002F\u002Fmczhi.github.io\u002FGenDrive)]\n* Conformal Trajectory Prediction with Multi-View Data Integration in Cooperative Driving, arXiv preprint arXiv:2408.00374, 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.00374)] [[code](https:\u002F\u002Fgithub.com\u002Fxichennn\u002FV2I_trajectory_prediction)]\n* LHPF: Look back the History and Plan for the Future in Autonomous Driving, arXiv preprint arXiv:2411.17253, 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.17253)] [[website](https:\u002F\u002Fchantsss.github.io\u002FLHPF\u002F)]\n\n# 📚 2025 Conference and Journal Papers\n## Conference Papers 2025\n* Generating Traffic Scenarios via In-Context Learning to Learn Better Motion Planner, AAAI 2025. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.18086)] [[code](https:\u002F\u002Fezharjan.github.io\u002FAutoSceneGen\u002F)]\n* NEST: A Neuromodulated Small-world Hypergraph Trajectory Prediction Model for Autonomous Driving, AAAI 2025. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.11682)]\n* C2F-TP: A Coarse-to-Fine Denoising Framework for Uncertainty-aware Trajectory Prediction, AAAI 2025. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.13231)] [[code](https:\u002F\u002Fgithub.com\u002Fwangzc0422\u002FC2F-TP)]\n* CUQDS: Conformal Uncertainty Quantification Under Distribution Shift for Trajectory Prediction, AAAI 2025. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.12100)]\n* STraj: Self-training for Bridging the Cross-Geography Gap in Trajectory Prediction, AAAI 2025. [[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fdownload\u002F34432\u002F36587)] [[code](https:\u002F\u002Fgithub.com\u002FZhanwei-Z\u002FSTraj)]\n* Bridging Traffic State and Trajectory for Dynamic Road Network and Trajectory Representation Learning, AAAI 2025. 
[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.06870)] [[code](https:\u002F\u002Fgithub.com\u002FNickHan-cs\u002FTRACK)]\n* GTG: Generalizable Trajectory Generation Model for Urban Mobility, AAAI 2025. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.01107)] [[code](https:\u002F\u002Fgithub.com\u002Flyd1881310\u002FGTG)]\n* MoFlow: One-Step Flow Matching for Human Trajectory Forecasting via Implicit Maximum Likelihood Estimation based Distillation, CVPR 2025. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.09950)] [[code](https:\u002F\u002Fgithub.com\u002FDSL-Lab\u002FMoFlow)]\n* Certified Human Trajectory Prediction, CVPR 2025. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.13778)] [[website](https:\u002F\u002Fs-attack.github.io\u002F)] [[code](https:\u002F\u002Fs-attack.github.io\u002Fcertified\u002F)]\n* Enduring, Efficient and Robust Trajectory Prediction Attack in Autonomous Driving via Optimization-Driven Multi-Frame Perturbation Framework, CVPR 2025. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fpapers\u002FYu_Enduring_Efficient_and_Robust_Trajectory_Prediction_Attack_in_Autonomous_Driving_CVPR_2025_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fy1y5\u002FOMP-ATTACK)]\n* PerReg+: Towards Generalizable Trajectory Prediction using Dual-Level Representation Learning and Adaptive Prompting, CVPR 2025. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fpapers\u002FMessaoud_Towards_Generalizable_Trajectory_Prediction_using_Dual-Level_Representation_Learning_and_Adaptive_CVPR_2025_paper.pdf)]\n* Leveraging SD Map to Augment HD Map-based Trajectory Prediction, CVPR 2025. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fpapers\u002FDong_Leveraging_SD_Map_to_Augment_HD_Map-based_Trajectory_Prediction_CVPR_2025_paper.pdf)]\n* Adapting to Observation Length of Trajectory Prediction via Contrastive, CVPR 2025. 
[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fpapers\u002FQiu_Adapting_to_Observation_Length_of_Trajectory_Prediction_via_Contrastive_Learning_CVPR_2025_paper.pdf)]\n* Multi-modal Knowledge Distillation-based Human Trajectory Forecasting, CVPR 2025. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fpapers\u002FJeong_Multi-modal_Knowledge_Distillation-based_Human_Trajectory_Forecasting_CVPR_2025_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FJaewoo97\u002FKDTF)]\n* Physical Plausibility-aware Trajectory Prediction via Locomotion Embodiment, CVPR 2025. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fpapers\u002FTaketsugu_Physical_Plausibility-aware_Trajectory_Prediction_via_Locomotion_Embodiment_CVPR_2025_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FImIntheMiddle\u002FEmLoco)]\n* Sim-to-Real Causal Transfer: A Metric Learning Approach to Causally-Aware Interaction Representations, CVPR 2025. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fpapers\u002FRahimi_Sim-to-Real_Causal_Transfer_A_Metric_Learning_Approach_to_Causally-Aware_Interaction_CVPR_2025_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fvita-epfl\u002FCausalSim2Real)]\n* SocialMOIF: Multi-Order Intention Fusion for Pedestrian Trajectory Prediction, CVPR 2025. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fpapers\u002FChen_SocialMOIF_Multi-Order_Intention_Fusion_for_Pedestrian_Trajectory_Prediction_CVPR_2025_paper.pdf)]\n* Trajectory-Mamba: An Efficient Attention-Mamba Forecasting Model Based on Selective SSM, CVPR 2025. 
[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fpapers\u002FHuang_Trajectory_Mamba_Efficient_Attention-Mamba_Forecasting_Model_Based_on_Selective_SSM_CVPR_2025_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FYiZhou-H\u002FTrajectory-Mamba-CVPR)]\n* Tra-MoE: Learning Trajectory Prediction Model from Multiple Domains for Adaptive Policy Conditioning, CVPR 2025. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fpapers\u002FYang_Tra-MoE_Learning_Trajectory_Prediction_Model_from_Multiple_Domains_for_Adaptive_CVPR_2025_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FMCG-NJU\u002FTra-MoE)]\n* Unified Uncertainty-Aware Diffusion for Multi-Agent Trajectory Modeling, CVPR 2025. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fpapers\u002FCapellera_Unified_Uncertainty-Aware_Diffusion_for_Multi-Agent_Trajectory_Modeling_CVPR_2025_paper.pdf)]\n* AMD: Adaptive Momentum and Decoupled Contrastive Learning Framework for Robust Long-Tail Trajectory Prediction, ICCV 2025. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2025\u002Fpapers\u002FRao_AMD_Adaptive_Momentum_and_Decoupled_Contrastive_Learning_Framework_for_Robust_ICCV_2025_paper.pdf)]\n* DONUT: A Decoder-Only Model for Trajectory Prediction, ICCV 2025. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2025\u002Fpapers\u002FKnoche_DONUT_A_Decoder-Only_Model_for_Trajectory_Prediction_ICCV_2025_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FMKnoche\u002FDONUT)]\n* Foresight in Motion: Reinforcing Trajectory Prediction with Reward Heuristics, ICCV 2025. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2025\u002Fpapers\u002FPei_Foresight_in_Motion_Reinforcing_Trajectory_Prediction_with_Reward_Heuristics_ICCV_2025_paper.pdf)]\n* ForeSight: Multi-View Streaming Joint Object Detection and Trajectory Forecasting, ICCV 2025. 
[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2025\u002Fpapers\u002FPapais_ForeSight_Multi-View_Streaming_Joint_Object_Detection_and_Trajectory_Forecasting_ICCV_2025_paper.pdf)] [[code](https:\u002F\u002Fforesight-iccv.github.io\u002F)]\n* Generative Active Learning for Long-tail Trajectory Prediction via Controllable Diffusion Model, ICCV 2025. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2025\u002Fpapers\u002FPark_Generative_Active_Learning_for_Long-tail_Trajectory_Prediction_via_Controllable_Diffusion_ICCV_2025_paper.pdf)]\n* NATRA: Noise-Agnostic Framework for Trajectory Prediction with Noisy Observations, ICCV 2025. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2025\u002Fpapers\u002FLi_NATRA_Noise-Agnostic_Framework_for_Trajectory_Prediction_with_Noisy_Observations_ICCV_2025_paper.pdf)]\n* SRefiner: Soft-Braid Attention for Multi-Agent Trajectory Refinement, ICCV 2025. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2025\u002Fpapers\u002FXiao_SRefiner_Soft-Braid_Attention_for_Multi-Agent_Trajectory_Refinement_ICCV_2025_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FLiwen-Xiao\u002FSRefiner)]\n* TOTP: Transferable Online Pedestrian Trajectory Prediction with Temporal-Adaptive Mamba Latent Diffusion, ICCV 2025. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2025\u002Fpapers\u002FRen_TOTP_Transferable_Online_Pedestrian_Trajectory_Prediction_with_Temporal-Adaptive_Mamba_Latent_ICCV_2025_paper.pdf)]\n* Unified Multi-Agent Trajectory Modeling with Masked Trajectory Diffusion, ICCV 2025. [[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2025\u002Fpapers\u002FYang_Unified_Multi-Agent_Trajectory_Modeling_with_Masked_Trajectory_Diffusion_ICCV_2025_paper.pdf)]\n* A Driving-Style-Adaptive Framework for Vehicle Trajectory Prediction, NIPS 2025. 
[[paper](https:\u002F\u002Fopenreview.net\u002Fpdf\u002F19a4046603d4ecb927c2708967c00e223725333a.pdf)]\n* Towards Predicting Any Human Trajectory In Context, NIPS 2025. [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf\u002Fda08cd5bb2b08b484195bc720c3b833a7fd3bab6.pdf)]\n* Interactive Adjustment for Human Trajectory Prediction with Individual Feedback, ICLR 2025. [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=DCpukR83sw)]\n* Leveraging Driver Field-of-View for Multimodal Ego-Trajectory Prediction, ICLR 2025. [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=LLWj8on4Rv)] [[code](https:\u002F\u002Fgithub.com\u002Fmeakbiyik\u002Frouteformer)]\n* Neuralized Markov Random Field for Interaction-Aware Stochastic Human Trajectory Prediction, ICLR 2025. [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=r3cEOVj7Ze)] [[code](https:\u002F\u002Fgithub.com\u002FAdaCompNUS\u002FNMRF_TrajectoryPrediction)]\n* Sports-Traj: A Unified Trajectory Generation Model for Multi-Agent Movement in Sports, ICLR 2025. [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=9aTZf71uiD)] [[code](https:\u002F\u002Fgithub.com\u002Fcolorfulfuture\u002FUniTraj-pytorch)]\n* Trajectory-LLM: A Language-based Data Generator for Trajectory Prediction in Autonomous Driving, ICLR 2025. [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=UapxTvxB3N)] [[code](https:\u002F\u002Fgithub.com\u002FTJU-IDVLab\u002FTraj-LLM)]\n* TSC-Net: Prediction of Pedestrian Trajectories by Trajectory-Scene-Cell Classification, ICLR 2025. [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=Xmh5gdMfRJ)]\n* SmartPretrain: Model-Agnostic and Dataset-Agnostic Representation Learning for Motion Prediction, ICLR 2025. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.08669)] [[code](https:\u002F\u002Fgithub.com\u002Fyoungzhou1999\u002FSmartPretrain)]\n* DriveGPT: Scaling Autoregressive Behavior Models for Driving, ICML 2025. 
[[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=SBUxQakoJJ)]\n* SAH-Drive: A Scenario-Aware Hybrid Planner for Closed-Loop Vehicle Trajectory Generation, ICML 2025. [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=OYbZWmNHwn)] [[code](https:\u002F\u002Fgithub.com\u002Frichie-live\u002FSAH-Drive)]\n* Three-Dimensional Trajectory Prediction with 3DMoTraj Dataset, ICML 2025. [[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=jkVH7nLzUR)] [[code](https:\u002F\u002Fgithub.com\u002Fzhouhao94\u002F3DMoTraj)]\n* Cross Time Domain Intention Interaction for Conditional Trajectory Prediction, ACM MM 2025. [[paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3746027.3754709)]\n* ViTraj: Learning Dual-Side Representations for Vehicle-Infrastructure Cooperative Trajectory Prediction, ACM MM 2025. [[paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3746027.3755295)]\n* Unified Human Localization and Trajectory Prediction with Monocular Vision, ICRA 2025. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.03535)] [[code](https:\u002F\u002Fgithub.com\u002Fvita-epfl\u002FMonoTransmotion)]\n* Pedestrian Intention and Trajectory Prediction in Unstructured Traffic Using IDD-PeD, ICRA 2025. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=11128395)] [[code](https:\u002F\u002Fcvit.iiit.ac.in\u002Fresearch\u002Fprojects\u002Fcvit-projects\u002Fiddped)]\n* Visual-Linguistic Reasoning for Pedestrian Trajectory Prediction, ICRA 2025. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=11127538)]\n* Curb Your Attention: Causal Attention Gating for Robust Trajectory Prediction in Autonomous Driving, ICRA 2025. 
[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=11128367)] [[code](https:\u002F\u002Fehsan-ami.github.io\u002Fcritic\u002F)]\n* Co-MTP: A Cooperative Trajectory Prediction Framework with Multi-Temporal Fusion for Autonomous Driving, ICRA 2025. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=11127303)] [[code](https:\u002F\u002Fxiaomiaozhang.github.io\u002FCo-MTP\u002F)]\n* WcDT: World-centric Diffusion Transformer for Traffic Scene Generation, ICRA 2025. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=11127600)] [[code](https:\u002F\u002Fgithub.com\u002Fyangchen1997\u002FWcDT)]\n* Diff-Refiner: Enhancing Multi-Agent Trajectory Prediction with a Plug-and-Play Diffusion Refiner, ICRA 2025. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=11127226)]\n* Scene-Aware Explainable Multimodal Trajectory Prediction, ICRA 2025. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=11128379)] [[code](https:\u002F\u002Fgithub.com\u002Focean-luna\u002FExplainable-Prediction)]\n* Stochastic Trajectory Prediction under Unstructured Constraints, ICRA 2025. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=11127647)]\n* DSFormer-RTP: Dynamic-stream Transformers for Real-time Deterministic Trajectory Prediction, IROS 2025. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=11247611)] [[code](https:\u002F\u002Fgithub.com\u002Fcxnaive\u002FDSFormer-RTP)]\n* ParkDiffusion: Heterogeneous Multi-Agent Multi-Modal Trajectory Prediction for Automated Parking using Diffusion Models, IROS 2025. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=11246467)]\n* GDTS: Goal-Guided Diffusion Model with Tree Sampling for Multi-Modal Pedestrian Trajectory Prediction, IROS 2025. 
[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=11246846)]\n* TR-LLM: Integrating Trajectory Data for Scene-Aware LLM-Based Human Action Prediction, IROS 2025. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=11246714)] [[code](https:\u002F\u002Fsites.google.com\u002Fview\u002Ftrllm?usp=)]\n* \n## Journal Papers 2025\n* DEMO: A Dynamics-Enhanced Learning Model for Multi-Horizon Trajectory Prediction in Autonomous Vehicles, Information Fusion. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.20784)]\n* DSTIGCN: Deformable Spatial-Temporal Interaction Graph Convolution Network for Pedestrian Trajectory Prediction, TITS. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F10843981)] [[code](https:\u002F\u002Fgithub.com\u002FChenwangxing\u002FDSTIGCN_Master)]\n* PCHGCN: Physically Constrained Higher-Order Graph Convolutional Network for Pedestrian Trajectory Prediction, IEEE Internet of Things Journal. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F10948459)] [[code](https:\u002F\u002Fgithub.com\u002FChenwangxing\u002FPCHGCN-Master)]\n* DiffMATP: Interaction-Aware Multi-Agent Trajectory Prediction via Denoising Diffusion Models, TVT. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F11222824)]\n* HDSVT: High-Density Semantic Vehicle Trajectory Dataset Based on a Cosmopolitan City Bridge, Scientific Data. [[paper](https:\u002F\u002Fwww.nature.com\u002Farticles\u002Fs41597-025-05603-7)] [[DataSet](https:\u002F\u002Ffigshare.com\u002Farticles\u002Fdataset\u002FHDSVT_High-Density_Semantic_Vehicle_Trajectory_Dataset_Based_on_a_Cosmopolitan_City_Bridge\u002F27180387)]\n* CoT-Drive: Efficient Motion Forecasting for Autonomous Driving with LLMs and Chain-of-Thought Prompting, IEEE Transactions on Artificial Intelligence. 
[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F10980428)]\n\n## Others 2025\n* V2V-LLM: Vehicle-to-Vehicle Cooperative Autonomous Driving with Multi-Modal Large Language Models, arXiv preprint arXiv:2502.09980, 2025. [[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.09980)] [[code](https:\u002F\u002Feddyhkchiu.github.io\u002Fv2vllm.github.io\u002F)]\n* V2V-GoT: Vehicle-to-Vehicle Cooperative Autonomous Driving with Multimodal Large Language Models and Graph-of-Thoughts, arXiv preprint arXiv:2509.18053, 2025. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.18053v3)] [[code](https:\u002F\u002Feddyhkchiu.github.io\u002Fv2vgot.github.io\u002F)]\n\n# 📚 2026 Conference and Journal Papers\n## Conference Papers 2026\n* ViTE: Virtual Graph Trajectory Expert Router for Pedestrian Trajectory Prediction, AAAI 2026. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2511.12214)] [[code](https:\u002F\u002Fgithub.com\u002FCarrotsniper\u002FViTE)]\n* DiffRefiner: Coarse to Fine Trajectory Planning via Diffusion Refinement with Semantic Interaction for End to End Autonomous Driving, AAAI 2026. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2511.17150)] [[code](https:\u002F\u002Fgithub.com\u002Fnullmax-vision\u002FDiffRefiner)]\n* TRAJEVO: Trajectory Prediction Heuristics Design via LLM-driven Evolution, AAAI 2026. [[paper](https:\u002F\u002Fwww.arxiv.org\u002Fpdf\u002F2508.05616)] [[code](https:\u002F\u002Fgithub.com\u002Fai4co\u002Ftrajevo)]\n## Journal Papers 2026\n* \n## Others 2026\n* \n\n# 📚 Related Review Papers\n* Large Foundation Models for Trajectory Prediction in Autonomous Driving: A Comprehensive Survey, arXiv preprint arXiv:2509.10570, 2025. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.10570)]\n* Trajectory Prediction Meets Large Language Models: A Survey, arXiv preprint arXiv:2506.03408, 2025. 
[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.03408)] [[code](https:\u002F\u002Fgithub.com\u002Fcolorfulfuture\u002FAwesome-Trajectory-Motion-Prediction-Papers)]\n* Summary and Reflections on Pedestrian Trajectory Prediction in the Field of Autonomous Driving, TIV 2024. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10528911)]\n* A Review of Trajectory Prediction Methods for the Vulnerable Road User, Robotics 2023. [[paper](https:\u002F\u002Fwww.mdpi.com\u002F2218-6581\u002F13\u002F1\u002F1)]\n* A Survey of Generative AI for Intelligent Transportation Systems, arXiv preprint arXiv:2312.08248, 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.08248.pdf)]\n* Pedestrian and vehicle behaviour prediction in autonomous vehicle system — A review, Expert Systems With Applications 2023. [[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0957417423024855)]\n* Data-driven Traffic Simulation: A Comprehensive Review, arXiv preprint arXiv:2310.15975, 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fftp\u002Farxiv\u002Fpapers\u002F2310\u002F2310.15975.pdf)]\n* Pedestrian Trajectory Prediction in Pedestrian-Vehicle Mixed Environments: A Systematic Review, TITS 2023. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10181234)]\n* Machine Learning for Autonomous Vehicle’s Trajectory Prediction: A comprehensive survey, Challenges, and Future Research Directions, arXiv preprint arXiv:2307.07527, 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2307.07527.pdf)]\n* Incorporating Driving Knowledge in Deep Learning Based Vehicle Trajectory Prediction: A Survey, IEEE Transactions on Intelligent Vehicles 2023. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10100881)]\n* A Survey of Vehicle Trajectory Prediction Based on Deep Learning Models, International Conference on Sustainable Expert Systems: ICSES 2022. 
[[paper](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-981-19-7874-6_48)]\n* A Survey on Trajectory-Prediction Methods for Autonomous Driving, IEEE Transactions on Intelligent Vehicles 2022. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9756903)]\n* Generative Adversarial Networks for Spatio-temporal Data: A Survey, ACM Transactions on Intelligent Systems and Technology 2022. [[paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3474838)]\n* Scenario Understanding and Motion Prediction for Autonomous Vehicles – Review and Comparison, TITS 2022. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9733973)]\n* Deep Reinforcement Learning for Autonomous Driving: A Survey, TITS 2022. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9351818)]\n* Social Interactions for Autonomous Driving: A Review and Perspective, arXiv preprint arXiv:2208.07541, 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2208.07541.pdf)]\n* Behavioral Intention Prediction in Driving Scenes: A Survey, arXiv preprint arXiv:2211.00385, 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2211.00385.pdf)]\n* Multi-modal Fusion Technology based on Vehicle Information: A Survey, arXiv preprint arXiv:2211.06080, 2022. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2211.06080.pdf)]\n* Pedestrian Behavior Prediction for Automated Driving: Requirements, Metrics, and Relevant Features, TITS 2021. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9660784)]\n* A Review of Deep Learning-Based Methods for Pedestrian Trajectory Prediction, Sensors 2021. 
[[paper](https:\u002F\u002Fwww.mdpi.com\u002F1424-8220\u002F21\u002F22\u002F7543\u002Fpdf)]\n* A Survey on Deep-Learning Approaches for Vehicle Trajectory Prediction in Autonomous Driving, IEEE International Conference on Robotics and Biomimetics (ROBIO 2021). [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2110.10436.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FHenry1iu\u002FTNT-Trajectory-Predition)]\n* Review of Pedestrian Trajectory Prediction Methods: Comparing Deep Learning and Knowledge-based Approaches, arXiv preprint arXiv:2111.06740, 2021. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2111.06740.pdf)]\n* A Survey on Trajectory Data Management, Analytics, and Learning, ACM Computing Surveys (CSUR 2021). [[paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3440207)]\n* A Survey on Motion Prediction of Pedestrians and Vehicles for Autonomous Driving, IEEE Access 2021. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9559998)]\n* Autonomous Driving with Deep Learning: A Survey of State-of-Art Technologies, arXiv preprint arXiv:2006.06091, 2020. [[paper](https:\u002F\u002Farxiv.org\u002Fftp\u002Farxiv\u002Fpapers\u002F2006\u002F2006.06091.pdf)]\n* A Survey on Visual Traffic Simulation: Models, Evaluations, and Applications in Autonomous Driving, Computer Graphics Forum 2020. [[paper](https:\u002F\u002Fonlinelibrary.wiley.com\u002Fdoi\u002Fepdf\u002F10.1111\u002Fcgf.13803?saml_referrer)]\n* A Survey of Deep Learning Techniques for Autonomous Driving, Journal of Field Robotics 2020. [[paper](https:\u002F\u002Fonlinelibrary.wiley.com\u002Fdoi\u002Fepdf\u002F10.1002\u002Frob.21918?saml_referrer)]\n* Human Motion Trajectory Prediction: A Survey, International Journal of Robotics Research 2020. 
[[paper](http:\u002F\u002Fsage.cnpereading.com\u002Fparagraph\u002Fdownload\u002F?doi=10.1177\u002F0278364920917446)]\n* Vehicle Trajectory Similarity: Models, Methods, and Applications, ACM Computing Surveys (CSUR 2020). [[paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3406096)]\n* Deep Learning-Based Vehicle Behavior Prediction for Autonomous Driving Applications: A Review, TITS 2020. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9158529)]\n* Survey of Deep Reinforcement Learning for Motion Planning of Autonomous Vehicles, TITS 2020. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9210154)]\n* Overview of Tools Supporting Planning for Automated Driving, ITSC 2020. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9294512)]\n* Autonomous Vehicles that Interact with Pedestrians: A Survey of Theory and Practice, TITS 2019. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8667866)]\n* A Survey on Path Prediction Techniques for Vulnerable Road Users: From Traditional to Deep-Learning Approaches, ITSC 2019. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8917053)]\n* Spatio-Temporal Data Mining: A Survey of Problems and Methods, ACM Computing Surveys 2018. [[paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3161602)]\n* Survey on Vision-Based Path Prediction, International Conference on Distributed, Ambient, and Pervasive Interactions (DAPI 2018). [[paper](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-319-91131-1_4.pdf)]\n* Moving Objects Analytics: Survey on Future Location & Trajectory Prediction Methods, arXiv preprint arXiv:1807.04639, 2018. 
[[paper](https:\u002F\u002Farxiv.org\u002Fftp\u002Farxiv\u002Fpapers\u002F1807\u002F1807.04639.pdf)]\n* A Survey on Trajectory Data Mining: Techniques and Applications, IEEE Access 2016. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?arnumber=7452339)]\n* Trajectory Data Mining: An Overview, ACM Transactions on Intelligent Systems and Technology 2015. [[paper](http:\u002F\u002Furban-computing.com\u002Fpdf\u002FTrajectoryDataMining-tist-yuzheng.pdf)]\n* A survey on motion prediction and risk assessment for intelligent vehicles, ROBOMECH Journal 2014. [[paper](https:\u002F\u002Frobomechjournal.springeropen.com\u002Ftrack\u002Fpdf\u002F10.1186\u002Fs40648-014-0001-z.pdf)]\n\n# 📚 Datasets\n## Reviews about Datasets\n* A Survey on Autonomous Driving Datasets: Data Statistic, Annotation, and Outlook, arXiv preprint arXiv:2401.01454, 2024. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2401.01454.pdf)]\n* Open-sourced Data Ecosystem in Autonomous Driving: the Present and Future, arXiv preprint arXiv:2312.03408, 2023. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.03408.pdf)] [[Chinese](https:\u002F\u002Fopendrivelab.com\u002FDataset_Survey_Chinese.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FOpenDriveLab\u002FDriveAGI)]\n* HDSVT: High-Density Semantic Vehicle Trajectory Dataset Based on a Cosmopolitan City Bridge, Scientific Data 2025. [[paper](https:\u002F\u002Fwww.nature.com\u002Farticles\u002Fs41597-025-05603-7)] [[DataSet](https:\u002F\u002Ffigshare.com\u002Farticles\u002Fdataset\u002FHDSVT_High-Density_Semantic_Vehicle_Trajectory_Dataset_Based_on_a_Cosmopolitan_City_Bridge\u002F27180387)]\n* The AD4CHE Dataset and Its Application in Typical Congestion Scenarios of Traffic Jam Pilot Systems, IEEE Transactions on Intelligent Vehicles 2023. 
[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F10079130)] [[code](https:\u002F\u002Fgithub.com\u002FADSafetyJointLab\u002FAD4CHE)]\n\n## Vehicles Publicly Available Datasets\n* [Porto](https:\u002F\u002Fwww.kaggle.com\u002Fc\u002Fpkdd-15-predict-taxi-service-trajectory-i\u002Fdata), [website](https:\u002F\u002Farchive.ics.uci.edu\u002Fml\u002Fdatasets\u002FTaxi+Service+Trajectory+-+Prediction+Challenge,+ECML+PKDD+2015)\n* [NGSIM](https:\u002F\u002Fdata.transportation.gov\u002FAutomobiles\u002FNext-Generation-Simulation-NGSIM-Vehicle-Trajector\u002F8ect-6jqj)\n* [NYC](https:\u002F\u002Fwww1.nyc.gov\u002Fsite\u002Ftlc\u002Fabout\u002Ftlc-trip-record-data.page)\n* [T-drive](https:\u002F\u002Fwww.microsoft.com\u002Fen-us\u002Fresearch\u002Fpublication\u002Ft-drive-trajectory-data-sample\u002F)\n* [Greek Trucks](http:\u002F\u002Fwww.chorochronos.org\u002F)\n* [highD](https:\u002F\u002Fwww.highd-dataset.com\u002F)\n* [inD](https:\u002F\u002Fwww.ind-dataset.com\u002F)\n* [rounD](https:\u002F\u002Fwww.round-dataset.com\u002F)\n* [uniD](https:\u002F\u002Fwww.unid-dataset.com\u002F)\n* [exiD](https:\u002F\u002Fwww.exid-dataset.com\u002F)\n* [Dronalize](https:\u002F\u002Fgithub.com\u002Fwestny\u002Fdronalize)\n* [Mirror-Traffic](http:\u002F\u002Fwww.scenarios.cn\u002Fhtml\u002Fdataset.html)\n* [Argoverse Website](https:\u002F\u002Fwww.argoverse.org\u002F), [Argoverse 1](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FChang_Argoverse_3D_Tracking_and_Forecasting_With_Rich_Maps_CVPR_2019_paper.pdf), [Argoverse 2](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2301.00493.pdf)\n* [ApolloScape](http:\u002F\u002Fapolloscape.auto\u002Ftrajectory.html)\n* [INTERACTION](https:\u002F\u002Finteraction-dataset.com\u002F)\n* [Waymo Open Dataset](https:\u002F\u002Fwaymo.com\u002Fopen\u002F)\n* [Cityscapes](https:\u002F\u002Fwww.cityscapes-dataset.com\u002F)\n* [KITTI](http:\u002F\u002Fwww.cvlibs.net\u002Fdatasets\u002Fkitti\u002F)\n* 
[nuScenes](https:\u002F\u002Fwww.nuscenes.org\u002F)\n* [TRAF](https:\u002F\u002Fgamma.umd.edu\u002Fresearchdirections\u002Fautonomousdriving\u002Ftrafdataset)\n* [Lyft Level 5](https:\u002F\u002Flevel-5.global\u002F)\n* [METEOR](https:\u002F\u002Fgamma.umd.edu\u002Fresearchdirections\u002Fautonomousdriving\u002Fmeteor\u002F)\n* [DiDi GAIA](https:\u002F\u002Foutreach.didichuxing.com\u002Fresearch\u002Fopendata\u002F), [D²-City](https:\u002F\u002Fwww.scidb.cn\u002Fen\u002Fdetail?dataSetId=804399692560465920&dataSetType=personal), [paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1904.01975)\n* [Shanghai & Hangzhou](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F2700478)\n* [Beijing](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F2525314.2525343)\n* [VMT](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F6482546)\n* [TRAFFIC](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F7565640), [website](https:\u002F\u002Fmin.sjtu.edu.cn\u002Flwydemo\u002FTrajectory%20analysis.htm)\n* [CROSS](https:\u002F\u002Fcvrr-nas.ucsd.edu\u002Fpublications\u002F2011\u002FMorris_PAMI2011.pdf), [website](http:\u002F\u002Fcvrr.ucsd.edu\u002Fbmorris\u002Fdatasets\u002F)\n* [Ubiquitous Traffic Eyes (UTE)](http:\u002F\u002Fseutraffic.com\u002F#\u002Fhome)\n* [CitySim](https:\u002F\u002Fgithub.com\u002FUCF-SST-Lab\u002FUCF-SST-CitySim1-Dataset)\n* [pNEUMA](https:\u002F\u002Fopen-traffic.epfl.ch\u002F)\n* [I-24 MOTION](https:\u002F\u002Fi24motion.org\u002Fdata)\n* [Zen Traffic Data](https:\u002F\u002Fzen-traffic-data.net\u002Fenglish\u002F)\n* [DLR Urban Traffic](https:\u002F\u002Fdoi.org\u002F10.5281\u002Fzenodo.11396371)\n* [DLR Highway Traffic](https:\u002F\u002Fdoi.org\u002F10.5281\u002Fzenodo.14012005)\n## Pedestrians Publicly Available Datasets\n* [GeoLife](https:\u002F\u002Fwww.microsoft.com\u002Fen-us\u002Fresearch\u002Fpublication\u002Fgeolife-gps-trajectory-dataset-user-guide\u002F)\n* 
[UCY](https:\u002F\u002Fgraphics.cs.ucy.ac.cy\u002Fresearch\u002Fdownloads\u002Fcrowd-data)\n* [ETH](https:\u002F\u002Ficu.ee.ethz.ch\u002Fresearch\u002Fdatsets.html), [paper](https:\u002F\u002Fethz.ch\u002Fcontent\u002Fdam\u002Fethz\u002Fspecial-interest\u002Fbaug\u002Figp\u002Fphotogrammetry-remote-sensing-dam\u002Fdocuments\u002Fpdf\u002Fpellegrini09iccv.pdf)\n* [Stanford Drone Dataset](https:\u002F\u002Fcvgl.stanford.edu\u002Fprojects\u002Fuav_data\u002F)\n* [TrajNet](http:\u002F\u002Ftrajnet.stanford.edu\u002F)\n* [Oxford Town Center](https:\u002F\u002Fexposing.ai\u002Foxford_town_centre\u002F)\n* [New York Grand Central Station](https:\u002F\u002Fwww.ee.cuhk.edu.hk\u002F~xgwang\u002Fgrandcentral.html), [paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F5995459), [paper](https:\u002F\u002Fpeople.csail.mit.edu\u002Fbzhou\u002Fproject\u002Fcvpr2012\u002Fzhoucvpr2012.pdf), [paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2015\u002Fpapers\u002FYi_Understanding_Pedestrian_Behaviors_2015_CVPR_paper.pdf)\n* [PIE](https:\u002F\u002Fdata.nvision2.eecs.yorku.ca\u002FPIE_dataset\u002F)\n* [JAAD](https:\u002F\u002Fdata.nvision2.eecs.yorku.ca\u002FJAAD_dataset\u002F)\n* [DS4C-PPP](https:\u002F\u002Fwww.kaggle.com\u002Fdatasets\u002Fkimjihoo\u002Fcoronavirusdataset)\n* [BDBC COVID-19](https:\u002F\u002Fgithub.com\u002FBDBC-KG-NLP\u002FCOVID-19-tracker)\n* [Vi-Fi](https:\u002F\u002Fsites.google.com\u002Fwinlab.rutgers.edu\u002Fvi-fidataset\u002Fhome)\n## Others Agents Datasets\n### Aircraft\n* [LocaRDS](https:\u002F\u002Fatmdata.github.io\u002F)\n* [ZUMAVD](https:\u002F\u002Frpg.ifi.uzh.ch\u002Fzurichmavdataset.html)\n### Ship\n* [Ushant](https:\u002F\u002Ffigshare.com\u002Farticles\u002Fdataset\u002FUshant_AIS_dataset\u002F8966273)\n* [Cargo](https:\u002F\u002Flink.springer.com\u002Farticle\u002F10.1007\u002Fs10707-020-00421-y)\n### Hurricane and Animal\n* [HURDAT2](https:\u002F\u002Fwww.nhc.noaa.gov\u002Fdata\u002F)\n* 
[Movebank](https:\u002F\u002Fwww.movebank.org\u002Fcms\u002Fmovebank-main)\n\n# 🌹 Acknowledgments\nWe are grateful to the authors and developers who provided the papers, the open-source code, and the project website! Thank you for your positive contributions to the agent trajectory prediction community. Your thoughts and contributions are a green signal for us. If you have suggestions or additional insights, feel free to open an issue or submit a pull request.\n\n# 🌟 Star History\n\n[![Star History Chart](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FPsychic-DL_Awesome-Traffic-Agent-Trajectory-Prediction_readme_0371064377b3.png)](https:\u002F\u002Fstar-history.com\u002F#Psychic-DL\u002FAwesome-Traffic-Agent-Trajectory-Prediction&Date)\n\n\n","# 精选：交通智能体轨迹预测\n![版本](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FVersion-1.0-ff69b4.svg) ![最后更新](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FLastUpdated-2024.11-lightgrey.svg) ![主题](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FTopic-trajectory--prediction-yellow.svg?logo=github) ![精选](https:\u002F\u002Fawesome.re\u002Fbadge.svg) ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F-C++-00599C?style=flat-square&logo=cplusplus&logoColor=FFFFFF) ![语言](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F-Python-F37626?style=flat-square&logo=python&logoColor=FFFFFF) ![框架](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F-Pytorch-EE4C2C?style=flat-square&logo=pytorch&logoColor=FFFFFF) 
![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F-ChatGPT-412991?style=flat-square&logo=openai&logoColor=FFFFFF)\n\n\n![图片](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FPsychic-DL_Awesome-Traffic-Agent-Trajectory-Prediction_readme_9b524df57ef8.png)\n![图片](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FPsychic-DL_Awesome-Traffic-Agent-Trajectory-Prediction_readme_1ab81fe9ac60.png)\n![图片](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FPsychic-DL_Awesome-Traffic-Agent-Trajectory-Prediction_readme_5ed6ec4a272b.png)\n![图片](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FPsychic-DL_Awesome-Traffic-Agent-Trajectory-Prediction_readme_9be25c5c8e99.png)\n![图片](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FPsychic-DL_Awesome-Traffic-Agent-Trajectory-Prediction_readme_b45257e45fd3.png)\n![图片](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FPsychic-DL_Awesome-Traffic-Agent-Trajectory-Prediction_readme_dc42b54658b4.png)\n\n# 🤝 贡献\n\n这是一个关于交通智能体轨迹预测的最新研究资料（数据集、论文和代码）的列表。持续更新，欢迎关注！\n\n**维护者：李超能 (兰州交通大学)           邮箱：xdchaonengli@163.com**\n\n欢迎随时提交 Pull Requests 以添加新资源，或发送电子邮件给我们提出问题、进行讨论与合作。**我们希望能连接更多多智能体轨迹预测领域的学生、教师和专家，如果您也想加入，可以添加我的微信 (CN15691969157)。让我们一起创建轨迹预测社区群组吧！**\n\n# 🧐 引用\n\n如果本仓库加速了您的研究，请考虑引用我们的论文：\n```\n@article{11222824,\n  author={Li, Chaoneng and Wang, Xiaolong and Zhao, Shuxu and Wang, Xiaohu and Ye, Ze},\n  journal={IEEE Transactions on Vehicular Technology}, \n  title={DiffMATP: Interaction-Aware Multi-Agent Trajectory Prediction via Denoising Diffusion Models}, \n  year={2025},\n  pages={1-14},\n  doi={10.1109\u002FTVT.2025.3627215}}\n@inproceedings{li2022fidelity,\n  title={Fidelity Evaluation of Virtual Traffic Based on Anomalous Trajectory Detection},\n  author={Li, Chaoneng and Chao, Qianwen and Feng, Guanwen and Wang, Qiongyan and Liu, Pengfei and Li, Yunan and Miao, Qiguang},\n  booktitle={2022 IEEE\u002FRSJ International Conference on Intelligent Robots and Systems (IROS)},\n  pages={8157--8164},\n  
year={2022},\n  organization={IEEE}\n}\n@article{li2024difftad,\n  title={DiffTAD: Denoising diffusion probabilistic models for vehicle trajectory anomaly detection},\n  author={Li, Chaoneng and Feng, Guanwen and Li, Yunan and Liu, Ruyi and Miao, Qiguang and Chang, Liang},\n  journal={Knowledge-Based Systems},\n  volume={286},\n  pages={111387},\n  year={2024},\n  publisher={Elsevier}\n}\n```\n******\n\n# 📜 目录\n\n\u003C!-- TOC depthFrom:1 depthTo:6 withLinks:1 updateOnSave:1 orderedList:0 -->\n- [精选：交通智能体轨迹预测](#awesome-traffic-agent-trajectory-prediction)\n- [🤝 贡献](#-contributions)\n- [🧐 引用](#-citation)\n- [📜 目录](#-table-of-contents)\n- [📚 传统方法](#-traditional-methods)\n- [📚 2018 年及之前的会议与期刊论文](#-2018-and-before-conference-and-journal-papers)\n  - [会议论文](#conference-papers)\n  - [期刊论文](#journal-papers)\n  - [其他](#others)\n- [📚 2019 年会议与期刊论文](#-2019-conference-and-journal-papers)\n  - [2019 年会议论文](#conference-papers-2019)\n  - [2019 年期刊论文](#journal-papers-2019)\n  - [2019 年其他](#others-2019)\n- [📚 2020 年会议与期刊论文](#-2020-conference-and-journal-papers)\n  - [2020 年会议论文](#conference-papers-2020)\n  - [2020 年期刊论文](#journal-papers-2020)\n  - [2020 年其他](#others-2020)\n- [📚 2021 年会议与期刊论文](#-2021-conference-and-journal-papers)\n  - [2021 年会议论文](#conference-papers-2021)\n  - [2021 年期刊论文](#journal-papers-2021)\n  - [2021 年其他](#others-2021)\n- [📚 2022 年会议与期刊论文](#-2022-conference-and-journal-papers)\n  - [2022 年会议论文](#conference-papers-2022)\n  - [2022 年期刊论文](#journal-papers-2022)\n  - [2022 年其他](#others-2022)\n- [📚 2023 年会议与期刊论文](#-2023-conference-and-journal-papers)\n  - [2023 年会议论文](#conference-papers-2023)\n  - [2023 年期刊论文](#journal-papers-2023)\n  - [2023 年其他](#others-2023)\n- [📚 2024 年会议与期刊论文](#-2024-conference-and-journal-papers)\n  - [2024 年会议论文](#conference-papers-2024)\n  - [2024 年期刊论文](#journal-papers-2024)\n  - [2024 年其他](#others-2024)\n- [📚 2025 年会议与期刊论文](#-2025-conference-and-journal-papers)\n  - [2025 年会议论文](#conference-papers-2025)\n  - [2025 
年期刊论文](#journal-papers-2025)\n  - [2025 年其他](#others-2025)\n- [📚 2026 年会议与期刊论文](#-2026-conference-and-journal-papers)\n  - [2026 年会议论文](#conference-papers-2026)\n  - [2026 年期刊论文](#journal-papers-2026)\n  - [2026 年其他](#others-2026)\n- [📚 相关综述论文](#-related-review-papers)\n- [📚 数据集](#-datasets)\n  - [数据集综述](#reviews-about-datasets)\n  - [车辆公开数据集](#vehicles-publicly-available-datasets)\n  - [行人公开数据集](#pedestrians-publicly-available-datasets)\n  - [其他智能体数据集](#others-agents-datasets)\n    - [飞机](#aircraft)\n    - [船舶](#ship)\n    - [飓风与动物](#hurricane-and-animal)\n- [🌹 致谢](#-acknowledgments)\n- [🌟 星标历史](#-star-history)\n\n******\n\n# 📚 传统方法\n* 行人动力学社会力模型 (Social force model)，Physical Review E 1995。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002Fcond-mat\u002F9805244.pdf?ref=https:\u002F\u002Fgithubhelp.com)]\n* 模拟逃生恐慌的动力学特征，Nature 2000。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002Fcond-mat\u002F0009448.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fobisargoni\u002FrepastInterSim)]\n* 实证观测与微观模拟中的拥堵交通状态，Physical Review E 2000。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002Fcond-mat\u002F0002177.pdf)]\n* 一种自动轨迹预测 (Trajectory prediction) 分析方法论，AIAA 制导、导航与控制会议及展览 2004。[[paper](http:\u002F\u002Fciteseerx.ist.psu.edu\u002Fviewdoc\u002Fdownload?doi=10.1.1.76.2942&rep=rep1&type=pdf)]\n* 连续体人群，ACM Transactions on Graphics (TOG 2006)。[[paper](https:\u002F\u002Fwww.khoury.neu.edu\u002Fhome\u002Fscooper\u002Findex_files\u002Fpub\u002Ftreuille2006continuum.pdf)]\n* 飞机意图推断与轨迹预测的新算法，Journal of Guidance, Control, and Dynamics 2007。[[paper](https:\u002F\u002Fsci-hub.hkvisa.net\u002F10.2514\u002F1.26750)]\n* 用于实时多智能体导航 (Multi-Agent Navigation) 的互惠速度障碍，ICRA 2008。[[paper](http:\u002F\u002Fciteseerx.ist.psu.edu\u002Fviewdoc\u002Fdownload?doi=10.1.1.161.9395&rep=rep1&type=pdf)]\n* 你永远不会独行：多目标跟踪中的社交行为建模，ICCV 2009。[[paper](http:\u002F\u002Fvision.cse.psu.edu\u002Fcourses\u002FTracking\u002Fvlpr12\u002FPellegriniNeverWalkAlone.pdf)]\n* 用于车辆间碰撞风险估计的实时轨迹预测，International Conference on 
Intelligent Computer Communication and Processing 2009。[[paper](https:\u002F\u002Fhal.inria.fr\u002Finria-00438624\u002Fdocument)]\n* 基于社会力的人类运动预测进行人员跟踪，ICRA 2010。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=5509779)]\n* 解放机器人：密集交互人群中的导航，IROS 2010。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=5654369)]\n* 你和谁在一起，要去哪里？，CVPR 2011。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=5995468)]\n* 具有显式碰撞预测的社会力模型，Europhysics Letters 2011。[[paper](https:\u002F\u002Fiopscience.iop.org\u002Farticle\u002F10.1209\u002F0295-5075\u002F93\u002F68005\u002Fpdf)]\n* 轨迹预测的机器学习方法，AIAA 制导、导航与控制 (GNC) 会议 2013。[[paper](https:\u002F\u002Fsci-hub.hkvisa.net\u002F10.2514\u002F6.2013-4782)]\n* 混合交通 (Heterogeneous Traffic) 下无信号灯路口的骑行者社会力模型，IEEE Transactions on Industrial Informatics 2016。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=7536132)]\n* 走在前面：有向社会力模型，PLoS ONE 2017。[[paper](https:\u002F\u002Fjournals.plos.org\u002Fplosone\u002Farticle\u002Ffile?id=10.1371\u002Fjournal.pone.0169734&type=printable)]\n* AutoRVO：密集混合交通中具有动态约束的局部导航，arXiv 预印本 arXiv:1804.02915, 2018。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1804.02915.pdf)]\n* 行人交通社会力模型——前沿综述，Transport Reviews 2018。[[paper](https:\u002F\u002Fwww.researchgate.net\u002Fprofile\u002FXu-Chen-67\u002Fpublication\u002F320872442_Social_force_models_for_pedestrian_traffic_-_state_of_the_art\u002Flinks\u002F5bce680b4585152b144eac39\u002FSocial-force-models-for-pedestrian-traffic-state-of-the-art.pdf)]\n\n\n# 📚 2018 年及之前的会议与期刊论文\n\n## Conference Papers\n* Social GAN：基于生成对抗网络的社交可接受轨迹，CVPR 2018。[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fpapers\u002FGupta_Social_GAN_Socially_CVPR_2018_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fagrimgupta92\u002Fsgan)]\n* 使用深度神经网络编码人群交互以进行行人轨迹预测，CVPR 
2018。[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fpapers\u002FXu_Encoding_Crowd_Interaction_CVPR_2018_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fsvip-lab\u002FCIDNN)]\n* 快速与疯狂：使用单个卷积网络实现实时端到端 3D 检测、跟踪与运动预测，CVPR 2018。[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fpapers\u002FLuo_Fast_and_Furious_CVPR_2018_paper.pdf)]\n* MX-LSTM：混合轨迹片段与视觉片段以联合预测轨迹和头部姿态，CVPR 2018。[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fpapers\u002FHasan_MX-LSTM_Mixing_Tracklets_CVPR_2018_paper.pdf)]\n* 不确定交通场景下行人的长期车载预测，CVPR 2018。[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018\u002Fpapers\u002FBhattacharyya_Long-Term_On-Board_Prediction_CVPR_2018_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fapratimbhattacharyya18\u002Fonboard_long_term_prediction)]\n* R2P2：用于多样化精确生成路径预测的重参数化前推策略，ECCV 2018。[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fpapers\u002FNicholas_Rhinehart_R2P2_A_ReparameteRized_ECCV_2018_paper.pdf)]\n* 他们将去向何方？使用条件变分自编码器预测细粒度对抗性多智能体运动，ECCV 2018。[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCV_2018\u002Fpapers\u002FPanna_Felsen_Where_Will_They_ECCV_2018_paper.pdf)]\n* 为混合交通中的自动驾驶车辆生成舒适、安全且可理解的轨迹，国际智能交通系统会议 (ITSC 2018)。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8569658)]\n* 考虑形式化交通规则的城市环境中行人集合预测，ITSC 2018。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8569434)]\n* 使用双 LSTM 网络感知周围车辆的意图进行长视界轨迹预测，ITSC 2018。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8569595)]\n* 社交注意力：对人类人群注意力的建模，ICRA 2018。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8460504)]\n* 面向物体杂乱环境的交互感知行人运动预测数据驱动模型，ICRA 2018。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8461157)]\n* 人机交互的多模态概率模型规划，ICRA 
2018。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8460766)] [[code](https:\u002F\u002Fgithub.com\u002FStanfordASL\u002FTrafficWeavingCVAE)]\n* GD-GAN：用于人群轨迹预测和群体检测的生成对抗网络，ACCV 2018。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1812.07667.pdf)]\n* 基于机动 LSTM 的周围车辆多模态轨迹预测，IEEE 智能车辆研讨会 (IV 2018)。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8500493)]\n* 通过 LSTM 编码器 - 解码器架构的车辆轨迹序列到序列预测，IV 2018。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8500658)]\n* 利用大规模运动先验预测车辆轨迹，IV 2018。[[paper](http:\u002F\u002Fmssuraj.com\u002Fpublications\u002F2018_IV_0596.pdf)]\n* 用于轨迹预测的道路基础设施指标，IV 2018。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8500678)]\n* 共享空间中基于 LSTM 模型的混合交通轨迹预测，2018 年国际地理信息科学年会。[[paper](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-319-78208-9_16.pdf)]\n* SS-LSTM：用于行人轨迹预测的分层 LSTM 模型，WACV 2018。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8354239)] [[code](https:\u002F\u002Fgithub.com\u002Fxuehaouwa\u002FSS-LSTM)]\n* “眼见为实”：使用视觉注意力视锥进行行人轨迹预测，WACV 2018。[[paper](http:\u002F\u002Firtizahasan.com\u002FWACV_2018_Seeing_is_believing.pdf)]\n* 通过预测进行跟踪：一种用于多人定位和跟踪的深度生成模型，WACV 2018。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8354232)]\n* 上下文感知轨迹预测，国际模式识别会议 (ICPR 2018)。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8545447)]\n* 交叉口的可迁移行人运动预测模型，IROS 2018。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8593783)]\n* 多模态多人行为的生成建模，IROS 2018。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8594393)] [[code](https:\u002F\u002Fgithub.com\u002FStanfordASL\u002FNHumanModeling)]\n* 构建先验知识：使用城市环境数据的基于马尔可夫的行人预测模型，国际控制、自动化、机器人与视觉会议 (ICARCV 
2018)。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8581368)]\n* 使用双向循环神经网络的骑行者轨迹预测，2018 年澳大拉西亚人工智能联合会议。[[paper](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-030-03991-2_28.pdf)]\n* 注意力机制即所需，NIPS 2017。[[paper](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2017\u002Ffile\u002F3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf)]\n* Bi-Prediction：基于双向 LSTM 分类的行人轨迹预测，数字图像计算技术与应用国际会议 (DICTA 2017)。[[paper](https:\u002F\u002Fwww.researchgate.net\u002Fprofile\u002FDu-Huynh-2\u002Fpublication\u002F322001876_Bi-Prediction_Pedestrian_Trajectory_Prediction_Based_on_Bidirectional_LSTM_Classification\u002Flinks\u002F5c03cef4a6fdcc1b8d5029bb\u002FBi-Prediction-Pedestrian-Trajectory-Prediction-Based-on-Bidirectional-LSTM-Classification.pdf)]\n* 通过循环神经网络在占用栅格地图上进行概率车辆轨迹预测，ITSC 2017。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8317943)]\n* 基于自然视觉的城市环境行人行为预测方法，ITSC 2017。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8317848)]\n* 我的预测有多好？寻找轨迹预测评估的相似度度量，ITSC 2017。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8317825)]\n* 用于高速公路轨迹预测的 LSTM 网络，ITSC 2017。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8317913)]\n* DESIRE：动态场景中交互智能体的远期预测，CVPR 2017。[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fpapers\u002FLee_DESIRE_Distant_Future_CVPR_2017_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Ftdavchev\u002FDESIRE)]\n* 使用虚构博弈预测行人交互动力学，CVPR 2017。[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2017\u002Fpapers\u002FMa_Forecasting_Interactive_Dynamics_CVPR_2017_paper.pdf)]\n* 预测人群场景中的可行路径，IJCAI 2017。[[paper](https:\u002F\u002Fwww.ijcai.org\u002Fproceedings\u002F2017\u002F0386.pdf)]\n* 接下来会发生什么？体育视频中球员动作的预测，ICCV 
2017。[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017\u002Fpapers\u002FFelsen_What_Will_Happen_ICCV_2017_paper.pdf)]\n* 利用道路拓扑结构改进骑行者路径预测，IV 2017。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=7995734)]\n* 使用机器学习方法的短时 4D 轨迹预测，Proc. SID 2017。[[paper](https:\u002F\u002Fwww.sesarju.eu\u002Fsites\u002Fdefault\u002Ffiles\u002Fdocuments\u002Fsid\u002F2017\u002FSIDs_2017_paper_11.pdf)]\n* 使用深度层次网络生成长期轨迹，NIPS 2016。[[paper](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2016\u002Ffile\u002Ffe8c15fed5f808006ce95eddb7366e35-Paper.pdf)]\n* 学习社交礼仪：拥挤场景下的人类轨迹理解，ECCV 2016。[[paper](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-319-46484-8_33.pdf)]\n* 面向特定场景的运动预测知识迁移，ECCV 2016。[[paper](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-319-46448-0_42.pdf)]\n* Structural-RNN：时空图上的深度学习，CVPR 2016。[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2016\u002Fpapers\u002FJain_Structural-RNN_Deep_Learning_CVPR_2016_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fasheshjain399\u002FRNNexp)]\n* 拥挤移动物体的复杂场景视觉路径预测，CVPR 2016。[[paper](https:\u002F\u002Fwww.cv-foundation.org\u002Fopenaccess\u002Fcontent_cvpr_2016\u002Fpapers\u002FYoo_Visual_Path_Prediction_CVPR_2016_paper.pdf)]\n* Social LSTM：拥挤空间中的行人轨迹预测，CVPR 2016。[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2016\u002Fpapers\u002FAlahi_Social_LSTM_Human_CVPR_2016_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fquancore\u002Fsocial-lstm)]\n* 面向车辆安全系统的行人运动模型比较与评估，ITSC 2016。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=7795912)]\n* 行人运动的意图感知长期预测，ICRA 2016。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=7487409)]\n* 用于人类运动预测的新型基于规划算法，ICRA 2016。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=7487505)]\n* 使用全局和局部运动模式的 GLMP 实时行人路径预测，ICRA 
2016。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=7487768)]\n* 用于运动预测的增强字典学习，ICRA 2016。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=7487407&tag=1)]\n* 预测动态环境下的未来智能体运动，国际机器学习与应用会议 (ICMLA 2016)。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=7838128)]\n* 使用物理模型和人工神经网络的骑行者轨迹预测，IV 2016。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=7535484)]\n* STF-RNN：用于预测人们下一位置的空时特征循环神经网络，IEEE 计算智能系列研讨会 (SSCI 2016)。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=7849919)]\n* 提高行人安全的轨迹分析与预测：集成框架与评估，IV 2015。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=7225707)]\n* 目标目的地未知的轨迹预测贝叶斯意图推断，IROS 2015。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=7354203)]\n* 无监督机器人学习预测人体运动，ICRA 2015。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=7139254)]\n* 用于行人意图识别与路径预测结合的受控交互式多模型滤波器，ITSC 2015。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=7313129)]\n* 社会感知的超大规模人群预测，CVPR 2014。[[paper](https:\u002F\u002Fwww.cv-foundation.org\u002Fopenaccess\u002Fcontent_cvpr_2014\u002Fpapers\u002FAlahi_Socially-aware_Large-scale_Crowd_2014_CVPR_paper.pdf)]\n* 补丁通向未来：无监督视觉预测，CVPR 2014。[[paper](https:\u002F\u002Fwww.cv-foundation.org\u002Fopenaccess\u002Fcontent_cvpr_2014\u002Fpapers\u002FWalker_Patch_to_the_2014_CVPR_paper.pdf)]\n* 使用非参数回归的交叉口辅助在线机动识别与多模态轨迹预测，IV 2014。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=6856480)]\n* 使用肢体语言特征的行人路径预测，IV 2014。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=6856498)]\n* 拥挤环境下人类运动预测完整框架的行为估计，ICRA 2014。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=6907734)]\n* 学习预测协同导航智能体的轨迹，ICRA 
2014。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=6907442)]\n* 使用人工神经网络的公共交通中行人轨迹预测，国际模式识别会议 (ICPR 2014)。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=6977417)]\n* 基于上下文的行人路径预测，ECCV 2014。[[paper](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-319-10599-4_40.pdf)]\n* 面向驾驶员辅助系统的贝叶斯、基于机动的长期轨迹预测与关键性评估，ITSC 2014。[[paper](https:\u002F\u002Fwww.researchgate.net\u002Fprofile\u002FMatthias-Schreier\u002Fpublication\u002F266954831_Bayesian_Maneuver-Based_Long-Term_Trajectory_Prediction_and_Criticality_Assessment_for_Driver_Assistance_Systems\u002Flinks\u002F543fb6250cf2be1758cf3c39\u002FBayesian-Maneuver-Based-Long-Term-Trajectory-Prediction-and-Criticality-Assessment-for-Driver-Assistance-Systems.pdf)]\n* 城市环境中自动驾驶车辆的轨迹生成器，ICRA 2013。[[paper](https:\u002F\u002Fhal.inria.fr\u002Ffile\u002Findex\u002Fdocid\u002F789760\u002Ffilename\u002FICRA_Perez_et_al_2360.pdf)]\n* 基于运动模型和机动识别的车辆轨迹预测，IROS 2013。[[paper](https:\u002F\u002Fhal.archives-ouvertes.fr\u002Fhal-00881100\u002Fdocument)]\n* 用于增强车对 X 移动数据的预测性机动评估，IV 2012。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=6232217)]\n* 使用高斯混合模型的概率轨迹预测，IV 2012。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=6232277)]\n* 利用地图信息进行路口驾驶员意图估计，IV 2011。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=5940452)]\n* 轨迹预测：学习将情境映射到机器人轨迹，ICML 2009。[[paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F1553374.1553433)]\n* 基于蒙特卡洛的威胁评估：分析与改进，IV 2007。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=4290120)]\n* 机器学习中的高斯过程，2003 年机器学习暑期学校。[[paper](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-540-28650-9_4.pdf)]\n\n## 期刊论文\n* 软 + 硬连线注意力机制：一种用于人类轨迹预测和异常事件检测的长短期记忆网络 (LSTM) 框架，Neural networks 
2018。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1702.05552.pdf?ref=https:\u002F\u002Fgithubhelp.com)]\n* 基于圆形分布的城市场景长期路径预测，Image and Vision Computing 2018。[[paper](https:\u002F\u002Freader.elsevier.com\u002Freader\u002Fsd\u002Fpii\u002FS0262885617301853?token=DAD7B9F10835E05341405E75C5AB9F8F114FE99410544AD2BB4EFAA23BFC99D63EA8811C4A8C4F679593A61D0D3E35B6&originRegion=eu-west-1&originCreation=20220509082210)]\n* 非凸环境中异构多智能体系统的最优轨迹生成高效算法，RAL 2018。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8260912)]\n* 城市交通网络中基于深度学习方法 (Deep Learning) 的网络级车辆轨迹预测，Transportation Research Record 2018。[[paper](https:\u002F\u002Fwww.researchgate.net\u002Fprofile\u002FSeongjin-Choi-2\u002Fpublication\u002F327524033_Network-Wide_Vehicle_Trajectory_Prediction_in_Urban_Traffic_Networks_using_Deep_Learning\u002Flinks\u002F5e3a123e458515072d8015d2\u002FNetwork-Wide-Vehicle-Trajectory-Prediction-in-Urban-Traffic-Networks-using-Deep-Learning.pdf)]\n* 利用堆叠循环神经网络 (Recurrent Neural Networks) 通过运动轨迹预测行人意图，IEEE Transactions on Intelligent Vehicles 2018。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8481390)]\n* 周围车辆将如何移动？一种机动分类与运动预测的统一框架，IEEE Transactions on Intelligent Vehicles 2018。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F8286935)]\n* 通过高斯过程动态模型 (Gaussian Process Dynamical Models) 和行人活动识别进行行人路径、姿态和意图预测，TITS 2018。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8370119)]\n* 虚拟交通的基于字典的保真度度量，TVCG 2018。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8481568)]\n* 使用纹理合成的真实数据驱动交通流动画，TVCG 2017。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=7814314)]\n* 利用交互多模型整合物理与基于机动的方法进行车辆轨迹预测，IEEE Transactions on Industrial Electronics 
2017。[[paper](https:\u002F\u002Fwww.researchgate.net\u002Fprofile\u002FJianqiang-Wang\u002Fpublication\u002F321738692_Vehicle_Trajectory_Prediction_by_Integrating_Physics-_and_Maneuver-Based_Approaches_Using_Interactive_Multiple_Models\u002Flinks\u002F5fcde8c445851568d1469e52\u002FVehicle-Trajectory-Prediction-by-Integrating-Physics-and-Maneuver-Based-Approaches-Using-Interactive-Multiple-Models.pdf)]\n* 实时认证概率行人预测，RAL 2017。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=7959047)]\n* 深度学习 (Deep Learning) 驱动的视觉路径单图预测，TIP 2016。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=7576681)]\n* 年龄与群体驱动的行人行为：从观察到模拟，Collective Dynamics 2016。[[paper](https:\u002F\u002Fcollective-dynamics.eu\u002Findex.php\u002Fcod\u002Farticle\u002Fview\u002FA3\u002F5)]\n* 任意道路环境下基于机动的轨迹预测与关键性评估的综合方法，TITS 2016。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=7412746)]\n* 混合交通流的轨迹数据与流特性，Transportation Research Record 2015。[[paper](https:\u002F\u002Fwww.researchgate.net\u002Fprofile\u002FGowri-Asaithambi\u002Fpublication\u002F284708700_Trajectory_Data_and_Flow_Characteristics_of_Mixed_Traffic\u002Flinks\u002F5710718008ae68dc79097605\u002FTrajectory-Data-and-Flow-Characteristics-of-Mixed-Traffic.pdf)]\n* 预测和识别公共空间中的人类交互，Journal of Real-Time Image Processing 2015。[[paper](https:\u002F\u002Ffabiopoiesi.github.io\u002Ffiles\u002Fpapers\u002Fjournals\u002F2014_JRTIP_PredictingRecognizingInteractionsPublic_Poiesi_Cavallaro.pdf)]\n* 使用动态行人代理学习集体人群行为，International Journal of Computer Vision 2015。[[paper](https:\u002F\u002Fdspace.mit.edu\u002Fbitstream\u002Fhandle\u002F1721.1\u002F103360\u002F11263_2014_735_ReferencePDF.pdf?sequence=1&isAllowed=y)]\n* 具有不确定、变化意图的行人的实时预测建模与鲁棒避让，Algorithmic Foundations of Robotics XI 2015。[[paper](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-319-16595-0_10.pdf)]\n* BRVO：使用速度空间推理预测行人轨迹，International Journal of Robotics Research 
2015。[[paper](https:\u002F\u002Fwww.cs.cityu.edu.hk\u002F~rynson\u002Fpapers\u002Fijrr15.pdf)]\n* 学习意图以改进人类运动预测，Robotics and Autonomous Systems 2014。[[paper](https:\u002F\u002Fwww.techunited.nl\u002Fmedia\u002Fimages\u002FKwalificatie%20materiaal%202014\u002FElfring_2014.pdf)]\n* 基于隐马尔可夫模型 (Hidden Markov Models) 的自适应性参数选择轨迹预测方法，TITS 2014。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=6918501)]\n* TraPlan：交通网络中有效的三位一体轨迹预测模型，TITS 2014。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=6899589)]\n* 行人会过马路吗？关于行人路径预测的研究，TITS 2013。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=6632960)]\n* 使用贝叶斯非参数可达性树 (Bayesian Nonparametric Reachability Trees) 的移动智能体轨迹预测，Infotech@ Aerospace 2011。[[paper](https:\u002F\u002Fdspace.mit.edu\u002Fbitstream\u002Fhandle\u002F1721.1\u002F114899\u002FAoude_Infotech11.pdf?sequence=1&isAllowed=y)]\n* 用于人类运动的高斯过程动态模型，TPAMI 2008。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=4359316)]\n* 线性滤波与预测问题的新方法，Journal of Basic Engineering 1960。[[paper](http:\u002F\u002F160.78.24.2\u002FPublic\u002FKalman\u002FKalman1960.pdf)]\n\n## 其他\n* 轨迹预测方法评估与 TrajNet 基准说明。arXiv 预印本 arXiv:1805.07663, 2018。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1805.07663.pdf)] [[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCVW_2018\u002Fpapers\u002F11131\u002FBecker_RED_A_simple_but_effective_Baseline_Predictor_for_the_TrajNet_ECCVW_2018_paper.pdf)]\n* Scene-LSTM：一种人类轨迹预测模型，arXiv 预印本 arXiv:1808.04018, 2018。[[论文](https:\u002F\u002Farxiv.org\u002Fftp\u002Farxiv\u002Fpapers\u002F1808\u002F1808.04018.pdf)]\n* 用于车辆轨迹预测的卷积社会池化，CVPR 研讨会 2018。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2018_workshops\u002Fpapers\u002Fw29\u002FDeo_Convolutional_Social_Pooling_CVPR_2018_paper.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fnachiket92\u002Fconv-social-pooling)]\n* 用于轨迹预测的卷积神经网络，ECCV 研讨会 
2018。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCVW_2018\u002Fpapers\u002F11131\u002FNikhil_Convolutional_Neural_Network_for_Trajectory_Prediction_ECCVW_2018_paper.pdf)]\n* Group LSTM：拥挤场景中的群体轨迹预测，ECCV 研讨会 2018。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ECCVW_2018\u002Fpapers\u002F11131\u002FBisagno_Group_LSTM_Group_Trajectory_Prediction_in_Crowded_Scenarios_ECCVW_2018_paper.pdf)]\n* 他们要过马路吗？行人过街行为的基准数据集与基线，ICCV 研讨会 2017。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2017_workshops\u002Fpapers\u002Fw3\u002FRasouli_Are_They_Going_ICCV_2017_paper.pdf)] [[网站](https:\u002F\u002Fdata.nvision2.eecs.yorku.ca\u002FJAAD_dataset\u002F)]\n* 使用空间感知深度注意力模型进行人类轨迹预测，arXiv 预印本 arXiv:1705.09436, 2017。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1705.09436.pdf)]\n* 建模人类运动的时空动态以预测未来轨迹，AAAI 研讨会 2015。[[论文](https:\u002F\u002Fwww.diva-portal.org\u002Fsmash\u002Fget\u002Fdiva2:808848\u002FFULLTEXT01.pdf)]\n\n# 📚 2019 年会议与期刊论文\n\n## 2019 年会议论文\n* MultiPath：用于行为预测的多个概率锚点轨迹假设，机器人学习会议 (CoRL 2019)。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1910.05449.pdf)]\n* 使用程序化弱监督生成多智能体轨迹，ICLR 2019。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1803.07612.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fezhan94\u002Fmultiagent-programmatic-supervision)]\n* 从部分观测中随机预测多智能体交互，ICLR 2019。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1902.09641.pdf)]\n* TrafficPredict：面向异构交通参与者的轨迹预测，AAAI 2019。[[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F4569\u002F4447)] [[代码](https:\u002F\u002Fgithub.com\u002Fhuang-xx\u002FTrafficPredict)]\n* 基于生成对抗网络的数据驱动人群仿真，国际计算机动画与社会代理会议 (CASA 2019)。[[论文](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3328756.3328769)] [[代码](https:\u002F\u002Fgithub.com\u002Famiryanj\u002FcrowdGAN)]\n* RobustTP：针对密集交通中噪声传感器输入的异构道路参与者端到端轨迹预测，ACM 汽车计算机科学研讨会 (CSCS 2019)。[[论文](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3359999.3360495)] 
[[代码](https:\u002F\u002Fgithub.com\u002Frohanchandra30\u002FTrackNPred)]\n* 你要去哪里？动态场景中路径预测的模仿决策学习，CVPR 2019。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FLi_Which_Way_Are_You_Going_Imitative_Decision_Learning_for_Path_CVPR_2019_paper.pdf)]\n* 用于上下文轨迹预测的多智能体张量融合，CVPR 2019。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FZhao_Multi-Agent_Tensor_Fusion_for_Contextual_Trajectory_Prediction_CVPR_2019_paper.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FprogrammingLearner\u002FMATF-architecture-details)]\n* 窥探未来：视频中未来人员活动和位置的预测，CVPR 2019。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FLiang_Peeking_Into_the_Future_Predicting_Future_Person_Activities_and_Locations_CVPR_2019_paper.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fgoogle\u002Fnext-prediction)] [[网站](https:\u002F\u002Fnext.cs.cmu.edu\u002F)]\n* SoPhie：一种用于预测符合社会与物理约束路径的注意力 GAN，CVPR 2019。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FSadeghian_SoPhie_An_Attentive_GAN_for_Predicting_Paths_Compliant_to_Social_CVPR_2019_paper.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fcoolsunxu\u002Fsophie)]\n* SR-LSTM：面向行人轨迹预测的 LSTM 状态细化，CVPR 2019。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FZhang_SR-LSTM_State_Refinement_for_LSTM_Towards_Pedestrian_Trajectory_Prediction_CVPR_2019_paper.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fzhangpur\u002FSR-LSTM)]\n* TraPHic：利用加权交互进行密集和异构交通中的轨迹预测，CVPR 2019。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FChandra_TraPHic_Trajectory_Prediction_in_Dense_and_Heterogeneous_Traffic_Using_Weighted_CVPR_2019_paper.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FBenMSK\u002Ftrajectory_prediction_TraPHic)]\n* 克服混合密度网络的局限性：多模态未来预测的采样与拟合框架，CVPR 
2019。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FMakansi_Overcoming_Limitations_of_Mixture_Density_Networks_A_Sampling_and_Fitting_CVPR_2019_paper.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Flmb-freiburg\u002FMultimodal-Future-Prediction)]\n* Argoverse：利用丰富地图进行 3D 跟踪与预测，CVPR 2019。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FChang_Argoverse_3D_Tracking_and_Forecasting_With_Rich_Maps_CVPR_2019_paper.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fargoai\u002Fargoverse-api)]\n* 多智能体体育游戏的多样化生成，CVPR 2019。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FYeh_Diverse_Generation_for_Multi-Agent_Sports_Games_CVPR_2019_paper.pdf)]\n* 面向未来轨迹预测的关系查找，ICCV 2019。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fpapers\u002FChoi_Looking_to_Relations_for_Future_Trajectory_Forecast_ICCV_2019_paper.pdf)]\n* 概率轨迹预测背景下多样性损失的分析，ICCV 2019。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fpapers\u002FThiede_Analyzing_the_Variety_Loss_in_the_Context_of_Probabilistic_Trajectory_ICCV_2019_paper.pdf)]\n* The Trajectron：具有动态时空图的概率多智能体轨迹建模，ICCV 2019。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fpapers\u002FIvanovic_The_Trajectron_Probabilistic_Multi-Agent_Trajectory_Modeling_With_Dynamic_Spatiotemporal_Graphs_ICCV_2019_paper.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FStanfordASL\u002FTrajectron)]\n* 车辆 - 行人混合场景下运动学轨迹的联合预测，ICCV 2019。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fpapers\u002FBi_Joint_Prediction_for_Kinematic_Trajectories_in_Vehicle-Pedestrian-Mixed_Scenes_ICCV_2019_paper.pdf)]\n* STGAT：用于人类轨迹预测的时空交互建模，ICCV 2019。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fpapers\u002FHuang_STGAT_Modeling_Spatial-Temporal_Interactions_for_Human_Trajectory_Prediction_ICCV_2019_paper.pdf)] 
[[代码](https:\u002F\u002Fgithub.com\u002Fhuang-xx\u002FSTGAT)]\n* PIE：用于行人意图估计和轨迹预测的大规模数据集与模型，ICCV 2019。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_ICCV_2019\u002Fpapers\u002FRasouli_PIE_A_Large-Scale_Dataset_and_Models_for_Pedestrian_Intention_Estimation_ICCV_2019_paper.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Faras62\u002FPIEPredict)]\n* 用于模拟车对车相遇场景的多车辆轨迹生成器，ICRA 2019。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8793776)]\n* 使用深度卷积网络进行自动驾驶的多模态轨迹预测，ICRA 2019。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8793868)] [[代码](https:\u002F\u002Fgithub.com\u002Fdaeheepark\u002FPathPredictNusc)]\n* 用于自动驾驶车辆测试的基于力的异构交通仿真，ICRA 2019。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8794430)]\n* 通过对抗学习实现感知交互的多智能体跟踪与概率行为预测，ICRA 2019。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8793661)]\n* StarNet：基于星型拓扑深度神经网络的行人轨迹预测，IROS 2019。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8967811)]\n* 使用多智能体联合轨迹预测和交通规则的深度预测自动驾驶，IROS 2019。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8967708)]\n* 用于概率轨迹预测的条件生成神经系统，IROS 2019。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8967822)]\n* 自动驾驶车辆的可联合学习行为与轨迹规划，IROS 2019。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8967615)]\n* INFER：用于未来预测的中间表示，IROS 2019。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8968553)] [[代码](https:\u002F\u002Fgithub.com\u002Ftalsperre\u002FINFER)] [[网站](https:\u002F\u002Ftalsperre.github.io\u002FINFER\u002F)]\n* 用于行人轨迹预测的随机采样仿真，IROS 2019。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8967857)]\n* 使用路径同调聚类的运动轨迹长期预测，IROS 2019。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8968125)]\n* Social-BiGAT：使用 Bicycle-GAN 
和图注意力网络进行多模态轨迹预测，NIPS 2019。[[论文](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2019\u002Ffile\u002Fd09bf41544a3365a46c9077ebb5e35c3-Paper.pdf)]\n* 多未来预测，NIPS 2019。[[论文](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2019\u002Ffile\u002F86a1fa88adb5c33bd7a68ac2f9f3f96b-Paper.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fapple\u002Fml-multiple-futures-prediction)]\n* 通过耦合场景 LSTM 与人类运动 LSTM 进行轨迹预测，国际视觉计算研讨会 (ISVC 2019)。[[论文](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-030-33720-9_19.pdf)]\n* 使用社会金字塔的行人轨迹预测，太平洋 rim 国际人工智能会议 (PRICAI 2019)。[[论文](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-030-29911-8_34.pdf)]\n* 带有时空注意力模型的情境感知行人轨迹预测，计算机视觉冬季研讨会 (CVWW 2019)。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1902.05437.pdf)]\n* 用于行人轨迹预测的位置 - 速度注意力，WACV 2019。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8659060)]\n* 通过贝叶斯生成建模进行车辆交互的协调与轨迹预测，IV 2019。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8813821)]\n* 带有运动学约束的 Wasserstein 生成学习用于概率交互式驾驶行为预测，IV 2019。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8813783)]\n* AGen：用于自动驾驶的自适应生成预测网络，IV 2019。[[论文](http:\u002F\u002Fwww.cs.cmu.edu\u002F~cliu6\u002Ffiles\u002Fiv19-1.pdf)]\n* 使用基于交互的生成对抗网络进行交叉口车辆轨迹预测，ITSC 2019。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8916927), [论文](https:\u002F\u002Fwww.researchgate.net\u002Fprofile\u002FDebaditya-Roy-2\u002Fpublication\u002F337629029_Vehicle_Trajectory_Prediction_at_Intersections_using_Interaction_based_Generative_Adversarial_Networks\u002Flinks\u002F5de5e6224585159aa45cc76c\u002FVehicle-Trajectory-Prediction-at-Intersections-using-Interaction-based-Generative-Adversarial-Networks.pdf)]\n* GRIP：基于图的感知交互轨迹预测，智能运输系统会议 (ITSC 2019)。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8917228)] 
[[代码](https:\u002F\u002Fgithub.com\u002Fxincoder\u002FGRIP)]\n* GRIP++：增强版用于自动驾驶的基于图的感知交互轨迹预测，arXiv 预印本 arXiv:1907.07792, 2019。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1907.07792.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fxincoder\u002FGRIP)]\n* 基于姿态的脆弱道路使用者轨迹预测，IEEE 计算智能系列研讨会 (SSCI 2019)。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9003023)]\n* 使用对象属性和语义环境的轨迹预测，计算机视觉、成像与计算机图形理论及应用国际联合会议 (VISIGRAPP 2019)。[[论文](https:\u002F\u002Fpdfs.semanticscholar.org\u002F1d36\u002F88ae8738335f6452147de3c2f33bcfbd81b3.pdf)]\n* 使用障碍物轨迹预测的概率路径规划，CoDS-COMAD 2019。[[论文](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3297001.3297006)]\n* 使用对抗损失的行人轨迹预测，第 19 届瑞士交通研究会议 2019 论文集。[[论文](https:\u002F\u002Fwww.strc.ch\u002F2019\u002FKothari_Alahi.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fvita-epfl\u002FAdversarialLoss-SGAN)]\n\n## 2019 年期刊论文\n* 一种可扩展的轨迹预测框架，TITS。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8658195)]\n* 用于脆弱道路用户长期意图预测的情境循环预测模型，TITS。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8766889&tag=1)]\n* 基于结构 LSTM 网络的自动驾驶周围道路用户交互式轨迹预测，TITS。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8848853)]\n* 基于深度学习的交叉口交通仿真与编辑框架，TVCG。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8600335)]\n* Heter-Sim：通过交互式数据驱动优化的异构多智能体系统仿真，TVCG。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8865441)]\n* AADS：使用数据驱动算法增强自动驾驶仿真，SCIENCE ROBOTICS。[[论文](https:\u002F\u002Farxiv.org\u002Fftp\u002Farxiv\u002Fpapers\u002F1901\u002F1901.07849.pdf)]\n* 学习行人运动的生成式社会感知模型，RAL。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8760356)]\n* 极端拥挤场景下的行人轨迹预测，Sensors。[[论文](https:\u002F\u002Fwww.mdpi.com\u002F1424-8220\u002F19\u002F5\u002F1223\u002Fpdf)]\n* 利用社会亲和性长短期记忆 (Long Short-Term Memory) 
网络进行拥挤场景中人类轨迹预测，PR。[[论文](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0031320319301712)]\n\n## 其他 2019 年成果\n* 使用图神经网络 (Graph Neural Networks) 进行自动驾驶联合交互与轨迹预测，arXiv preprint arXiv:1912.07882, 2019。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1912.07882.pdf)]\n* 学习推断未来轨迹预测的关系，CVPR Workshops 2019。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPRW_2019\u002Fpapers\u002FPrecognition\u002FChoi_Learning_to_Infer_Relations_for_Future_Trajectory_Forecast_CVPRW_2019_paper.pdf)]\n* Social Ways：学习行人轨迹的多模态分布，CVPR Workshops 2019。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPRW_2019\u002Fpapers\u002FPrecognition\u002FAmirian_Social_Ways_Learning_Multi-Modal_Distributions_of_Pedestrian_Trajectories_With_GANs_CVPRW_2019_paper.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fcrowdbotp\u002Fsocialways)]\n* 拥挤空间中的社交和场景感知轨迹预测，ICCV Workshops 2019。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1909.08840.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FOghma\u002Fsns-lstm\u002F)]\n* 具有注意力循环神经过程的自动驾驶车辆概率轨迹预测，arXiv preprint arXiv:1910.08102, 2019。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1910.08102.pdf)]\n* 带有社交图网络的随机轨迹预测，arXiv preprint arXiv:1907.10233, 2019。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1907.10233.pdf)]\n\n# 📚 2020 年会议和期刊论文\n\n## 2020 年会议论文\n* 用于行人轨迹预测的时空图 Transformer 网络，ECCV 2020。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2005.08514.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FMajiker\u002FSTAR)]\n* AutoTrajectory：使用动态点从视频中无标签提取和预测轨迹，ECCV 2020。[[论文](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-030-58601-0_38.pdf)]\n* PiP：面向自动驾驶的规划感知轨迹预测，ECCV 2020。[[论文](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-030-58589-1_36.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FHaoran-SONG\u002FPiP-Planning-informed-Prediction)]\n* SMART：同时多智能体循环轨迹预测，ECCV 
2020。[[论文](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-030-58583-9_28.pdf)]\n* Trajectron++：具有异构数据的动态可行轨迹预测，ECCV 2020。[[论文](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-030-58523-5_40.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FStanfordASL\u002FTrajectron-plus-plus)]\n* SimAug：从仿真中学习鲁棒表示以进行轨迹预测，ECCV 2020。[[论文](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-030-58601-0_17.pdf)] [[代码](https:\u002F\u002Fnext.cs.cmu.edu\u002Fsimaug\u002F)]\n* 通过多模态上下文理解实现多样且可行的轨迹预测，ECCV 2020。[[论文](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-030-58621-8_17.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fkami93\u002FCMU-DATF)]\n* 不在于旅程而在于目的地：终点条件化的轨迹预测，ECCV 2020。[[论文](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-030-58536-5_45.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FHarshayuGirase\u002FHuman-Path-Prediction)]\n* 我如何看见我的未来？FvTraj：利用第一人称视角进行行人轨迹预测，ECCV 2020。[[论文](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-030-58571-6_34.pdf)]\n* 用于多智能体运动预测的动态与静态上下文感知 LSTM，ECCV 2020。[[论文](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-030-58589-1_33.pdf)]\n* 学习车道图表示以进行运动预测，ECCV 2020。[[论文](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-030-58536-5_32.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fuber-research\u002FLaneGCN)]\n* 隐式潜在变量模型用于场景一致的运动预测，ECCV 2020。[[论文](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-030-58592-1_37.pdf)]\n* 通过模拟感知和预测测试自动驾驶车辆的安全性，ECCV 2020。[[论文](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-030-58574-7_19.pdf)]\n* 感知、预测与规划：通过可解释语义表示实现安全运动规划，ECCV 2020。[[论文](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-030-58592-1_25.pdf)]\n* 用于轨迹预测的 Transformer 网络，国际模式识别会议 (ICPR 
2020)。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9412190)] [[代码](https:\u002F\u002Fgithub.com\u002FFGiuliari\u002FTrajectory-Transformer)]\n* DAG-Net：用于轨迹预测的双注意力图神经网络，ICPR 2020。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9412114)] [[代码](https:\u002F\u002Fgithub.com\u002Falexmonti19\u002Fdagnet)]\n* TNT：目标驱动轨迹预测，机器人学习会议 (CoRL 2020)。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2008.08294.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FHenry1iu\u002FTNT-Trajectory-Predition)]\n* Social-VRNN：交互行人的单次多模态轨迹预测，CoRL 2020。[[论文](https:\u002F\u002Fautonomousrobots.nl\u002Fdocs\u002F20-Brito-CoRL.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Ftud-amr\u002Fsocial_vrnn)]\n* 用于多模态概率运动预测的核轨迹图，CoRL 2020。[[论文](http:\u002F\u002Fproceedings.mlr.press\u002Fv100\u002Fzhi20a\u002Fzhi20a.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fwzhi\u002FKernelTrajectoryMaps)]\n* MATS：一种用于规划和控制的解释性轨迹预测表示，CoRL 2020。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2009.07517)] [[代码](https:\u002F\u002Fgithub.com\u002FStanfordASL\u002FMATS)]\n* 一种基于注意力的交互感知时空图神经网络用于轨迹预测，国际神经网络信息处理大会 (ICONIP 2020)。[[论文](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-030-63823-8_5.pdf)]\n* OpenTraj：评估人类轨迹数据集中的预测复杂度，ACCV 2020。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FACCV2020\u002Fpapers\u002FAmirian_OpenTraj_Assessing_Prediction_Complexity_in_Human_Trajectories_Datasets_ACCV_2020_paper.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fcrowdbotp\u002FOpenTraj)]\n* Goal-GAN：基于目标位置估计的多模态轨迹预测，ACCV 2020。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2010.01114.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fdendorferpatrick\u002FGoalGAN)]\n* 行人运动的语义合成，ACCV 2020。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FACCV2020\u002Fpapers\u002FPriisalu_Semantic_Synthesis_of_Pedestrian_Locomotion_ACCV_2020_paper.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FMariaPriisalu\u002Fspl)]\n* 
EvolveGraph：具有动态关系推理的多智能体轨迹预测，NIPS 2020。[[论文](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2020\u002Ffile\u002Fe4d8163c7a068b65a64c89bd745ec360-Paper.pdf)] [[网站](https:\u002F\u002Fjiachenli94.github.io\u002Fpublications\u002FEvolvegraph\u002F)]\n* 模糊查询注意力下的多智能体轨迹预测，NIPS 2020。[[论文](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2020\u002Ffile\u002Ffe87435d12ef7642af67d9bc82a8b3cd-Paper.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fnitinkamra1992\u002FFQA)]\n* 用于交通预测的时空图结构学习，AAAI 2020。[[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F5470\u002F5326)]\n* GMAN：用于交通预测的图多注意力网络，AAAI 2020。[[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F5477\u002F5333)] [[代码](https:\u002F\u002Fgithub.com\u002Fzhengchuanpan\u002FGMAN)]\n* CF-LSTM：基于级联特征的长短期网络用于预测行人轨迹，AAAI 2020。[[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F6943\u002F6797)]\n* OMuLeT：飓风轨迹预测的在线多领先时间位置预测，AAAI 2020。[[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F5444\u002F5300)]\n* 拥挤空间中的多模态交互感知轨迹预测，AAAI 2020。[[论文](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F6874\u002F6728)]\n* STINet：用于行人检测和轨迹预测的时空交互网络，CVPR 2020。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2020\u002Fpapers\u002FZhang_STINet_Spatio-Temporal-Interactive_Network_for_Pedestrian_Detection_and_Trajectory_Prediction_CVPR_2020_paper.pdf)]\n* CoverNet：使用轨迹集的多模态行为预测，CVPR 2020。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2020\u002Fpapers\u002FPhan-Minh_CoverNet_Multimodal_Behavior_Prediction_Using_Trajectory_Sets_CVPR_2020_paper.pdf)]\n* TPNet：用于运动预测的轨迹提议网络，CVPR 2020。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2020\u002Fpapers\u002FFang_TPNet_Trajectory_Proposal_Network_for_Motion_Prediction_CVPR_2020_paper.pdf)]\n* 用于人类轨迹预测的互惠学习网络，CVPR 
2020。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2020\u002Fpapers\u002FSun_Reciprocal_Learning_Networks_for_Human_Trajectory_Prediction_CVPR_2020_paper.pdf)]\n* MANTRA：用于多轨迹预测的记忆增强网络，CVPR 2020。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2020\u002Fpapers\u002FMarchetti_MANTRA_Memory_Augmented_Networks_for_Multiple_Trajectory_Prediction_CVPR_2020_paper.pdf)]\n* 用于轨迹预测的递归社会行为图，CVPR 2020。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2020\u002Fpapers\u002FSun_Recursive_Social_Behavior_Graph_for_Trajectory_Prediction_CVPR_2020_paper.pdf)]\n* 分叉路径的花园：迈向多未来轨迹预测，CVPR 2020。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2020\u002Fpapers\u002FLiang_The_Garden_of_Forking_Paths_Towards_Multi-Future_Trajectory_Prediction_CVPR_2020_paper.pdf)] [[代码](https:\u002F\u002Fnext.cs.cmu.edu\u002Fmultiverse\u002F)]\n* Social-STGCNN：一种用于人类轨迹预测的社会时空图卷积神经网络，CVPR 2020。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2020\u002Fpapers\u002FMohamed_Social-STGCNN_A_Social_Spatio-Temporal_Graph_Convolutional_Neural_Network_for_Human_CVPR_2020_paper.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fabduallahmohamed\u002FSocial-STGCNN)]\n* VectorNet：从向量化表示编码高清地图和智能体动力学，CVPR 2020。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2020\u002Fpapers\u002FGao_VectorNet_Encoding_HD_Maps_and_Agent_Dynamics_From_Vectorized_Representation_CVPR_2020_paper.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FDQSSSSS\u002FVectorNet)]\n* 用于轨迹预测和填补的模仿非自回归建模，CVPR 2020。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2020\u002Fpapers\u002FQi_Imitative_Non-Autoregressive_Modeling_for_Trajectory_Forecasting_and_Imputation_CVPR_2020_paper.pdf)]\n* 通过神经运动消息传递进行协同运动预测，CVPR 2020。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2020\u002Fpapers\u002FHu_Collaborative_Motion_Prediction_via_Neural_Motion_Message_Passing_CVPR_2020_paper.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FPhyllisH\u002FNMMP)]\n* 
UST：统一自动驾驶中轨迹预测的时空上下文，IROS 2020。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9340943)]\n* 使用 CNN-LSTM 网络的联网车辆交互感知轨迹预测，IEEE 工业电子学会年会 (IECON 2020)。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9255162)]\n* GISNet：用于车辆轨迹预测的基于信息共享的网络，国际神经网络联合会议 (IJCNN 2020)。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9206770)]\n* 在噪声监督下解耦人类动力学以进行行人运动预测，WACV 2020。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_WACV_2020\u002Fpapers\u002FMangalam_Disentangling_Human_Dynamics_for_Pedestrian_Locomotion_Forecasting_with_Noisy_Supervision_WACV_2020_paper.pdf)] [[网站](https:\u002F\u002Fkarttikeya.github.io\u002Fpublication\u002Fplf\u002F)]\n* 用于灵活推理、规划和控制的深度模仿模型，ICLR 2020。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=Skl4mRNYDr)] [[代码](https:\u002F\u002Fgithub.com\u002Fnrhine1\u002Fdeep_imitative_models)] [[网站](https:\u002F\u002Fsites.google.com\u002Fview\u002Fimitative-models)]\n* 使用行列式点过程进行多样轨迹预测，ICLR 2020。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1907.04967.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FGruntrexpewrus\u002FTrajectoryFor-and-DPP)]\n* 通过关注生态嵌入在异构环境中进行轨迹预测，ACM 国际多媒体会议 2020。[[论文](http:\u002F\u002Fbasiclab.lab.nycu.edu.tw\u002Fassets\u002FAEE-GAN_MM2020.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FEgo2Eco\u002FAEE-GAN)]\n* 使用深度时间和空间卷积神经网络进行多轨迹预测，IROS 2020。[[论文](http:\u002F\u002Fras.papercept.net\u002Fimages\u002Ftemp\u002FIROS\u002Ffiles\u002F1081.pdf)]\n* 用于自动驾驶车辆的带车道注意力的概率多模态轨迹预测，IROS 2020。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F9341034\u002F)]\n* Lane-Attention：通过学习车辆对车道的注意力来预测车辆移动轨迹，IROS 2020。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1909.13377.pdf)]\n* 用于轨迹预测的交互感知卡尔曼神经网络，IEEE 智能车辆研讨会 (IV 2020)。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1902.10928.pdf)]\n* 用于多模态联合车辆运动预测的多头注意力，ICRA 2020。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9197340)]\n\n## 2020 
年期刊论文\n* TrajVAE：一种用于轨迹生成的变分自编码器（Variational AutoEncoder）模型，Neurocomputing。[[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0925231220312017)]\n* 基于状态细化 LSTM（长短期记忆网络）的社交感知行人轨迹预测，TPAMI。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9261113)]\n* 利用图 LSTM 中的谱聚类预测道路参与者的轨迹和行为，IEEE Robotics and Automation Letters。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9126166)]\n* 基于注意力（Attention）机制的车辆轨迹预测，IEEE Transactions on Intelligent Vehicles。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9084255)]\n* AC-VRNN：用于多未来轨迹预测的注意力条件变分循环神经网络，Computer Vision and Image Understanding。[[paper](https:\u002F\u002Freader.elsevier.com\u002Freader\u002Fsd\u002Fpii\u002FS1077314221000898?token=F06466B50D3AE170EC14D460C1AFE91DFE5D61047357252C808857A2BBD4FE4CF2FF3076AD391F842F155CAD2B102C5F&originRegion=eu-west-1&originCreation=20220421024623)]\n* PoPPL：结合自动路线类别聚类的 LSTM 行人轨迹预测，TNNLS。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9031707)]\n* 使用深度条件生成模型进行实时轨迹预测，IEEE Robotics and Automation Letters。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8957482)]\n* 基于以参与者为中心的空时网格的场景合规轨迹预测，RAL。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9000540)]\n* 恒定速度模型能教给我们关于行人运动预测的什么，RAL。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1903.07933.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fcschoeller\u002Fconstant_velocity_pedestrian_motion)]\n* 用于轨迹预测的多模态深度生成模型：一种条件变分自编码器方法，RAL。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9286482)]\n* 深度上下文地图：使用特定位置潜在地图的参与者轨迹预测，RAL。[[paper](http:\u002F\u002Fras.papercept.net\u002Fimages\u002Ftemp\u002FIROS\u002Ffiles\u002F2532.pdf)]\n* 学习拥挤场景中空间与交互动态的结构化表示以用于轨迹预测，RAL。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9309332)] 
[[code](https:\u002F\u002Fgithub.com\u002Ftdavchev\u002Fstructured-trajectory-prediction), [code](https:\u002F\u002Fgithub.com\u002Ftdavchev\u002FStochastic-Futures-Prediction)]\n* 概率人群 GAN（生成对抗网络）：使用图车辆 - 行人注意力网络的多模态行人轨迹预测，RAL。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9123560)]\n* 面向自主街道穿越的多模态交互感知运动预测，International Journal of Robotics Research。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1808.06887)]\n* 基于深度卷积 LSTM 网络的行人轨迹预测，TITS。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9043898)] [[code](https:\u002F\u002Fgithub.com\u002FParadiseCK\u002FDeepConvLstmNet)]\n* 具有空时张量融合的多车协同学习用于轨迹预测，TITS。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9151374)]\n* 使用记忆增强网络对移动参与者进行多轨迹预测，TPAMI。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9138768)]\n* 通过逆强化学习框架正则化神经网络以用于未来轨迹预测，IET Computer Vision。[[paper](https:\u002F\u002Fietresearch.onlinelibrary.wiley.com\u002Fdoi\u002Fepdf\u002F10.1049\u002Fiet-cvi.2019.0546)] [[code](https:\u002F\u002Fgithub.com\u002Fd1024choi\u002Ftraj-pred-irl)]\n* 基于 CNN（卷积神经网络）-LSTM 序列模型的运动轨迹预测，Science China Information Sciences。[[paper](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002Fs11432-019-2761-y.pdf)]\n\n## 其他 2020 年论文\n* 场景门控社交图：基于动态社交图和场景约束的行人轨迹预测，arXiv 预印本 arXiv:2010.05507, 2020。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2010.05507.pdf)]\n* 动态场景中多个智能体的鲁棒轨迹预测，arXiv 预印本 arXiv:2005.13133, 2020。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2005.13133.pdf)]\n* 地图自适应目标导向轨迹预测，arXiv 预印本 arXiv:2009.04450, 2020。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2009.04450.pdf)]\n* 具有空间连续性的时空注意力网络用于轨迹预测，arXiv 预印本 arXiv:2003.06107, 2020。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2003.06107v1.pdf)]\n* Trajformer：用于自动驾驶的具有局部自注意力上下文的轨迹预测，arXiv 预印本 arXiv:2011.14910, 2020。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2011.14910.pdf)] 
[[code](https:\u002F\u002Fgithub.com\u002FManojbhat09\u002FTrajformer)]\n* TPPO：一种带有伪预言家的新颖轨迹预测器，arXiv 预印本 arXiv:2002.01852, 2020。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2002.01852.pdf)]\n* 通过半监督模型的迁移学习进行车辆轨迹预测，arXiv 预印本 arXiv:2007.06781, 2020。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2007.06781.pdf)]\n* Social-WaGDAT：通过 Wasserstein 图双注意力网络实现交互感知轨迹预测，arXiv 预印本 arXiv:2002.06241, 2020。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2002.06241.pdf)]\n* 基于网格计划的条件未知环境轨迹预测，arXiv 预印本 arXiv:2001.00735, 2020。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2001.00735.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fnachiket92\u002FP2T)]\n* 结合语义地图和动态图注意力网络的自动驾驶多模态轨迹预测，NIPS Workshops 2020。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2103.16273.pdf)]\n* 场景门控社交图：基于动态社交图和场景约束的行人轨迹预测，arXiv 预印本 arXiv:2010.05507, 2020。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2010.05507v1.pdf)]\n* PathGAN：使用注意力生成对抗网络进行局部路径规划，arXiv 预印本 arXiv:2007.03877, 2020。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2007.03877.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fd1024choi\u002Fpathgan_pytorch)]\n\n# 📚 2021 年会议和期刊论文\n\n## 2021 年会议论文\n* 多智能体轨迹预测中的协作不确定性，NIPS 2021。[[论文](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2021\u002Ffile\u002F31ca0ca71184bbdb3de7b20a51e88e90-Paper.pdf)]\n* GRIN：用于多智能体轨迹预测的生成关系与意图网络，NIPS 2021。[[论文](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2021\u002Ffile\u002Fe3670ce0c315396e4836d7024abcf3dd-Paper.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Flongyuanli\u002FGRIN_NeurIPS21)]\n* LibCity：交通预测开源库，SIGSPATIAL 2021。[[论文](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3474717.3483923)] [[代码](https:\u002F\u002Fgithub.com\u002FLibCity\u002FBigscity-LibCity)]\n* 使用 Transformer 网络和增强信息预测城市场景中的车辆轨迹，IEEE 智能车辆研讨会 (IV 2021)。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9575242)]\n* Social-STAGE：时空多模态未来轨迹预测，ICRA 
2021。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2011.04853.pdf)]\n* AVGCN：基于人类注意力引导的图卷积网络进行轨迹预测，ICRA 2021。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2101.05682.pdf)]\n* 探索多路径轨迹预测的动态上下文，ICRA 2021。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9562034)] [[代码](https:\u002F\u002Fgithub.com\u002Fwtliao\u002FDCENet)]\n* 使用上下文增强的 Transformer 网络进行行人轨迹预测，ICRA 2021。[[论文](https:\u002F\u002Fwww.researchgate.net\u002Fpublication\u002F346614349_Pedestrian_Trajectory_Prediction_using_Context-Augmented_Transformer_Networks)] [[代码](https:\u002F\u002Fgithub.com\u002FKhaledSaleh\u002FContext-Transformer-PedTraj)]\n* 用于轨迹预测的光谱时序图神经网络，ICRA 2021。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2106.02930.pdf)]\n* 避障的拥堵感知多智能体轨迹预测，ICRA 2021。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9560994)] [[代码](https:\u002F\u002Fgithub.com\u002Fxuxie1031\u002FCollisionFreeMultiAgentTrajectoryPrediciton)]\n* 通过概率预测行人未来运动实现人群中的预见性导航，ICRA 2021。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9561022)]\n* AgentFormer：用于社会 - 时间多智能体预测的感知智能体 Transformer，ICCV 2021。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FYuan_AgentFormer_Agent-Aware_Transformers_for_Socio-Temporal_Multi-Agent_Forecasting_ICCV_2021_paper.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FKhrylx\u002FAgentFormer)] [[网站](https:\u002F\u002Fye-yuan.com\u002Fagentformer\u002F)]\n* 用于轨迹预测的基于似然的多样性采样，ICCV 2021。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FJason_Likelihood-Based_Diverse_Sampling_for_Trajectory_Forecasting_ICCV_2021_paper.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FJasonMa2016\u002FLDS)]\n* MG-GAN：防止行人轨迹预测中分布外样本的多生成器模型，ICCV 2021。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2108.09274.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fselflein\u002FMG-GAN)]\n* 用于低延迟轨迹预测的时空一致性网络，ICCV 
2021。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FLi_Spatial-Temporal_Consistency_Network_for_Low-Latency_Trajectory_Forecasting_ICCV_2021_paper.pdf)]\n* 多模态轨迹预测三步法：模态聚类、分类与合成，ICCV 2021。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FSun_Three_Steps_to_Multimodal_Trajectory_Prediction_Modality_Clustering_Classification_and_ICCV_2021_paper.pdf)]\n* 从目标、航点与路径到长期人类轨迹预测，ICCV 2021。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FMangalam_From_Goals_Waypoints__Paths_to_Long_Term_Human_Trajectory_ICCV_2021_paper.pdf)] [[代码](https:\u002F\u002Fkarttikeya.github.io\u002Fpublication\u002Fynet\u002F)]\n* 你要去哪里？利用专家目标示例进行动态轨迹预测，ICCV 2021。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FZhao_Where_Are_You_Heading_Dynamic_Trajectory_Prediction_With_Expert_Goal_ICCV_2021_paper.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FJoeHEZHAO\u002Fexpert_traj)]\n* DenseTNT：从密集目标集进行的端到端轨迹预测，ICCV 2021。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FGu_DenseTNT_End-to-End_Trajectory_Prediction_From_Dense_Goal_Sets_ICCV_2021_paper.pdf)]\n* 自动驾驶的安全感知运动预测（针对未见车辆），ICCV 2021。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FRen_Safety-Aware_Motion_Prediction_With_Unseen_Vehicles_for_Autonomous_Driving_ICCV_2021_paper.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fxrenaa\u002FSafety-Aware-Motion-Prediction)]\n* LOKI：用于轨迹预测的长期与关键意图，ICCV 2021。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FGirase_LOKI_Long_Term_and_Key_Intentions_for_Trajectory_Prediction_ICCV_2021_paper.pdf)] [[数据集](https:\u002F\u002Fusa.honda-ri.com\u002Floki)]\n* 通过反事实分析进行人类轨迹预测，ICCV 
2021。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FChen_Human_Trajectory_Prediction_via_Counterfactual_Analysis_ICCV_2021_paper.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FCHENGY12\u002FCausalHTP)]\n* 通过分布判别进行个性化轨迹预测，ICCV 2021。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FChen_Personalized_Trajectory_Prediction_via_Distribution_Discrimination_ICCV_2021_paper.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FCHENGY12\u002FDisDis)]\n* 异构轨迹预测的无限邻域交互，ICCV 2021。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FZheng_Unlimited_Neighborhood_Interaction_for_Heterogeneous_Trajectory_Prediction_ICCV_2021_paper.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fzhengfang1997\u002FUnlimited-Neighborhood-Interaction-for-Heterogeneous-Trajectory-Prediction)]\n* Social NCE：社交感知运动表示的对比学习，ICCV 2021。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FLiu_Social_NCE_Contrastive_Learning_of_Socially-Aware_Motion_Representations_ICCV_2021_paper.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fvita-epfl\u002Fsocial-nce)]\n* RAIN：用于运动预测的强化混合注意力推理网络，ICCV 2021。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FLi_RAIN_Reinforced_Hybrid_Attention_Inference_Network_for_Motion_Forecasting_ICCV_2021_paper.pdf)]\n* 用于行人轨迹预测的多监督时间金字塔网络，AAAI 2021。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2012.01884.pdf)]\n* SCAN：用于联合多智能体意图预测的空间上下文注意网络，AAAI 2021。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2102.00109.pdf)]\n* 用于行人轨迹预测的解耦多关系图卷积网络，AAAI 2021。[[论文](https:\u002F\u002Fwww.aaai.org\u002FAAAI21Papers\u002FAAAI-1677.BaeI.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FInhwanBae\u002FDMRGCN)]\n* MotionRNN：一种适用于时空变化运动的视频预测灵活模型，CVPR 
2021。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fpapers\u002FWu_MotionRNN_A_Flexible_Model_for_Video_Prediction_With_Spacetime-Varying_Motions_CVPR_2021_paper.pdf)]\n* 使用堆叠 Transformer 进行多模态运动预测，CVPR 2021。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fpapers\u002FLiu_Multimodal_Motion_Prediction_With_Stacked_Transformers_CVPR_2021_paper.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fdecisionforce\u002FmmTransformer)] [[网站](https:\u002F\u002Fdecisionforce.github.io\u002FmmTransformer\u002F?utm_source=catalyzex.com)]\n* SGCN：用于行人轨迹预测的稀疏图卷积网络，CVPR 2021。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fpapers\u002FShi_SGCN_Sparse_Graph_Convolution_Network_for_Pedestrian_Trajectory_Prediction_CVPR_2021_paper.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fshuaishiliu\u002FSGCN)]\n* LaPred：动态智能体多模态未来轨迹的车道感知预测，CVPR 2021。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fpapers\u002FKim_LaPred_Lane-Aware_Prediction_of_Multi-Modal_Future_Trajectories_of_Dynamic_Agents_CVPR_2021_paper.pdf)]\n* 用于车道感知多样性轨迹预测的分治法，CVPR 2021。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2104.08277.pdf)]\n* Euro-PVI：密集城市中心的行人与车辆交互，CVPR 2021。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fpapers\u002FBhattacharyya_Euro-PVI_Pedestrian_Vehicle_Interactions_in_Dense_Urban_Centers_CVPR_2021_paper.pdf)] [[数据集](https:\u002F\u002Fwww.mpi-inf.mpg.de\u002Fdepartments\u002Fcomputer-vision-and-machine-learning\u002Fresearch\u002Feuro-pvi-dataset)]\n* 基于潜在信念能量模型的轨迹预测，CVPR 2021。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fpapers\u002FPang_Trajectory_Prediction_With_Latent_Belief_Energy-Based_Model_CVPR_2021_paper.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fbpucla\u002Flbebm)]\n* 自动驾驶的共享跨模态轨迹预测，CVPR 
2021。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fpapers\u002FChoi_Shared_Cross-Modal_Trajectory_Prediction_for_Autonomous_Driving_CVPR_2021_paper.pdf)]\n* 基于单目相机的行人与自车轨迹预测，CVPR 2021。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fpapers\u002FNeumann_Pedestrian_and_Ego-Vehicle_Trajectory_Prediction_From_Monocular_Camera_CVPR_2021_paper.pdf)] [[代码](https:\u002F\u002Fgitlab.com\u002FlukeN86\u002FpedFutureTracking)]\n* 人群中人类轨迹预测的可解释社会锚点，CVPR 2021。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fpapers\u002FKothari_Interpretable_Social_Anchors_for_Human_Trajectory_Forecasting_in_Crowds_CVPR_2021_paper.pdf)]\n* Introvert：通过条件 3D 注意力进行人类轨迹预测，CVPR 2021。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fpapers\u002FShafiee_Introvert_Human_Trajectory_Prediction_via_Conditional_3D_Attention_CVPR_2021_paper.pdf)]\n* MP3：一个统一的建图、感知、预测和规划模型，CVPR 2021。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2101.06806.pdf)]\n* TrafficSim：学习模拟真实的多智能体行为，CVPR 2021。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fpapers\u002FSuo_TrafficSim_Learning_To_Simulate_Realistic_Multi-Agent_Behaviors_CVPR_2021_paper.pdf)]\n* SceneGen：学习生成真实的交通场景，CVPR 2021。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fpapers\u002FTan_SceneGen_Learning_To_Generate_Realistic_Traffic_Scenes_CVPR_2021_paper.pdf)]\n* 用于行人轨迹预测的多模态 Transformer 网络，IJCAI 2021。[[论文](https:\u002F\u002Fwww.ijcai.org\u002Fproceedings\u002F2021\u002F0174.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fericyinyzy\u002FMTN_trajectory)]\n* Decoder Fusion RNN：用于轨迹预测的感知上下文和交互的解码器，IROS 2021。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2108.05814.pdf)]\n* 基于 Transformer 的联合意图和轨迹预测，IROS 2021。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9636241)]\n* 使用时空卷积网络进行自动驾驶汽车的基于机动操作的轨迹预测，IROS 
2021。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9636875)]\n* 集成多上下文线索的自动驾驶轨迹预测，IROS 2021。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9476975)]\n* MultiXNet：多类多阶段多模态运动预测，IEEE 智能车辆研讨会 (IV 2021)。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9575718)]\n* 基于多头注意力与联合智能体 - 地图表示的自动驾驶轨迹预测，IEEE 智能车辆研讨会 (IV 2021)。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9576054)]\n* Social-IWSTCNN：用于城市交通场景行人轨迹预测的社交交互加权时空卷积神经网络，IV 2021。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F9575958)]\n* 生成具有多样行人行为的场景用于自动驾驶车辆测试，机器人学习会议 (CoRL 2021)。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=HTfApPeT4DZ)] [[代码](https:\u002F\u002Fgithub.com\u002FMariaPriisalu\u002Fspl)]\n* 以车道图遍历为条件的多模态轨迹预测，CoRL 2021。[[论文](https:\u002F\u002Fproceedings.mlr.press\u002Fv164\u002Fdeo22a.html)] [[代码](https:\u002F\u002Fgithub.com\u002Fnachiket92\u002FPGP)]\n* 使用基于模型的规划学习预测车辆轨迹，CoRL 2021。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2103.04027.pdf)]\n* 使用循环神经网络进行弱势道路使用者的基于姿态的轨迹预测，模式识别国际会议 (ICPR 2021)。[[论文](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-030-68763-2_5.pdf)]\n* GraphTCN：用于人类轨迹预测的时空交互建模，WACV 2021。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FWACV2021\u002Fpapers\u002FWang_GraphTCN_Spatio-Temporal_Interaction_Modeling_for_Human_Trajectory_Prediction_WACV_2021_paper.pdf)]\n* 目标驱动的长期轨迹预测，WACV 2021。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FWACV2021\u002Fpapers\u002FTran_Goal-Driven_Long-Term_Trajectory_Prediction_WACV_2021_paper.pdf)]\n* 无需详细先验地图的自动驾驶多模态轨迹预测，WACV 2021。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FWACV2021\u002Fpapers\u002FKawasaki_Multimodal_Trajectory_Predictions_for_Autonomous_Driving_Without_a_Detailed_Prior_WACV_2021_paper.pdf)]\n* 用于上下文感知行人轨迹预测的自生长空间图网络，IEEE 国际图像处理会议 (ICIP 
2021)。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2012.06320v2.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fserenetech90\u002FAOL_ovsc)]\n* S2TNet：用于自动驾驶轨迹预测的时空 Transformer 网络，亚洲机器学习会议 2021。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2206.10902.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fchenghuang66\u002Fs2tnet)]\n* 使用等变连续卷积进行轨迹预测，ICLR 2021。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2010.11344.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FRose-STL-Lab\u002FECCO)]\n* TridentNet：用于动态轨迹生成的条件生成模型，国际智能自主系统会议 2021。[[论文](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-030-95892-3_31#Abs1)]\n* HOME：用于未来运动估计的热图输出，ITSC 2021。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2105.10968.pdf)]\n* 基于图和循环神经网络的车辆轨迹预测（用于高速公路驾驶），ITSC 2021。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F9564929)]\n* SCSG Attention：用于行人轨迹预测的带注意力的自中心星图，高级应用数据库系统国际会议 (DASFAA 2021)。[[论文](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-030-73194-6_29.pdf)]\n* 利用轨迹预测进行行人视频异常检测，IEEE 计算智能系列研讨会 (SSCI 2021)。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9660004)] [[代码](https:\u002F\u002Fgithub.com\u002Fakanuasiegbu\u002FLeveraging-Trajectory-Prediction-for-Pedestrian-Video-Anomaly-Detection)]\n\n## 2021 年期刊论文\n* 社会感知的轨迹预测模型真的具有社会感知能力吗？，Transportation Research: Part C. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2108.10879.pdf), [paper](https:\u002F\u002Ficcv21-adv-workshop.github.io\u002Fshort_paper\u002Fs-attack-arow2021.pdf)] [[code](https:\u002F\u002Fs-attack.github.io\u002F)]\n* 在数据驱动的车辆轨迹预测器中注入知识，Transportation Research: Part C. 
[[paper](https:\u002F\u002Freader.elsevier.com\u002Freader\u002Fsd\u002Fpii\u002FS0968090X21000425?token=F03D20769BFB255F56662C10348A81F3D07A42C6B4AB9BA19E3F7B2A5F1DA7D99B96B783616BDA86C12866AFCF4C5671&originRegion=eu-west-1&originCreation=20220506090622)] [[code](https:\u002F\u002Fgithub.com\u002Fvita-epfl\u002FRRB)]\n* 利用沉浸式虚拟现实和可解释深度学习解码行人与自动驾驶车辆的交互，Transportation Research: Part C. [[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0968090X2030855X)]\n* 拥挤环境中的人类轨迹预测：深度学习视角，IEEE Transactions on Intelligent Transportation Systems. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9408398)] [[code](https:\u002F\u002Fgithub.com\u002Fvita-epfl\u002Ftrajnetplusplusbaselines)]\n* NetTraj：一种基于网络的车辆轨迹预测模型，包含方向表示与时空注意力机制，TITS. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9629362)]\n* 用于多智能体预测与跟踪的时空图双注意力网络，TITS. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9491972)]\n* 基于图神经网络的异构交通参与者交互行为预测分层框架，TITS. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9468360&tag=1)]\n* TrajGAIL：使用生成对抗模仿学习生成城市车辆轨迹，Transportation Research Part C. [[paper](https:\u002F\u002Freader.elsevier.com\u002Freader\u002Fsd\u002Fpii\u002FS0968090X21001121?token=3DEACAF2AD919E99B3331E74F747B61A0EAC2741E79B6F99F4F806155EB394F163D74F2F83806358BBD65911E107EF01&originRegion=us-east-1&originCreation=20220416040814)] [[code](https:\u002F\u002Fgithub.com\u002Fbenchoi93\u002FTrajGAIL)]\n* 使用带有时序逻辑语法树特征的生成对抗网络进行车辆轨迹预测，IEEE ROBOTICS AND AUTOMATION LETTERS. [[paper](https:\u002F\u002Fwww.gilitschenski.org\u002Figor\u002Fpublications\u002F202104-ral-logic_gan\u002Fral21-logic_gan.pdf)]\n* 使用时空注意力机制的 LSTM 进行车辆轨迹预测，IEEE Intelligent Transportation Systems Magazine. 
[[paper](http:\u002F\u002Furdata.net\u002Ffiles\u002F2020_VTP.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fleilin-research\u002FVTP)]\n* 基于长短期记忆的网联自动驾驶环境下人工驾驶车辆纵向轨迹预测，Transportation Research Record. [[paper](http:\u002F\u002Fsage.cnpereading.com\u002Fparagraph\u002Fdownload\u002F?doi=10.1177\u002F0361198121993471)]\n* 用于行人轨迹预测的带有空间 - 时间注意力的时间金字塔网络，IEEE Transactions on Network Science and Engineering. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9373939)]\n* 一种基于门控线性单元的高效时空轨迹预测模型，Neurocomputing. [[paper](https:\u002F\u002Freader.elsevier.com\u002Freader\u002Fsd\u002Fpii\u002FS0925231221018907?token=C894F657732BB6078B77AEC9BD3858338C1A7F1254CCC0BBC34ADA1421A95CF9A4F68BDCA8812457DE27FB37EEB8F198&originRegion=us-east-1&originCreation=20220420144432)]\n* SRAI-LSTM：一种基于社会关系注意力的交互感知 LSTM 用于人类轨迹预测，Neurocomputing. [[paper](https:\u002F\u002Freader.elsevier.com\u002Freader\u002Fsd\u002Fpii\u002FS0925231221018014?token=BB22DAAC41E3BF453C326A9D72A0CC900C2DFFD0D8AE07B7DEED51C7F2250B9CB40CC89B6812CA20DBFA6A7EDD32AAD6&originRegion=us-east-1&originCreation=20220512100647)]\n* AST-GNN：一种基于注意力的时空图神经网络用于交互感知行人轨迹预测，Neurocomputing. [[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS092523122100388X)]\n* Multi-PPTP：复杂路口场景下的多模态概率行人轨迹预测，IEEE Transactions on Intelligent Transportation Systems. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9619864)]\n* 一种带有伪预言机的新型基于图的轨迹预测器，TNNLS. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9447207)]\n* 基于两阶段 GAN 的基于地图的大规模 GPS 轨迹生成，Journal of Data Science. [[paper](https:\u002F\u002Fwww.jds-online.com\u002Ffiles\u002FJDS202001-08.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FXingruiWang\u002FTwo-Stage-Gan-in-trajectory-generation)]\n* 基于姿态和语义地图的弱势道路使用者轨迹概率预测，IEEE Transactions on Intelligent Vehicles. 
[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9707640)]\n* STI-GAN：使用时空交互和生成对抗网络的多模态行人轨迹预测，IEEE Access. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?arnumber=9387292)]\n* 用于行人轨迹预测的整体 LSTM，TIP. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9361440)]\n* 使用卷积神经网络进行行人轨迹预测，PR. [[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0031320321004325)]\n* 利用与环境多重交互的基于 LSTM 的骑行者轨迹预测模型，PR. [[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0031320320306038)]\n* 使用 LSTM 模型和 GAN 进行人类轨迹预测与生成，PR. [[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS003132032100323X)]\n* 使用 LSTM 模型和 GAN 进行车辆轨迹预测与生成，Plos one. [[paper](https:\u002F\u002Fjournals.plos.org\u002Fplosone\u002Farticle?id=10.1371\u002Fjournal.pone.0253868)]\n* BiTraP：具有多模态目标估计的双向行人轨迹预测，RAL. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9345445)] [[code](https:\u002F\u002Fgithub.com\u002Fumautobots\u002Fbidireaction-trajectory-prediction)]\n* 通用高速公路场景下的轨迹预测运动学模型，RAL. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9472993)] [[code](https:\u002F\u002Fgithub.com\u002Fumautobots\u002Fkinematic_highway)]\n* 自动驾驶中的轨迹预测：带有车道航向辅助损失，RAL. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9387075)]\n* 使用带有时序逻辑语法树特征的生成对抗网络进行车辆轨迹预测，RAL. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9366373)]\n* Tra2Tra：带有全局社会时空注意力神经网络的轨迹到轨迹预测，RAL. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9347678)]\n* 用于行人轨迹预测的社会图卷积 LSTM，IET Intelligent Transport Systems. [[paper](https:\u002F\u002Fietresearch.onlinelibrary.wiley.com\u002Fdoi\u002Fepdf\u002F10.1049\u002Fitr2.12033)]\n* HSTA：用于轨迹预测的分层时空注意力模型，IEEE Transactions on Vehicular Technology (TVT). 
[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9548801)]\n* 用于车辆轨迹预测的环境注意力网络，TVT. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9534487)]\n* 他们要去哪里？拥挤场景中的人类行为预测，ACM Transactions on Multimedia Computing, Communications, and Applications (TOMM). [[paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3449359)]\n* 带有时空序列融合的多智能体轨迹预测，IEEE Transactions on Multimedia (TMM). [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9580659)]\n\n## 其他 2021\n* 多类别场景下基于生成对抗网络 (GAN) 的轨迹预测，arXiv 预印本 arXiv:2110.11401, 2021。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2110.11401.pdf)]\n* 用于交通场景轨迹预测的空间 - 通道 Transformer 网络，arXiv 预印本 arXiv:2101.11472, 2021。[[paper](https:\u002F\u002Farxiv.org\u002Fftp\u002Farxiv\u002Fpapers\u002F2101\u002F2101.11472.pdf)]\n* 物理可行的车辆轨迹预测，arXiv 预印本 arXiv:2104.14679, 2021。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2104.14679.pdf)]\n* MSN：用于轨迹预测的多风格网络，arXiv 预印本 arXiv:2107.00932, 2021。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2107.00932.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FNorthOcean\u002FMSN)]\n* 重新思考轨迹预测评估，arXiv 预印本 arXiv:2107.10297, 2021。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2107.10297)]\n* 基于空间交互 Transformer 网络的行人轨迹预测，IEEE 智能车辆研讨会工作坊 (IV Workshops 2021)。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2112.06624)]\n* 深度社会力，arXiv 预印本 arXiv:2109.12081, 2021。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2109.12081)] [[code](https:\u002F\u002Fgithub.com\u002Fsvenkreiss\u002Fsocialforce)]\n\n# 📚 2022 会议与期刊论文\n\n## 2022 年会议论文\n* 用于行人轨迹预测的社会可解释树，AAAI 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2205.13296.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Flssiair\u002FSIT)]\n* 用于行人轨迹预测的互补注意力门控网络，AAAI 2022。[[论文](https:\u002F\u002Fwww.aaai.org\u002FAAAI22Papers\u002FAAAI-1963.DuanJ.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FjinghaiD\u002FCAGN)]\n* 场景 
Transformer：一种用于预测多个智能体未来轨迹的统一架构，ICLR 2022。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=Wm3EA5OlHsG)]\n* 你大多独自行走：分析轨迹预测中的特征归因，ICLR 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2110.05304.pdf)]\n* 潜在变量序列集合 Transformer 用于联合多智能体运动预测，ICLR 2022。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=Dup_dDqkZC5)] [[代码](https:\u002F\u002Ffgolemo.github.io\u002Fautobots\u002F)]\n* THOMAS：具有学习到的多智能体采样的轨迹热力图输出，ICLR 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2110.06607)]\n* 记住意图：基于回顾性记忆的轨迹预测，CVPR 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2203.11474.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FMediaBrain-SJTU\u002FMemoNet)]\n* STCrowd：拥挤场景中行人感知的多模态数据集，CVPR 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2204.01026.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002F4DVLab\u002FSTCrowd.git)]\n* 车辆轨迹预测有效，但并非处处适用，CVPR 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2112.03909.pdf)] [[代码](https:\u002F\u002Fs-attack.github.io\u002F)]\n* 通过运动不确定性扩散进行随机轨迹预测，CVPR 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2203.13777.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fgutianpei\u002FMID)]\n* 用于随机人类轨迹预测的非概率采样网络，CVPR 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2203.13471.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Finhwanbae\u002FNPSN)]\n* 关于自动驾驶车辆轨迹预测的对抗鲁棒性，CVPR 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2201.05057.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fzqzqz\u002FAdvTrajectoryPrediction)]\n* 通过可迁移图神经网络 (GNN) 进行自适应轨迹预测，CVPR 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2203.05046.pdf)]\n* 迈向鲁棒和自适应的运动预测：一个因果表示视角，CVPR 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2111.14820.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fvita-epfl\u002Fcausalmotion), [代码](https:\u002F\u002Fgithub.com\u002Fsherwinbahmani\u002Fynet_adaptive)]\n* 多少观测就足够了？用于轨迹预测的知识蒸馏，CVPR 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2203.04781.pdf)]\n* 向所有车辆学习，CVPR 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2203.11934.pdf)] 
[[代码](https:\u002F\u002Fgithub.com\u002Fdotchen\u002FLAV)]\n* 通过未来目标检测从 LiDAR 进行预测，CVPR 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2203.16297.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fneeharperi\u002FFutureDet)]\n* 基于占用栅格地图的端到端轨迹分布预测，CVPR 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2203.16910.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FKguo-cs\u002FTDOR)]\n* M2I：从分解边缘轨迹预测到交互式预测，CVPR 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2202.11884.pdf)] [[代码](https:\u002F\u002Ftsinghua-mars-lab.github.io\u002FM2I\u002F)]\n* GroupNet：用于具有关系推理的轨迹预测的多尺度超图神经网络，CVPR 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2204.08770.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FMediaBrain-SJTU\u002FGroupNet)]\n* 这究竟是谁的轨迹？通过基于亲和度的预测提高对跟踪误差的鲁棒性，CVPR 2022。[[论文](https:\u002F\u002Fxinshuoweng.com\u002Fpapers\u002FAffinipred\u002Fcamera_ready.pdf)]\n* ScePT：用于规划的与场景一致的基于策略的轨迹预测，CVPR 2022。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FChen_ScePT_Scene-Consistent_Policy-Based_Trajectory_Predictions_for_Planning_CVPR_2022_paper.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FNVlabs\u002FScePT)]\n* 基于记忆回放的图空间 Transformer 用于多未来行人轨迹预测，CVPR 2022。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FLi_Graph-Based_Spatial_Transformer_With_Memory_Replay_for_Multi-Future_Pedestrian_Trajectory_CVPR_2022_paper.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FJacobieee\u002FST-MR)]\n* MUSE-VAE：用于环境感知长期轨迹预测的多尺度变分自编码器 (VAE)，CVPR 2022。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FLee_MUSE-VAE_Multi-Scale_VAE_for_Environment-Aware_Long_Term_Trajectory_Prediction_CVPR_2022_paper.pdf)]\n* LTP：用于自动驾驶的基于车道线的轨迹预测，CVPR 2022。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FWang_LTP_Lane-Based_Trajectory_Prediction_for_Autonomous_Driving_CVPR_2022_paper.pdf)]\n* ATPFL：联邦学习框架下的自动轨迹预测模型设计，CVPR 
2022。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FWang_ATPFL_Automatic_Trajectory_Prediction_Model_Design_Under_Federated_Learning_Framework_CVPR_2022_paper.pdf)]\n* 瞬时观测下的人类轨迹预测，CVPR 2022。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FSun_Human_Trajectory_Prediction_With_Momentary_Observation_CVPR_2022_paper.pdf)]\n* HiVT：用于多智能体运动预测的分层向量 Transformer，CVPR 2022。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FZhou_HiVT_Hierarchical_Vector_Transformer_for_Multi-Agent_Motion_Prediction_CVPR_2022_paper.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FZikangZhou\u002FHiVT)]\n* 运动预测中 HD 地图的路径感知图注意力，ICRA 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2202.13772.pdf)]\n* 使用语言表示的轨迹预测，ICRA 2022。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9811928)]\n* 利用平滑注意力先验进行多智能体轨迹预测，ICRA 2022。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9811718)] [[网站](https:\u002F\u002Fsites.google.com\u002Fview\u002Fsmoothness-attention)]\n* KEMP：用于长期轨迹预测的基于关键帧的分层端到端深度学习模型，ICRA 2022。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9812337)]\n* 基于视觉的驾驶轨迹生成的域泛化，ICRA 2022。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9812070)] [[网站](https:\u002F\u002Fsites.google.com\u002Fview\u002Fdg-traj-gen)]\n* 用于交互感知轨迹预测的深度概念图网络，ICRA 2022。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9811567)]\n* 使用迭代注意力块的条件人类轨迹预测，ICRA 2022。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9812404)]\n* StopNet：面向城市自动驾驶的可扩展轨迹和占用预测，ICRA 2022。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9811830)]\n* 时空图上的元路径分析用于行人轨迹预测，ICRA 2022。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9811632)] [[网站](https:\u002F\u002Fsites.google.com\u002Fillinois.edu\u002Fmesrnn\u002Fhome)]\n* 通过轨迹预测传播状态不确定性，ICRA 
2022。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9811776)] [[代码](https:\u002F\u002Fgithub.com\u002FStanfordASL\u002FPSU-TF)]\n* HYPER：通过分解推理和自适应采样学习的混合轨迹预测，ICRA 2022。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9812254)]\n* Grouptron：用于群体感知密集人群轨迹预测的动态多尺度图卷积网络，ICRA 2022。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9811585)]\n* 基于跨模态 Transformer 的行人轨迹预测生成框架，ICRA 2022。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9812226)]\n* 带有拓扑度量地图的自动驾驶轨迹预测，ICRA 2022。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9811712)] [[代码](https:\u002F\u002Fgithub.com\u002FJiaolong\u002Ftrajectory-prediction)]\n* CRAT-Pred：结合晶体图卷积神经网络和多头自注意力的车辆轨迹预测，ICRA 2022。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9811637)] [[代码](https:\u002F\u002Fgithub.com\u002Fschmidt-ju\u002Fcrat-pred)]\n* MultiPath++：用于行为预测的高效信息融合和轨迹聚合，ICRA 2022。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9812107)]\n* 用于自动驾驶的基于 Transformer 神经网络的多模态运动预测，ICRA 2022。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9812060\u002F)]\n* GOHOME：面向未来运动估计的图导向热力图输出，ICRA 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2109.01827.pdf)]\n* TridentNetV2：用于动态轨迹生成的轻量级图形全局计划表示，ICRA 2022。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?arnumber=9811591)]\n* 结合类别不确定性的异构智能体轨迹预测，IROS 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2104.12446.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FTRI-ML\u002FHAICU)] [[轨迹数据](https:\u002F\u002Fgithub.com\u002Fnvr-avg\u002Ftrajdata)]\n* 基于图的双尺度上下文融合的轨迹预测，IROS 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2111.01592.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FHKUST-Aerial-Robotics\u002FDSP)]\n* 学习行人群体表示用于多模态轨迹预测，ECCV 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2207.09953.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FInhwanBae\u002FGPGraph)]\n* Social-Implicit：重新思考轨迹预测评估及隐式最大似然估计的有效性，ECCV 
2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2203.03057.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fabduallahmohamed\u002FSocial-Implicit)] [[网站](https:\u002F\u002Fwww.abduallahmohamed.com\u002Fsocial-implicit-amdamv-adefde-demo)] \n* 用于多模态车辆轨迹预测的分层潜在结构，ECCV 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2207.04624.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fd1024choi\u002FHLSTrajForecast)]\n* SocialVAE：使用时序潜在变量进行人类轨迹预测，ECCV 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2203.08207.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fxupei0610\u002FSocialVAE)]\n* 垂直视角：通过傅里叶频谱进行轨迹预测的分层网络，ECCV 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2110.07288.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fcocoon2wong\u002FVertical)]\n* 入口翻转 Transformer 用于参与者行为的推理和预测，ECCV 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2207.06235.pdf)]\n* D2-TPred：交通信号灯下轨迹预测的不连续依赖，ECCV 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2207.10398.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FVTP-TL\u002FD2-TPred)]\n* 通过神经社会物理进行人类轨迹预测，ECCV 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2207.10435.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Frealcrane\u002FHuman-Trajectory-Prediction-via-Neural-Social-Physics)]\n* Social-SSL：基于 Transformer 的用于多智能体轨迹预测的自监督跨序列表示学习，ECCV 2022。[[论文](https:\u002F\u002Fbasiclab.lab.nycu.edu.tw\u002Fassets\u002FSocial-SSL.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FSigta678\u002FSocial-SSL)]\n* 感知历史：利用局部行为数据进行轨迹预测，ECCV 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2207.09646.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FKay1794\u002FAware-of-the-history)]\n* 用于轨迹预测的基于动作的对比学习，ECCV 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2207.08664.pdf)]\n* AdvDO：用于轨迹预测的真实对抗攻击，ECCV 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2209.08744.pdf)]\n* ST-P3：通过时空特征学习实现端到端视觉自动驾驶，ECCV 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2207.07601.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FOpenPerceptionX\u002FST-P3)]\n* Social 
ODE：使用神经常微分方程 (ODE) 进行多智能体轨迹预测，ECCV 2022。[[论文](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2022\u002Fpapers_ECCV\u002Fpapers\u002F136820211.pdf)]\n* 从场景历史预测人类轨迹，NIPS 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2210.08732.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FMaKaRuiNah\u002FSHENet)]\n* 用于端到端自动驾驶的轨迹引导控制预测：一个简单而强大的基线，NIPS 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2206.08129)] [[代码](https:\u002F\u002Fgithub.com\u002FOpenPerceptionX\u002FTCP)]\n* 具有全局意图定位和局部运动细化的运动 Transformer，NIPS 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2209.13508.pdf)] [[网站](https:\u002F\u002Fvas.mpi-inf.mpg.de\u002Fmotion-transformer-with-global-intention-localization-and-local-movement-refinement\u002F)]\n* 使用多重注意力进行交互建模，NIPS 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2208.10660.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Ffanyun-sun\u002FIMMA)]\n* 深度交互式运动预测和规划：与运动预测模型玩游戏，动力学与控制学习会议 (L4DC)。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2204.02392.pdf)] [[网站](https:\u002F\u002Fsites.google.com\u002Fview\u002Fdeep-interactive-predict-plan)]\n* 针对对抗攻击的鲁棒轨迹预测，CoRL 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2208.00094.pdf)] [[代码](https:\u002F\u002Frobustav.github.io\u002FRobustTraj\u002F)]\n* 使用扩散进行规划以实现灵活的行为合成，ICML 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.09991)] [[网站](https:\u002F\u002Fdiffusion-planning.github.io\u002F)]\n* 带有误差补偿的同步双向行人轨迹预测，ACCV 2022。[[论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FACCV2022\u002Fpapers\u002FXie_Synchronous_Bi-Directional_Pedestrian_Trajectory_Prediction_with_Error_Compensation_ACCV_2022_paper.pdf)]\n* 用于城市驾驶的基于模型的模仿学习，NIPS 2022。[[论文](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper_files\u002Fpaper\u002F2022\u002Ffile\u002F827cb489449ea216e4a257c47e407d18-Paper-Conference.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fwayveai\u002Fmile)]\n\n## 2022 年期刊论文\n* AI-TP：面向自动驾驶的基于注意力的交互感知轨迹预测，IEEE Transactions on Intelligent Vehicles. 
[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9723649)] [[code](https:\u002F\u002Fgithub.com\u002FKP-Zhang\u002FAI-TP)]\n* MDST-DGCN：用于行人轨迹预测的多级动态时空有向图卷积网络 (GCN)，Computational Intelligence and Neuroscience. [[paper](https:\u002F\u002Fdownloads.hindawi.com\u002Fjournals\u002Fcin\u002F2022\u002F4192367.pdf)]\n* 基于图卷积神经网络 (CNN) 的自动驾驶车辆轨迹预测，TITS. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9737058)]\n* 具有异构边增强图注意力的多智能体轨迹预测，TITS. [[paper](https:\u002F\u002Fdspace.lib.cranfield.ac.uk\u002Fbitstream\u002Fhandle\u002F1826\u002F17541\u002FMulti-agent_trajectory_prediction-2022.pdf?sequence=1&isAllowed=y)]\n* 带有注意力机制的全卷积编码器 - 解码器用于实用行人轨迹预测，TITS. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9768201)]\n* STGM：基于时空特征生成模型的车辆轨迹预测，TITS. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9743363)]\n* 使用时空图注意力 Transformer (Transformer 模型) 进行自动驾驶轨迹预测，TITS. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9768029)]\n* 基于时空动态注意力网络的意图感知车辆轨迹预测用于车联网，TITS. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9767719)] [[code](https:\u002F\u002Fxbchen82.github.io\u002Fresource\u002F)]\n* 基于先验感知有向图卷积神经网络的轨迹预测，TITS. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9686621&tag=1)]\n* DeepTrack：高速公路车辆轨迹预测的轻量级深度学习，TITS. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9770480)]\n* 使用集成驾驶风险图的深度学习方法进行高速公路周围车辆的交互式轨迹预测，TITS. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9745461&tag=1)]\n* 通过异构上下文感知图卷积网络在互联环境中进行车辆轨迹预测，TITS. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9781338)]\n* 基于时间模式注意力的轨迹预测神经网络和模型解释，TITS. 
[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9945660)]\n* 学习部分检测行人的稀疏交互图用于轨迹预测，RAL. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9664278)] [[code](https:\u002F\u002Fgithub.com\u002Ftedhuang96\u002Fgst)]\n* GAMMA：面向自动驾驶的通用智能体运动预测模型，RAL. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1906.01566.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FAdaCompNUS\u002Fgamma)]\n* 用于轨迹预测的逐步目标驱动网络，RAL. [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2103.14107v3.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FChuhuaW\u002FSGNet.pytorch)]\n* GA-STT：具有群体感知时空 Transformer 的人体轨迹预测，RAL. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9779572)]\n* 使用生成对抗网络 (GAN) 进行长期 4D 轨迹预测，Transportation Research Part C: Emerging Technologies. [[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0968090X22000031)]\n* 面向自动化车辆的上下文感知行人轨迹预测框架，Transportation Research Part C: Emerging Technologies. [[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0968090X21004423)]\n* 使用注意力模型的可解释多模态轨迹预测，Transportation Research Part C: Emerging Technologies. [[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0968090X22002509)]\n* CSCNet：拥挤空间中轨迹预测的上下文语义一致性网络，PR. [[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0031320322000334)]\n* CSR：用于行人轨迹预测的具有社交感知回归的级联条件变分自编码器，PR. [[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0031320322005106)]\n* Step Attention：序列行人轨迹预测，IEEE Sensors Journal. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9732437)]\n* 双重注意力机制下耦合本车运动趋势的车辆轨迹预测方法，IEEE Transactions on Instrumentation and Measurement. 
[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9749176)]\n* 时空交互感知和轨迹分布感知图卷积网络用于行人多模态轨迹预测，IEEE Transactions on Instrumentation and Measurement. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9997233)]\n* 深度编码器 - 解码器神经网络：基于深度学习的自动驾驶车辆轨迹预测和校正模型，Physica A: Statistical Mechanics and its Applications. [[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0378437122000139)]\n* PTPGC：利用带有 ConvLSTM (卷积长短期记忆网络) 的图注意力网络进行行人轨迹预测，Robotics and Autonomous Systems. [[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0921889021002165)]\n* GCHGAT：使用群体约束层次图注意力网络进行行人轨迹预测，Applied Intelligence. [[paper](https:\u002F\u002Flink.springer.com\u002Farticle\u002F10.1007\u002Fs10489-021-02997-w)]\n* 使用循环变分自编码器 (VAE) 网络进行车辆轨迹预测，IEEE Access. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9740177)] [[code](https:\u002F\u002Fgithub.com\u002Fmidemig\u002Ftraj_pred_vae)]\n* SEEM：基于序列熵能量的行人轨迹全然后一预测模型，TPAMI. [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9699076)]\n* PTP-STGCN：基于时空图卷积神经网络的行人轨迹预测，Applied Intelligence. [[paper](https:\u002F\u002Flink.springer.com\u002Farticle\u002F10.1007\u002Fs10489-022-03524-1)]\n* 轨迹分布：轨迹预测中运动的新描述，Computational Visual Media. [[paper](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002Fs41095-021-0236-6.pdf)]\n* 基于多尺度时空图的自动驾驶轨迹预测，IET Intelligent Transport Systems. [[paper](https:\u002F\u002Fietresearch.onlinelibrary.wiley.com\u002Fdoi\u002Fpdfdirect\u002F10.1049\u002Fitr2.12265)]\n* 基于记忆增强网络的持续学习轨迹预测，Knowledge-Based Systems. [[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0950705122011157)]\n* Atten-GAN：基于注意力机制的生成对抗网络 (GAN) 行人轨迹预测，Cognitive Computation. 
[[paper](https:\u002F\u002Flink.springer.com\u002Farticle\u002F10.1007\u002Fs12559-022-10029-z#Abs1)]\n* EvoSTGAT：用于行人轨迹预测的演化时空图注意力网络，Neurocomputing. [[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0925231222003460?ref=pdf_download&fr=RR-2&rr=7da0ead45e800fcc)]\n\n## 其他 2022\n* 提升运动预测中的上下文感知能力，CVPR 研讨会 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2109.08048.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fvaleoai\u002FCAB)]\n* 用于轨迹预测的目标驱动自注意力循环网络，CVPR 研讨会 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2204.11561.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fluigifilippochiara\u002FGoal-SAR)]\n* 重要性在于你的注意力：自动驾驶中的代理重要性预测，CVPR 研讨会 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2204.09121.pdf)]\n* MPA：基于 MultiPath++ 的运动预测架构，CVPR 研讨会 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2206.10041.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fstepankonev\u002Fwaymo-motion-prediction-challenge-2022-multipath-plus-plus)]\n* TPAD：在轨迹异常检测模型的指导下识别有效的轨迹预测，arXiv:2201.02941, 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2201.02941v1.pdf)]\n* Wayformer：通过简单且高效的注意力网络进行运动预测，arXiv 预印本 arXiv:2207.05844, 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2207.05844.pdf)]\n* PreTR：时空非自回归轨迹预测 Transformer，arXiv 预印本 arXiv:2203.09293, 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2203.09293.pdf)]\n* LatentFormer：基于 Transformer 的多智能体交互建模与轨迹预测，arXiv 预印本 arXiv:2203.01880, 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2203.01880.pdf)]\n* 使用带车道损失的二阶段预测网络进行多样化的多轨迹预测，arXiv 预印本 arXiv:2206.08641, 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2206.08641.pdf)]\n* 用于轨迹预测的半监督语义引导对抗训练，arXiv 预印本 arXiv:2205.14230, 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2205.14230.pdf)]\n* 通过风险和场景图学习进行异构轨迹预测，arXiv 预印本 arXiv:2211.00848, 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2211.00848.pdf)]\n* GATraj：一种基于图和注意力的多智能体轨迹预测模型，arXiv 预印本 arXiv:2209.07857, 
2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2209.07857.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fmengmengliu1998\u002FGATraj)]\n* 具有关系推理能力的动态组感知网络用于多智能体轨迹预测，arXiv 预印本 arXiv:2206.13114, 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2206.13114.pdf)]\n* 协作不确定性有益于多智能体多模态轨迹预测，arXiv 预印本 arXiv:2207.05195, 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2207.05195)] [[代码](https:\u002F\u002Fgithub.com\u002FMediaBrain-SJTU\u002FCollaborative-Uncertainty)]\n* 用于可控交通仿真的引导条件扩散，arXiv 预印本 arXiv:2210.17366, 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2210.17366.pdf)] [[网站](https:\u002F\u002Faiasd.github.io\u002Fctg.github.io\u002F)]\n* PhysDiff：物理引导的人体运动扩散模型，arXiv 预印本 arXiv:2212.02500, 2022。[[论文](http:\u002F\u002Fxxx.itp.ac.cn\u002Fpdf\u002F2212.02500.pdf)]\n* 基于时序图的轨迹预测，arXiv 预印本 arXiv:2207.00255, 2022。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2207.00255.pdf)] [[网站](https:\u002F\u002Fkuis-ai.github.io\u002Fftgn\u002F)]\n\n# 📚 2023 会议与期刊论文\n\n## 2023 年会议论文\n* 用于随机运动预测的人类关节运动学扩散细化方法，AAAI 2023。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2210.05976.pdf)]\n* 用于行人轨迹预测的多流表示学习，AAAI 2023。[[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F25389)] [[code](https:\u002F\u002Fgithub.com\u002FYuxuanIAIR\u002FMSRL-master)]\n* 基于两阶段 GAN 的连续轨迹生成，AAAI 2023。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2301.07103.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FWenMellors\u002FTS-TrajGen)]\n* 基于控制点集的行人轨迹预测，AAAI 2023。[[paper](https:\u002F\u002Fassets.underline.io\u002Flecture\u002F67747\u002Fpaper\u002F82988b653861eb7a0d5cdc91c4b26f8c.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FInhwanBae\u002FGraphTERN)]\n* WSiP：用于动态交互感知轨迹预测的波叠加启发池化，AAAI 2023。[[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F25592)] [[code](https:\u002F\u002Fgithub.com\u002FChopin0123\u002FWSiP)]\n* 利用未来关系推理进行车辆轨迹预测，ICLR 
2023。[[paper](https:\u002F\u002Fopenreview.net\u002Fforum?id=CGBCTp2M6lA)]\n* IPCC-TP：利用增量皮尔逊相关系数进行联合多智能体轨迹预测，CVPR 2023。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2303.00575.pdf)]\n* FEND：用于长尾轨迹预测的未来增强分布感知对比学习框架，CVPR 2023。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2303.16574.pdf)]\n* Trace and Pace：通过引导式轨迹扩散实现可控行人动画，CVPR 2023。[[paper](https:\u002F\u002Fnv-tlabs.github.io\u002Ftrace-pace\u002Fdocs\u002Ftrace_and_pace.pdf)] [[website](https:\u002F\u002Fnv-tlabs.github.io\u002Ftrace-pace\u002F)]\n* FJMP：在学到的有向无环交互图上进行因子化联合多智能体运动预测，CVPR 2023。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2211.16197.pdf)] [[website](https:\u002F\u002Frluke22.github.io\u002FFJMP\u002F)]\n* 用于随机轨迹预测的蛙跳扩散模型，CVPR 2023。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2303.10895.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FMediaBrain-SJTU\u002FLED)]\n* ViP3D：通过 3D 智能体查询进行端到端视觉轨迹预测，CVPR 2023。[[paper](http:\u002F\u002Fxxx.itp.ac.cn\u002Fpdf\u002F2208.01582.pdf)] [[website](https:\u002F\u002Ftsinghua-mars-lab.github.io\u002FViP3D\u002F)]\n* EqMotion：具有不变交互推理的等变多智能体运动预测，CVPR 2023。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2303.10876.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FMediaBrain-SJTU\u002FEqMotion)]\n* 揭示缺失模式：面向轨迹补全与预测的统一框架，CVPR 2023。[[paper](http:\u002F\u002Fxxx.itp.ac.cn\u002Fpdf\u002F2303.16005.pdf)]\n* 用于随机人类轨迹预测的无监督采样促进，CVPR 2023。[[paper](https:\u002F\u002Fchengy12.github.io\u002Ffiles\u002FBosampler.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fviewsetting\u002FUnsupervised_sampling_promoting)]\n* 刺激验证是多模态人类轨迹预测中的通用有效采样器，CVPR 2023。[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fpapers\u002FSun_Stimulus_Verification_Is_a_Universal_and_Effective_Sampler_in_Multi-Modal_CVPR_2023_paper.pdf)]\n* 以查询为中心的轨迹预测，CVPR 2023。[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fpapers\u002FZhou_Query-Centric_Trajectory_Prediction_CVPR_2023_paper.pdf)] 
[[code](https:\u002F\u002Fgithub.com\u002FZikangZhou\u002FQCNet)] [[QCNeXt](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.10508.pdf)]\n* 用于自动驾驶的弱监督类别无关运动预测，CVPR 2023。[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fpapers\u002FLi_Weakly_Supervised_Class-Agnostic_Motion_Prediction_for_Autonomous_Driving_CVPR_2023_paper.pdf)]\n* 更多分解与更好聚合：对人类运动预测频率表示学习的两次深入观察，CVPR 2023。[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fpapers\u002FGao_Decompose_More_and_Aggregate_Better_Two_Closer_Looks_at_Frequency_CVPR_2023_paper.pdf)]\n* MotionDiffuser：使用扩散的可控多智能体运动预测，CVPR 2023。[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fpapers\u002FJiang_MotionDiffuser_Controllable_Multi-Agent_Motion_Prediction_Using_Diffusion_CVPR_2023_paper.pdf)]\n* 规划导向自动驾驶，CVPR 2023。[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fpapers\u002FHu_Planning-Oriented_Autonomous_Driving_CVPR_2023_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FOpenDriveLab\u002FUniAD)]\n* TrafficGen：学习生成多样且真实的交通场景，ICRA 2023。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2210.06609.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fmetadriverse\u002Ftrafficgen)]\n* GANet：用于运动预测的目标区域网络，ICRA 2023。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2209.09723.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fkingwmk\u002FGANet)]\n* TOFG：自动驾驶中统一且细粒度的环境表示，ICRA 2023。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2305.20068.pdf)]\n* SSL-Lanes：自动驾驶运动预测的自监督学习，CoRL 2023。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2206.14116.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FAutoVision-cloud\u002FSSL-Lanes)]\n* PowerBEV：一种强大且轻量级的鸟瞰图实例预测框架，IJCAI 2023。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.10761.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FEdwardLeeLPZ\u002FPowerBEV)]\n* HumanMAC：用于人类运动预测的掩码运动补全，ICCV 2023。[[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.03665)] 
[[code](https:\u002F\u002Fgithub.com\u002FLinghaoChan\u002FHumanMAC)]\n* BeLFusion：用于行为驱动人类运动预测的潜在扩散，ICCV 2023。[[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2211.14304)] [[code](https:\u002F\u002Fgithub.com\u002FBarqueroGerman\u002FBeLFusion)]\n* EigenTrajectory：用于多模态轨迹预报的低秩描述符，ICCV 2023。[[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.09306)] [[code](https:\u002F\u002Fgithub.com\u002FInhwanBae\u002FEigenTrajectory)]\n* ADAPT：带有适应的高效多智能体轨迹预测，ICCV 2023。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2307.14187.pdf)] [[code](https:\u002F\u002Fkuis-ai.github.io\u002Fadapt\u002F)]\n* 基于图的轨迹预测的微分约束运动模型评估，IV 2023。[[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.05116)] [[code](https:\u002F\u002Fgithub.com\u002Fwestny\u002Fmtp-go)]\n* LimSim：长期交互式多场景交通模拟器，ITSC 2023。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2307.06648.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FPJLab-ADG\u002FLimSim)]\n* V2X-Seq：用于车路协同感知与预测的大规模序列数据集，CVPR 2023。[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fpapers\u002FYu_V2X-Seq_A_Large-Scale_Sequential_Dataset_for_Vehicle-Infrastructure_Cooperative_Perception_and_CVPR_2023_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FAIR-THU\u002FDAIR-V2X-Seq)]\n* INT2：交叉口的交互式轨迹预测，ICCV 2023。[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FYan_INT2_Interactive_Trajectory_Prediction_at_Intersections_ICCV_2023_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FAIR-DISCOVER\u002FINT2)]\n* 用于行人轨迹预测的轨迹统一 Transformer，ICCV 2023。[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FShi_Trajectory_Unified_Transformer_for_Pedestrian_Trajectory_Prediction_ICCV_2023_paper.pdf)]\n* 稀疏实例条件的多模态轨迹预测，ICCV 2023。[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FDong_Sparse_Instance_Conditioned_Multimodal_Trajectory_Prediction_ICCV_2023_paper.pdf)]\n* MotionLM：多智能体运动预测作为语言建模，ICCV 
2023。[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FSeff_MotionLM_Multi-Agent_Motion_Forecasting_as_Language_Modeling_ICCV_2023_paper.pdf)]\n* 轨迹预测上概率密度估计的快速推理与更新，ICCV 2023。[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FMaeda_Fast_Inference_and_Update_of_Probabilistic_Density_Estimation_on_Trajectory_ICCV_2023_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fmeaten\u002FFlowChain-ICCV2023)]\n* ADAPT：动作感知驾驶描述 Transformer，ICRA 2023。[[paper](https:\u002F\u002Fbrowse.arxiv.org\u002Fpdf\u002F2302.00673.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fjxbbb\u002FADAPT)]\n* 场景扩散：使用扩散的可控驾驶场景生成，NIPS 2023。[[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=99MHSB98yZ)]\n* BCDiff：用于瞬时轨迹预测的双向一致扩散，NIPS 2023。[[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=FOFJmR1oxt)]\n* 带有潜在扩散先验的多模态轨迹预测的条件变分推断，太平洋地区人工智能国际会议 (PRICAI 2023)。[[paper](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-981-99-7019-3_2)]\n* 通过场景级扩散的语言引导交通模拟，CoRL 2023。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.06344.pdf)]\n* 语言条件交通生成，CoRL 2023。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2307.07947)] [[code](https:\u002F\u002Fariostgx.github.io\u002Flctgen\u002F)]\n* LightSim：城市场景的神经光照模拟，NIPS 2023。[[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=mcx8IGneYw)] [[website](https:\u002F\u002Fwaabi.ai\u002Flightsim\u002F)]\n* 自动驾驶轨迹预测中真正重要的是什么？NIPS 2023。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.15136.pdf)] [[code](https:\u002F\u002Fwhatmatters23.github.io\u002F)]\n\n## 2023 年期刊论文\n* MVHGN：基于多视图自适应分层空间图卷积网络的非同质交通智能体轨迹预测，TITS。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10056303)]\n* 通过可迁移分层 Transformer 网络实现异构智能体的自适应与同步轨迹预测，TITS。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10149109)]\n* 
SSAGCN：用于行人轨迹预测的社会软注意力图卷积网络，TNNLS。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10063206)] [[code](https:\u002F\u002Fgithub.com\u002FWW-Tong\u002Fssagcn_for_path_prediction)]\n* 解耦人群交互以进行行人轨迹预测，RAL。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10083225)]\n* VNAGT：用于多智能体轨迹预测的变分非自回归图 Transformer 网络，IEEE Transactions on Vehicular Technology。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10121688)]\n* 时空谱 LSTM：一种用于行人轨迹预测的可迁移模型，TIV。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10149368)]\n* 整体 Transformer：用于自动驾驶车辆轨迹预测与决策的联合神经网络，PR。[[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0031320323002935)]\n* Tri-HGNN：学习融合三重策略的分层图神经网络用于行人轨迹预测，PR。[[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0031320323004703)]\n* 基于逆强化学习与风险规避的城市无信号灯交叉口多模态车辆轨迹预测，TITS。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10164651)]\n* 基于多尺度时空图的自动驾驶轨迹预测，IET Intelligent Transport Systems。[[paper](https:\u002F\u002Fietresearch.onlinelibrary.wiley.com\u002Fdoi\u002Fpdfdirect\u002F10.1049\u002Fitr2.12265)]\n* 用于人类轨迹预测的社会自注意力生成对抗网络，IEEE Transactions on Artificial Intelligence。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10197467)]\n* CSIR：具有迭代社会感知重思考的级联滑动条件变分自编码器用于轨迹预测，TITS。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10215313)]\n* 使用 Transformer 网络的高速公路自动驾驶多模态机动与轨迹预测，RAL。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10207845)]\n* 一种用于高速公路车辆轨迹预测的物理信息 Transformer 模型，Transportation Research Part C: Emerging Technologies。[[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0968090X23002619)] [[code](https:\u002F\u002Fgithub.com\u002FGengmaosi\u002FPIT-IDM)]\n* 
MacFormer：用于实时且鲁棒轨迹预测的地图 - 智能体耦合 Transformer，RAL。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2308.10280.pdf)]\n* MRGTraj：一种用于人类轨迹预测的新型非自回归方法，TCSVT。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10226250)] [[code](https:\u002F\u002Fgithub.com\u002Fwisionpeng\u002FMRGTraj)]\n* 受规划启发的通过横向 - 纵向分解实现自动驾驶分层轨迹预测，TIV。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10226224)]\n* 一种通过条件扩散模型的多模态车辆轨迹预测框架：由粗到细的方法，KBS。[[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0950705123007402)]\n* 轨迹预测中的模态探索、检索与适应，TPAMI。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10254381)]\n* MFAN：用于轨迹预测的混合特征注意力网络，PR。[[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0031320323006957#abs0001)]\n* IE-GAN：一种基于生成对抗网络的数据驱动人群模拟方法，Multimedia Tools and Applications。[[paper](https:\u002F\u002Flink.springer.com\u002Farticle\u002F10.1007\u002Fs11042-023-17346-x)]\n* 考虑时空交互与场景信息的轨迹分布感知图卷积网络用于轨迹预测，TKDE。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10309163)]\n* 具有多级时空建模的交通免地图轨迹预测，TIV。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10356823)]\n* STIGCN：用于行人轨迹预测的时空交互感知图卷积网络，The Journal of Supercomputing。[[paper](https:\u002F\u002Flink.springer.com\u002Farticle\u002F10.1007\u002Fs11227-023-05850-8)] [[code](https:\u002F\u002Fgithub.com\u002FChenwangxing\u002FSTIGCN_master)]\n* 面向智能车辆的随机非自回归 Transformer 基础多模态行人轨迹预测，TITS。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10367756)] [[code](https:\u002F\u002Fgithub.com\u002Fxbchen82\u002FSNARTF)]\n* 基于结构 Informer 方法的自动驾驶轨迹预测，IEEE Transactions on Automation Science and Engineering。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10364872)]\n* MTP-GO：基于神经常微分方程的图概率多智能体轨迹预测，IEEE Transactions on Intelligent 
Vehicles。[[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.00735)] [[code](https:\u002F\u002Fgithub.com\u002Fwestny\u002Fmtp-go)]\n\n## 其他 2023\n* Traj-MAE：用于轨迹预测的掩码自编码器，arXiv 预印本 arXiv:2303.06697, 2023。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2303.06697.pdf)]\n* 通过分布扩散 (Diffusion) 实现不确定性感知行人轨迹预测，arXiv 预印本 arXiv:2303.08367, 2023。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2303.08367.pdf)]\n* DiffTraj：使用扩散概率模型生成 GPS 轨迹，arXiv 预印本 arXiv:2304.11582, 2023。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2304.11582.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FYasoz\u002FDiffTraj)]\n* Multiverse Transformer（变换器）：Waymo Open Sim Agents Challenge 2023 第一名解决方案，CVPR 2023 自动驾驶研讨会。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.11868.pdf)] [[官网](https:\u002F\u002Fmultiverse-transformer.github.io\u002Fsim-agents\u002F)]\n* Joint-Multipath++ for Simulation Agents：Waymo Open Sim Agents Challenge 2023 第二名解决方案，CVPR 2023 自动驾驶研讨会。[[论文](https:\u002F\u002Fstorage.googleapis.com\u002Fwaymo-uploads\u002Ffiles\u002Fresearch\u002F2023%20Technical%20Reports\u002FSA_hm_jointMP.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fwangwenxi-handsome\u002FJoint-Multipathpp)]\n* MTR++：具有对称场景建模和引导意图查询的多智能体运动预测，Waymo Open Motion Prediction Challenge 2023 第一名解决方案，CVPR 2023 自动驾驶研讨会。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.17770.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fsshaoshuai\u002FMTR)]\n* GameFormer：面向自动驾驶的基于 Transformer 的交互式预测与规划的博弈建模与学习，arXiv 预印本 arXiv:2303.05760, 2023。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2303.05760.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FMCZhi\u002FGameFormer)] [[官网](https:\u002F\u002Fmczhi.github.io\u002FGameFormer\u002F)]\n* GameFormer Planner：面向自动驾驶车辆的赋能学习的交互式预测与规划框架，CVPR 2023 端到端 (End-to-End) 自动驾驶研讨会的 nuPlan 规划挑战赛。[[论文](https:\u002F\u002Fopendrivelab.com\u002Fe2ead\u002FAD23Challenge\u002FTrack_4_AID.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FMCZhi\u002FGameFormer-Planner\u002F)]\n* trajdata：多个人类轨迹数据集的统一接口，arXiv 预印本 
arXiv:2307.13924, 2023。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2307.13924.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FNVlabs\u002Ftrajdata)]\n* 使用扩散图卷积网络进行基于图的交互感知多模态 2D 车辆轨迹预测，arXiv 预印本 arXiv:2309.01981, 2023。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2309.01981.pdf)]\n* EquiDiff：用于轨迹预测的条件等变扩散模型，arXiv 预印本 arXiv:2308.06564, 2023。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2308.06564.pdf)]\n* DICE：用于轨迹预测的带评分的多样化扩散模型，arXiv 预印本 arXiv:2310.14570, 2023。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.14570.pdf)]\n* 使用基于动力学的深度学习进行行人轨迹预测，arXiv 预印本 arXiv:2309.09021, 2023。[[论文](https:\u002F\u002Fbrowse.arxiv.org\u002Fpdf\u002F2309.09021.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fsydney-machine-learning\u002Fpedestrianpathprediction)]\n* VT-Former：面向智能公路交通系统的基于 Transformer 的车辆轨迹预测方法，arXiv 预印本 arXiv:2311.06623, 2023。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.06623.pdf)]\n* 学习用于运动预测的合作轨迹表示，arXiv 预印本 arXiv:2311.00371, 2023。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.00371.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FAIR-THU\u002FV2X-Graph)]\n* Social-Transmotion：可提示的人类轨迹预测，arXiv 预印本 arXiv:2312.16168, 2023。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.16168.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fvita-epfl\u002Fsocial-transmotion)]\n* RealGen：用于可控交通场景的检索增强生成 (RAG)，arXiv 预印本 arXiv:2312.13303, 2023。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.13303.pdf)] [[代码](https:\u002F\u002Frealgen.github.io\u002F)]\n* SceneDM：具有一致性扩散模型的场景级多智能体轨迹生成，arXiv 预印本 arXiv:2311.15736, 2023。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.15736.pdf)] [[官网](https:\u002F\u002Falperen-hub.github.io\u002FSceneDM\u002F)]\n* DriveDreamer：迈向由真实世界驱动的自动驾驶世界模型 (World Models)，arXiv 预印本 arXiv:2309.09777, 2023。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2309.09777.pdf)] [[官网](https:\u002F\u002Fdrivedreamer.github.io\u002F)]\n* 自动驾驶的语言提示，arXiv 预印本 arXiv:2309.04379, 2023。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2309.04379.pdf)] 
[[代码](https:\u002F\u002Fgithub.com\u002Fwudongming97\u002FPrompt4Driving)]\n* GAIA-1：自动驾驶的生成式世界模型，arXiv 预印本 arXiv:2309.17080, 2023。[[论文](https:\u002F\u002Fbrowse.arxiv.org\u002Fpdf\u002F2309.17080.pdf)] [[官网](https:\u002F\u002Fwayve.ai\u002Fthinking\u002Fscaling-gaia-1\u002F)]\n* LanguageMPC：作为自动驾驶决策者的大语言模型 (LLM)，arXiv 预印本 arXiv:2310.03026, 2023。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.03026.pdf)] [[官网](https:\u002F\u002Fsites.google.com\u002Fview\u002Fllm-mpc)]\n* DriveGPT4：通过大语言模型实现可解释的端到端自动驾驶，arXiv 预印本 arXiv:2310.01412, 2023。[[论文](https:\u002F\u002Fbrowse.arxiv.org\u002Fpdf\u002F2310.01412.pdf)] [[官网](https:\u002F\u002Ftonyxuqaq.github.io\u002Fprojects\u002FDriveGPT4\u002F)]\n* Drive Like a Human：利用大语言模型重新思考自动驾驶，arXiv 预印本 arXiv:2307.07162, 2023。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2307.07162.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FPJLab-ADG\u002FDriveLikeAHuman)]\n* DiLu：一种利用大语言模型的知识驱动型自动驾驶方法，arXiv 预印本 arXiv:2309.16292, 2023。[[论文](https:\u002F\u002Fbrowse.arxiv.org\u002Fpdf\u002F2309.16292.pdf)] [[官网](https:\u002F\u002Fpjlab-adg.github.io\u002FDiLu\u002F)]\n* DrivingDiffusion：使用潜在扩散模型进行布局引导的多视角驾驶场景视频生成，arXiv 预印本 arXiv:2310.07771, 2023。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.07771.pdf)] [[官网](https:\u002F\u002Fdrivingdiffusion.github.io\u002F)]\n* Driving with LLMs：融合对象级向量模态以实现可解释的自动驾驶，arXiv 预印本 arXiv:2310.01957, 2023。[[论文](https:\u002F\u002Fbrowse.arxiv.org\u002Fpdf\u002F2310.01957.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fwayveai\u002FDriving-with-LLMs)]\n* WEDGE：从生成式视觉 - 语言模型构建的多天气自动驾驶数据集，CVPR 2023 研讨会。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2305.07528.pdf)] [[官网](https:\u002F\u002Finfernolia.github.io\u002FWEDGE)]\n* BEVGPT：用于自动驾驶预测、决策和规划的生成式预训练大模型，arXiv 预印本 arXiv:2310.10357, 2023。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.10357.pdf)]\n* 扩散世界模型，ICLR 2024 会议投稿，2023。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=bAXmvOLtjA)]\n* Waymax：用于大规模自动驾驶研究的加速数据驱动模拟器，arXiv 预印本 arXiv:2310.08710, 
2023。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.08710.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fwaymo-research\u002Fwaymax)] [[官网](https:\u002F\u002Fwaymo.com\u002Fintl\u002Fzh-cn\u002Fresearch\u002Fwaymax\u002F)]\n* MagicDrive：具有多样化 3D 几何控制的街景生成，arXiv 预印本 arXiv:2310.02601, 2023。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.02601.pdf)] [[官网](https:\u002F\u002Fgaoruiyuan.com\u002Fmagicdrive\u002F)]\n* GPT-Driver：使用 GPT 学习驾驶，arXiv 预印本 arXiv:2310.01415, 2023。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.01415.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FPointsCoder\u002FGPT-Driver)]\n* 你能描述正在发生的事情吗？将预训练语言编码器集成到自动驾驶轨迹预测模型中，arXiv 预印本 arXiv:2309.05282, 2023。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2309.05282.pdf)]\n* HiLM-D：迈向自动驾驶多模态大语言模型的高分辨率理解，arXiv 预印本 arXiv:2309.05186, 2023。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2309.05186.pdf)]\n* 面向自动驾驶的语言智能体，arXiv 预印本 arXiv:2311.10813, 2023。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.10813.pdf)] [[官网](https:\u002F\u002Fusc-gvl.github.io\u002FAgent-Driver\u002F)]\n* ADriver-I：自动驾驶的通用世界模型，arXiv 预印本 arXiv:2311.13549, 2023。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.13549.pdf)]\n* LLM4Drive：自动驾驶大语言模型综述，arXiv 预印本 arXiv:2311.01043, 2023。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.01043.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FThinklab-SJTU\u002FAwesome-LLM4AD)]\n* 自动驾驶与智能交通系统中的视觉语言模型，arXiv 预印本 arXiv:2310.14414, 2023。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.14414.pdf)]\n* 与 GPT-4V(ision) 同行：视觉 - 语言模型在自动驾驶上的早期探索，arXiv 预印本 arXiv:2311.05332, 2023。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.05332.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FPJLab-ADG\u002FGPT4V-AD-Exploration)]\n* Driving into the Future：利用世界模型进行自动驾驶的多视角视觉预测与规划，arXiv 预印本 arXiv:2311.17918, 2023。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.17918.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FBraveGroup\u002FDrive-WM)] [[官网](https:\u002F\u002Fdrive-wm.github.io\u002F)]\n* 
自动驾驶多模态大语言模型综述，arXiv 预印本 arXiv:2311.12320, 2023。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.12320.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FIrohXu\u002FAwesome-Multimodal-LLM-Autonomous-Driving)]\n* Panacea：面向自动驾驶的全景可控视频生成，arXiv 预印本 arXiv:2311.16813, 2023。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.16813.pdf)] [[官网](https:\u002F\u002Fpanacea-ad.github.io\u002F)] [[代码](https:\u002F\u002Fgithub.com\u002Fwenyuqing\u002Fpanacea)]\n* LMDrive：利用大语言模型进行闭环端到端驾驶，arXiv 预印本 arXiv:2312.07488, 2023。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.07488.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fopendilab\u002FLMDrive)]\n* DriveMLM：将多模态大语言模型与自动驾驶行为规划状态对齐，arXiv 预印本 arXiv:2312.09245, 2023。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.09245.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FOpenGVLab\u002FDriveMLM)]\n* 语言模型、智能体模型与世界模型：机器推理与规划的 LAW，arXiv 预印本 arXiv:2312.05230, NIPS Tutorial 2023。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.05230.pdf)] [[官网](https:\u002F\u002Fsites.google.com\u002Fview\u002Fneurips2023law)]\n* Dolphins：用于驾驶的多模态语言模型，arXiv 预印本 arXiv:2312.00438, 2023。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.00438.pdf)] [[官网](https:\u002F\u002Fvlm-driver.github.io\u002F)]\n* DriveLM：通过图视觉问答进行驾驶，arXiv 预印本 arXiv:2312.14150, 2023。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.14150.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FOpenDriveLab\u002FDriveLM)] [[官网](https:\u002F\u002Fopendrivelab.github.io\u002FDriveLM)]\n* LingoQA：自动驾驶的视频问答，arXiv 预印本 arXiv:2312.14115, 2023。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.14115.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fwayveai\u002FLingoQA)]\n* ViFiT：从惯性测量单元 (IMU) 和 Wi-Fi 精细时间测量重建视觉轨迹，MobiCom ISACom Workshop 2023。[[论文](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3615984.3616503)] [[代码](https:\u002F\u002Fgithub.com\u002Fbryanbocao\u002Fvifit)]\n\n# 📚 2024 年会议与期刊论文\n\n## 2024 年会议论文\n* BAT：面向自动驾驶的行为感知类人轨迹预测，AAAI 
2024。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.06371.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FPetrichor625\u002FBATraj-Behavior-aware-Model)]\n* NuScenes-QA：自动驾驶场景的多模态视觉问答基准，AAAI 2024。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2305.14836.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fqiantianwen\u002FNuScenes-QA)]\n* SocialCVAE：通过交互条件潜在变量预测行人轨迹，AAAI 2024。[[论文](http:\u002F\u002Fwww.cad.zju.edu.cn\u002Fhome\u002Fjin\u002FAAAI20242\u002FSocialCVAE.pdf)] [[代码](http:\u002F\u002Fwww.cad.zju.edu.cn\u002Fhome\u002Fjin\u002FAAAI20242\u002FSocialCVAE.htm)]\n* 通过神经随机微分方程提升跨域轨迹预测的可迁移性，AAAI 2024。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.15906)] [[代码](https:\u002F\u002Fgithub.com\u002Fdaeheepark\u002FTrajSDE)]\n* 语言能胜过数值回归吗？基于语言的多模态轨迹预测，CVPR 2024。[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.18447)] [[代码](https:\u002F\u002Fgithub.com\u002FInhwanBae\u002FLMTrajectory)]\n* SingularTrajectory：使用扩散模型的通用轨迹预测器，CVPR 2024。[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.18452)] [[代码](https:\u002F\u002Fgithub.com\u002FInhwanBae\u002FSingularTrajectory)]\n* 生成并利用在线地图不确定性进行轨迹预测，CVPR 2024。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.16439.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Falfredgu001324\u002FMapUncertaintyPrediction)]\n* HPNet：带有历史预测注意力的动态轨迹预测，CVPR 2024。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2404.06351.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FXiaolongTang23\u002FHPNet)]\n* 适应长度偏移：用于轨迹预测的 FlexiLength 网络，CVPR 2024。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2404.00742.pdf)]\n* T4P：通过掩码自编码器和特定角色令牌记忆实现轨迹预测的测试时训练，CVPR 2024。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.10052.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fdaeheepark\u002FT4P)]\n* SocialCircle：学习基于角度的社会交互表示以用于行人轨迹预测，CVPR 2024。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.05370.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002Fcocoon2wong\u002FSocialCircle)]\n* 自然数据投毒对自动驾驶轨迹预测的对抗后门攻击，CVPR 
2024。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.15755.pdf)]\n* CaDeT：一种用于自动驾驶鲁棒轨迹预测的因果解耦方法，CVPR 2024。\n* 用于行人轨迹预测的高阶关系推理，CVPR 2024。\n* 基于 Motif 矩阵的密度自适应多智能体轨迹预测模型，CVPR 2024。\n* OOSTraj：具有视觉定位去噪的视外轨迹预测，CVPR 2024。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2404.02227.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FHai-chao-Zhang\u002FOOSTraj)]\n* SmartRefine：用于高效运动预测的场景自适应精炼框架，CVPR 2024。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.11492)] [[代码](https:\u002F\u002Fgithub.com\u002Fopendilab\u002FSmartRefine)]\n* MFTraj：面向自动驾驶的无地图、行为驱动轨迹预测，IJCAI 2024。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.01266)]\n* 用于自动驾驶轨迹预测的特征化扩散与时空交互网络，IJCAI 2024。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.02145)]\n* 混合自主环境下自动驾驶的认知驱动轨迹预测模型，IJCAI 2024。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2404.17520)]\n* 缺失观测下自动驾驶的物理信息轨迹预测，IJCAI 2024。[[论文](https:\u002F\u002Fpapers.ssrn.com\u002Fsol3\u002Fpapers.cfm?abstract_id=4809575 )]\n* 探索大语言模型在轨迹预测中的应用：技术视角，ACM\u002FIEEE 人机交互国际会议 (HRI 2024)。[[论文](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3610978.3640625)]\n* SpectrumNet：用于行人轨迹预测的基于频谱的轨迹编码神经网络，IEEE 声学、语音和信号处理国际会议 (ICASSP 2024)。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10446706)]\n* MapFlow：使用归一化流的多人智能体行人轨迹预测，ICASSP 2024。[[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10448062)]\n* 可提示的闭环交通仿真，CoRL 2024。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2409.05863)] [[代码](https:\u002F\u002Fariostgx.github.io\u002FProSim\u002F)]\n* TrajCLIP：使用对比学习和幂等网络的行人轨迹预测方法，NIPS 2024。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=fUBFy8tb3z)]\n* 缺失数据下的行人轨迹预测：数据集、插补与基准测试，NIPS 2024。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.00174)] [[代码](https:\u002F\u002Fgithub.com\u002FPranav-chib\u002FTrajImpute)]\n* LaKD：针对任意长度观测的轨迹预测的长度无关知识蒸馏，NIPS 2024。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf\u002F7cf0348cc3747c46278bb98d27d152a16c5722d3.pdf)]\n* 无人机辅助无人机：多无人机物体轨迹预测及其他应用的协作框架，NIPS 
2024。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.14674)] [[代码](https:\u002F\u002Fgithub.com\u002FWangzcBruce\u002FDHD)]\n* MGF：用于多样化轨迹预测的混合高斯流，NIPS 2024。[[论文](https:\u002F\u002Fopenreview.net\u002Fpdf?id=muYhNDlxWc)] [[代码](https:\u002F\u002Fgithub.com\u002Fmulplue\u002FMGF)]\n* 推理交互式自动驾驶中的多智能体行为拓扑，NIPS 2024。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2409.18031)] [[代码](https:\u002F\u002Fgithub.com\u002FOpenDriveLab\u002FBeTop)]\n* SMART：通过下一个 token 预测实现可扩展的多智能体实时仿真，NIPS 2024。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.15677)] [[代码](https:\u002F\u002Fgithub.com\u002Frainmaker22\u002FSMART)]\n* MART：用于多智能体轨迹预测的多尺度关系 Transformer 网络，ECCV 2024。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.21635)] [[代码](https:\u002F\u002Fgithub.com\u002Fgist-ailab\u002FMART)]\n* 优化扩散模型以实现联合轨迹预测和可控生成，ECCV 2024。[[论文](https:\u002F\u002Fyixiaowang7.github.io\u002FOptTrajDiff_Page\u002Fstatic\u002Fpdfs\u002Fpaper.pdf)] [[代码](https:\u002F\u002Fyixiaowang7.github.io\u002FOptTrajDiff_Page\u002F)]\n* 用于人类轨迹预测的渐进式预文本任务学习，ECCV 2024。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.11588)] [[代码](https:\u002F\u002Fgithub.com\u002FiSEE-Laboratory\u002FPPT)]\n* 面向自动驾驶应用的可靠概率人类轨迹预测，ECCV 2024。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.06905)] [[代码](https:\u002F\u002Fgithub.com\u002Fkav-institute\u002Fmdn_trajectory_forecasting)]\n* DySeT：一种用于鲁棒轨迹预测的动态掩码自蒸馏方法，ECCV 2024。[[论文](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2024\u002Fpapers_ECCV\u002Fpapers\u002F00414.pdf)]\n* 通过潜在走廊进行自适应人类轨迹预测，ECCV 2024。[[论文](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2024\u002Fpapers_ECCV\u002Fpapers\u002F05542.pdf)]\n* CRITERIA：评估自动驾驶轨迹预测模型的新基准范式，ICRA 2024。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.07794)] [[代码](https:\u002F\u002Fgithub.com\u002Fhuawei-noah\u002FSMARTS\u002Ftree\u002FCRITERIA-latest\u002Fpapers\u002FCRITERIA)]\n* FIMP：用于多智能体运动预测的未来交互建模，ICRA 2024。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2401.16189)]\n* 使用基于动力学的深度学习进行行人轨迹预测，ICRA 
2024。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2309.09021)] [[代码](https:\u002F\u002Fgithub.com\u002Fsydney-machine-learning\u002Fpedestrianpathprediction)]\n* Scene Informer：部分可观测环境中的基于锚点的遮挡推断与轨迹预测，ICRA 2024。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2309.13893)] [[代码](https:\u002F\u002Fgithub.com\u002Fsisl\u002FSceneInformer)]\n* 混合自主交通环境中受人类观察启发的自动驾驶轨迹预测，ICRA 2024。[[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.04318)]\n* 用于多智能体轨迹预测的神经交互能量，ACM MM 2024。[[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2404.16579)]\n\n## 2024 年期刊论文\n* SMEMO：用于轨迹预测的社会记忆，TPAMI。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2203.12446.pdf)]\n* 一种面向自动驾驶的认知型轨迹预测方法，TIV。[[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.19251)]\n* EMSIN：用于车辆轨迹预测的增强多流交互网络，IEEE Transactions on Fuzzy Systems。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10418557)]\n* 面向多类轨迹预测的嵌入社会力的混合图卷积网络，TIV。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10415371)]\n* 用于实时车辆轨迹预测的上下文感知时序变分自编码器 (VAE)，RAL。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2302.10873.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fxupei0610\u002FContextVAE)]\n* 学习行人群体关系的自编码器 (Autoencoder) 扩散模型以用于多模态轨迹预测，IEEE Transactions on Instrumentation and Measurement (TIM)。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10466609)]\n* DSTCNN：用于行人轨迹预测的可变形时空卷积神经网络 (CNN)，Information Sciences。[[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0020025524003682)]\n* 用于交互感知车辆轨迹预测的异构图社会池化，Transportation Research Part E: Logistics and Transportation Review。[[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fabs\u002Fpii\u002FS1366554524003399)]\n* VTSIM：用于交叉口车辆轨迹模拟的基于注意力的循环神经网络 (RNN)，Computer Animation and Virtual Worlds。[[paper](https:\u002F\u002Fonlinelibrary.wiley.com\u002Fdoi\u002Fepdf\u002F10.1002\u002Fcav.2298?saml_referrer)]\n* 用于实时车辆轨迹预测的上下文感知时序变分自编码器 
(VAE)，RAL。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2302.10873)] [[code](https:\u002F\u002Fgithub.com\u002Fxupei0610\u002FContextVAE)]\n* MacFormer：用于实时且鲁棒的轨迹预测的地图 - 代理耦合 Transformer，RAL。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2308.10280)]\n* 基于扩散模型的轨迹生成框架模拟人类移动性，International Journal of Geographical Information Science。[[paper](https:\u002F\u002Fwww.researchgate.net\u002Fprofile\u002FChen-Chu-17\u002Fpublication\u002F378022332_Simulating_human_mobility_with_a_trajectory_generation_framework_based_on_diffusion_model\u002Flinks\u002F65cc2e5c790074549783cbf7\u002FSimulating-human-mobility-with-a-trajectory-generation-framework-based-on-diffusion-model.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fchuchen2017\u002FTrajGDM)]\n\n## 其他 2024\n* 通过引导扩散实现可控安全关键闭环交通仿真，arXiv 预印本 arXiv:2401.00391, 2024。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2401.00391.pdf)] [[website](https:\u002F\u002Fsafe-sim.github.io\u002F)]\n* 锻造自动驾驶视觉基础模型：挑战、方法与机遇，arXiv 预印本 arXiv:2401.08045, 2024。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2401.08045.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fzhanghm1995\u002FForge_VFM4AD)]\n* 用于轨迹预测的意图感知去噪扩散模型，arXiv 预印本 arXiv:2403.09190, 2024。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.09190.pdf)]\n* LG-Traj：大语言模型引导的行人轨迹预测，arXiv 预印本 arXiv:2403.08032, 2024。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.08032.pdf)]\n* Traj-LLM：利用预训练大语言模型赋能轨迹预测的新探索，arXiv 预印本 arXiv:2405.04909, 2024。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.04909)]\n* UniTraj：可扩展车辆轨迹预测的统一框架，arXiv 预印本 arXiv:2403.15098, 2024。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.15098.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fvita-epfl\u002FUniTraj)]\n* 基于扩散优化的多功能场景一致交通场景生成，arXiv 预印本 arXiv:2404.02524, 2024。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2404.02524.pdf)] [[code](https:\u002F\u002Fsites.google.com\u002Fview\u002Fversatile-behavior-diffusion)]\n* ControlTraj：基于拓扑约束扩散模型的可控轨迹生成，arXiv 预印本 arXiv:2404.15380, 
2024。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2404.15380)]\n* 基于扩散的环境感知轨迹预测，arXiv 预印本 arXiv:2403.11643, 2024。[[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.11643)]\n* 面向无人机数据集的轨迹预测研究预处理与评估工具箱，arXiv 预印本 arXiv:2405.00604, 2024。[[paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.00604)] [[code](https:\u002F\u002Fgithub.com\u002Fwestny\u002Fdronalize)]\n* BehaviorGPT：基于下一补丁预测的自动驾驶智能体模拟，arXiv 预印本 arXiv:2405.17372, 2024。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.17372)]\n* Vista：高保真且多功能可控的通用驾驶世界模型，arXiv 预印本 arXiv:2405.17398, 2024。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.17398)] [[code](https:\u002F\u002Fgithub.com\u002FOpenDriveLab\u002FVista)]\n* UrbanGPT：时空大语言模型，arXiv 预印本 arXiv:2403.00813, 2024。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.00813)] [[code](https:\u002F\u002Fgithub.com\u002FHKUDS\u002FUrbanGPT)]\n* 持续学习、适应与改进：自动驾驶的双过程方法，arXiv 预印本 arXiv:2405.15324, 2024。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.15324)] [[code](https:\u002F\u002Fgithub.com\u002FPJLab-ADG\u002FLeapAD)]\n* DriveVLM：自动驾驶与大视觉语言模型的融合，arXiv 预印本 arXiv:2402.12289, 2024。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.12289)] [[website](https:\u002F\u002Ftsinghua-mars-lab.github.io\u002FDriveVLM\u002F)]\n* NAVSIM：数据驱动的非反应式自动驾驶车辆仿真与基准测试，arXiv 预印本 arXiv:2406.15349, 2024。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.15349)] [[code](https:\u002F\u002Fgithub.com\u002Fautonomousvision\u002Fnavsim)] [[supplementary](https:\u002F\u002Fdanieldauner.github.io\u002Fassets\u002Fpdf\u002FDauner2024NIPS_supplementary.pdf)]\n* SimGen：基于模拟器条件的驾驶场景生成，arXiv 预印本 arXiv:2406.09386, 2024。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.09386)] [[code](https:\u002F\u002Fmetadriverse.github.io\u002Fsimgen\u002F)]\n* GenAD：生成式端到端自动驾驶，arXiv 预印本 arXiv:2402.11502, 2024。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.11502)] [[code](https:\u002F\u002Fgithub.com\u002Fwzzheng\u002FGenAD)]\n* LCSim：大规模可控交通模拟器，arXiv 预印本 
arXiv:2406.19781, 2024。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.19781)] [[code](https:\u002F\u002Fgithub.com\u002Ftsinghua-fib-lab\u002FLCSim)]\n* Strada-LLM：用于交通预测的图大语言模型，arXiv 预印本 arXiv:2410.20856, 2024。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.20856)]\n* MADiff：用于第一人称视频手部轨迹预测的运动感知 Mamba 扩散模型，arXiv 预印本 arXiv:2409.02638, 2024。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2409.02638)] [[code](https:\u002F\u002Firmvlab.github.io\u002Fmadiff.github.io\u002F)]\n* Gen-Drive：通过奖励建模和强化学习微调增强扩散生成驾驶策略，arXiv 预印本 arXiv:2410.05582, 2024。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.05582)] [[code](https:\u002F\u002Fmczhi.github.io\u002FGenDrive)]\n* 协同驾驶中多视图数据整合的共形轨迹预测，arXiv 预印本 arXiv:2408.00374, 2024。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.00374)] [[code](https:\u002F\u002Fgithub.com\u002Fxichennn\u002FV2I_trajectory_prediction)]\n* LHPF：回顾历史并规划未来在自动驾驶中的应用，arXiv 预印本 arXiv:2411.17253, 2024。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.17253)] [[website](https:\u002F\u002Fchantsss.github.io\u002FLHPF\u002F)]\n\n# 📚 2025 会议与期刊论文\n\n## 2025 年会议论文\n* 通过上下文学习生成交通场景以学习更好的运动规划器，AAAI 2025。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.18086)] [[code](https:\u002F\u002Fezharjan.github.io\u002FAutoSceneGen\u002F)]\n* NEST：一种用于自动驾驶的神经调节小世界超图轨迹预测模型，AAAI 2025。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.11682)]\n* C2F-TP：一种用于不确定性感知轨迹预测的由粗到细去噪框架，AAAI 2025。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.13231)] [[code](https:\u002F\u002Fgithub.com\u002Fwangzc0422\u002FC2F-TP)]\n* CUQDS：分布偏移下的共形不确定性量化用于轨迹预测，AAAI 2025。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.12100)]\n* STraj：自训练以弥合轨迹预测中的跨地理差异，AAAI 2025。[[paper](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fdownload\u002F34432\u002F36587)] [[code](https:\u002F\u002Fgithub.com\u002FZhanwei-Z\u002FSTraj)]\n* 桥接交通状态与轨迹以实现动态路网和轨迹表征学习，AAAI 2025。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.06870)] 
[[code](https:\u002F\u002Fgithub.com\u002FNickHan-cs\u002FTRACK)]\n* GTG：面向城市移动性的可泛化轨迹生成模型，AAAI 2025。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.01107)] [[code](https:\u002F\u002Fgithub.com\u002Flyd1881310\u002FGTG)]\n* MoFlow：基于隐式最大似然估计蒸馏的一步流匹配用于人类轨迹预测，CVPR 2025。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.09950)] [[code](https:\u002F\u002Fgithub.com\u002FDSL-Lab\u002FMoFlow)]\n* 认证人类轨迹预测，CVPR 2025。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.13778)] [[website](https:\u002F\u002Fs-attack.github.io\u002F)] [[code](https:\u002F\u002Fs-attack.github.io\u002Fcertified\u002F)]\n* 通过优化驱动多帧扰动框架实现自动驾驶中持久、高效且鲁棒的轨迹预测攻击，CVPR 2025。[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fpapers\u002FYu_Enduring_Efficient_and_Robust_Trajectory_Prediction_Attack_in_Autonomous_Driving_CVPR_2025_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fy1y5\u002FOMP-ATTACK)]\n* PerReg+：利用双层表征学习和自适应提示实现可泛化的轨迹预测，CVPR 2025。[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fpapers\u002FMessaoud_Towards_Generalizable_Trajectory_Prediction_using_Dual-Level_Representation_Learning_and_Adaptive_CVPR_2025_paper.pdf)]\n* 利用 SD 地图增强基于 HD 地图的轨迹预测，CVPR 2025。[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fpapers\u002FDong_Leveraging_SD_Map_to_Augment_HD_Map-based_Trajectory_Prediction_CVPR_2025_paper.pdf)]\n* 通过对比学习适应轨迹预测的观察长度，CVPR 2025。[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fpapers\u002FQiu_Adapting_to_Observation_Length_of_Trajectory_Prediction_via_Contrastive_Learning_CVPR_2025_paper.pdf)]\n* 基于多模态知识蒸馏的人类轨迹预测，CVPR 2025。[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fpapers\u002FJeong_Multi-modal_Knowledge_Distillation-based_Human_Trajectory_Forecasting_CVPR_2025_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FJaewoo97\u002FKDTF)]\n* 通过运动具身实现物理合理性感知的轨迹预测，CVPR 
2025。[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fpapers\u002FTaketsugu_Physical_Plausibility-aware_Trajectory_Prediction_via_Locomotion_Embodiment_CVPR_2025_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FImIntheMiddle\u002FEmLoco)]\n* Sim-to-Real 因果转移：一种因果感知交互表征的度量学习方法，CVPR 2025。[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fpapers\u002FRahimi_Sim-to-Real_Causal_Transfer_A_Metric_Learning_Approach_to_Causally-Aware_Interaction_CVPR_2025_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002Fvita-epfl\u002FCausalSim2Real)]\n* SocialMOIF：用于行人轨迹预测的多阶意图融合，CVPR 2025。[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fpapers\u002FChen_SocialMOIF_Multi-Order_Intention_Fusion_for_Pedestrian_Trajectory_Prediction_CVPR_2025_paper.pdf)]\n* Trajectory-Mamba：基于选择性状态空间模型的高效注意力-Mamba 预测模型，CVPR 2025。[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fpapers\u002FHuang_Trajectory_Mamba_Efficient_Attention-Mamba_Forecasting_Model_Based_on_Selective_SSM_CVPR_2025_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FYiZhou-H\u002FTrajectory-Mamba-CVPR)]\n* Tra-MoE：从多个领域学习轨迹预测模型以实现自适应策略条件化，CVPR 2025。[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fpapers\u002FYang_Tra-MoE_Learning_Trajectory_Prediction_Model_from_Multiple_Domains_for_Adaptive_CVPR_2025_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FMCG-NJU\u002FTra-MoE)]\n* 统一的不确定性感知扩散用于多智能体轨迹建模，CVPR 2025。[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2025\u002Fpapers\u002FCapellera_Unified_Uncertainty-Aware_Diffusion_for_Multi-Agent_Trajectory_Modeling_CVPR_2025_paper.pdf)]\n* AMD：用于鲁棒长尾轨迹预测的自适应动量和解耦对比学习框架，ICCV 2025。[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2025\u002Fpapers\u002FRao_AMD_Adaptive_Momentum_and_Decoupled_Contrastive_Learning_Framework_for_Robust_ICCV_2025_paper.pdf)]\n* DONUT：一种用于轨迹预测的仅解码器模型，ICCV 
2025。[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2025\u002Fpapers\u002FKnoche_DONUT_A_Decoder-Only_Model_for_Trajectory_Prediction_ICCV_2025_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FMKnoche\u002FDONUT)]\n* 运动中的远见：通过奖励启发式强化轨迹预测，ICCV 2025。[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2025\u002Fpapers\u002FPei_Foresight_in_Motion_Reinforcing_Trajectory_Prediction_with_Reward_Heuristics_ICCV_2025_paper.pdf)]\n* ForeSight：多视图流式联合目标检测和轨迹预测，ICCV 2025。[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2025\u002Fpapers\u002FPapais_ForeSight_Multi-View_Streaming_Joint_Object_Detection_and_Trajectory_Forecasting_ICCV_2025_paper.pdf)] [[code](https:\u002F\u002Fforesight-iccv.github.io\u002F)]\n* 通过可控扩散模型进行长尾轨迹预测的生成式主动学习，ICCV 2025。[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2025\u002Fpapers\u002FPark_Generative_Active_Learning_for_Long-tail_Trajectory_Prediction_via_Controllable_Diffusion_ICCV_2025_paper.pdf)]\n* NATRA：针对噪声观测的轨迹预测噪声无关框架，ICCV 2025。[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2025\u002Fpapers\u002FLi_NATRA_Noise-Agnostic_Framework_for_Trajectory_Prediction_with_Noisy_Observations_ICCV_2025_paper.pdf)]\n* SRefiner：用于多智能体轨迹细化的软编织注意力，ICCV 2025。[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2025\u002Fpapers\u002FXiao_SRefiner_Soft-Braid_Attention_for_Multi-Agent_Trajectory_Refinement_ICCV_2025_paper.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FLiwen-Xiao\u002FSRefiner)]\n* TOTP：具有时间自适应 Mamba 潜在扩散的可迁移在线行人轨迹预测，ICCV 2025。[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2025\u002Fpapers\u002FRen_TOTP_Transferable_Online_Pedestrian_Trajectory_Prediction_with_Temporal-Adaptive_Mamba_Latent_ICCV_2025_paper.pdf)]\n* 使用掩码轨迹扩散的统一多智能体轨迹建模，ICCV 
2025。[[paper](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2025\u002Fpapers\u002FYang_Unified_Multi-Agent_Trajectory_Modeling_with_Masked_Trajectory_Diffusion_ICCV_2025_paper.pdf)]\n* 一种用于车辆轨迹预测的驾驶风格自适应框架，NIPS 2025。[[paper](https:\u002F\u002Fopenreview.net\u002Fpdf\u002F19a4046603d4ecb927c2708967c00e223725333a.pdf)]\n* 迈向在上下文中预测任意人类轨迹，NIPS 2025。[[paper](https:\u002F\u002Fopenreview.net\u002Fpdf\u002Fda08cd5bb2b08b484195bc720c3b833a7fd3bab6.pdf)]\n* 带个体反馈的人类轨迹预测交互式调整，ICLR 2025。[[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=DCpukR83sw)]\n* 利用驾驶员视野进行多模态自车轨迹预测，ICLR 2025。[[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=LLWj8on4Rv)] [[code](https:\u002F\u002Fgithub.com\u002Fmeakbiyik\u002Frouteformer)]\n* 用于交互感知随机人类轨迹预测的神经化马尔可夫随机场，ICLR 2025。[[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=r3cEOVj7Ze)] [[code](https:\u002F\u002Fgithub.com\u002FAdaCompNUS\u002FNMRF_TrajectoryPrediction)]\n* Sports-Traj：用于体育中多智能体运动的统一轨迹生成模型，ICLR 2025。[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=9aTZf71uiD)] [[code](https:\u002F\u002Fgithub.com\u002Fcolorfulfuture\u002FUniTraj-pytorch)]\n* Trajectory-LLM：用于自动驾驶中轨迹预测的基于语言的数据生成器，ICLR 2025。[[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=UapxTvxB3N)] [[code](https:\u002F\u002Fgithub.com\u002FTJU-IDVLab\u002FTraj-LLM)]\n* TSC-Net：通过轨迹 - 场景 - 单元分类预测行人轨迹，ICLR 2025。[[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=Xmh5gdMfRJ)]\n* SmartPretrain：用于运动预测的模型无关和数据集无关表征学习，ICLR 2025。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.08669)] [[code](https:\u002F\u002Fgithub.com\u002Fyoungzhou1999\u002FSmartPretrain)]\n* DriveGPT：扩展用于驾驶的自回归行为模型，ICML 2025。[[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=SBUxQakoJJ)]\n* SAH-Drive：用于闭环车辆轨迹生成的场景感知混合规划器，ICML 2025。[[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=OYbZWmNHwn)] [[code](https:\u002F\u002Fgithub.com\u002Frichie-live\u002FSAH-Drive)]\n* 使用 3DMoTraj 数据集的三维轨迹预测，ICML 2025。[[paper](https:\u002F\u002Fopenreview.net\u002Fpdf?id=jkVH7nLzUR)] 
[[code](https:\u002F\u002Fgithub.com\u002Fzhouhao94\u002F3DMoTraj)]\n* 用于条件轨迹预测的跨时间域意图交互，ACM MM 2025。[[paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3746027.3754709)]\n* ViTraj：学习用于车路协同轨迹预测的双边表征，ACM MM 2025。[[paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3746027.3755295)]\n* 使用单目视觉的统一人类定位与轨迹预测，ICRA 2025。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.03535)] [[code](https:\u002F\u002Fgithub.com\u002Fvita-epfl\u002FMonoTransmotion)]\n* 使用 IDD-PeD 在非结构化交通中进行行人意图和轨迹预测，ICRA 2025。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=11128395)] [[code](https:\u002F\u002Fcvit.iiit.ac.in\u002Fresearch\u002Fprojects\u002Fcvit-projects\u002Fiddped)]\n* 用于行人轨迹预测的视觉 - 语言推理，ICRA 2025。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=11127538)]\n* 约束你的注意力：用于自动驾驶中鲁棒轨迹预测的因果注意力门控，ICRA 2025。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=11128367)] [[code](https:\u002F\u002Fehsan-ami.github.io\u002Fcritic\u002F)]\n* Co-MTP：一种用于自动驾驶的具有多时间融合的协同轨迹预测框架，ICRA 2025。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=11127303)] [[code](https:\u002F\u002Fxiaomiaozhang.github.io\u002FCo-MTP\u002F)]\n* WcDT：用于交通场景生成的以世界为中心的扩散变换器，ICRA 2025。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=11127600)] [[code](https:\u002F\u002Fgithub.com\u002Fyangchen1997\u002FWcDT)]\n* Diff-Refiner：使用即插即用扩散细化器增强多智能体轨迹预测，ICRA 2025。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=11127226)]\n* 场景感知可解释多模态轨迹预测，ICRA 2025。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=11128379)] [[code](https:\u002F\u002Fgithub.com\u002Focean-luna\u002FExplainable-Prediction)]\n* 非结构化约束下的随机轨迹预测，ICRA 2025。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=11127647)]\n* 
DSFormer-RTP：用于实时确定性轨迹预测的动态流变换器，IROS 2025。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=11247611)] [[code](https:\u002F\u002Fgithub.com\u002Fcxnaive\u002FDSFormer-RTP)]\n* ParkDiffusion：使用扩散模型的用于自动泊车的异构多智能体多模态轨迹预测，IROS 2025。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=11246467)]\n* GDTS：带有树采样的目标引导扩散模型用于多模态行人轨迹预测，IROS 2025。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=11246846)]\n* TR-LLM：整合轨迹数据以实现场景感知的基于 LLM 的人类动作预测，IROS 2025。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=11246714)] [[code](https:\u002F\u002Fsites.google.com\u002Fview\u002Ftrllm?usp=)]\n\n## 2025 年期刊论文\n* DEMO：一种用于自动驾驶车辆 (Autonomous Vehicles) 多时间尺度轨迹预测 (Trajectory Prediction) 的动力增强学习模型，信息融合 (Information Fusion)。 [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.20784)]\n* DSTIGCN：用于行人轨迹预测的可变形时空交互图卷积网络 (Graph Convolution Network)，IEEE 智能交通系统汇刊 (TITS)。 [[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F10843981)] [[代码](https:\u002F\u002Fgithub.com\u002FChenwangxing\u002FDSTIGCN_Master)]\n* PCHGCN：用于行人轨迹预测的物理约束高阶图卷积网络，IEEE 物联网期刊。 [[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F10948459)] [[代码](https:\u002F\u002Fgithub.com\u002FChenwangxing\u002FPCHGCN-Master)]\n* DiffMATP：基于去噪扩散模型 (Diffusion Models) 的交互感知多智能体轨迹预测，IEEE 车辆技术汇刊 (TVT)。 [[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F11222824)]\n* HDSVT：基于大都市城市桥梁的高密度语义车辆轨迹数据集，科学数据 (Scientific Data)。 [[论文](https:\u002F\u002Fwww.nature.com\u002Farticles\u002Fs41597-025-05603-7)] [[数据集](https:\u002F\u002Ffigshare.com\u002Farticles\u002Fdataset\u002FHDSVT_High-Density_Semantic_Vehicle_Trajectory_Dataset_Based_on_a_Cosmopolitan_City_Bridge\u002F27180387)]\n* CoT-Drive：利用大语言模型 (LLMs) 和思维链提示 (Chain-of-Thought Prompting) 进行自动驾驶的高效运动预测，IEEE 人工智能汇刊。 [[论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F10980428)]\n\n## 2025 年其他论文\n* V2V-LLM：基于多模态大语言模型 
(Multi-Modal Large Language Models) 的车对车协同自动驾驶，arXiv 预印本 arXiv:2502.09980, 2025。 [[论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.09980)] [[代码](https:\u002F\u002Feddyhkchiu.github.io\u002Fv2vllm.github.io\u002F)]\n* V2V-GoT：基于多模态大语言模型和思维图 (Graph-of-Thoughts) 的车对车协同自动驾驶，arXiv 预印本 arXiv:2509.18053, 2025。 [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.18053v3)] [[代码](https:\u002F\u002Feddyhkchiu.github.io\u002Fv2vgot.github.io\u002F)]\n\n# 📚 2026 年会议与期刊论文\n## 2026 年会议论文\n* ViTE：用于行人轨迹预测的虚拟图轨迹专家路由器，AAAI 2026。 [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2511.12214)] [[代码](https:\u002F\u002Fgithub.com\u002FCarrotsniper\u002FViTE)]\n* DiffRefiner：用于端到端 (End to End) 自动驾驶的通过扩散细化与语义交互实现由粗到细的轨迹规划，AAAI 2026。 [[论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2511.17150)] [[代码](https:\u002F\u002Fgithub.com\u002Fnullmax-vision\u002FDiffRefiner)]\n* TRAJEVO：通过大语言模型 (LLM) 驱动的进化设计轨迹预测启发式方法，AAAI 2026。 [[论文](https:\u002F\u002Fwww.arxiv.org\u002Fpdf\u002F2508.05616)] [[代码](https:\u002F\u002Fgithub.com\u002Fai4co\u002Ftrajevo)]\n## 2026 年期刊论文\n* \n## 2026 年其他论文\n*\n\n# 📚 相关综述论文\n* 自动驾驶中轨迹预测的大型基础模型：综合综述，arXiv 预印本 arXiv:2509.10570, 2025。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.10570)]\n* 轨迹预测遇上大型语言模型：综述，arXiv 预印本 arXiv:2506.03408, 2025。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.03408)] [[code](https:\u002F\u002Fgithub.com\u002Fcolorfulfuture\u002FAwesome-Trajectory-Motion-Prediction-Papers)]\n* 自动驾驶领域行人轨迹预测的总结与反思，TIV 2024。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10528911)]\n* 弱势道路使用者轨迹预测方法综述，Robotics 2023。[[paper](https:\u002F\u002Fwww.mdpi.com\u002F2218-6581\u002F13\u002F1\u002F1)]\n* 面向智能交通系统的生成式人工智能综述，arXiv 预印本 arXiv:2312.08248, 2023。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.08248.pdf)]\n* 自动驾驶系统中的行人与车辆行为预测——综述，Expert Systems With Applications 2023。[[paper](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0957417423024855)]\n* 数据驱动的交通仿真：全面综述，arXiv 预印本 
arXiv:2310.15975, 2023。[[paper](https:\u002F\u002Farxiv.org\u002Fftp\u002Farxiv\u002Fpapers\u002F2310\u002F2310.15975.pdf)]\n* 人车混合环境中的行人轨迹预测：系统综述，TITS 2023。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10181234)]\n* 自动驾驶车辆轨迹预测的机器学习：综合综述、挑战与未来研究方向，arXiv 预印本 arXiv:2307.07527, 2023。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2307.07527.pdf)]\n* 在基于深度学习的车辆轨迹预测中融入驾驶知识：综述，IEEE Transactions on Intelligent Vehicles 2023。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10100881)]\n* 基于深度学习模型的车辆轨迹预测综述，国际可持续专家系统会议：ICSES 2022。[[paper](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-981-19-7874-6_48)]\n* 自动驾驶轨迹预测方法综述，IEEE Transactions on Intelligent Vehicles 2022。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9756903)]\n* 时空数据的生成对抗网络：综述，ACM Transactions on Intelligent Systems and Technology 2022。[[paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3474838)]\n* 自动驾驶车辆的场景理解与运动预测——回顾与比较，TITS 2022。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9733973)]\n* 自动驾驶的深度强化学习：综述，TITS 2022。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9351818)]\n* 自动驾驶中的社会交互：回顾与展望，arXiv 预印本 arXiv:2208.07541, 2022。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2208.07541.pdf)]\n* 驾驶场景中的行为意图预测：综述，arXiv 预印本 arXiv:2211.00385, 2022。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2211.00385.pdf)]\n* 基于车辆信息的多模态融合技术：综述，arXiv 预印本 arXiv:2211.06080, 2022。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2211.06080.pdf)]\n* 自动驾驶中的行人行为预测：需求、指标及相关特征，TITS 2021。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9660784)]\n* 基于深度学习的行人轨迹预测方法综述，Sensors 2021。[[paper](https:\u002F\u002Fwww.mdpi.com\u002F1424-8220\u002F21\u002F22\u002F7543\u002Fpdf)]\n* 自动驾驶中车辆轨迹预测的深度学习途径综述，IEEE 机器人与仿生学国际会议 (ROBIO 
2021)。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2110.10436.pdf)] [[code](https:\u002F\u002Fgithub.com\u002FHenry1iu\u002FTNT-Trajectory-Predition)]\n* 行人轨迹预测方法回顾：比较深度学习与基于知识的方法，arXiv 预印本 arXiv:2111.06740, 2021。[[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2111.06740.pdf)]\n* 轨迹数据管理、分析与学习综述，ACM Computing Surveys (CSUR 2021)。[[paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3440207)]\n* 自动驾驶中行人和车辆运动预测综述，IEEE Access 2021。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9559998)]\n* 基于深度学习的自动驾驶：前沿技术综述，arXiv 预印本 arXiv:2006.06091, 2020。[[paper](https:\u002F\u002Farxiv.org\u002Fftp\u002Farxiv\u002Fpapers\u002F2006\u002F2006.06091.pdf)]\n* 视觉交通仿真综述：模型、评估及在自动驾驶中的应用，Computer Graphics Forum 2020。[[paper](https:\u002F\u002Fonlinelibrary.wiley.com\u002Fdoi\u002Fepdf\u002F10.1111\u002Fcgf.13803?saml_referrer)]\n* 自动驾驶深度学习技术综述，Journal of Field Robotics 2020。[[paper](https:\u002F\u002Fonlinelibrary.wiley.com\u002Fdoi\u002Fepdf\u002F10.1002\u002Frob.21918?saml_referrer)]\n* 人类运动轨迹预测：综述，International Journal of Robotics Research 2020。[[paper](http:\u002F\u002Fsage.cnpereading.com\u002Fparagraph\u002Fdownload\u002F?doi=10.1177\u002F0278364920917446)]\n* 车辆轨迹相似度：模型、方法与应用，ACM Computing Surveys (CSUR 2020)。[[paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3406096)]\n* 面向自动驾驶应用的基于深度学习的车辆行为预测：综述，TITS 2020。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9158529)]\n* 自动驾驶车辆运动规划深度强化学习综述，TITS 2020。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9210154)]\n* 支持自动驾驶规划的工具概述，ITSC 2020。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=9294512)]\n* 与行人交互的自动驾驶车辆：理论与实践综述，TITS 2019。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8667866)]\n* 弱势道路使用者路径预测技术综述：从传统到深度学习途径，ITSC 
2019。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=8917053)]\n* 时空数据挖掘：问题与方法综述，ACM Computing Surveys 2018。[[paper](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3161602)]\n* 基于视觉的路径预测综述，分布式、环境与普适交互国际会议 (DAPI 2018)。[[paper](https:\u002F\u002Flink.springer.com\u002Fcontent\u002Fpdf\u002F10.1007\u002F978-3-319-91131-1_4.pdf)]\n* 移动对象分析：未来位置与轨迹预测方法综述，arXiv 预印本 arXiv:1807.04639, 2018。[[paper](https:\u002F\u002Farxiv.org\u002Fftp\u002Farxiv\u002Fpapers\u002F1807\u002F1807.04639.pdf)]\n* 轨迹数据挖掘综述：技术与应用，IEEE Access 2016。[[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?arnumber=7452339)]\n* 轨迹数据挖掘：概述，ACM Transactions on Intelligent Systems and Technology 2015。[[paper](http:\u002F\u002Furban-computing.com\u002Fpdf\u002FTrajectoryDataMining-tist-yuzheng.pdf)]\n* 智能车辆运动预测与风险评估综述，ROBOMECH Journal 2014。[[paper](https:\u002F\u002Frobomechjournal.springeropen.com\u002Ftrack\u002Fpdf\u002F10.1186\u002Fs40648-014-0001-z.pdf)]\n\n# 📚 数据集\n## 数据集综述\n* 自动驾驶数据集综述：数据统计、标注与展望，arXiv 预印本 arXiv:2401.01454, 2024。 [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2401.01454.pdf)]\n* 自动驾驶开源数据生态系统：现状与未来，arXiv 预印本 arXiv:2312.03408, 2023。 [[paper](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.03408.pdf)] [[中文](https:\u002F\u002Fopendrivelab.com\u002FDataset_Survey_Chinese.pdf)] [[代码](https:\u002F\u002Fgithub.com\u002FOpenDriveLab\u002FDriveAGI)]\n* HDSVT：基于大都市城市桥梁的高密度语义车辆轨迹数据集，Scientific Data 2025。 [[paper](https:\u002F\u002Fwww.nature.com\u002Farticles\u002Fs41597-025-05603-7)] [[数据集](https:\u002F\u002Ffigshare.com\u002Farticles\u002Fdataset\u002FHDSVT_High-Density_Semantic_Vehicle_Trajectory_Dataset_Based_on_a_Cosmopolitan_City_Bridge\u002F27180387)]\n* AD4CHE 数据集及其在交通拥堵领航系统典型拥堵场景中的应用，IEEE Transactions on Intelligent Vehicles 2023。 [[paper](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F10079130)] [[代码](https:\u002F\u002Fgithub.com\u002FADSafetyJointLab\u002FAD4CHE)]\n\n## 公开可用的车辆数据集\n* 
[Porto](https:\u002F\u002Fwww.kaggle.com\u002Fc\u002Fpkdd-15-predict-taxi-service-trajectory-i\u002Fdata), [网站](https:\u002F\u002Farchive.ics.uci.edu\u002Fml\u002Fdatasets\u002FTaxi+Service+Trajectory+-+Prediction+Challenge,+ECML+PKDD+2015)\n* [NGSIM](https:\u002F\u002Fdata.transportation.gov\u002FAutomobiles\u002FNext-Generation-Simulation-NGSIM-Vehicle-Trajector\u002F8ect-6jqj)\n* [NYC](https:\u002F\u002Fwww1.nyc.gov\u002Fsite\u002Ftlc\u002Fabout\u002Ftlc-trip-record-data.page)\n* [T-drive](https:\u002F\u002Fwww.microsoft.com\u002Fen-us\u002Fresearch\u002Fpublication\u002Ft-drive-trajectory-data-sample\u002F)\n* [Greek Trucks](http:\u002F\u002Fwww.chorochronos.org\u002F)\n* [highD](https:\u002F\u002Fwww.highd-dataset.com\u002F)\n* [inD](https:\u002F\u002Fwww.ind-dataset.com\u002F)\n* [rounD](https:\u002F\u002Fwww.round-dataset.com\u002F)\n* [uniD](https:\u002F\u002Fwww.unid-dataset.com\u002F)\n* [exiD](https:\u002F\u002Fwww.exid-dataset.com\u002F)\n* [Dronalize](https:\u002F\u002Fgithub.com\u002Fwestny\u002Fdronalize)\n* [Mirror-Traffic](http:\u002F\u002Fwww.scenarios.cn\u002Fhtml\u002Fdataset.html)\n* [Argoverse 网站](https:\u002F\u002Fwww.argoverse.org\u002F), [Argoverse 1](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_CVPR_2019\u002Fpapers\u002FChang_Argoverse_3D_Tracking_and_Forecasting_With_Rich_Maps_CVPR_2019_paper.pdf), [Argoverse 2](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2301.00493.pdf)\n* [ApolloScape](http:\u002F\u002Fapolloscape.auto\u002Ftrajectory.html)\n* [INTERACTION](https:\u002F\u002Finteraction-dataset.com\u002F)\n* [Waymo Open Dataset](https:\u002F\u002Fwaymo.com\u002Fopen\u002F)\n* [Cityscapes](https:\u002F\u002Fwww.cityscapes-dataset.com\u002F)\n* [KITTI](http:\u002F\u002Fwww.cvlibs.net\u002Fdatasets\u002Fkitti\u002F)\n* [nuScenes](https:\u002F\u002Fwww.nuscenes.org\u002F)\n* [TRAF](https:\u002F\u002Fgamma.umd.edu\u002Fresearchdirections\u002Fautonomousdriving\u002Ftrafdataset)\n* [Lyft Level 
5](https:\u002F\u002Flevel-5.global\u002F)\n* [METEOR](https:\u002F\u002Fgamma.umd.edu\u002Fresearchdirections\u002Fautonomousdriving\u002Fmeteor\u002F)\n* [DiDi GAIA](https:\u002F\u002Foutreach.didichuxing.com\u002Fresearch\u002Fopendata\u002F), [D²-City](https:\u002F\u002Fwww.scidb.cn\u002Fen\u002Fdetail?dataSetId=804399692560465920&dataSetType=personal), [论文](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1904.01975)\n* [上海 & 杭州](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F2700478)\n* [北京](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F2525314.2525343)\n* [VMT](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F6482546)\n* [TRAFFIC](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F7565640), [网站](https:\u002F\u002Fmin.sjtu.edu.cn\u002Flwydemo\u002FTrajectory%20analysis.htm)\n* [CROSS](https:\u002F\u002Fcvrr-nas.ucsd.edu\u002Fpublications\u002F2011\u002FMorris_PAMI2011.pdf), [网站](http:\u002F\u002Fcvrr.ucsd.edu\u002Fbmorris\u002Fdatasets\u002F)\n* [Ubiquitous Traffic Eyes (UTE)](http:\u002F\u002Fseutraffic.com\u002F#\u002Fhome)\n* [CitySim](https:\u002F\u002Fgithub.com\u002FUCF-SST-Lab\u002FUCF-SST-CitySim1-Dataset)\n* [pNEUMA](https:\u002F\u002Fopen-traffic.epfl.ch\u002F)\n* [I-24 MOTION](https:\u002F\u002Fi24motion.org\u002Fdata)\n* [Zen Traffic Data](https:\u002F\u002Fzen-traffic-data.net\u002Fenglish\u002F)\n* [DLR Urban Traffic](https:\u002F\u002Fdoi.org\u002F10.5281\u002Fzenodo.11396371)\n* [DLR Highway Traffic](https:\u002F\u002Fdoi.org\u002F10.5281\u002Fzenodo.14012005)\n## 公开可用的行人数据集\n* [GeoLife](https:\u002F\u002Fwww.microsoft.com\u002Fen-us\u002Fresearch\u002Fpublication\u002Fgeolife-gps-trajectory-dataset-user-guide\u002F)\n* [UCY](https:\u002F\u002Fgraphics.cs.ucy.ac.cy\u002Fresearch\u002Fdownloads\u002Fcrowd-data)\n* [ETH](https:\u002F\u002Ficu.ee.ethz.ch\u002Fresearch\u002Fdatsets.html), 
[论文](https:\u002F\u002Fethz.ch\u002Fcontent\u002Fdam\u002Fethz\u002Fspecial-interest\u002Fbaug\u002Figp\u002Fphotogrammetry-remote-sensing-dam\u002Fdocuments\u002Fpdf\u002Fpellegrini09iccv.pdf)\n* [Stanford Drone Dataset](https:\u002F\u002Fcvgl.stanford.edu\u002Fprojects\u002Fuav_data\u002F)\n* [TrajNet](http:\u002F\u002Ftrajnet.stanford.edu\u002F)\n* [Oxford Town Center](https:\u002F\u002Fexposing.ai\u002Foxford_town_centre\u002F)\n* [New York Grand Central Station](https:\u002F\u002Fwww.ee.cuhk.edu.hk\u002F~xgwang\u002Fgrandcentral.html), [论文](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F5995459), [论文](https:\u002F\u002Fpeople.csail.mit.edu\u002Fbzhou\u002Fproject\u002Fcvpr2012\u002Fzhoucvpr2012.pdf), [论文](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent_cvpr_2015\u002Fpapers\u002FYi_Understanding_Pedestrian_Behaviors_2015_CVPR_paper.pdf)\n* [PIE](https:\u002F\u002Fdata.nvision2.eecs.yorku.ca\u002FPIE_dataset\u002F)\n* [JAAD](https:\u002F\u002Fdata.nvision2.eecs.yorku.ca\u002FJAAD_dataset\u002F)\n* [DS4C-PPP](https:\u002F\u002Fwww.kaggle.com\u002Fdatasets\u002Fkimjihoo\u002Fcoronavirusdataset)\n* [BDBC COVID-19](https:\u002F\u002Fgithub.com\u002FBDBC-KG-NLP\u002FCOVID-19-tracker)\n* [Vi-Fi](https:\u002F\u002Fsites.google.com\u002Fwinlab.rutgers.edu\u002Fvi-fidataset\u002Fhome)\n## 其他智能体数据集\n### 航空器\n* [LocaRDS](https:\u002F\u002Fatmdata.github.io\u002F)\n* [ZUMAVD](https:\u002F\u002Frpg.ifi.uzh.ch\u002Fzurichmavdataset.html)\n### 船舶\n* [Ushant](https:\u002F\u002Ffigshare.com\u002Farticles\u002Fdataset\u002FUshant_AIS_dataset\u002F8966273)\n* [Cargo](https:\u002F\u002Flink.springer.com\u002Farticle\u002F10.1007\u002Fs10707-020-00421-y)\n### 飓风与动物\n* [HURDAT2](https:\u002F\u002Fwww.nhc.noaa.gov\u002Fdata\u002F)\n* [Movebank](https:\u002F\u002Fwww.movebank.org\u002Fcms\u002Fmovebank-main)\n\n# 🌹 致谢\n我们要感谢提供论文、开源代码和项目网站的作者及开发者！感谢您为智能体轨迹预测社区做出的积极贡献。您的想法和贡献是我们前行的积极信号。如果您有任何建议或额外见解，欢迎提交 issue (问题) 或 pull request (拉取请求)。\n\n# 🌟 Star 
历史\n\n[![Star History Chart](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FPsychic-DL_Awesome-Traffic-Agent-Trajectory-Prediction_readme_0371064377b3.png)](https:\u002F\u002Fstar-history.com\u002F#Psychic-DL\u002FAwesome-Traffic-Agent-Trajectory-Prediction&Date)","# Awesome-Traffic-Agent-Trajectory-Prediction 快速上手指南\n\n本仓库是一个关于交通智能体轨迹预测的开源资源汇总库，收录了从传统方法到最新研究（2024-2026）的论文、数据集及代码实现。它旨在为研究人员和学生提供一站式的学习与参考资料。\n\n## 🛠️ 环境准备\n\n由于本仓库主要作为文献与资源索引，无需特定的运行时环境即可浏览。若需复现其中列出的具体算法模型，请根据目标项目的要求配置环境：\n\n- **操作系统**: Linux \u002F macOS \u002F Windows\n- **必备工具**: Git, Web 浏览器\n- **开发依赖**: \n  - 多数深度学习项目依赖 **Python** 和 **PyTorch**。\n  - 部分高性能计算项目可能涉及 **C++**。\n  - 建议参考各子项目链接中的 `requirements.txt` 或文档进行具体配置。\n\n## 📥 安装步骤\n\n通过 Git 克隆本仓库到本地。为了提升下载速度，建议使用国内镜像源或网络加速工具。\n\n```bash\ngit clone https:\u002F\u002Fgithub.com\u002FPsychic-DL\u002FAwesome-Traffic-Agent-Trajectory-Prediction.git\ncd Awesome-Traffic-Agent-Trajectory-Prediction\n```\n\n## 🚀 基本使用\n\n### 1. 浏览资源列表\n打开 `README.md` 文件，通过目录结构（Table of Contents）快速定位所需内容：\n- **📜 Traditional Methods**: 查看基础理论（如 Social Force Model）。\n- **📚 [年份] Conference and Journal Papers**: 按年份筛选最新的顶会论文（CVPR, ICCV, ITSC 等）。\n- **📚 Datasets**: 获取公开的交通轨迹数据集（车辆、行人、船舶等）。\n\n### 2. 访问外部资源\n本仓库通过超链接指向具体的论文 PDF、代码仓库和数据集页面。\n- 点击标题后的 `[[paper]]` 链接下载论文。\n- 点击 `[[code]]` 链接进入对应的 GitHub 项目。\n- 在外部项目中查找 `README` 以了解具体的训练和推理命令。\n\n### 3. 
参与社区\n如需添加新资源或讨论合作，可通过以下方式联系维护者：\n- **Email**: xdchaonengli@163.com\n- **WeChat**: CN15691969157\n- **Pull Requests**: 直接提交 PR 更新资源列表。","某自动驾驶初创公司的算法工程师正在开发 L4 级无人车的轨迹预测模块，面临技术选型和复现效率低下的挑战。\n\n### 没有 Awesome-Traffic-Agent-Trajectory-Prediction 时\n- 需要手动在 arXiv、IEEE Xplore 等多个平台搜索论文，耗时且容易遗漏关键文献\n- 即使找到相关论文，也往往难以直接获取对应的开源代码实现\n- 面对海量研究资料，无法快速区分传统方法与深度学习方法的演进脉络\n- 不清楚哪些数据集最适合当前复杂的城市路口场景，导致实验选型困难\n\n### 使用 Awesome-Traffic-Agent-Trajectory-Prediction 后\n- 通过分类目录直接定位到 2024-2025 年的最新多智能体预测论文与代码资源\n- 一键跳转至官方仓库，快速获取包含 PyTorch 实现的复现材料\n- 清晰了解从传统方法到扩散模型（Diffusion）的技术发展路线，避免重复造轮子\n- 根据列表中的数据集推荐，迅速选定匹配度高的公开数据完成验证\n\nAwesome-Traffic-Agent-Trajectory-Prediction 通过系统整合前沿论文与代码，显著提升了团队在多智能体轨迹预测领域的研发效率。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FPsychic-DL_Awesome-Traffic-Agent-Trajectory-Prediction_7c8ec8df.png","Psychic-DL","Psychic","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002FPsychic-DL_44d43104.jpg","Nothing is given. Everything is earned.🏀","Lanzhou Jiaotong University","Lanzhou, Gansu Province","xdchaonengli@163.com",null,"https:\u002F\u002Fgithub.com\u002FPsychic-DL",507,62,"2026-03-25T07:18:36","MIT",1,"未说明",{"notes":91,"python":89,"dependencies":92},"该仓库为资源聚合列表（Awesome List），整理轨迹预测相关的论文、数据集和代码链接，本身无独立安装脚本。具体环境依赖需查阅所引用项目的文档，内容涉及 C++ 和 Python\u002FPyTorch 技术栈。",[],[15,51,13],[95,96,97,98,99,100,101],"awesome","deep-learning","papers","trajectory-prediction","traffic-agent","dataset","source-code","2026-03-27T02:49:30.150509","2026-04-06T05:44:21.370997",[105,110],{"id":106,"question_zh":107,"answer_zh":108,"source_url":109},2911,"是否可以在论文条目中添加对应的数据集基准测试？","维护者感谢用户的建设性建议，表示将继续在空闲时间维护该项目，并会考虑添加数据集基准。","https:\u002F\u002Fgithub.com\u002FPsychic-DL\u002FAwesome-Traffic-Agent-Trajectory-Prediction\u002Fissues\u002F5",{"id":111,"question_zh":112,"answer_zh":113,"source_url":114},2912,"论文列表是否支持按时间倒序排列？","用户建议按从新到旧（2022 ---> 
2020）排序。维护者回复感谢提醒，表示将根据自己的时间安排进行调整。","https:\u002F\u002Fgithub.com\u002FPsychic-DL\u002FAwesome-Traffic-Agent-Trajectory-Prediction\u002Fissues\u002F2",[]]