[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-CHIANGEL--Awesome-LLM-for-RecSys":3,"tool-CHIANGEL--Awesome-LLM-for-RecSys":61},[4,18,26,36,44,53],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":17},4358,"openclaw","openclaw\u002Fopenclaw","OpenClaw 是一款专为个人打造的本地化 AI 助手，旨在让你在自己的设备上拥有完全可控的智能伙伴。它打破了传统 AI 助手局限于特定网页或应用的束缚，能够直接接入你日常使用的各类通讯渠道，包括微信、WhatsApp、Telegram、Discord、iMessage 等数十种平台。无论你在哪个聊天软件中发送消息，OpenClaw 都能即时响应，甚至支持在 macOS、iOS 和 Android 设备上进行语音交互，并提供实时的画布渲染功能供你操控。\n\n这款工具主要解决了用户对数据隐私、响应速度以及“始终在线”体验的需求。通过将 AI 部署在本地，用户无需依赖云端服务即可享受快速、私密的智能辅助，真正实现了“你的数据，你做主”。其独特的技术亮点在于强大的网关架构，将控制平面与核心助手分离，确保跨平台通信的流畅性与扩展性。\n\nOpenClaw 非常适合希望构建个性化工作流的技术爱好者、开发者，以及注重隐私保护且不愿被单一生态绑定的普通用户。只要具备基础的终端操作能力（支持 macOS、Linux 及 Windows WSL2），即可通过简单的命令行引导完成部署。如果你渴望拥有一个懂你",349277,3,"2026-04-06T06:32:30",[13,14,15,16],"Agent","开发框架","图像","数据工具","ready",{"id":19,"name":20,"github_repo":21,"description_zh":22,"stars":23,"difficulty_score":10,"last_commit_at":24,"category_tags":25,"status":17},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,"2026-04-05T11:01:52",[14,15,13],{"id":27,"name":28,"github_repo":29,"description_zh":30,"stars":31,"difficulty_score":32,"last_commit_at":33,"category_tags":34,"status":17},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 
代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",147882,2,"2026-04-09T11:32:47",[14,13,35],"语言模型",{"id":37,"name":38,"github_repo":39,"description_zh":40,"stars":41,"difficulty_score":32,"last_commit_at":42,"category_tags":43,"status":17},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",108111,"2026-04-08T11:23:26",[14,15,13],{"id":45,"name":46,"github_repo":47,"description_zh":48,"stars":49,"difficulty_score":32,"last_commit_at":50,"category_tags":51,"status":17},4721,"markitdown","microsoft\u002Fmarkitdown","MarkItDown 是一款由微软 AutoGen 团队打造的轻量级 Python 工具，专为将各类文件高效转换为 Markdown 格式而设计。它支持 PDF、Word、Excel、PPT、图片（含 OCR）、音频（含语音转录）、HTML 乃至 YouTube 链接等多种格式的解析，能够精准提取文档中的标题、列表、表格和链接等关键结构信息。\n\n在人工智能应用日益普及的今天，大语言模型（LLM）虽擅长处理文本，却难以直接读取复杂的二进制办公文档。MarkItDown 恰好解决了这一痛点，它将非结构化或半结构化的文件转化为模型“原生理解”且 Token 效率极高的 Markdown 格式，成为连接本地文件与 AI 分析 pipeline 的理想桥梁。此外，它还提供了 MCP（模型上下文协议）服务器，可无缝集成到 Claude Desktop 等 LLM 应用中。\n\n这款工具特别适合开发者、数据科学家及 AI 研究人员使用，尤其是那些需要构建文档检索增强生成（RAG）系统、进行批量文本分析或希望让 AI 
助手直接“阅读”本地文件的用户。虽然生成的内容也具备一定可读性，但其核心优势在于为机器",93400,"2026-04-06T19:52:38",[52,14],"插件",{"id":54,"name":55,"github_repo":56,"description_zh":57,"stars":58,"difficulty_score":10,"last_commit_at":59,"category_tags":60,"status":17},4487,"LLMs-from-scratch","rasbt\u002FLLMs-from-scratch","LLMs-from-scratch 是一个基于 PyTorch 的开源教育项目，旨在引导用户从零开始一步步构建一个类似 ChatGPT 的大型语言模型（LLM）。它不仅是同名技术著作的官方代码库，更提供了一套完整的实践方案，涵盖模型开发、预训练及微调的全过程。\n\n该项目主要解决了大模型领域“黑盒化”的学习痛点。许多开发者虽能调用现成模型，却难以深入理解其内部架构与训练机制。通过亲手编写每一行核心代码，用户能够透彻掌握 Transformer 架构、注意力机制等关键原理，从而真正理解大模型是如何“思考”的。此外，项目还包含了加载大型预训练权重进行微调的代码，帮助用户将理论知识延伸至实际应用。\n\nLLMs-from-scratch 特别适合希望深入底层原理的 AI 开发者、研究人员以及计算机专业的学生。对于不满足于仅使用 API，而是渴望探究模型构建细节的技术人员而言，这是极佳的学习资源。其独特的技术亮点在于“循序渐进”的教学设计：将复杂的系统工程拆解为清晰的步骤，配合详细的图表与示例，让构建一个虽小但功能完备的大模型变得触手可及。无论你是想夯实理论基础，还是为未来研发更大规模的模型做准备",90106,"2026-04-06T11:19:32",[35,15,13,14],{"id":62,"github_repo":63,"name":64,"description_en":65,"description_zh":66,"ai_summary_zh":66,"readme_en":67,"readme_zh":68,"quickstart_zh":69,"use_case_zh":70,"hero_image_url":71,"owner_login":72,"owner_name":73,"owner_avatar_url":74,"owner_bio":75,"owner_company":76,"owner_location":76,"owner_email":77,"owner_twitter":76,"owner_website":78,"owner_url":79,"languages":76,"stars":80,"forks":81,"last_commit_at":82,"license":83,"difficulty_score":84,"env_os":85,"env_gpu":86,"env_ram":86,"env_deps":87,"category_tags":90,"github_topics":91,"view_count":32,"oss_zip_url":76,"oss_zip_packed_at":76,"status":17,"created_at":100,"updated_at":101,"faqs":102,"releases":103},5953,"CHIANGEL\u002FAwesome-LLM-for-RecSys","Awesome-LLM-for-RecSys","Survey: A collection of AWESOME papers and resources on the large language model (LLM) related recommender system topics.","Awesome-LLM-for-RecSys 是一个专注于大语言模型（LLM）与推荐系统交叉领域的开源资源库。它系统性地收集并整理了该方向的高质量学术论文与技术资源，旨在帮助从业者快速掌握如何利用大模型提升推荐系统的性能。\n\n当前，传统推荐系统在特征工程、语义理解及冷启动等方面面临瓶颈，而大模型的出现为解决这些问题提供了新范式。Awesome-LLM-for-RecSys 通过独特的分类框架，将论文按大模型在推荐流程中的介入位置（如特征增强、排序优化、生成式推荐等）进行梳理，清晰展示了技术演进脉络。项目不仅包含详尽的论文列表，还配套了被 ACM TOIS 
接收的综述文章，并持续更新最新研究成果。\n\n这份资源特别适合人工智能研究人员、算法工程师以及对下一代推荐技术感兴趣的学生使用。无论是希望深入探索 LLM 在推荐场景中应用机理的学者，还是寻求落地解决方案的开发者，都能从中找到极具价值的参考依据和技术灵感，从而高效跟进这一前沿领域的发展动态。","# Awesome-LLM-for-RecSys [![Awesome](https:\u002F\u002Fawesome.re\u002Fbadge.svg)](https:\u002F\u002Fawesome.re)\n\nA collection of AWESOME papers and resources on the large language model (LLM) related recommender system topics. \n\n:tada: Our survey paper has been accepted by **_ACM Transactions on Information Systems (TOIS)_**: [How Can Recommender Systems Benefit from Large Language Models: A Survey](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3678004)\n\n:bell: Since our survey paper is archived, we will update the latest research works at ``1.7 Newest Research Work List``.\n\n:grin: I am also wrting weekly paper notes about latest LLM-enhanced RS at WeChat. Welcome to follow by scanning the [QR-Code](https:\u002F\u002Fgithub.com\u002FCHIANGEL\u002FAwesome-LLM-for-RecSys\u002Fblob\u002Fmain\u002Fwechat_for_paper_notes.jpeg).\n\n:rocket:\t**2024.07.09 - Paper v6 released**: Our archived camera-ready version for TOIS.\n\u003Cdetails>\u003Csummary>\u003Cb>Survey Paper Update Logs\u003C\u002Fb>\u003C\u002Fsummary>\n\n\u003Cp>\n\u003Cul>\n  \u003Cli>\u003Cb>2024.07.09 - Paper v6 released\u003C\u002Fb>: Our camera-ready Version for TOIS, which will be archived.\u003C\u002Fli>\n  \u003Cli>\u003Cb>2024.02.05 - Paper v5 released\u003C\u002Fb>: New release with 27-page main content & more thorough taxonomies.\u003C\u002Fli>\n  \u003Cli>\u003Cb>2023.06.29 - Paper v4 released\u003C\u002Fb>: 7 papers have been newly added.\u003C\u002Fli>\n  \u003Cli>\u003Cb>2023.06.28 - Paper v3 released\u003C\u002Fb>: Fix typos.\u003C\u002Fli>\n  \u003Cli>\u003Cb>2023.06.12 - Paper v2 released\u003C\u002Fb>: Add summerization table in the appendix.\u003C\u002Fli>\n  \u003Cli>\u003Cb>2023.06.09 - Paper v1 released\u003C\u002Fb>: Initial version.\u003C\u002Fli>\n\u003C\u002Ful>\n\u003C\u002Fp>\n\n\u003C\u002Fdetails>\n\n## 1. 
Papers\n\nWe classify papers according to where LLM will be adapted in the pipeline of RS, which is summarized in the figure below.\n\n\u003Cimg width=\"650\" src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FCHIANGEL_Awesome-LLM-for-RecSys_readme_18b6a766ec7e.png\">\n\n\u003Cdetails>\u003Csummary>\u003Cb>1.1 LLM for Feature Engineering\u003C\u002Fb>\u003C\u002Fsummary>\n\u003Cp>\n\n\u003Cb>1.1.1 User- and Item-level Feature Augmentation\u003C\u002Fb>\n\n| **Name** | **Paper** | **LLM Backbone (Largest)** | **LLM Tuning Strategy** | **Publication** | **Link** |\n|:---:|:---|:---:|:---:|:---:|:---:|\n| LLM4KGC | Knowledge Graph Completion Models are Few-shot Learners: An Empirical Study of Relation Labeling in E-commerce with LLMs | PaLM (540B)\u002F ChatGPT | Frozen | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.09858v1) |\n| TagGPT | TagGPT: Large Language Models are Zero-shot Multimodal Taggers | ChatGPT | Frozen | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.03022v1) |\n| ICPC | Large Language Models for User Interest Journeys | LaMDA (137B) | Full Finetuning\u002F Prompt Tuning | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.15498) |\n| KAR | Towards Open-World Recommendation with Knowledge Augmentation from Large Language Models | ChatGPT | Frozen | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.10933) |\n| PIE | Product Information Extraction using ChatGPT | ChatGPT | Frozen | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.14921) |\n| LGIR | Enhancing Job Recommendation through LLM-based Generative Adversarial Networks | GhatGLM (6B) | Frozen | AAAI 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.10747) |\n| GIRL | Generative Job Recommendations with Large Language Model | BELLE (7B) | Full Finetuning | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.02157) |\n| LLM-Rec | LLM-Rec: Personalized 
Recommendation via Prompting Large Language Models | text-davinci-003 | Frozen | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.15780) |\n| HKFR | Heterogeneous Knowledge Fusion: A Novel Approach for Personalized Recommendation via LLM | ChatGPT | Frozen | RecSys 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.03333) |\n| LLaMA-E | LLaMA-E: Empowering E-commerce Authoring with Multi-Aspect Instruction Following | LLaMA (30B) | LoRA | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.04913) |\n| EcomGPT | EcomGPT: Instruction-tuning Large Language Models with Chain-of-Task Tasks for E-commerce | BLOOMZ (7.1B) | Full Finetuning | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.06966) |\n| TF-DCon | Leveraging Large Language Models (LLMs) to Empower Training-Free Dataset Condensation for Content-Based Recommendation | ChatGPT | Frozen | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.09874) |\n| RLMRec | Representation Learning with Large Language Models for Recommendation | ChatGPT | Frozen | WWW 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.15950) |\n| LLMRec | LLMRec: Large Language Models with Graph Augmentation for Recommendation | ChatGPT | Frozen | WSDM 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.00423.pdf) |\n| LLMRG | Enhancing Recommender Systems with Large Language Model Reasoning Graphs | GPT4 | Frozen | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.10835) |\n| CUP | Recommendations by Concise User Profiles from Review Text | ChatGPT | Frozen | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.01314) |\n| SINGLE | Modeling User Viewing Flow using Large Language Models for Article Recommendation | ChatGPT | Frozen | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.07619) |\n| SAGCN | Understanding Before Recommendation: Semantic Aspect-Aware Review Exploitation via 
Large Language Models | Vicuna (13B) | Frozen | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.16275) |\n| UEM | User Embedding Model for Personalized Language Prompting | FLAN-T5-base (250M) | Full Finetuning | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.04858) |\n| LLMHG | LLM-Guided Multi-View Hypergraph Learning for Human-Centric Explainable Recommendation | GPT4 | Frozen | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.08217) |\n| Llama4Rec | Integrating Large Language Models into Recommendation via Mutual Augmentation and Adaptive Aggregation | LLaMA2 (7B) | Full Finetuning | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.13870) |\n| LLM4Vis | LLM4Vis: Explainable Visualization Recommendation using ChatGPT | ChatGPT | Frozen | EMNLP 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.07652) |\n| LoRec | LoRec: Large Language Model for Robust Sequential Recommendation against Poisoning Attacks | LLaMA2 | Frozen | SIGIR 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.17723) |\n\n\u003Cb>1.1.2 Instance-level Sample Generation\u003C\u002Fb>\n\n| **Name** | **Paper** | **LLM Backbone (Largest)** | **LLM Tuning Strategy** | **Publication** | **Link** |\n|:---:|:---|:---:|:---:|:---:|:---:|\n| RecInter | Beyond Static Testbeds: An Interaction-Centric Agent Simulation Platform for Dynamic Recommender Systems | GPT-4o | Frozen | EMNLP 2025 | [[Link]](https:\u002F\u002Faclanthology.org\u002F2025.emnlp-main.956) |\n| GReaT | Language Models are Realistic Tabular Data Generators | GPT2-medium (355M) | Full Finetuning | ICLR 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.06280) |\n| ONCE | ONCE: Boosting Content-based Recommendation with Both Open- and Closed-source Large Language Models | ChatGPT | Frozen | WSDM 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.06566) |\n| AnyPredict | AnyPredict: Foundation Model for Tabular 
Prediction | ChatGPT | Frozen | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.12081) |\n| DPLLM | Privacy-Preserving Recommender Systems with Synthetic Query Generation using Differentially Private Large Language Models | T5-XL (3B) | Full Finetuning | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.05973) |\n| MINT | Large Language Model Augmented Narrative Driven Recommendations | text-davinci-003 | Frozen | RecSys 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.02250) |\n| Agent4Rec | On Generative Agents in Recommendation | ChatGPT | Frozen | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.10108) |\n| RecPrompt | RecPrompt: A Prompt Tuning Framework for News Recommendation Using Large Language Models | GPT4 | Frozen | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.10463) |\n| PO4ISR | Large Language Models for Intent-Driven Session Recommendations | ChatGPT | Frozen | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.07552) |\n| BEQUE | Large Language Model based Long-tail Query Rewriting in Taobao Search | ChatGLM (6B) | FFT | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.03758) |\n| Agent4Ranking | Agent4Ranking: Semantic Robust Ranking via Personalized Query Rewriting Using Multi-agent LLM | ChatGPT | Frozen | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.15450) |\n| PopNudge | Improving Conversational Recommendation Systems via Bias Analysis and Language-Model-Enhanced Data Augmentation | ChatGPT | Frozen | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.16738) |\n\n\u003C\u002Fp>\n\u003C\u002Fdetails>\n\n\u003Cdetails>\u003Csummary>\u003Cb>1.2 LLM as Feature Encoder\u003C\u002Fb>\u003C\u002Fsummary>\n\u003Cp>\n\n\u003Cb>1.2.1 Representation Enhancement\u003C\u002Fb>\n\n| **Name** | **Paper** | **LLM Backbone (Largest)** | **LLM Tuning Strategy** | **Publication** | 
**Link** |\n|:---:|:---|:---:|:---:|:---:|:---:|\n| U-BERT | U-BERT: Pre-training User Representations for Improved Recommendation | BERT-base (110M) | Full Finetuning | AAAI 2021 | [[Link]](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F16557) |\n| UNBERT | UNBERT: User-News Matching BERT for News Recommendation | BERT-base (110M) | Full Finetuning | IJCAI 2021 | [[Link]](https:\u002F\u002Fwww.ijcai.org\u002Fproceedings\u002F2021\u002F462) |\n| PLM-NR | Empowering News Recommendation with Pre-trained Language Models | RoBERTa-base (125M) | Full Finetuning | SIGIR 2021 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.07413) |\n| Pyramid-ERNIE | Pre-trained Language Model based Ranking in Baidu Search | ERNIE (110M) | Full Finetuning | KDD 2021 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2105.11108) |\n| ERNIE-RS | Pre-trained Language Model for Web-scale Retrieval in Baidu Search | ERNIE (110M) | Full Finetuning | KDD 2021 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.03373) |\n| CTR-BERT | CTR-BERT: Cost-effective knowledge distillation for billion-parameter teacher models | Customized BERT (1.5B) | Full Finetuning | ENLSP 2021 | [[Link]](https:\u002F\u002Fneurips2021-nlp.github.io\u002Fpapers\u002F20\u002FCameraReady\u002Fcamera_ready_final.pdf) |\n| SuKD | Learning Supplementary NLP Features for CTR Prediction in Sponsored Search | RoBERTa-large (355M) | Full Finetuning | KDD 2022 | [[Link]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3534678.3539064) |\n| PREC | Boosting Deep CTR Prediction with a Plug-and-Play Pre-trainer for News Recommendation | BERT-base (110M) | Full Finetuning | COLING 2022 | [[Link]](https:\u002F\u002Faclanthology.org\u002F2022.coling-1.249\u002F) |\n| MM-Rec | MM-Rec: Visiolinguistic Model Empowered Multimodal News Recommendation | BERT-base (110M) | Full Finetuning | SIGIR 2022 | 
[[Link]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3477495.3531896) |\n| Tiny-NewsRec | Tiny-NewsRec: Effective and Efficient PLM-based News Recommendation | UniLMv2-base (110M) | Full Finetuning | EMNLP 2022 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.00944) |\n| PLM4Tag | PTM4Tag: Sharpening Tag Recommendation of Stack Overflow Posts with Pre-trained Models | CodeBERT (125M) | Full Finetuning | ICPC 2022 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.10965) |\n| TwHIN-BERT | TwHIN-BERT: A Socially-Enriched Pre-trained Language Model for Multilingual Tweet Representations | BERT-base (110M) | Full Finetuning | Arxiv 2022 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2209.07562) |\n| LSH | Improving Code Example Recommendations on Informal Documentation Using BERT and Query-Aware LSH: A Comparative Study | BERT-base (110M) | Full Finetuning | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.03017v1) |\n| LLM2BERT4Rec | Leveraging Large Language Models for Sequential Recommendation | text-embedding-ada-002 | Frozen | RecSys 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.09261) | \n| LLM4ARec | Prompt Tuning Large Language Models on Personalized Aspect Extraction for Recommendations | GPT2 (110M) | Prompt Tuning | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.01475) |\n| TIGER | Recommender Systems with Generative Retrieval | Sentence-T5-base (223M) | Frozen | NIPS 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.05065) |\n| TBIN | TBIN: Modeling Long Textual Behavior Data for CTR Prediction | BERT-base (110M) | Frozen | DLP-RecSys 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.08483) |\n| LKPNR | LKPNR: LLM and KG for Personalized News Recommendation Framework | LLaMA2 (7B) | Frozen | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.12028) |\n| SSNA | Towards Efficient and Effective Adaptation of Large 
Language Models for Sequential Recommendation | DistilRoBERTa-base (83M) | Layerwise Adapter Tuning | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.01612) |\n| CollabContext | Collaborative Contextualization: Bridging the Gap between Collaborative Filtering and Pre-trained Language Model | Instructor-XL (1.5B) | Frozen | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.09400) |\n| LMIndexer | Language Models As Semantic Indexers | T5-base (223M) | Full Finetuning | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.07815) |\n| Stack | A BERT based Ensemble Approach for Sentiment Classification of Customer Reviews and its Application to Nudge Marketing in e-Commerce | BERT-base (110M) | Frozen | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.10782) |\n| N\u002FA | Utilizing Language Models for Tour Itinerary Recommendation | BERT-base (110M) | Full Finetuning | PMAI@IJCAI 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.12355) |\n| UEM | User Embedding Model for Personalized Language Prompting | Sentence-T5-base (223M) | Frozen | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.04858) |\n| Social-LLM | Social-LLM: Modeling User Behavior at Scale using Language Models and Social Network Data | SBERT-MPNet-base (110M) | Frozen | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.00893) |\n| LLMRS | LLMRS: Unlocking Potentials of LLM-Based Recommender Systems for Software Purchase | MPNet (110M) | Frozen | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.06676) |\n| KERL | Knowledge Graphs and Pre-trained Language Models enhanced Representation Learning for Conversational Recommender Systems | BERT-mini | Frozen | TNNLS | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.10967) |\n| N\u002FA | Empowering Few-Shot Recommender Systems with Large Language Models -- Enhanced Representations | ChatGPT | Frozen | 
IEEE Access | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.13557) |\n| N\u002FA | Better Generalization with Semantic IDs: A Case Study in Ranking for Recommendations | Unknown | Frozen | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.08121) |\n\n\u003Cb>1.2.2 Unified Cross-domain Recommendation\u003C\u002Fb>\n\n| **Name** | **Paper** | **LLM Backbone (Largest)** | **LLM Tuning Strategy** | **Publication** | **Link** |\n|:---:|:---|:---:|:---:|:---:|:---:|\n| ZESRec | Zero-Shot Recommender Systems | BERT-base (110M) | Frozen | Arxiv 2021 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2105.08318) |\n| UniSRec | Towards Universal Sequence Representation Learning for Recommender Systems | BERT-base (110M) | Frozen | KDD 2022 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.05941) |\n| TransRec | TransRec: Learning Transferable Recommendation from Mixture-of-Modality Feedback | BERT-base (110M) | Full Finetuning | Arxiv 2022 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.06190) |\n| VQ-Rec | Learning Vector-Quantized Item Representation for Transferable Sequential Recommenders | BERT-base (110M) | Frozen | WWW 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.12316) |\n| IDRec vs MoRec | Where to Go Next for Recommender Systems? ID- vs. 
Modality-based Recommender Models Revisited | BERT-base (110M) | Full Finetuning | SIGIR 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.13835) |\n| TransRec | Exploring Adapter-based Transfer Learning for Recommender Systems: Empirical Studies and Practical Insights | RoBERTa-base (125M) | Layerwise Adapter Tuning | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.15036) |\n| TCF | Exploring the Upper Limits of Text-Based Collaborative Filtering Using Large Language Models: Discoveries and Insights | OPT-175B (175B) | Frozen\u002F Full Finetuning | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.11700) |\n| S&R Foundation | An Unified Search and Recommendation Foundation Model for Cold-Start Scenario | ChatGLM (6B) | Frozen | CIKM 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.08939) |\n| MISSRec | MISSRec: Pre-training and Transferring Multi-modal Interest-aware Sequence Representation for Recommendation | CLIP-B\u002F32 (400M) | Full Finetuning | MM 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.11175) |\n| UFIN | UFIN: Universal Feature Interaction Network for Multi-Domain Click-Through Rate Prediction | FLAN-T5-base (250M) | Frozen | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.15493) |\n| PMMRec | Multi-Modality is All You Need for Transferable Recommender Systems | RoBERTa-large (355M) | Top-2-layer Finetuning | ICDE 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.09602) |\n| Uni-CTR | A Unified Framework for Multi-Domain CTR Prediction via Large Language Models | Sheared-LLaMA (1.3B) | LoRA | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.10743) |\n| PCDR | Prompt-enhanced Federated Content Representation Learning for Cross-domain Recommendation | BERT-base (110M) | Frozen | WWW 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.14678) 
|\n\n\u003C\u002Fp>\n\u003C\u002Fdetails>\n\n\u003Cdetails>\u003Csummary>\u003Cb>1.3 LLM as Scoring\u002FRanking Function\u003C\u002Fb>\u003C\u002Fsummary>\n\u003Cp>\n\n\u003Cb>1.3.1 Item Scoring Task\u003C\u002Fb>\n\n| **Name** | **Paper** | **LLM Backbone (Largest)** | **LLM Tuning Strategy** | **Publication** | **Link** |\n|:---:|:---|:---:|:---:|:---:|:---:|\n| LMRecSys | Language Models as Recommender Systems: Evaluations and Limitations | GPT2-XL (1.5B) | Full Finetuning | ICBINB 2021 | [[Link]](https:\u002F\u002Fopenreview.net\u002Fforum?id=hFx3fY7-m9b) |\n| PTab | PTab: Using the Pre-trained Language Model for Modeling Tabular Data | BERT-base (110M) | Full Finetuning | Arxiv 2022 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2209.08060) |\n| UniTRec | UniTRec: A Unified Text-to-Text Transformer and Joint Contrastive Learning Framework for Text-based Recommendation | BART (406M) | Full Finetuning | ACL 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.15756) |\n| Prompt4NR | Prompt Learning for News Recommendation | BERT-base (110M) | Full Finetuning | SIGIR 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.05263) |\n| RecFormer | Text Is All You Need: Learning Language Representations for Sequential Recommendation | LongFormer (149M) | Full Finetuning | KDD 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.13731v1) |\n| TabLLM | TabLLM: Few-shot Classification of Tabular Data with Large Language Models | T0 (11B) | Few-shot Parameter-effiecnt Finetuning | AISTATS 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.10723) |\n| Zero-shot GPT | Zero-Shot Recommendation as Language Modeling | GPT2-medium (355M) | Frozen | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.04184) |\n| FLAN-T5 | Do LLMs Understand User Preferences? 
Evaluating LLMs On User Rating Prediction | FLAN-5-XXL (11B) | Full Finetuning | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2305.06474.pdf) |\n| BookGPT | BookGPT: A General Framework for Book Recommendation Empowered by Large Language Model | ChatGPT | Frozen | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.15673v1) |\n| TALLRec | TALLRec: An Effective and Efficient Tuning Framework to Align Large Language Model with Recommendation | LLaMA (7B) | LoRA | RecSys 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.00447) |\n| PBNR | PBNR: Prompt-based News Recommender System | T5-small (60M) | Full Finetuning | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.07862) |\n| CR-SoRec | CR-SoRec: BERT driven Consistency Regularization for Social Recommendation | BERT-base (110M) | Full Finetuning | RecSys 2023 | [[Link]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002FfullHtml\u002F10.1145\u002F3604915.3608844) |\n| PromptRec | Towards Personalized Cold-Start Recommendation with Prompts | LLaMA (7B) | Frozen | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.17256) |\n| GLRec | Exploring Large Language Model for Graph Data Understanding in Online Job Recommendations | BELLE-LLaMA (7B) | LoRA | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.05722) |\n| BERT4CTR | BERT4CTR: An Efficient Framework to Combine Pre-trained Language Model with Non-textual Features for CTR Prediction | RoBERTa-large (355M) | Full Finetuning | KDD 2023 | [[Link]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3580305.3599780) |\n| ReLLa | ReLLa: Retrieval-enhanced Large Language Models for Lifelong Sequential Behavior Comprehension in Recommendation | Vicuna (13B) | LoRA | WWW 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.11131) |\n| TASTE | Text Matching Improves Sequential Recommendation by Reducing Popularity Biases | T5-base (223M) | Full Finetuning 
| CIKM 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.14029) |\n| N\u002FA | Unveiling Challenging Cases in Text-based Recommender Systems | BERT-base (110M) | Full Finetuning | RecSys Workshop 2023 | [[Link]](https:\u002F\u002Fceur-ws.org\u002FVol-3476\u002Fpaper5.pdf) |\n| ClickPrompt | ClickPrompt: CTR Models are Strong Prompt Generators for Adapting Language Models to CTR Prediction | RoBERTa-large (355M) | Full Finetuning | WWW 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.09234) |\n| SetwiseRank | A Setwise Approach for Effective and Highly Efficient Zero-shot Ranking with Large Language Models | FLAN-T5-XXL (11B) | Frozen |  Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.09497) |\n| UPSR | Thoroughly Modeling Multi-domain Pre-trained Recommendation as Language | T5-base (223M) | Full Finetuning | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.13540) |\n| LLM-Rec | One Model for All: Large Language Models are Domain-Agnostic Recommendation Systems | OPT (6.7B) | LoRA | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.14304) |\n| LLMRanker | Beyond Yes and No: Improving Zero-Shot LLM Rankers via Scoring Fine-Grained Relevance Labels | FLAN PaLM2 S | Frozen | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.14122) |\n| CoLLM | CoLLM: Integrating Collaborative Embeddings into Large Language Models for Recommendation | Vicuna (7B) | LoRA | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.19488) |\n| FLIP | FLIP: Towards Fine-grained Alignment between ID-based Models and Pretrained Language Models for CTR Prediction | RoBERTa-large (355M) | Full Finetuning | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.19453) |\n| BTRec | BTRec: BERT-Based Trajectory Recommendation for Personalized Tours | BERT-base (110M) | Full Finetuning | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.19886) |\n| 
CLLM4Rec | Collaborative Large Language Model for Recommender Systems | GPT2 (110M) | Full Finetuning | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.01343) |\n| CUP | Recommendations by Concise User Profiles from Review Text | BERT-base (110M) | Last-layer Finetuning | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.01314) |\n| N\u002FA | Instruction Distillation Makes Large Language Models Efficient Zero-shot Rankers | FLAN-T5-XL (3B) | Full Finetuning | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.01555) |\n| CoWPiRec | Collaborative Word-based Pre-trained Item Representation for Transferable Recommendation | BERT-base (110M) | Full Finetuning | ICDM 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.10501) |\n| RecExplainer | RecExplainer: Aligning Large Language Models for Recommendation Model Interpretability | Vicuna-v1.3 (7B) | LoRA | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.10947) |\n| E4SRec | E4SRec: An Elegant Effective Efficient Extensible Solution of Large Language Models for Sequential Recommendation | LLaMA2 (13B) | LoRA | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.02443) |\n| CER | The Problem of Coherence in Natural Language Explanations of Recommendations | GPT2 (110M) | Full Finetuning | ECAI 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.11356) |\n| LSAT | Preliminary Study on Incremental Learning for Large Language Model-based Recommender Systems | LLaMA (7B) | LoRA | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.15599) |\n| Llama4Rec | Integrating Large Language Models into Recommendation via Mutual Augmentation and Adaptive Aggregation | LLaMA2 (7B) | Full Finetuning | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.13870) |\n    \n\u003Cb>1.3.2 Item Generation Task\u003C\u002Fb>\n\n| **Name** | **Paper** | **LLM Backbone (Largest)** | **LLM Tuning 
Strategy** | **Publication** | **Link** |\n|:---:|:---|:---:|:---:|:---:|:---:|\n| GPT4Rec | GPT4Rec: A Generative Framework for Personalized Recommendation and User Interests Interpretation | GPT2 (110M) | Full Finetuning | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.03879) |\n| VIP5 | VIP5: Towards Multimodal Foundation Models for Recommendation | T5-base (223M) | Layerwise Adater Tuning | EMNLP 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.14302) |\n| P5-ID | How to Index Item IDs for Recommendation Foundation Models | T5-small (60M) | Full Finetuning | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.06569) |\n| FaiRLLM | Is ChatGPT Fair for Recommendation? Evaluating Fairness in Large Language Model Recommendation | ChatGPT | Frozen | RecSys 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.07609) |\n| PALR | PALR: Personalization Aware LLMs for Recommendation | LLaMA (7B) | Full Finetuning | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.07622) |\n| ChatGPT | Large Language Models are Zero-Shot Rankers for Recommender Systems | ChatGPT | Frozen | ECIR 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.08845) |\n| AGR | Sparks of Artificial General Recommender (AGR): Early Experiments with ChatGPT | ChatGPT | Frozen | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.04518) |\n| NIR | Zero-Shot Next-Item Recommendation using Large Pretrained Language Models | GPT3 (175B) | Frozen | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.03153) |\n| GPTRec | Generative Sequential Recommendation with GPTRec | GPT2-medium (355M) | Full Finetuning | Gen-IR@SIGIR 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.11114) |\n| ChatNews | A Preliminary Study of ChatGPT on News Recommendation: Personalization, Provider Fairness, Fake News | ChatGPT | Frozen | Arxiv 2023 | 
[[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.10702) |\n| N\u002FA | Large Language Models are Competitive Near Cold-start Recommenders for Language- and Item-based Preferences | PaLM (62B) | Frozen | RecSys 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.14225) |\n| LLMSeqPrompt | Leveraging Large Language Models for Sequential Recommendation | OpenAI ada model | Finetune | RecSys 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.09261) | \n| GenRec | GenRec: Large Language Model for Generative Recommendation | LLaMA (7B) | LoRA | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.00457) |\n| UP5 | UP5: Unbiased Foundation Model for Fairness-aware Recommendation | T5-base (223M) | Prefix Tuning | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.12090) |\n| HKFR | Heterogeneous Knowledge Fusion: A Novel Approach for Personalized Recommendation via LLM | ChatGLM (6B) | LoRA | RecSys 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.03333) |\n| N\u002FA | The Unequal Opportunities of Large Language Models: Revealing Demographic Bias through Job Recommendations | ChatGPT | Frozen | EAAMO 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.02053) |\n| BIGRec | A Bi-Step Grounding Paradigm for Large Language Models in Recommendation Systems | LLaMA (7B) | LoRA | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.08434) |\n| KP4SR | Knowledge Prompt-tuning for Sequential Recommendation | T5-small (60M) | Full Finetuning | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.08459) |\n| RecSysLLM | Leveraging Large Language Models for Pre-trained Recommender Systems | GLM (10B) | LoRA | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.10837) |\n| POD | Prompt Distillation for Efficient LLM-based Recommendation | T5-small (60M) | Full Finetuning | CIKM 2023 | 
[[Link]](https:\u002F\u002Flileipisces.github.io\u002Ffiles\u002FCIKM23-POD-paper.pdf) |\n| N\u002FA | Evaluating ChatGPT as a Recommender System: A Rigorous Approach | ChatGPT | Frozen | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.03613) |\n| RaRS | Retrieval-augmented Recommender System: Enhancing Recommender Systems with Large Language Models | ChatGPT | Frozen | RecSys Doctoral Symposium 2023 | [[Link]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3604915.3608889) |\n| JobRecoGPT | JobRecoGPT -- Explainable job recommendations using LLMs | GPT4 | Frozen | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.11805) |\n| LANCER | Reformulating Sequential Recommendation: Learning Dynamic User Interest with Content-enriched Language Modeling | GPT2 (110M) | Prefix Tuning | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.10435) |\n| TransRec | A Multi-facet Paradigm to Bridge Large Language Model and Recommendation | LLaMA (7B) | LoRA | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.06491) |\n| AgentCF | AgentCF: Collaborative Learning with Autonomous Language Agents for Recommender Systems | text-davinci-003 & gpt-3.5-turbo | Frozen | WWW 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.09233) |\n| P4LM | Factual and Personalized Recommendations using Language Models and Reinforcement Learning | PaLM2-XS | Full Finetuning | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.06176) |\n| InstructMK | Multiple Key-value Strategy in Recommendation Systems Incorporating Large Language Model | LLaMA (7B) | Full Finetuning | CIKM GenRec 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.16409) |\n| LightLM | LightLM: A Lightweight Deep and Narrow Language Model for Generative Recommendation | T5-small (60M) | Full Finetuning | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.17488) |\n| LlamaRec | LlamaRec: 
Two-Stage Recommendation using Large Language Models for Ranking | LLaMA2 (7B) | QLoRA | PGAI@CIKM 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.02089) |\n| N\u002FA | Exploring Recommendation Capabilities of GPT-4V(ision): A Preliminary Case Study | GPT-4V | Frozen | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.04199) |\n| N\u002FA | Exploring Fine-tuning ChatGPT for News Recommendation | ChatGPT | gpt-3.5-turbo finetuning API | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.05850) |\n| N\u002FA | Do LLMs Implicitly Exhibit User Discrimination in Recommendation? An Empirical Study | ChatGPT | Frozen | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.07054) |\n| LC-Rec | Adapting Large Language Models by Integrating Collaborative Semantics for Recommendation | LLaMA (7B) | LoRA | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.09049) |\n| DOKE | Knowledge Plugins: Enhancing Large Language Models for Domain-Specific Recommendations | ChatGPT | Frozen | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.10779) |\n| ControlRec | ControlRec: Bridging the Semantic Gap between Language Model and Personalized Recommendation | T5-base (223M) | Full Finetuning | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.16441) |\n| LLaRA | LLaRA: Large Language-Recommendation Assistant | LLaMA2 (7B) | LoRA | SIGIR 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.02445) |\n| PO4ISR | Large Language Models for Intent-Driven Session Recommendations | ChatGPT | Frozen | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.07552) |\n| DRDT | DRDT: Dynamic Reflection with Divergent Thinking for LLM-based Sequential Recommendation | ChatGPT | Frozen | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.11336) |\n| RecPrompt | RecPrompt: A Prompt Tuning Framework for News Recommendation Using Large 
Language Models | GPT4 | Frozen | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.10463) |\n| LiT5 | Scaling Down, LiTting Up: Efficient Zero-Shot Listwise Reranking with Seq2seq Encoder-Decoder Models | T5-XL (3B) | Full Finetuning | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.16098) |\n| STELLA | Large Language Models are Not Stable Recommender Systems | ChatGPT | Frozen | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.15746) |\n| Llama4Rec | Integrating Large Language Models into Recommendation via Mutual Augmentation and Adaptive Aggregation | LLaMA2 (7B) | Full Finetuning | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.13870) |\n| RECLLM | Understanding Biases in ChatGPT-based Recommender Systems: Provider Fairness, Temporal Stability, and Recency | ChatGPT | Frozen | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.10545) |\n| DEALRec | Data-efficient Fine-tuning for LLM-based Recommendation | LLaMA (7B) | LoRA | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.17197) |\n\n\u003Cb>1.3.3 Hybrid Task\u003C\u002Fb>\n\n| **Name** | **Paper** | **LLM Backbone (Largest)** | **LLM Tuning Strategy** | **Publication** | **Link** |\n|:---:|:---|:---:|:---:|:---:|:---:|\n| P5 | Recommendation as Language Processing (RLP): A Unified Pretrain, Personalized Prompt & Predict Paradigm (P5) | T5-base (223M) | Full Finetuning | RecSys 2022 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.13366) |\n| M6-Rec | M6-Rec: Generative Pretrained Language Models are Open-Ended Recommender Systems | M6-base (300M) | Option Tuning | Arxiv 2022 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.08084) |\n| InstructRec | Recommendation as Instruction Following: A Large Language Model Empowered Recommendation Approach | FLAN-T5-XL (3B) | Full Finetuning | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.07001) |\n| ChatGPT | 
Is ChatGPT a Good Recommender? A Preliminary Study | ChatGPT | Frozen | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.10149) |\n| ChatGPT | Is ChatGPT Good at Search? Investigating Large Language Models as Re-Ranking Agent | ChatGPT | Frozen | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.09542) |\n| ChatGPT | Uncovering ChatGPT's Capabilities in Recommender Systems | ChatGPT | Frozen | RecSys 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.02182) |\n| BDLM | Bridging the Information Gap Between Domain-Specific Model and General LLM for Personalized Recommendation | Vicuna (7B) | Full Finetuning | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.03778) |\n| RecRanker | RecRanker: Instruction Tuning Large Language Model as Ranker for Top-k Recommendation | LLaMA2 (13B) | Full Finetuning | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.16018) |\n\n\u003C\u002Fp>\n\u003C\u002Fdetails>\n\n\u003Cdetails>\u003Csummary>\u003Cb>1.4 LLM for User Interaction\u003C\u002Fb>\u003C\u002Fsummary>\n\u003Cp>\n\n\u003Cb>1.4.1 Task-oriented User Interaction\u003C\u002Fb>\n    \n| **Name** | **Paper** | **LLM Backbone (Largest)** | **LLM Tuning Strategy** | **Publication** | **Link** |\n|:---:|:---|:---:|:---:|:---:|:---:|\n| TG-ReDial | Towards Topic-Guided Conversational Recommender System | BERT-base (110M) & GPT2 (110M) | Unknown | COLING 2020 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2010.04125) |\n| TCP | Follow Me: Conversation Planning for Target-driven Recommendation Dialogue Systems | BERT-base (110M) | Full Finetuning | Arxiv 2022 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.03516) |\n| MESE | Improving Conversational Recommendation Systems' Quality with Context-Aware Item Meta-Information | DistilBERT (67M) & GPT2 (110M) | Full Finetuning | ACL 2022 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.08140) |\n| UniMIND | A Unified Multi-task 
Learning Framework for Multi-goal Conversational Recommender Systems | BART-base (139M) | Full Finetuning | ACM TOIS 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.06923) |\n| VRICR | Variational Reasoning over Incomplete Knowledge Graphs for Conversational Recommendation | BERT-base (110M) | Full Finetuning | WSDM 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.11868) |\n| KECR | Explicit Knowledge Graph Reasoning for Conversational Recommendation | BERT-base (110M) & GPT2 (110M) | Frozen | ACM TIST 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.00783) |\n| N\u002FA | Large Language Models as Zero-Shot Conversational Recommenders | GPT4 | Frozen | CIKM 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.10053) |\n| MuseChat | MuseChat: A Conversational Music Recommendation System for Videos | Vicuna (7B) | LoRA | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.06282) |\n| N\u002FA | Conversational Recommender System and Large Language Model Are Made for Each Other in E-commerce Pre-sales Dialogue | Chinese-Alpaca (7B) | LoRA | EMNLP 2023 Findings | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.14626) |\n| N\u002FA | ChatGPT for Conversational Recommendation: Refining Recommendations by Reprompting with Feedback | ChatGPT | Frozen | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.03605) |\n\n\u003Cb>1.4.2 Open-ended User Interaction\u003C\u002Fb>\n    \n| **Name** | **Paper** | **LLM Backbone (Largest)** | **LLM Tuning Strategy** | **Publication** | **Link** |\n|:---:|:---|:---:|:---:|:---:|:---:|\n| BARCOR | BARCOR: Towards A Unified Framework for Conversational Recommendation Systems | BART-base (139M) | Selective-layer Finetuning | Arxiv 2022 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.14257) |\n| RecInDial | RecInDial: A Unified Framework for Conversational Recommendation with Pretrained Language Models | DialoGPT (110M) | Full Finetuning 
| AACL 2022 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2110.07477) |\n| UniCRS | Towards Unified Conversational Recommender Systems via Knowledge-Enhanced Prompt Learning | DialoGPT-small (176M) | Frozen | KDD 2022 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.09363) |\n| T5-CR | Multi-Task End-to-End Training Improves Conversational Recommendation | T5-base (223M) | Full Finetuning | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.06218) |\n| TtW | Talk the Walk: Synthetic Data Generation for Conversational Music Recommendation | T5-base (223M) & T5-XXL (11B) | Full Finetuning & Frozen | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.11489) |\n| N\u002FA | Rethinking the Evaluation for Conversational Recommendation in the Era of Large Language Models | ChatGPT | Frozen | EMNLP 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.13112) |\n| PECRS | Parameter-Efficient Conversational Recommender System as a Language Processing Task | GPT2-medium (355M) | LoRA | EACL 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.14194) |\n\n\u003C\u002Fp>\n\u003C\u002Fdetails>\n\n\u003Cdetails>\u003Csummary>\u003Cb>1.5 LLM for RS Pipeline Controller\u003C\u002Fb>\u003C\u002Fsummary>\n\u003Cp>\n    \n| **Name** | **Paper** | **LLM Backbone (Largest)** | **LLM Tuning Strategy** | **Publication** | **Link** |\n|:---:|:---|:---:|:---:|:---:|:---:|\n| Chat-REC | Chat-REC: Towards Interactive and Explainable LLMs-Augmented Recommender System | ChatGPT | Frozen | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.14524) |\n| RecLLM | Leveraging Large Language Models in Conversational Recommender Systems | LLaMA (7B) | Full Finetuning | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.07961) |\n| RAH | RAH! 
RecSys-Assistant-Human: A Human-Central Recommendation Framework with Large Language Models | GPT4 | Frozen | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.09904) |\n| RecMind | RecMind: Large Language Model Powered Agent For Recommendation | ChatGPT | Frozen | NAACL 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.14296) |\n| InteRecAgent | Recommender AI Agent: Integrating Large Language Models for Interactive Recommendations | GPT4 | Frozen | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.16505) |\n| CORE | Lending Interaction Wings to Recommender Systems with Conversational Agents | N\u002FA | N\u002FA | NIPS 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.04230) |\n| LLMCRS | A Large Language Model Enhanced Conversational Recommender System | LLaMA (7B) | Full Finetuning | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.06212) |\n\n\u003C\u002Fp>\n\u003C\u002Fdetails>\n\n\u003Cdetails>\u003Csummary>\u003Cb>1.6 Related Survey Papers\u003C\u002Fb>\u003C\u002Fsummary>\n\u003Cp>\n\n| **Paper** | **Publication** | **Link** |\n|:---|:---:|:---:|\n| GR-LLMs: Recent Advances in Generative Recommendation Based on Large Language Models | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.06507) |\n| The Future is Agentic: Definitions, Perspectives, and Open Challenges of Multi-Agent Recommender Systems | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.02097) |\n| A Survey of Foundation Model-Powered Recommender Systems: From Feature-Based, Generative to Agentic Paradi | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.16420) |\n| A Survey of Personalization: From RAG to Agent | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.10147) |\n| A Survey of Large Language Model Empowered Agents for Recommendation and Search: Towards Next-Generation Information Retrieval | Arxiv 2025 | 
[[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.05659) |\n| Agent-centric Information Access | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.19298) |\n| A Survey on LLM-based News Recommender Systems | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.09797) |\n| A Survey on LLM-powered Agents for Recommender Systems | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.10050) |\n| Cold-Start Recommendation towards the Era of Large Language Models (LLMs): A Comprehensive Survey and Roadmap | Arxiv 2025 | [[link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.01945) |\n| Large Language Model Enhanced Recommender Systems: Taxonomy, Trend, Application and Future | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.13432) |\n| Recommender Systems in the Era of Large Language Model Agents: A Survey | Preprint | [[Link]](https:\u002F\u002Fwww.researchgate.net\u002Fpublication\u002F386342676_Recommender_Systems_in_the_Era_of_Large_Language_Model_Agents_A_Survey) |\n| A Survey on Efficient Solutions of Large Language Models for Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Fwww.researchgate.net\u002Fpublication\u002F385863443_A_Survey_on_Efficient_Solutions_of_Large_Language_Models_for_Recommendation) |\n| Towards Next-Generation LLM-based Recommender Systems: A Survey and Beyond | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.19744) |\n| Bias and Unfairness in Information Retrieval Systems: New Challenges in the LLM Era | KDD 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.11457) |\n| All Roads Lead to Rome: Unveiling the Trajectory of Recommender Systems Across the LLM Era | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.10081) |\n| Survey for Landing Generative AI in Social and E-commerce Recsys - the Industry Perspectives | Arxiv 2024 | [[Link]](https:\u002F\u002Fwww.arxiv.org\u002Fabs\u002F2406.06475) |\n| A Survey 
of Generative Search and Recommendation in the Era of Large Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.16924) |\n| When Search Engine Services meet Large Language Models: Visions and Challenges | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.00128) |\n| A Review of Modern Recommender Systems Using Generative Models (Gen-RecSys) | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.00579) |\n| Exploring the Impact of Large Language Models on Recommender Systems: An Extensive Review | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.18590) |\n| Foundation Models for Recommender Systems: A Survey and New Perspectives | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.11143) |\n| Prompting Large Language Models for Recommender Systems: A Comprehensive Framework and Empirical Analysis | Arixv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.04997) |\n| User Modeling in the Era of Large Language Models: Current Research and Future Directions | IEEE Data Engineering Bulletin 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.11518) |\n| A Survey on Large Language Models for Personalized and Explainable Recommendations | Arxiv 2023 |[[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.12338) |\n| Large Language Models for Generative Recommendation: A Survey and Visionary Discussions | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.01157) |\n| Large Language Models for Information Retrieval: A Survey | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.07107) |\n| When Large Language Models Meet Personalization: Perspectives of Challenges and Opportunities | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.16376) | |\n| Recommender Systems in the Era of Large Language Models (LLMs) | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.02046) |\n| A 
Survey on Large Language Models for Recommendation | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.19860) |\n| Pre-train, Prompt and Recommendation: A Comprehensive Survey of Language Modelling Paradigm Adaptations in Recommender Systems | TACL 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.03735) |\n| Self-Supervised Learning for Recommender Systems: A Survey | TKDE 2022 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.15876) |\n    \n\u003C\u002Fp>\n\u003C\u002Fdetails>\n\n\u003Cdetails>\u003Csummary>\u003Cb>1.7 Newest Research Work List\u003C\u002Fb>\u003C\u002Fsummary>\n\u003Cp>\n\n| **Paper** | **Publication** | **Link** |\n|:---|:---:|:---:|\n| Large Language Model Can Interpret Latent Space of Sequential Recommender | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.20487) |\n| Zero-Shot Recommendations with Pre-Trained Large Language Models for Multimodal Nudging | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.01026) |\n| INTERS: Unlocking the Power of Large Language Models in Search with Instruction Tuning | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.06532) |\n| Evaluation of Synthetic Datasets for Conversational Recommender Systems | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.08167v1) |\n| Generative Recommendation: Towards Next-generation Recommender Paradigm | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.03516) |\n| Towards Personalized Prompt-Model Retrieval for Generative Recommendation | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.02205) |\n| Generative Next-Basket Recommendation | RecSys 2023 | [[Link]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3604915.3608823) |\n| Unlocking the Potential of Large Language Models for Explainable Recommendations | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.15661) |\n| 
Logic-Scaffolding: Personalized Aspect-Instructed Recommendation Explanation Generation using LLMs | Falcon (40B) | Frozen | WSDM 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.14345) |\n| Improving Sequential Recommendations with LLMs | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.01339) |\n| A Multi-Agent Conversational Recommender System | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.01135) |\n| TransFR: Transferable Federated Recommendation with Pre-trained Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.01124) |\n| Large Language Model Distilling Medication Recommendation Model | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.02803) |\n| Uncertainty-Aware Explainable Recommendation with Large Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.03366) |\n| Natural Language User Profiles for Transparent and Scrutable Recommendations | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.05810) |\n| Leveraging LLMs for Unsupervised Dense Retriever Ranking | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.04853) |\n| RA-Rec: An Efficient ID Representation Alignment Framework for LLM-based Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.04527) |\n| A Multi-Agent Conversational Recommender System | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.01135) |\n| Fairly Evaluating Large Language Model-based Recommendation Needs Revisit the Cross-Entropy Loss | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.06216) |\n| SearchAgent: A Lightweight Collaborative Search Agent with Large Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.06360) |\n| Large Language Model Interaction Simulator for Cold-Start Item Recommendation | Arxiv 2024 | 
[[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.09176) |\n| Enhancing ID and Text Fusion via Alternative Training in Session-based Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.08921) |\n| eCeLLM: Generalizing Large Language Models for E-commerce from Large-scale, High-quality Instruction Data | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.08831) |\n| LLM-Enhanced User-Item Interactions: Leveraging Edge Information for Optimized Recommendations | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.09617) |\n| LLM-based Federated Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.09959) |\n| Rethinking Large Language Model Architectures for Sequential Recommendations | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.09543) |\n| Large Language Model with Graph Convolution for Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.08859) |\n| Rec-GPT4V: Multimodal Recommendation with Large Vision-Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.08670) |\n| Enhancing Recommendation Diversity by Re-ranking with Large Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.11506) |\n| Are ID Embeddings Necessary? 
Whitening Pre-trained Text Embeddings for Effective Sequential Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.10602) |\n| SPAR: Personalized Content-Based Recommendation via Long Engagement Attention | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.10555) |\n| Cognitive Personalized Search Integrating Large Language Models with an Efficient Memory Mechanism | WWW 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.10548) |\n| Large Language Models as Data Augmenters for Cold-Start Item Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.11724) |\n| Explain then Rank: Scale Calibration of Neural Rankers Using Natural Language Explanations from Large Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.12276) |\n| LLM4SBR: A Lightweight and Effective Framework for Integrating Large Language Models in Session-based Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.13840) |\n| Breaking the Barrier: Utilizing Large Language Models for Industrial Recommendation Systems through an Inferential Knowledge Graph | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.13750) |\n| User-LLM: Efficient LLM Contextualization with User Embeddings | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.13598) |\n| Stealthy Attack on Large Language Model based Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.14836) |\n| Multi-Agent Collaboration Framework for Recommender Systems | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.15235) |\n| Item-side Fairness of Large Language Model-based Recommendation System | WWW 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.15215) |\n| Integrating Large Language Models with Graphical Session-Based Recommendation | Arxiv 2024 | 
[[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.16539) |\n| Language-Based User Profiles for Recommendation | LLM-IGS@WSDM2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.15623) |\n| BASES: Large-scale Web Search User Simulation with Large Language Model based Agents | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.17505) |\n| Prospect Personalized Recommendation on Large Language Model-based Agent Platform | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.18240) |\n| Sequence-level Semantic Representation Fusion for Recommender Systems | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.18166) |\n| Corpus-Steered Query Expansion with Large Language Models | ECAL 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.18031) |\n| NoteLLM: A Retrievable Large Language Model for Note Recommendation | WWW 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.01744) |\n| An Interpretable Ensemble of Graph and Language Models for Improving Search Relevance in E-Commerce | WWW 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.00923) |\n| LLM-Ensemble: Optimal Large Language Model Ensemble Method for E-commerce Product Attribute Value Extraction | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.00863) |\n| Enhancing Long-Term Recommendation with Bi-level Learnable Large Language Model Planning | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.00843) |\n| InteraRec: Interactive Recommendations Using Multimodal Large Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.00822) |\n| ChatDiet: Empowering Personalized Nutrition-Oriented Food Recommender Chatbots through an LLM-Augmented Framework  | CHASE 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.00781) |\n| Towards Efficient and Effective Unlearning of Large Language Models for Recommendation | Arxiv 2024 | 
[[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.03536) |\n| Generative News Recommendation | WWW 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.03424) |\n| Bridging Language and Items for Retrieval and Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.03952) |\n| Can Small Language Models be Good Reasoners for Sequential Recommendation? | WWW 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.04260) |\n| Aligning Large Language Models for Controllable Recommendations | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.05063) |\n| Personalized Audiobook Recommendations at Spotify Through Graph Neural Networks | WWW 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.05185) |\n| Towards Graph Foundation Models for Personalization | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.07478) |\n| CFaiRLLM: Consumer Fairness Evaluation in Large-Language Model Recommender System | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.05668) |\n| CoRAL: Collaborative Retrieval-Augmented Large Language Models Improve Long-tail Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.06447) |\n| RecAI: Leveraging Large Language Models for Next-Generation Recommender Systems | WWW 2024 Demo | [[Link]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.06465.pdf) |\n| KELLMRec: Knowledge-Enhanced Large Language Models for Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.06642) |\n| USimAgent: Large Language Models for Simulating Search Users | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.09142) |\n| CALRec: Contrastive Alignment of Generative LLMs For Sequential Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.02429) |\n| Integrating Large Language Models with Graphical Session-Based Recommendation | Arxiv 2024 | 
[[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.16539) |\n| Language-Based User Profiles for Recommendation | LLM-IGS@WSDM2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.15623) |\n| BASES: Large-scale Web Search User Simulation with Large Language Model based Agents | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.17505) |\n| Prospect Personalized Recommendation on Large Language Model-based Agent Platform | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.18240) |\n| Sequence-level Semantic Representation Fusion for Recommender Systems | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.18166) |\n| Corpus-Steered Query Expansion with Large Language Models | EACL 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.18031) |\n| NoteLLM: A Retrievable Large Language Model for Note Recommendation | WWW 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.01744) |\n| An Interpretable Ensemble of Graph and Language Models for Improving Search Relevance in E-Commerce | WWW 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.00923) |\n| LLM-Ensemble: Optimal Large Language Model Ensemble Method for E-commerce Product Attribute Value Extraction | SIGIR 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.00863) |\n| Enhancing Long-Term Recommendation with Bi-level Learnable Large Language Model Planning | SIGIR 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.00843) |\n| Towards Efficient and Effective Unlearning of Large Language Models for Recommendation | FCS | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.03536) |\n| Generative News Recommendation | WWW 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.03424) |\n| Bridging Language and Items for Retrieval and Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.03952) |\n| Can Small Language Models be Good Reasoners for 
Sequential Recommendation? | WWW 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.04260) |\n| Aligning Large Language Models for Controllable Recommendations | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.05063) |\n| Personalized Audiobook Recommendations at Spotify Through Graph Neural Networks | WWW 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.05185) |\n| CFaiRLLM: Consumer Fairness Evaluation in Large-Language Model Recommender System | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.05668) |\n| CoRAL: Collaborative Retrieval-Augmented Large Language Models Improve Long-tail Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.06447) |\n| RecAI: Leveraging Large Language Models for Next-Generation Recommender Systems | WWW 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.06465) |\n| KELLMRec: Knowledge-Enhanced Large Language Models for Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.06642) |\n| Towards Graph Foundation Models for Personalization | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.07478) |\n| USimAgent: Large Language Models for Simulating Search Users | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.09142) |\n| The Whole is Better than the Sum: Using Aggregated Demonstrations in In-Context Learning for Sequential Recommendation | NAACL 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.10135) |\n| PPM : A Pre-trained Plug-in Model for Click-through Rate Prediction | WWW 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.10049) |\n| Evaluating Large Language Models as Generative User Simulators for Conversational Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.09738) |\n| Towards Unified Multi-Modal Personalization: Large Vision-Language Models for Generative Recommendation and Beyond 
| ICLR 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.10667) |\n| Harnessing Large Language Models for Text-Rich Sequential Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.13325) |\n| A Large Language Model Enhanced Sequential Recommender for Joint Video and Comment Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.13574) |\n| Could Small Language Models Serve as Recommenders? Towards Data-centric Cold-start Recommendations | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.17256) |\n| Play to Your Strengths: Collaborative Intelligence of Conventional Recommender Models and Large Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.16378) |\n| Reinforcement Learning-based Recommender Systems with Large Language Models for State Reward and Action Modeling | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.16948) |\n| Large Language Models Enhanced Collaborative Filtering | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.17688) |\n| Improving Content Recommendation: Knowledge Graph-Based Semantic Contrastive Learning for Diversity and Cold-Start Users | LREC-COLING 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.18667) |\n| Sequential Recommendation with Latent Relations based on Large Language Model | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.18348) |\n| Enhanced Generative Recommendation via Content and Collaboration Integration | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.18480) |\n| To Recommend or Not: Recommendability Identification in Conversations with Pre-trained Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.18628) |\n| IDGenRec: LLM-RecSys Alignment with Textual ID Learning | SIGIR 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.19021) |\n| Breaking the 
Length Barrier: LLM-Enhanced CTR Prediction in Long Textual User Behaviors | SIGIR 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.19347) |\n| Make Large Language Model a Better Ranker | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.19181) |\n| Do Large Language Models Rank Fairly? An Empirical Study on the Fairness of LLMs as Rankers | NAACL 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.03192) |\n| IISAN: Efficiently Adapting Multimodal Representation for Sequential Recommendation with Decoupled PEFT | SIGIR 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.02059) |\n| Where to Move Next: Zero-shot Generalization of LLMs for Next POI Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.01855) |\n| Tired of Plugins? Large Language Models Can Be End-To-End Recommender | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.00702) |\n| Aligning Large Language Models with Recommendation Knowledge | NAACL 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.00245) |\n| Enhancing Content-based Recommendation via Large Language Model | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.00236) |\n| DRE: Generating Recommendation Explanations by Aligning Large Language Models at Data-level | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.06311) |\n| Optimization Methods for Personalizing Large Language Models through Retrieval Augmentation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.05970) |\n| Q-PEFT: Query-dependent Parameter Efficient Fine-tuning for Text Reranking with Large Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.04522) |\n| JobFormer: Skill-Aware Job Recommendation with Semantic-Enhanced Transformer | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.04313) |\n| PMG : Personalized Multimodal Generation with Large 
Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.08677) |\n| The Elephant in the Room: Rethinking the Usage of Pre-trained Language Model in Sequential Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.08796) |\n| Exact and Efficient Unlearning for Large Language Model-based Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.10327) |\n| Large Language Models meet Collaborative Filtering: An Efficient All-round LLM-based Recommender System | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.11343) |\n| Behavior Alignment: A New Perspective of Evaluating LLM-based Conversational Recommendation Systems | SIGIR 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.11773) |\n| Generating Diverse Criteria On-the-Fly to Improve Point-wise LLM Rankers | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.11960) |\n| RecGPT: Generative Personalized Prompts for Sequential Recommendation via ChatGPT Training Paradigm | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.08675) |\n| MMGRec: Multimodal Generative Recommendation with Transformer Model | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.16555) |\n| Hi-Gen: Generative Retrieval For Large-Scale Personalized E-commerce Search | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.15675) |\n| Contrastive Quantization based Semantic Code for Generative Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.14774) |\n| ImplicitAVE: An Open-Source Dataset and Multimodal LLMs Benchmark for Implicit Attribute Value Extraction | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.15592) |\n| Large Language Models for Next Point-of-Interest Recommendation | SIGIR 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.17591) |\n| Ranked List Truncation for Large 
Language Model-based Re-Ranking | SIGIR 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.18185) |\n| Large Language Models as Conversational Movie Recommenders: A User Study | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.19093) |\n| Distillation Matters: Empowering Sequential Recommenders to Match the Performance of Large Language Model | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.00338) |\n| Efficient and Responsible Adaptation of Large Language Models for Robust Top-k Recommendations | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.00824) |\n| FairEvalLLM. A Comprehensive Framework for Benchmarking Fairness in Large Language Model Recommender Systems | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.02219) |\n| Improve Temporal Awareness of LLMs for Sequential Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.02778) |\n| CALRec: Contrastive Alignment of Generative LLMs For Sequential Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.02429) |\n| Knowledge Adaptation from Large Language Model to Recommendation for Practical Industrial Application | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.03988) |\n| DynLLM: When Large Language Models Meet Dynamic Graph Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.07580) |\n| Learnable Tokenizer for LLM-based Generative Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002F2405.07314) |\n| CELA: Cost-Efficient Language Model Alignment for CTR Prediction | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.10596) |\n| RDRec: Rationale Distillation for LLM-based Recommendation | ACL 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.10587) |\n| EmbSum: Leveraging the Summarization Capabilities of Large Language Models for Content-Based 
Recommendations | Arxiv 2024 | [[Link]](https:\u002F\u002Fwww.arxiv.org\u002Fabs\u002F2405.11441) |\n| Reindex-Then-Adapt: Improving Large Language Models for Conversational Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.12119) |\n| RecGPT: Generative Pre-training for Text-based Recommendation | ACL 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.12715) |\n| Let Me Do It For You: Towards LLM Empowered Recommendation via Tool Learning | SIGIR 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.15114) |\n| Finetuning Large Language Model for Personalized Ranking | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.16127) |\n| LLMs for User Interest Exploration: A Hybrid Approach | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.16363) |\n| NoteLLM-2: Multimodal Large Representation Models for Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.16789) |\n| Multimodality Invariant Learning for Multimedia-Based New Item Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.15783) |\n| SLMRec: Empowering Small Language Models for Sequential Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.17890) |\n| Keyword-driven Retrieval-Augmented Large Language Models for Cold-start User Recommendations | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.19612) |\n| Generating Query Recommendations via LLMs | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.19749) |\n| Large Language Models Enhanced Sequential Recommendation for Long-tail User and Item | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.20646) |\n| DisCo: Towards Harmonious Disentanglement and Collaboration between Tabular and Semantic Space for Recommendation | KDD 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.00011) |\n| LLM-RankFusion: 
Mitigating Intrinsic Inconsistency in LLM-based Ranking | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.00231) |\n| A Practice-Friendly Two-Stage LLM-Enhanced Paradigm in Sequential Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.00333) |\n| Large Language Models as Recommender Systems: A Study of Popularity Bias | Gen-IR@SIGIR24 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.01285) |\n| Privacy in LLM-based Recommendation: Recent Advances and Future Directions | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.01363) |\n| An LLM-based Recommender System Environment | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.01631) |\n| Robust Interaction-based Relevance Modeling for Online E-Commerce and LLM-based Retrieval | ECML-PKDD 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.02135) |\n| Large Language Models Make Sample-Efficient Recommender Systems | FCS | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.02368) |\n| XRec: Large Language Models for Explainable Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.02377) |\n| Exploring User Retrieval Integration towards Large Language Models for Cross-Domain Sequential Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.03085) |\n| Large Language Models as Evaluators for Recommendation Explanations | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.03248) |\n| Text-like Encoding of Collaborative Information in Large Language Models for Recommendation | ACL 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.03210) |\n| Item-Language Model for Conversational Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.02844) |\n| Improving LLMs for Recommendation with Out-Of-Vocabulary Tokens | Arxiv 2024 | 
[[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.08477) |\n| On Softmax Direct Preference Optimization for Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.09215) |\n| TokenRec: Learning to Tokenize ID for LLM-based Generative Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.10450) |\n| DELRec: Distilling Sequential Pattern to Enhance LLM-based Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.11156) |\n| TourRank: Utilizing Large Language Models for Documents Ranking with a Tournament-Inspired Strategy | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.11678) |\n| Multi-Layer Ranking with Large Language Models for News Source Recommendation | SIGIR 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.11745) |\n| Intermediate Distillation: Data-Efficient Distillation from Black-Box LLMs for Information Retrieval | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.12169) |\n| LLM-enhanced Reranking in Recommender Systems | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.12433) |\n| LLM4MSR: An LLM-Enhanced Paradigm for Multi-Scenario Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.12529) |\n| Taxonomy-Guided Zero-Shot Recommendations with LLMs | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.14043) |\n| EAGER: Two-Stream Generative Recommender with Behavior-Semantic Collaboration | KDD 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.14017) |\n| An Investigation of Prompt Variations for Zero-shot LLM-based Rankers | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.14117) |\n| Optimizing Novelty of Top-k Recommendations using Large Language Models and Reinforcement Learning | KDD 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.14169) |\n| Enhancing Collaborative Semantics of 
Language Model-Driven Recommendations via Graph-Aware Learning | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.13235) |\n| Decoding Matters: Addressing Amplification Bias and Homogeneity Issue for LLM-based Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.14900) |\n| FIRST: Faster Improved Listwise Reranking with Single Token Decoding | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.15657) |\n| LLM-Powered Explanations: Unraveling Recommendations Through Subgraph Reasoning | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.15859) |\n| DemoRank: Selecting Effective Demonstrations for Large Language Models in Ranking Task | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.16332) |\n| ELCoRec: Enhance Language Understanding with Co-Propagation of Numerical and Categorical Features for Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.18825) |\n| Generative Explore-Exploit: Training-free Optimization of Generative Recommender Systems using LLM Optimizers | ACL 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.05255) |\n| ProductAgent: Benchmarking Conversational Product Search Agent with Asking Clarification Questions | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.00942) |\n| MemoCRS: Memory-enhanced Sequential Conversational Recommender Systems with Large Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.04960) |\n| Preference Distillation for Personalized Generative Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.05033) |\n| Towards Bridging the Cross-modal Semantic Gap for Multi-modal Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.05420) |\n| Language Models Encode Collaborative Signals in Recommendation | Arxiv 2024 | 
[[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.05441) |\n| A Neural Matrix Decomposition Recommender System Model based on the Multimodal Large Language Model | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.08942) |\n| LLMGR: Large Language Model-based Generative Retrieval in Alipay Search | SIGIR 2024 | [[Link]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3626772.3661364) |\n| Enhancing Sequential Recommenders with Augmented Knowledge from Aligned Large Language Models | SIGIR 2024 | [[Link]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3626772.3657782) |\n| Reinforced Prompt Personalization for Recommendation with Large Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.17115) |\n| Improving Retrieval in Sponsored Search by Leveraging Query Context Signals | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.14346) |\n| Generative Retrieval with Preference Optimization for E-commerce Search | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.19829) |\n| GenRec: Generative Personalized Sequential Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.21191v1) |\n| Breaking the Hourglass Phenomenon of Residual Quantization: Enhancing the Upper Bound of Generative Retrieval | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.21488) |\n| Enhancing Taobao Display Advertising with Multimodal Representations: Challenges, Approaches and Insights | CIKM 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.19467) |\n| Leveraging LLM Reasoning Enhances Personalized Recommender Systems | ACL 2024 |[[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.00802) |\n| Multi-Aspect Reviewed-Item Retrieval via LLM Query Decomposition and Aspect Fusion | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.00878) |\n| Lifelong Personalized Low-Rank Adaptation of Large 
Language Models for Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Fwww.arxiv.org\u002Fabs\u002F2408.03533) |\n| Exploring Query Understanding for Amazon Product Search | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.02215) |\n| A Decoding Acceleration Framework for Industrial Deployable LLM-based Recommender Systems | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.05676) |\n| Prompt Tuning as User Inherent Profile Inference Machine | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.06577) |\n| Beyond Inter-Item Relations: Dynamic Adaptive Mixture-of-Experts for LLM-Based Sequential Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.07427) |\n| Review-driven Personalized Preference Reasoning with Large Language Models for Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.06276) |\n| DaRec: A Disentangled Alignment Framework for Large Language Model and Recommender System | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.08231) |\n| LLM4DSR: Leveraing Large Language Model for Denoising Sequential Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.08208) |\n| EasyRec: Simple yet Effective Language Models for Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.08821) |\n| Collaborative Cross-modal Fusion with Large Language Model for Recommendation | CIKM 2024 | [[Link]](https:\u002F\u002Fwww.arxiv.org\u002Fabs\u002F2408.08564) |\n| Customizing Language Models with Instance-wise LoRA for Sequential Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.10159) |\n| Efficient and Deployable Knowledge Infusion for Open-World Recommendations via Large Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Fwww.arxiv.org\u002Fabs\u002F2408.10520) |\n| CoRA: Collaborative Information Perception by Large 
Language Model's Weights for Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Fwww.arxiv.org\u002Fabs\u002F2408.10645) |\n| GANPrompt: Enhancing Robustness in LLM-Based Recommendations with GAN-Enhanced Diversity Prompts | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.09671) |\n| Harnessing Multimodal Large Language Models for Multimodal Sequential Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.09698) |\n| DLCRec: A Novel Approach for Managing Diversity in LLM-Based Recommender Systems | Arxiv | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.12470) |\n| LARR: Large Language Model Aided Real-time Scene Recommendation with Semantic Understanding | RecSys 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.11523) |\n| SC-Rec: Enhancing Generative Retrieval with Self-Consistent Reranking for Sequential Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.08686) |\n| Are LLM-based Recommenders Already the Best? 
Simple Scaled Cross-entropy Unleashes the Potential of Traditional Sequential Recommenders | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.14238) |\n| HRGraph: Leveraging LLMs for HR Data Knowledge Graphs with Information Propagation-based Job Recommendation | KaLLM 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.13521) |\n| An Extremely Data-efficient and Generative LLM-based Reinforcement Learning Agent for Recommenders | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.16032) |\n| CheatAgent: Attacking LLM-Empowered Recommender Systems via LLM Agent | KDD 2024 | [[Link]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3637528.3671837) |\n| Laser: Parameter-Efficient LLM Bi-Tuning for Sequential Recommendation with Collaborative Information | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.01605) |\n| MARS: Matching Attribute-aware Representations for Text-based Sequential Recommendation | CIKM 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.00702) |\n| End-to-End Learnable Item Tokenization for Generative Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.05546) |\n| Incorporate LLMs with Influential Recommender System | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.04827) |\n| Enhancing Sequential Recommendations through Multi-Perspective Reflections and Iteration | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.06377) |\n| STORE: Streamlining Semantic Tokenization and Generative Recommendation with A Single LLM | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.07276) |\n| Multilingual Prompts in LLM-Based Recommenders: Performance Across Languages | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.07604) |\n| Unleash LLMs Potential for Recommendation by Coordinating Twin-Tower Dynamic Semantic Token Generator | Arxiv 2024 | 
[[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.09253) |\n| Large Language Model Enhanced Hard Sample Identification for Denoising Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Fwww.arxiv.org\u002Fabs\u002F2409.10343) |\n| Chain-of-thought prompting empowered generative user modeling for personalized recommendation | Neural Computing and Applications | [[Link]](https:\u002F\u002Flink.springer.com\u002Farticle\u002F10.1007\u002Fs00521-024-10364-2) |\n| Challenging Fairness: A Comprehensive Exploration of Bias in LLM-Based Recommendations | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.10825) |\n| Decoding Style: Efficient Fine-Tuning of LLMs for Image-Guided Outfit Recommendation with Preference | CIKM 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.12150) |\n| LLM-Powered Text Simulation Attack Against ID-Free Recommender Systems | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.11690) |\n| FLARE: Fusing Language Models and Collaborative Architectures for Recommender Enhancement | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.11699) |\n| Retrieve, Annotate, Evaluate, Repeat: Leveraging Multimodal LLMs for Large-Scale Product Retrieval Evaluation | Arxiv 2024 | [[Link]](http:\u002F\u002Farxiv.org\u002Fabs\u002F2409.11860) |\n| HLLM: Enhancing Sequential Recommendations via Hierarchical Large Language Models for Item and User Modeling | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.12740) |\n| Large Language Model Ranker with Graph Reasoning for Zero-Shot Recommendation | ICANN 2024 | [[Link]](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-031-72344-5_24) |\n| User Knowledge Prompt for Sequential Recommendation | RecSys 2024 | [[Link]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3640457.3691714) |\n| RLRF4Rec: Reinforcement Learning from Recsys Feedback for Enhanced Recommendation Reranking | Arxiv 
2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.05939) |\n| FELLAS: Enhancing Federated Sequential Recommendation with LLM as External Services | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.04927) |\n| TLRec: A Transfer Learning Framework to Enhance Large Language Models for Sequential Recommendation Tasks | RecSys 2024 | [[Link]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3640457.3691710) |\n| SeCor: Aligning Semantic and Collaborative Representations by Large Language Models for Next-Point-of-Interest Recommendations | RecSys 2024 | [[Link]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3640457.3688124) |\n| Efficient Inference for Large Language Model-based Generative Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.05165) |\n| Instructing and Prompting Large Language Models for Explainable Cross-domain Recommendations | RecSys 2024 | [[Link]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3640457.3688137) |\n| ReLand: Integrating Large Language Models' Insights into Industrial Recommenders via a Controllable Reasoning Pool | RecSys 2024 | [[Link]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3640457.3688131) |\n| Inductive Generative Recommendation via Retrieval-based Speculation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.02939) |\n| Constructing and Masking Preference Profile with LLMs for Filtering Discomforting Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.05411) |\n| Towards Scalable Semantic Representation for Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.09560) |\n| Large Language Models as Narrative-Driven Recommenders | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.13604) |\n| The Moral Case for Using Language Model Agents for Recommendation | Arxiv 2024 | 
[[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.12123) |\n| RosePO: Aligning LLM-based Recommenders with Human Values | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.12519) |\n| Comprehending Knowledge Graphs with Large Language Models for Recommender Systems | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.12229) |\n| Triple Modality Fusion: Aligning Visual, Textual, and Graph Data with Large Language Models for Multi-Behavior Recommendations | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.12228) |\n| Improving Pinterest Search Relevance Using Large Language Models | CIKM 2024 Workshop | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.17152) |\n| STAR: A Simple Training-free Approach for Recommendations using Large Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.16458) |\n| End-to-end Training for Recommendation with Language-based User Profiles | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.18870) |\n| Knowledge Graph Enhanced Language Agents for Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.19627) |\n| Collaborative Knowledge Fusion: A Novel Approach for Multi-task Recommender Systems via LLMs | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.20642) |\n| Real-Time Personalization for LLM-based Recommendation with Customized In-Context Learning | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.23136) |\n| ReasoningRec: Bridging Personalized Recommendations and Human-Interpretable Explanations through LLM Reasoning | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.23180) |\n| Beyond Utility: Evaluating LLM as Recommender | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.00331) |\n| Enhancing ID-based Recommendation with Large Language Models | Arxiv 2024 | 
[[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.02041) |\n| LLM4PR: Improving Post-Ranking in Search Engine with Large Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.01178) |\n| Proactive Detection and Calibration of Seasonal Advertisements with Multimodal Large Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.00780) |\n| Enhancing ID-based Recommendation with Large Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.02041) |\n| Transferable Sequential Recommendation via Vector Quantized Meta Learning | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.01785) |\n| Self-Calibrated Listwise Reranking with Large Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.04602) |\n| Enhancing Large Language Model Based Sequential Recommender Systems with Pseudo Labels Reconstruction | ACL Findings 2024 | [[Link]](https:\u002F\u002Faclanthology.org\u002F2024.findings-emnlp.423\u002F) |\n| Unleashing the Power of Large Language Models for Group POI Recommendations | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.13415) |\n| Scaling Laws for Online Advertisement Retrieval | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.13322) |\n| Explainable LLM-driven Multi-dimensional Distillation for E-Commerce Relevance Learning | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.13045) |\n| GOT4Rec: Graph of Thoughts for Sequential Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.14922) |\n| HARec: Hyperbolic Graph-LLM Alignment for Exploration and Exploitation in Recommender Systems | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.13865) |\n| Cross-Domain Recommendation Meets Large Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.19862) |\n| Explainable CTR 
Prediction via LLM Reasoning | WSDM 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.02588) |\n| Enabling Explainable Recommendation in E-commerce with LLM-powered Product Knowledge Graph | IJCAI Workshop 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.01837) |\n| Break the ID-Language Barrier: An Adaption Framework for Sequential Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.18262) |\n| LEADRE: Multi-Faceted Knowledge Enhanced LLM Empowered Display Advertisement Recommender System | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.13789) |\n| Pre-train, Align, and Disentangle: Empowering Sequential Recommendation with Large Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.04107) |\n| ULMRec: User-centric Large Language Model for Sequential Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.05543) |\n| AltFS: Agency-light Feature Selection with Large Language Models in Deep Recommender Systems | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.08516) |\n| MRP-LLM: Multitask Reflective Large Language Models for Privacy-Preserving Next POI Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.07796) |\n| MOPI-HFRS: A Multi-objective Personalized Health-aware Food Recommendation System with LLM-enhanced Interpretation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.08847) |\n| SPRec: Leveraging Self-Play to Debias Preference Alignment for Large Language Model-based Recommendations | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.09243) |\n| RecSys Arena: Pair-wise Recommender System Evaluation with Large Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.11068) |\n| CRS Arena: Crowdsourced Benchmarking of Conversational Recommender Systems | Arxiv 2024 | 
[[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.10514) |\n| Boosting LLM-based Relevance Modeling with Distribution-Aware Robust Learning | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.12504) |\n| LLM is Knowledge Graph Reasoner: LLM's Intuition-aware Knowledge Graph Reasoning for Cold-start Sequential Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.12464) |\n| Bridging the User-side Knowledge Gap in Knowledge-aware Recommendations with Large Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.13544) |\n| Sliding Windows Are Not the End: Exploring Full Ranking with Long-Context Large Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.14574) |\n| ChainRank-DPO: Chain Rank Direct Preference Optimization for LLM Rankers | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.14405) |\n| Are Longer Prompts Always Better? Prompt Selection in Large Language Models for Recommendation Systems | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.14454) |\n| Towards a Unified Paradigm: Integrating Recommendation Systems as a New Language in Large Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.16933) |\n| LLM-Powered User Simulator for Recommender System | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.16984) |\n| Enhancing Item Tokenization for Generative Recommendation through Self-Improvement | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.17171) |\n| Molar: Multimodal LLMs with Collaborative Filtering Alignment for Enhanced Sequential Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.18176) |\n| An Automatic Graph Construction Framework based on Large Language Models for Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.18241) |\n| RecLM: Recommendation 
Instruction Tuning | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.19302) |\n| The Efficiency vs. Accuracy Trade-off: Optimizing RAG-Enhanced LLM Recommender Systems Using Multi-Head Early Exit | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.02173) |\n| Knowledge Graph Retrieval-Augmented Generation for LLM-based Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.02226) |\n| Efficient and Responsible Adaptation of Large Language Models for Robust and Equitable Top-k Recommendations | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.04762) |\n| Collaboration of Large Language Models and Small Recommendation Models for Device-Cloud Recommendation | KDD 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.05647) |\n| Guiding Retrieval using LLM-based Listwise Rankers | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.09186) |\n| Generative Retrieval for Book search | KDD 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.11034) |\n| Full-Stack Optimized Large Language Models for Lifelong Sequential Behavior Comprehension in Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.13344) |\n| Large Language Model driven Policy Exploration for Recommender Systems | WSDM 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.13816) |\n| SampleLLM: Optimizing Tabular Data Synthesis in Recommendations | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.16125) |\n| PatchRec: Multi-Grained Patching for Efficient LLM-based Sequential Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.15087) |\n| Uncertainty Quantification and Decomposition for LLM-based Recommendation | WWW 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.17630) |\n| A Zero-Shot Generalization Framework for LLM-Driven Cross-Domain Sequential Recommendation | Arxiv 
2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.19232) |\n| RankFlow: A Multi-Role Collaborative Reranking Workflow Utilizing Large Language Models | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.00709) |\n| FACTER: Fairness-Aware Conformal Thresholding and Prompt Engineering for Enabling Fair LLM-Based Recommender Systems | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.02966) |\n| Large Language Models Are Universal Recommendation Learners | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.03041) |\n| Intent Representation Learning with Large Language Model for Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.03307) |\n| Boosting Knowledge Graph-based Recommendations through Confidence-Aware Augmentation with Large Language Models | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.03715) |\n| RALLRec: Improving Retrieval Augmented Large Language Model Recommendation with Representation Learning | WWW 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.06101) |\n| Solving the Content Gap in Roblox Game Recommendations: LLM-Based Profile Generation and Reranking | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.06802) |\n| MoLoRec: A Generalizable and Efficient Framework for LLM-Based Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.08271) |\n| Unleashing the Power of Large Language Model for Denoising Recommendation | WWW 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.09058) |\n| Semantic Ads Retrieval at Walmart eCommerce with Language Models Progressively Trained on Multiple Knowledge Domains | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.09089) |\n| Order-agnostic Identifier for Large Language Model-based Generative Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.10833) |\n| 
G-Refer: Graph Retrieval-Augmented Large Language Model for Explainable Recommendation | WWW 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.12586) |\n| LLM4Tag: Automatic Tagging System for Information Retrieval via Large Language Models | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.13481) |\n| Bursting Filter Bubble: Enhancing Serendipity Recommendations with Aligned Large Language Models | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.13539) |\n| ActionPiece: Contextually Tokenizing Action Sequences for Generative Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.13581) |\n| TALKPLAY: Multimodal Music Recommendation with Large Language Models | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.13713) |\n| Enhancing Cross-Domain Recommendations with Memory-Optimized LLM-Based User Agents | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.13843) |\n| Enhancing LLM-Based Recommendations Through Personalized Reasoning | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.13845) |\n| Lost in Sequence: Do Large Language Models Understand Sequential Recommendation? 
| Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.13909) |\n| InstructAgent: Building User Controllable Recommender via LLM Agent | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.14662) |\n| EAGER-LLM: Enhancing Large Language Models as Recommenders through Exogenous Behavior-Semantic Integration | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.14735) |\n| Efficient AI in Practice: Training and Deployment of Efficient LLMs for Industry Applications | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.14305) |\n| Collaborative Retrieval for Large Language Model-based Conversational Recommender Systems | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.14137) |\n| Active Large Language Model-based Knowledge Distillation for Session-based Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.15685) |\n| Training Large Recommendation Models via Graph-Language Token Alignment | WWW 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.18757) |\n| PCL: Prompt-based Continual Learning for User Modeling in Recommender Systems | WWW 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.19628) |\n| FilterLLM: Text-To-Distribution LLM for Billion-Scale Cold-Start Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.16924) |\n| Towards An Efficient LLM Training Paradigm for CTR Prediction | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.01001) |\n| LLMInit: A Free Lunch from Large Language Models for Selective Initialization of Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.01814) |\n| PersonaX: A Recommendation Agent Oriented User Modeling Framework for Long Behavior Sequence | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.02398) |\n| Towards Next-Generation Recommender Systems: A Benchmark for Personalized 
Recommendation Assistant with LLMs | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.09382) |\n| Uncovering Cross-Domain Recommendation Ability of Large Language Models | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.07761) |\n| LLM-Driven Usefulness Labeling for IR Evaluation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.08965) |\n| LREF: A Novel LLM-based Relevance Framework for E-commerce | WWW 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.09223) |\n| Process-Supervised LLM Recommenders via Flow-guided Tuning | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.07377) |\n| Image is All You Need: Towards Efficient and Effective Large Language Model-Based Recommender Systems | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.06238) |\n| Rank-R1: Enhancing Reasoning in LLM-based Document Rerankers via Reinforcement Learning | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.06034) |\n| Federated Cross-Domain Click-Through Rate Prediction With Large Language Model Augmentation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.16875) |\n| BeLightRec: A lightweight recommender system enhanced with BERT | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.20206) |\n| RALLRec+: Retrieval Augmented Large Language Model Recommendation with Reasoning | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.20430) |\n| Alleviating LLM-based Generative Retrieval Hallucination in Alipay Search | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.21098) |\n| RuleAgent: Discovering Rules for Recommendation Denoising with Autonomous Language Agents | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.23374) |\n| CoRanking: Collaborative Ranking with Small and Large Ranking Agents | Arxiv 2025 | 
[[Link]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.23427) |\n| Get the Agents Drunk: Memory Perturbations in Autonomous Agent-based Recommender Systems | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.23804) |\n| Rec-R1: Bridging Generative Large Language Models and User-Centric Recommendation Systems via Reinforcement Learning | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.24289) |\n| LLM-Augmented Graph Neural Recommenders: Integrating User Reviews | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.02195) |\n| Enhancing Embedding Representation Stability in Recommendation Systems with Semantic ID | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.02137) |\n| Retrieval-Augmented Purifier for Robust LLM-Empowered Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.02458) |\n| Pre-training Generative Recommender with Multi-Identifier Item Tokenization | SIGIR 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.04400) |\n| LLM-Alignment Live-Streaming Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.05217) |\n| Decoding Recommendation Behaviors of In-Context Learning LLMs Through Gradient Descent | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.04386) |\n| Automating Personalization: Prompt Optimization for Recommendation Reranking | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.03965) |\n| IterQR: An Iterative Framework for LLM-based Query Rewrite in e-Commercial Search System | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.05309) |\n| Multimodal Quantitative Language for Generative Recommendation | ICLR 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.05314) |\n| Coherency Improved Explainable Recommendation via Large Language Model | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.05315) |\n| 
VALUE: Value-Aware Large Language Model for Query Rewriting via Weighted Trie in Sponsored Search | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.05321) |\n| Large Language Models Enhanced Hyperbolic Space Recommender Systems | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.05694) |\n| Unified Generative Search and Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.05730) |\n| Retrieval Augmented Generation with Collaborative Filtering for Personalized Text Generation | SIGIR 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.05731) |\n| StealthRank: LLM Ranking Manipulation via Stealthy Prompt Optimization | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.05804) |\n| PathGPT: Leveraging Large Language Models for Personalized Route Generation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.05846) |\n| LLM4Ranking: An Easy-to-use Framework of Utilizing Large Language Models for Document Reranking | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.07439) |\n| How Good Are Large Language Models for Course Recommendation in MOOCs? 
| Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.08208) |\n| Large Language Model Empowered Recommendation Meets All-domain Continual Pre-Training | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.08949) |\n| Enhancing LLM-based Recommendation through Semantic-Aligned Collaborative Knowledge | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.10107) |\n| HistLLM: A Unified Framework for LLM-Based Multimodal Recommendation with User History Encoding and Compression | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.10150) |\n| CROSSAN: Towards Efficient and Effective Adaptation of Multiple Multimodal Foundation Models for Sequential Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.10307) |\n| PinRec: Outcome-Conditioned, Multi-Token Generative Retrieval for Industry-Scale Recommendation Systems | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.10507) |\n| Distilling Transitional Pattern to Large Language Models for Multimodal Session-based Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.10538) |\n| Multi-Modal Hypergraph Enhanced LLM Learning for Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.10541) |\n| Rethinking LLM-Based Recommendations: A Query Generation-Based, Training-Free Approach | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.11889) |\n| Generative Recommendation with Continuous-Token Diffusion | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.12007) |\n| From Reviews to Dialogues: Active Synthesis for Zero-Shot LLM-based Conversational Recommender System | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.15476) |\n| Killing Two Birds with One Stone: Unifying Retrieval and Ranking with a Single Generative Recommendation Model | SIGIR 2025 | 
[[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.16454) |\n| Bridge the Domains: Large Language Models Enhanced Cross-domain Sequential Recommendation | SIGIR 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.18383) |\n| Search-Based Interaction For Conversation Recommendation via Generative Reward Model Based Simulated User | SIGIR 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.20458) |\n| Preserving Privacy and Utility in LLM-Based Product Recommendations | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.00951) |\n| Multi-agents based User Values Mining for Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.00981) |\n| Who You Are Matters: Bridging Topics and Social Roles via LLM-Enhanced Logical Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.10940) |\n| Explain What You Mean: Intent Augmented Knowledge Graph Recommender Built With LLM | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.10900) |\n| ThinkRec: Thinking-based recommendation via LLM | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.15091) |\n| DeepRec: Towards a Deep Dive Into the Item Space with Large Language Model Based Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.16810) |\n| Bridging the Gap: Self-Optimized Fine-Tuning for LLM-based Recommender Systems | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.20771) |\n| What LLMs Miss in Recommendations: Bridging the Gap with Retrieval-Augmented Collaborative Signals | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.20730) |\n| Reinforced Latent Reasoning for LLM-based Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.19092) |\n| AgentRecBench: Benchmarking LLM Agent-based Personalized Recommender Systems | Arxiv 2025 | 
[[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.19623) |\n| Reason-to-Recommend: Using Interaction-of-Thought Reasoning to Enhance LLM Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2506.05069) |\n| Generating Long Semantic IDs in Parallel for Recommendation | KDD 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2506.05781) |\n| RecGPT: A Foundation Model for Sequential Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2506.06270) |\n| Serendipitous Recommendation with Multimodal LLM | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2506.08283) |\n| Research on E-Commerce Long-Tail Product Recommendation Mechanism Based on Large-Scale Language Models | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2506.06316) |\n| LettinGo: Explore User Profile Generation for Recommendation System | Arxiv 2025| [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2506.18309) |\n| CORONA: A Coarse-to-Fine Framework for Graph-based Recommendation with Large Language Models | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2506.17281) |\n| CoVE: Compressed Vocabulary Expansion Makes Better LLM-based Recommender Systems | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2506.19993) |\n| LLM2Rec: Large Language Models Are Powerful Embedding Models for Sequential Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2506.21579) |\n| Reinforcement Fine-Tuned Large Language Models for Next POI Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2506.21599) |\n| IRanker: Towards Ranking Foundation Model | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2506.21638) |\n| LLM2Rec: Large Language Models Are Powerful Embedding Models for Sequential Recommendation | KDD 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2506.21579) |\n| FindRec: Stein-Guided Entropic Flow for 
Multi-Modal Sequential Recommendation | KDD 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.04651) |\n| Heterogeneous User Modeling for LLM-based Recommendation | RecSys 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.04626) |\n| BiFair: A Fairness-aware Training Framework for LLM-enhanced Recommender Systems via Bi-level Optimization | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.04294) |\n| CTR-Guided Generative Query Suggestion in Conversational Search | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.04072) |\n| Boosting Parameter Efficiency in LLM-Based Recommendation through Sophisticated Pruning | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.07064) |\n| A Language-Driven Framework for Improving Personalized Recommendations: Merging LLMs with Traditional Algorithms | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.07251) |\n| LLM-Driven Dual-Level Multi-Interest Modeling for Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.10917) |\n| Revisiting Prompt Engineering: A Comprehensive Evaluation for LLM-based Personalized Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.13525) |\n| R4ec: A Reasoning, Reflection, and Refinement Framework for Recommendation Systems | RecSys 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.17249) |\n| Exploring the Potential of LLMs for Serendipity Evaluation in Recommender Systems | RecSys 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.17290) |\n| Improving the Performance of Sequential Recommendation Systems with an Extended Large Language Model | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.19990) |\n| Integrating LLM-Derived Multi-Semantic Intent into Graph Model for Session-based Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.20147) |\n| A 
Comprehensive Review on Harnessing Large Language Models to Overcome Recommender System Challenges | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.21117) |\n| End-to-End Personalization: Unifying Recommender Systems with Large Language Models | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2508.01514) |\n| Temporal User Profiling with LLMs: Balancing Short-Term and Long-Term Preferences for Recommendations | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2508.08454) |\n| LLM-Based Intelligent Agents for Music Recommendation: A Comparison with Classical Content-Based Filtering | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2508.11671) |\n| AdaptJobRec: Enhancing Conversational Career Recommendation through an LLM-Powered Agentic System | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2508.13423) |\n| LLM-Enhanced Linear Autoencoders for Recommendation | CIKM 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2508.13500) |\n| TrackRec: Iterative Alternating Feedback with Chain-of-Thought via Preference Alignment for Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2508.15388) |\n| MMQ: Multimodal Mixture-of-Quantization Tokenization for Semantic ID Generation and User Behavioral Adaptation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2508.15281) |\n| MLLMRec: Exploring the Potential of Multimodal Large Language Models in Recommender Systems | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2508.15304) |\n| Membership Inference Attacks on LLM-based Recommender Systems | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2508.18665) |\n| Revealing Potential Biases in LLM-Based Recommender Systems in the Cold Start Setting | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.20401) |\n| Efficient Item ID Generation for Large-Scale LLM-based Recommendation | Arxiv 2025 | 
[[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2509.03746) |\n| Knowledge-Augmented Relation Learning for Complementary Recommendation with Large Language Models | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2509.05564) |\n| Decoding in Latent Spaces for Efficient Inference in LLM-based Recommendation | EMNLP 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2509.11524) |\n| Learning Decomposed Contextual Token Representations from Pretrained and Collaborative Signals for Generative Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2509.10468) |\n| LLM4Rec: Large Language Models for Multimodal Generative Recommendation with Causal Debiasing | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2510.01622) |\n| Empowering Denoising Sequential Recommendation with Large Language Model Embeddings | CIKM 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2510.04239) |\n| GRACE: Generative Representation Learning via Contrastive Policy Optimization | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2510.04506) |\n\n\u003C\u002Fp>\n\u003C\u002Fdetails>\n\n## 2. 
Datasets & Benchmarks\n\nThe datasets & benchmarks for LLM-related RS topics should maintain the original semantic\u002Ftextual features, instead of anonymous feature IDs.\n\n### 2.1 Datasets\n\n| **Dataset** | **RS Scenario** | **Link** |\n|:---:|:---:|:---:|\n| RecSysLLMsP | Social Networks | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.00055) |\n| AmazonQAC | Query Autocomplete | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.04129) |\n| NineRec | 9 Domains | [[Link]](https:\u002F\u002Fgithub.com\u002Fwestlake-repl\u002FNineRec) |\n| MicroLens | Video Streaming | [[Link]](https:\u002F\u002Fgithub.com\u002Fwestlake-repl\u002FMicroLens?tab=readme-ov-file) |\n| Amazon-Review 2023 | E-commerce | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.03952) |\n| Reddit-Movie | Conversational & Movie | [[Link]](https:\u002F\u002Fgithub.com\u002FAaronHeee\u002FLLMs-as-Zero-Shot-Conversational-RecSys#large-language-models-as-zero-shot-conversational-recommenders) |\n| Amazon-M2 | E-commerce | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.09688) |\n| MovieLens | Movie | [[Link]](https:\u002F\u002Fgrouplens.org\u002Fdatasets\u002Fmovielens\u002F1m\u002F) |\n| Amazon | E-commerce | [[Link]](https:\u002F\u002Fcseweb.ucsd.edu\u002F~jmcauley\u002Fdatasets.html#amazon_reviews) |\n| BookCrossing | Book | [[Link]](http:\u002F\u002Fwww2.informatik.uni-freiburg.de\u002F~cziegler\u002FBX\u002F) |\n| GoodReads | Book | [[Link]](https:\u002F\u002Fmengtingwan.github.io\u002Fdata\u002Fgoodreads.html) |\n| Anime | Anime | [[Link]](https:\u002F\u002Fwww.kaggle.com\u002Fdatasets\u002FCooperUnion\u002Fanime-recommendations-database) |\n| PixelRec | Short Video | [[Link]](https:\u002F\u002Fgithub.com\u002Fwestlake-repl\u002FPixelRec) |\n| Netflix | Movie | [[Link]](https:\u002F\u002Fgithub.com\u002FHKUDS\u002FLLMRec) |\n    \n### 2.2 Benchmarks\n\n| **Benchmarks** | **Website Link** | **Paper** |\n|:---:|:---:|:---:|\n| InfoDeepSeek | 
[[Paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.15872) |\n| RecBench | [[Paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.05493) |\n| RecBench+ | [[Paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.09382) |\n| Shopping MMLU | [[Paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.20745) |\n| Amazon-M2 (KDD Cup 2023) | [[Link]](https:\u002F\u002Fwww.aicrowd.com\u002Fchallenges\u002Famazon-kdd-cup-23-multilingual-recommendation-challenge) | [[Paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.09688) |\n| LLMRec | [[Link]](https:\u002F\u002Fgithub.com\u002Fwilliamliujl\u002FLLMRec) | [[Paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.12241) |\n| OpenP5 | [[Link]](https:\u002F\u002Fgithub.com\u002Fagiresearch\u002FOpenP5) | [[Paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.11134) |\n| TABLET | [[Link]](https:\u002F\u002Fdylanslacks.website\u002FTablet) | [[Paper]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.13188) |\n\n## 3. Related Repositories\n\n| **Repo Name** | **Maintainer** |\n|:---:|:---:|\n| [rs-llm-paper-list](https:\u002F\u002Fgithub.com\u002Fwwliu555\u002Frs-llm-paper-list) | [wwliu555](https:\u002F\u002Fgithub.com\u002Fwwliu555) |\n| [awesome-recommend-system-pretraining-papers](https:\u002F\u002Fgithub.com\u002Farchersama\u002Fawesome-recommend-system-pretraining-papers) | [archersama](https:\u002F\u002Fgithub.com\u002Farchersama) |\n| [LLM4Rec](https:\u002F\u002Fgithub.com\u002FWLiK\u002FLLM4Rec) | [WLiK](https:\u002F\u002Fgithub.com\u002FWLiK) |\n| [Awesome-LLM4RS-Papers](https:\u002F\u002Fgithub.com\u002Fnancheng58\u002FAwesome-LLM4RS-Papers) | [nancheng58](https:\u002F\u002Fgithub.com\u002Fnancheng58) |\n| [LLM4IR-Survey](https:\u002F\u002Fgithub.com\u002FRUC-NLPIR\u002FLLM4IR-Survey) | [RUC-NLPIR](https:\u002F\u002Fgithub.com\u002FRUC-NLPIR) |\n\n## Contributing\n👍 Welcome to contribute to this repository.\n\nIf you have come across relevant resources or found some errors in this repository, feel free to 
open an issue or submit a pull request.\n\n**Contact**: chiangel [DOT] ljh [AT] gmail [DOT] com\n\n## Citation\n\n```\n@article{10.1145\u002F3678004,\nauthor = {Lin, Jianghao and Dai, Xinyi and Xi, Yunjia and Liu, Weiwen and Chen, Bo and Zhang, Hao and Liu, Yong and Wu, Chuhan and Li, Xiangyang and Zhu, Chenxu and Guo, Huifeng and Yu, Yong and Tang, Ruiming and Zhang, Weinan},\ntitle = {How Can Recommender Systems Benefit from Large Language Models: A Survey},\nyear = {2024},\npublisher = {Association for Computing Machinery},\naddress = {New York, NY, USA},\nissn = {1046-8188},\nurl = {https:\u002F\u002Fdoi.org\u002F10.1145\u002F3678004},\ndoi = {10.1145\u002F3678004},\njournal = {ACM Trans. Inf. Syst.},\nmonth = {jul}\n}\n```\n","# 用于推荐系统的优秀大语言模型资源库 [![Awesome](https:\u002F\u002Fawesome.re\u002Fbadge.svg)](https:\u002F\u002Fawesome.re)\n\n这是一份关于大语言模型（LLM）相关推荐系统主题的优秀论文和资源合集。\n\n:tada: 我们的综述论文已被**_ACM信息管理系统事务（TOIS）_**接收：[推荐系统如何从大语言模型中获益：综述](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3678004)\n\n:bell: 自从我们的综述论文被归档后，我们将在“1.7 最新研究工作列表”中更新最新的研究成果。\n\n:grin: 我还在微信上每周撰写关于最新LLM增强型推荐系统的论文笔记。欢迎通过扫描[二维码](https:\u002F\u002Fgithub.com\u002FCHIANGEL\u002FAwesome-LLM-for-RecSys\u002Fblob\u002Fmain\u002Fwechat_for_paper_notes.jpeg)关注。\n\n:rocket: **2024年7月9日 - 论文v6发布**：我们为TOIS准备的最终定稿版本。\n\n\u003Cdetails>\u003Csummary>\u003Cb>综述论文更新日志\u003C\u002Fb>\u003C\u002Fsummary>\n\n\u003Cp>\n\u003Cul>\n  \u003Cli>\u003Cb>2024年7月9日 - 论文v6发布\u003C\u002Fb>：我们为TOIS准备的最终定稿版本，该版本将被归档。\u003C\u002Fli>\n  \u003Cli>\u003Cb>2024年2月5日 - 论文v5发布\u003C\u002Fb>：全新发布，包含27页主体内容及更详尽的分类体系。\u003C\u002Fli>\n  \u003Cli>\u003Cb>2023年6月29日 - 论文v4发布\u003C\u002Fb>：新增7篇论文。\u003C\u002Fli>\n  \u003Cli>\u003Cb>2023年6月28日 - 论文v3发布\u003C\u002Fb>：修正了错别字。\u003C\u002Fli>\n  \u003Cli>\u003Cb>2023年6月12日 - 论文v2发布\u003C\u002Fb>：在附录中添加了摘要表格。\u003C\u002Fli>\n  \u003Cli>\u003Cb>2023年6月9日 - 论文v1发布\u003C\u002Fb>：初始版本。\u003C\u002Fli>\n\u003C\u002Ful>\n\u003C\u002Fp>\n\n\u003C\u002Fdetails>\n\n## 1. 
论文\n\n我们根据LLM将在推荐系统流程中的哪个环节被应用来进行论文分类，如下图所示。\n\n\u003Cimg width=\"650\" src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FCHIANGEL_Awesome-LLM-for-RecSys_readme_18b6a766ec7e.png\">\n\n\u003Cdetails>\u003Csummary>\u003Cb>1.1 LLM用于特征工程\u003C\u002Fb>\u003C\u002Fsummary>\n\u003Cp>\n\n\u003Cb>1.1.1 用户与物品级特征增强\u003C\u002Fb>\n\n| **名称** | **论文** | **LLM主干网络（最大规模）** | **LLM微调策略** | **发表时间** | **链接** |\n|:---:|:---|:---:|:---:|:---:|:---:|\n| LLM4KGC | 知识图谱补全模型是少样本学习者：利用大语言模型进行电商关系标注的实证研究 | PaLM (540B)\u002F ChatGPT | 冻结参数 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.09858v1) |\n| TagGPT | TagGPT：大语言模型是零样本多模态标签生成器 | ChatGPT | 冻结参数 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.03022v1) |\n| ICPC | 大语言模型用于用户兴趣旅程建模 | LaMDA (137B) | 全量微调\u002F提示词微调 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.15498) |\n| KAR | 基于大语言模型知识增强的开放世界推荐 | ChatGPT | 冻结参数 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.10933) |\n| PIE | 利用ChatGPT进行产品信息提取 | ChatGPT | 冻结参数 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.14921) |\n| LGIR | 基于LLM的生成对抗网络提升职位推荐效果 | GhatGLM (6B) | 冻结参数 | AAAI 2024 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.10747) |\n| GIRL | 基于大语言模型的生成式职位推荐 | BELLE (7B) | 全量微调 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.02157) |\n| LLM-Rec | LLM-Rec：通过提示词引导大语言模型实现个性化推荐 | text-davinci-003 | 冻结参数 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.15780) |\n| HKFR | 异构知识融合：一种基于LLM的个性化推荐新方法 | ChatGPT | 冻结参数 | RecSys 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.03333) |\n| LLaMA-E | LLaMA-E：通过多方面指令遵循赋能电商内容创作 | LLaMA (30B) | LoRA | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.04913) |\n| EcomGPT | EcomGPT：针对电商场景对大语言模型进行任务链式指令微调 | BLOOMZ (7.1B) | 全量微调 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.06966) |\n| TF-DCon | 利用大语言模型（LLM）赋能无训练数据集浓缩技术，用于基于内容的推荐 | ChatGPT | 
冻结参数 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.09874) |\n| RLMRec | 基于大语言模型的推荐表示学习 | ChatGPT | 冻结参数 | WWW 2024 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.15950) |\n| LLMRec | LLMRec：结合图增强的大语言模型用于推荐 | ChatGPT | 冻结参数 | WSDM 2024 | [[链接]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.00423.pdf) |\n| LLMRG | 利用大语言模型推理图提升推荐系统性能 | GPT4 | 冻结参数 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.10835) |\n| CUP | 基于评论文本生成简洁用户画像以进行推荐 | ChatGPT | 冻结参数 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.01314) |\n| SINGLE | 利用大语言模型建模用户浏览路径以进行文章推荐 | ChatGPT | 冻结参数 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.07619) |\n| SAGCN | 先理解再推荐：利用大语言模型进行语义层面的评论挖掘 | Vicuna (13B) | 冻结参数 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.16275) |\n| UEM | 面向个性化语言提示的用户嵌入模型 | FLAN-T5-base (250M) | 全量微调 | Arxiv 2024 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.04858) |\n| LLMHG | 基于LLM指导的人本解释性推荐多视角超图学习 | GPT4 | 冻结参数 | Arxiv 2024 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.08217) |\n| Llama4Rec | 通过相互增强与适应性聚合将大语言模型融入推荐系统 | LLaMA2 (7B) | 全量微调 | Arxiv 2024 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.13870) |\n| LLM4Vis | LLM4Vis：利用ChatGPT进行可解释的可视化推荐 | ChatGPT | 冻结参数 | EMNLP 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.07652) |\n| LoRec | LoRec：鲁棒的大语言模型序列推荐，抵御中毒攻击 | LLaMA2 | 冻结参数 | SIGIR 2024 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.17723) |\n\n\u003Cb>1.1.2 实例级样本生成\u003C\u002Fb>\n\n| **名称** | **论文** | **最大规模的LLM主干模型** | **LLM微调策略** | **发表时间** | **链接** |\n|:---:|:---|:---:|:---:|:---:|:---:|\n| RecInter | 超越静态测试平台：面向动态推荐系统的交互中心型智能体仿真平台 | GPT-4o | 冻结 | EMNLP 2025 | [[链接]](https:\u002F\u002Faclanthology.org\u002F2025.emnlp-main.956) |\n| GReaT | 语言模型是真实的表格数据生成器 | GPT2-medium (355M) | 全量微调 | ICLR 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.06280) |\n| ONCE | ONCE：利用开源与闭源大型语言模型提升基于内容的推荐效果 | ChatGPT | 冻结 
| WSDM 2024 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.06566) |\n| AnyPredict | AnyPredict：用于表格数据预测的基础模型 | ChatGPT | 冻结 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.12081) |\n| DPLLM | 基于差分隐私大型语言模型进行合成查询生成的隐私保护推荐系统 | T5-XL (3B) | 全量微调 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.05973) |\n| MINT | 大型语言模型增强的叙事驱动型推荐 | text-davinci-003 | 冻结 | RecSys 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.02250) |\n| Agent4Rec | 推荐中的生成式智能体研究 | ChatGPT | 冻结 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.10108) |\n| RecPrompt | RecPrompt：基于大型语言模型的新闻推荐提示词调优框架 | GPT4 | 冻结 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.10463) |\n| PO4ISR | 面向意图驱动会话推荐的大语言模型 | ChatGPT | 冻结 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.07552) |\n| BEQUE | 淘宝搜索中基于大型语言模型的长尾查询改写 | ChatGLM (6B) | FFT | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.03758) |\n| Agent4Ranking | Agent4Ranking：通过多智能体LLM进行个性化查询改写实现语义鲁棒排序 | ChatGPT | 冻结 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.15450) |\n| PopNudge | 通过偏差分析和语言模型增强的数据增广改进对话式推荐系统 | ChatGPT | 冻结 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.16738) |\n\n\u003C\u002Fp>\n\u003C\u002Fdetails>\n\n\u003Cdetails>\u003Csummary>\u003Cb>1.2 LLM作为特征编码器\u003C\u002Fb>\u003C\u002Fsummary>\n\u003Cp>\n\n\u003Cb>1.2.1 表征增强\u003C\u002Fb>\n\n| **名称** | **论文** | **最大规模的LLM主干模型** | **LLM微调策略** | **发表时间** | **链接** |\n|:---:|:---|:---:|:---:|:---:|:---:|\n| U-BERT | U-BERT：用于改进推荐的用户表征预训练 | BERT-base (110M) | 全量微调 | AAAI 2021 | [[链接]](https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F16557) |\n| UNBERT | UNBERT：新闻推荐中的用户-新闻匹配BERT | BERT-base (110M) | 全量微调 | IJCAI 2021 | [[链接]](https:\u002F\u002Fwww.ijcai.org\u002Fproceedings\u002F2021\u002F462) |\n| PLM-NR | 利用预训练语言模型增强新闻推荐 | RoBERTa-base (125M) | 全量微调 | SIGIR 2021 | 
[[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.07413) |\n| Pyramid-ERNIE | 基于预训练语言模型的百度搜索排序 | ERNIE (110M) | 全量微调 | KDD 2021 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2105.11108) |\n| ERNIE-RS | 百度搜索中面向网络规模检索的预训练语言模型 | ERNIE (110M) | 全量微调 | KDD 2021 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.03373) |\n| CTR-BERT | CTR-BERT：针对十亿参数教师模型的成本效益知识蒸馏 | 定制BERT (1.5B) | 全量微调 | ENLSP 2021 | [[链接]](https:\u002F\u002Fneurips2021-nlp.github.io\u002Fpapers\u002F20\u002FCameraReady\u002Fcamera_ready_final.pdf) |\n| SuKD | 在赞助搜索中为CTR预测学习补充性NLP特征 | RoBERTa-large (355M) | 全量微调 | KDD 2022 | [[链接]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3534678.3539064) |\n| PREC | 通过即插即用的预训练器提升深度CTR预测以用于新闻推荐 | BERT-base (110M) | 全量微调 | COLING 2022 | [[链接]](https:\u002F\u002Faclanthology.org\u002F2022.coling-1.249\u002F) |\n| MM-Rec | MM-Rec：视觉语言模型赋能的多模态新闻推荐 | BERT-base (110M) | 全量微调 | SIGIR 2022 | [[链接]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3477495.3531896) |\n| Tiny-NewsRec | Tiny-NewsRec：高效且有效的基于PLM的新闻推荐 | UniLMv2-base (110M) | 全量微调 | EMNLP 2022 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.00944) |\n| PLM4Tag | PTM4Tag：利用预训练模型强化Stack Overflow帖子的标签推荐 | CodeBERT (125M) | 全量微调 | ICPC 2022 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.10965) |\n| TwHIN-BERT | TwHIN-BERT：一种社交增强型多语言推文表征预训练语言模型 | BERT-base (110M) | 全量微调 | Arxiv 2022 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2209.07562) |\n| LSH | 使用BERT和查询感知LSH改进非正式文档上的代码示例推荐：一项比较研究 | BERT-base (110M) | 全量微调 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.03017v1) |\n| LLM2BERT4Rec | 利用大型语言模型进行序列推荐 | text-embedding-ada-002 | 冻结 | RecSys 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.09261) |\n| LLM4ARec | 针对推荐中的个性化方面提取对大型语言模型进行提示调优 | GPT2 (110M) | 提示调优 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.01475) |\n| TIGER | 具有生成式检索的推荐系统 | Sentence-T5-base (223M) | 冻结 | NIPS 2023 | 
[[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.05065) |\n| TBIN | TBIN：为CTR预测建模长文本行为数据 | BERT-base (110M) | 冻结 | DLP-RecSys 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.08483) |\n| LKPNR | LKPNR：用于个性化新闻推荐框架的LLM和KG | LLaMA2 (7B) | 冻结 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.12028) |\n| SSNA | 朝着高效有效地适应大型语言模型以用于序列推荐的方向发展 | DistilRoBERTa-base (83M) | 分层适配器调优 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.01612) |\n| CollabContext | 协作情境化：弥合协同过滤与预训练语言模型之间的鸿沟 | Instructor-XL (1.5B) | 冻结 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.09400) |\n| LMIndexer | 语言模型作为语义索引器 | T5-base (223M) | 全量微调 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.07815) |\n| Stack | 基于BERT的集成方法用于客户评论的情感分类及其在电子商务助推营销中的应用 | BERT-base (110M) | 冻结 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.10782) |\n| 无 | 利用语言模型进行旅游行程推荐 | BERT-base (110M) | 全量微调 | PMAI@IJCAI 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.12355) |\n| UEM | 用于个性化语言提示的用户嵌入模型 | Sentence-T5-base (223M) | 冻结 | Arxiv 2024 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.04858) |\n| Social-LLM | Social-LLM：利用语言模型和社交网络数据大规模建模用户行为 | SBERT-MPNet-base (110M) | 冻结 | Arxiv 2024 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.00893) |\n| LLMRS | LLMRS：解锁基于LLM的软件购买推荐系统的潜力 | MPNet (110M) | 冻结 | Arxiv 2024 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.06676) |\n| KERL | 知识图谱和预训练语言模型增强的对话式推荐系统表征学习 | BERT-mini | 冻结 | TNNLS | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.10967) |\n| 无 | 利用大型语言模型赋能少样本推荐系统——增强表征 | ChatGPT | 冻结 | IEEE Access | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.13557) |\n| 无 | 使用语义ID实现更好的泛化：以推荐排序为例 | 未知 | 冻结 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.08121) |\n\n\u003Cb>1.2.2 统一跨领域推荐\u003C\u002Fb>\n\n| **名称** | **论文** | **最大规模的LLM主干模型** | **LLM微调策略** | **发表时间** | **链接** 
|\n|:---:|:---|:---:|:---:|:---:|:---:|\n| ZESRec | 零样本推荐系统 | BERT-base (1.1亿) | 冻结 | Arxiv 2021 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2105.08318) |\n| UniSRec | 向通用序列表示学习迈进：用于推荐系统的BERT-base模型 | BERT-base (1.1亿) | 冻结 | KDD 2022 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.05941) |\n| TransRec | TransRec：从多模态反馈中学习可迁移的推荐模型 | BERT-base (1.1亿) | 全量微调 | Arxiv 2022 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.06190) |\n| VQ-Rec | 学习向量量化物品表示，用于可迁移的序列推荐系统 | BERT-base (1.1亿) | 冻结 | WWW 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.12316) |\n| IDRec vs MoRec | 推荐系统下一步该走向何方？再探基于ID与基于模态的推荐模型 | BERT-base (1.1亿) | 全量微调 | SIGIR 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.13835) |\n| TransRec | 探索基于适配器的迁移学习在推荐系统中的应用：实证研究与实践启示 | RoBERTa-base (1.25亿) | 分层适配器微调 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.15036) |\n| TCF | 利用大型语言模型探索基于文本的协同过滤上限：发现与见解 | OPT-175B (1750亿) | 冻结\u002F全量微调 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.11700) |\n| S&R Foundation | 面向冷启动场景的统一搜索与推荐基础模型 | ChatGLM (60亿) | 冻结 | CIKM 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.08939) |\n| MISSRec | MISSRec：为推荐系统预训练并迁移多模态兴趣感知序列表示 | CLIP-B\u002F32 (4亿) | 全量微调 | MM 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.11175) |\n| UFIN | UFIN：用于多领域点击率预测的通用特征交互网络 | FLAN-T5-base (2.5亿) | 冻结 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.15493) |\n| PMMRec | 对于可迁移的推荐系统来说，多模态就是全部所需 | RoBERTa-large (3.55亿) | 仅对顶层两层进行微调 | ICDE 2024 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.09602) |\n| Uni-CTR | 基于大型语言模型的多领域点击率预测统一框架 | Sheared-LLaMA (13亿) | LoRA | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.10743) |\n| PCDR | 用于跨领域推荐的提示增强联邦内容表示学习 | BERT-base (1.1亿) | 冻结 | WWW 2024 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.14678) |\n\n\u003C\u002Fp>\n\u003C\u002Fdetails>\n\n\u003Cdetails>\u003Csummary>\u003Cb>1.3 
LLM作为评分\u002F排序函数\u003C\u002Fb>\u003C\u002Fsummary>\n\u003Cp>\n\n\u003Cb>1.3.1 物品评分任务\u003C\u002Fb>\n\n| **名称** | **论文** | **最大规模的LLM主干模型** | **LLM微调策略** | **发表时间** | **链接** |\n|:---:|:---|:---:|:---:|:---:|:---:|\n| LMRecSys | 语言模型作为推荐系统：评估与局限性 | GPT2-XL (1.5B) | 全量微调 | ICBINB 2021 | [[链接]](https:\u002F\u002Fopenreview.net\u002Fforum?id=hFx3fY7-m9b) |\n| PTab | PTab：利用预训练语言模型建模表格数据 | BERT-base (110M) | 全量微调 | Arxiv 2022 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2209.08060) |\n| UniTRec | UniTRec：统一的文本到文本Transformer及联合对比学习框架用于基于文本的推荐 | BART (406M) | 全量微调 | ACL 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.15756) |\n| Prompt4NR | 面向新闻推荐的提示学习 | BERT-base (110M) | 全量微调 | SIGIR 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.05263) |\n| RecFormer | 文本即一切：为序列化推荐学习语言表征 | LongFormer (149M) | 全量微调 | KDD 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.13731v1) |\n| TabLLM | TabLLM：利用大型语言模型进行少样本表格数据分类 | T0 (11B) | 少样本参数高效微调 | AISTATS 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.10723) |\n| Zero-shot GPT | 零样本推荐作为语言建模 | GPT2-medium (355M) | 冻结 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.04184) |\n| FLAN-T5 | 大型语言模型能理解用户偏好吗？在用户评分预测任务上评估大型语言模型 | FLAN-5-XXL (11B) | 全量微调 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2305.06474.pdf) |\n| BookGPT | BookGPT：由大型语言模型赋能的通用图书推荐框架 | ChatGPT | 冻结 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.15673v1) |\n| TALLRec | TALLRec：一种有效且高效的微调框架，用于使大型语言模型与推荐任务对齐 | LLaMA (7B) | LoRA | RecSys 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.00447) |\n| PBNR | PBNR：基于提示的新闻推荐系统 | T5-small (60M) | 全量微调 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.07862) |\n| CR-SoRec | CR-SoRec：BERT驱动的社会化推荐一致性正则化 | BERT-base (110M) | 全量微调 | RecSys 2023 | [[链接]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002FfullHtml\u002F10.1145\u002F3604915.3608844) |\n| PromptRec | 基于提示的个性化冷启动推荐 | LLaMA (7B) | 冻结 | Arxiv 
2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.17256) |\n| GLRec | 探索大型语言模型在在线职位推荐中理解图数据的能力 | BELLE-LLaMA (7B) | LoRA | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.05722) |\n| BERT4CTR | BERT4CTR：一种将预训练语言模型与非文本特征结合以进行CTR预测的有效框架 | RoBERTa-large (355M) | 全量微调 | KDD 2023 | [[链接]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3580305.3599780) |\n| ReLLa | ReLLa：检索增强型大型语言模型用于推荐中的终身序列行为理解 | Vicuna (13B) | LoRA | WWW 2024 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.11131) |\n| TASTE | 文本匹配通过减少流行度偏差提升序列化推荐效果 | T5-base (223M) | 全量微调 | CIKM 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.14029) |\n| N\u002FA | 揭示基于文本的推荐系统中的挑战性案例 | BERT-base (110M) | 全量微调 | RecSys研讨会2023 | [[链接]](https:\u002F\u002Fceur-ws.org\u002FVol-3476\u002Fpaper5.pdf) |\n| ClickPrompt | ClickPrompt：CTR模型是强大的提示生成器，可用于将语言模型适配到CTR预测任务 | RoBERTa-large (355M) | 全量微调 | WWW 2024 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.09234) |\n| SetwiseRank | 一种基于集合的零样本排序方法，使用大型语言模型实现高效且高效率的排序 | FLAN-T5-XXL (11B) | 冻结 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.09497) |\n| UPSR | 彻底建模多领域预训练推荐作为语言 | T5-base (223M) | 全量微调 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.13540) |\n| LLM-Rec | 一个模型通吃：大型语言模型是领域无关的推荐系统 | OPT (6.7B) | LoRA | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.14304) |\n| LLMRanker | 超越“是”与“否”：通过打细粒度的相关性标签来改进零样本大型语言模型排名器 | FLAN PaLM2 S | 冻结 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.14122) |\n| CoLLM | CoLLM：将协同嵌入整合到大型语言模型中用于推荐 | Vicuna (7B) | LoRA | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.19488) |\n| FLIP | FLIP：迈向基于ID的模型与预训练语言模型之间针对CTR预测的细粒度对齐 | RoBERTa-large (355M) | 全量微调 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.19453) |\n| BTRec | BTRec：基于BERT的轨迹推荐用于个性化旅游 | BERT-base (110M) | 全量微调 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.19886) |\n| 
CLLM4Rec | 协同大型语言模型用于推荐系统 | GPT2 (110M) | 全量微调 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.01343) |\n| CUP | 基于评论文本的简洁用户画像进行推荐 | BERT-base (110M) | 最后一层微调 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.01314) |\n| N\u002FA | 指令蒸馏使大型语言模型成为高效的零样本排名器 | FLAN-T5-XL (3B) | 全量微调 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.01555) |\n| CoWPiRec | 基于词的预训练物品表征用于可迁移的推荐 | BERT-base (110M) | 全量微调 | ICDM 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.10501) |\n| RecExplainer | RecExplainer：对齐大型语言模型以提高推荐模型的可解释性 | Vicuna-v1.3 (7B) | LoRA | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.10947) |\n| E4SRec | E4SRec：大型语言模型用于序列化推荐的优雅、有效、高效且可扩展的解决方案 | LLaMA2 (13B) | LoRA | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.02443) |\n| CER | 推荐自然语言解释中的一致性问题 | GPT2 (110M) | 全量微调 | ECAI 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.11356) |\n| LSAT | 大型语言模型推荐系统的增量学习初步研究 | LLaMA (7B) | LoRA | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.15599) |\n| Llama4Rec | 通过相互增强和适应性聚合将大型语言模型融入推荐 | LLaMA2 (7B) | 全量微调 | Arxiv 2024 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.13870) |\n    \n\u003Cb>1.3.2 物品生成任务\u003C\u002Fb>\n\n| **名称** | **论文** | **最大规模的LLM主干模型** | **LLM微调策略** | **发表时间** | **链接** |\n|:---:|:---|:---:|:---:|:---:|:---:|\n| GPT4Rec | GPT4Rec: 个性化推荐与用户兴趣解释的生成框架 | GPT2 (1.1亿) | 全量微调 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.03879) |\n| VIP5 | VIP5：面向推荐的多模态基础模型 | T5-base (2.23亿) | 分层适配器微调 | EMNLP 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.14302) |\n| P5-ID | 如何为推荐基础模型索引物品ID | T5-small (6千万) | 全量微调 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.06569) |\n| FaiRLLM | ChatGPT对推荐公平吗？大型语言模型推荐中的公平性评估 | ChatGPT | 冻结 | RecSys 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.07609) |\n| PALR | PALR：面向推荐的具个性化感知的LLM | LLaMA (70亿) | 全量微调 | Arxiv 2023 | 
[[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.07622) |\n| ChatGPT | 大型语言模型是推荐系统的零样本排序器 | ChatGPT | 冻结 | ECIR 2024 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.08845) |\n| AGR | 人工通用推荐系统（AGR）的火花：ChatGPT的早期实验 | ChatGPT | 冻结 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.04518) |\n| NIR | 基于大型预训练语言模型的零样本下一物品推荐 | GPT3 (1750亿) | 冻结 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.03153) |\n| GPTRec | 使用GPTRec进行生成式序列推荐 | GPT2-medium (3.55亿) | 全量微调 | Gen-IR@SIGIR 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.11114) |\n| ChatNews | ChatGPT在新闻推荐中的初步研究：个性化、提供商公平性、假新闻 | ChatGPT | 冻结 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.10702) |\n| 无 | 大型语言模型在基于语言和物品偏好的近冷启动推荐中具有竞争力 | PaLM (620亿) | 冻结 | RecSys 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.14225) |\n| LLMSeqPrompt | 利用大型语言模型进行序列推荐 | OpenAI ada模型 | 微调 | RecSys 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.09261) |\n| GenRec | GenRec：用于生成式推荐的大语言模型 | LLaMA (70亿) | LoRA | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.00457) |\n| UP5 | UP5：面向公平性的无偏基础模型 | T5-base (2.23亿) | 前缀微调 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.12090) |\n| HKFR | 异质知识融合：通过LLM实现个性化推荐的新方法 | ChatGLM (60亿) | LoRA | RecSys 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.03333) |\n| 无 | 大型语言模型的不平等机会：通过职位推荐揭示人口统计学偏见 | ChatGPT | 冻结 | EAAMO 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.02053) |\n| BIGRec | 推荐系统中大型语言模型的两步接地范式 | LLaMA (70亿) | LoRA | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.08434) |\n| KP4SR | 序列推荐中的知识提示微调 | T5-small (6千万) | 全量微调 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.08459) |\n| RecSysLLM | 利用大型语言模型构建预训练推荐系统 | GLM (100亿) | LoRA | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.10837) |\n| POD | 针对高效LLM推荐的提示蒸馏 | T5-small (6千万) | 全量微调 | CIKM 2023 | 
[[链接]](https:\u002F\u002Flileipisces.github.io\u002Ffiles\u002FCIKM23-POD-paper.pdf) |\n| 无 | 以严谨方法评估ChatGPT作为推荐系统 | ChatGPT | 冻结 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.03613) |\n| RaRS | 检索增强型推荐系统：利用大型语言模型提升推荐系统性能 | ChatGPT | 冻结 | RecSys博士生研讨会2023 | [[链接]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3604915.3608889) |\n| JobRecoGPT | JobRecoGPT——使用LLM的可解释性职位推荐 | GPT4 | 冻结 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.11805) |\n| LANCER | 重新定义序列推荐：通过内容丰富的语言建模学习动态用户兴趣 | GPT2 (1.1亿) | 前缀微调 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.10435) |\n| TransRec | 桥接大型语言模型与推荐的多方面范式 | LLaMA (70亿) | LoRA | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.06491) |\n| AgentCF | AgentCF：利用自主语言代理进行推荐系统的协同学习 | text-davinci-003 & gpt-3.5-turbo | 冻结 | WWW 2024 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.09233) |\n| P4LM | 利用语言模型和强化学习实现事实性和个性化推荐 | PaLM2-XS | 全量微调 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.06176) |\n| InstructMK | 在推荐系统中融入大型语言模型的多键值策略 | LLaMA (70亿) | 全量微调 | CIKM GenRec 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.16409) |\n| LightLM | LightLM：用于生成式推荐的轻量级深而窄的语言模型 | T5-small (6千万) | 全量微调 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.17488) |\n| LlamaRec | LlamaRec：使用大型语言模型进行排序的两阶段推荐 | LLaMA2 (70亿) | QLoRA | PGAI@CIKM 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.02089) |\n| 无 | 探索GPT-4V（视觉）的推荐能力：初步案例研究 | GPT-4V | 冻结 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.04199) |\n| 无 | 探索ChatGPT在新闻推荐中的微调 | ChatGPT | gpt-3.5-turbo微调API | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.05850) |\n| 无 | LLM在推荐中是否隐性表现出用户歧视？一项实证研究 | ChatGPT | 冻结 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.07054) |\n| LC-Rec | 通过整合协作语义来调整大型语言模型以用于推荐 | LLaMA (70亿) | LoRA | Arxiv 2023 | 
[[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.09049) |\n| DOKE | 知识插件：增强大型语言模型以实现领域特定的推荐 | ChatGPT | 冻结 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.10779) |\n| ControlRec | ControlRec：弥合语言模型与个性化推荐之间的语义鸿沟 | T5-base (2.23亿) | 全量微调 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.16441) |\n| LLaRA | LLaRA：大型语言-推荐助手 | LLaMA2 (70亿) | LoRA | SIGIR 2024 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.02445) |\n| PO4ISR | 大型语言模型用于意图驱动的会话推荐 | ChatGPT | 冻结 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.07552) |\n| DRDT | DRDT：基于发散思维的动态反思，用于LLM驱动的序列推荐 | ChatGPT | 冻结 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.11336) |\n| RecPrompt | RecPrompt：利用大型语言模型进行新闻推荐的提示微调框架 | GPT4 | 冻结 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.10463) |\n| LiT5 | 缩小规模，提升效率：使用Seq2seq编码器-解码器模型进行高效的零样本列表重排序 | T5-XL (30亿) | 全量微调 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.16098) |\n| STELLA | 大型语言模型并非稳定的推荐系统 | ChatGPT | 冻结 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.15746) |\n| Llama4Rec | 通过相互增强和适应性聚合将大型语言模型融入推荐 | LLaMA2 (70亿) | 全量微调 | Arxiv 2024 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.13870) |\n| RECLLM | 理解基于ChatGPT的推荐系统中的偏见：提供商公平性、时间稳定性及近期性 | ChatGPT | 冻结 | Arxiv 2024 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.10545) |\n| DEALRec | 面向LLM推荐的数据高效微调 | LLaMA (70亿) | LoRA | Arxiv 2024 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.17197) |\n\n\u003Cb>1.3.3 混合任务\u003C\u002Fb>\n\n| **名称** | **论文** | **最大基础大模型** | **大模型微调策略** | **发表时间** | **链接** |\n|:---:|:---|:---:|:---:|:---:|:---:|\n| P5 | 推荐即语言处理（RLP）：统一的预训练、个性化提示与预测范式（P5） | T5-base (2.23亿) | 全量微调 | RecSys 2022 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.13366) |\n| M6-Rec | M6-Rec：生成式预训练语言模型是开放式推荐系统 | M6-base (3亿) | 选项微调 | Arxiv 2022 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.08084) |\n| InstructRec | 
推荐即指令遵循：一种由大型语言模型赋能的推荐方法 | FLAN-T5-XL (30亿) | 全量微调 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.07001) |\n| ChatGPT | ChatGPT 是一个好的推荐系统吗？一项初步研究 | ChatGPT | 冻结 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.10149) |\n| ChatGPT | ChatGPT 在搜索方面表现如何？探究大型语言模型作为重排序代理 | ChatGPT | 冻结 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.09542) |\n| ChatGPT | 揭示 ChatGPT 在推荐系统中的能力 | ChatGPT | 冻结 | RecSys 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.02182) |\n| BDLM | 桥接领域特定模型与通用大模型之间的信息鸿沟，用于个性化推荐 | Vicuna (70亿) | 全量微调 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.03778) |\n| RecRanker | RecRanker：将大型语言模型通过指令微调用作 Top-k 推荐的排序器 | LLaMA2 (130亿) | 全量微调 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.16018) |\n\n\u003C\u002Fp>\n\u003C\u002Fdetails>\n\n\u003Cdetails>\u003Csummary>\u003Cb>1.4 大模型用于用户交互\u003C\u002Fb>\u003C\u002Fsummary>\n\u003Cp>\n\n\u003Cb>1.4.1 任务导向型用户交互\u003C\u002Fb>\n    \n| **名称** | **论文** | **最大基础大模型** | **大模型微调策略** | **发表时间** | **链接** |\n|:---:|:---|:---:|:---:|:---:|:---:|\n| TG-ReDial | 走向主题引导的对话式推荐系统 | BERT-base (1.1亿) & GPT2 (1.1亿) | 未知 | COLING 2020 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2010.04125) |\n| TCP | 跟我来：面向目标驱动的推荐对话系统的对话规划 | BERT-base (1.1亿) | 全量微调 | Arxiv 2022 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.03516) |\n| MESE | 利用上下文感知的商品元信息提升对话式推荐系统的质量 | DistilBERT (6700万) & GPT2 (1.1亿) | 全量微调 | ACL 2022 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.08140) |\n| UniMIND | 面向多目标对话式推荐系统的统一多任务学习框架 | BART-base (1.39亿) | 全量微调 | ACM TOIS 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.06923) |\n| VRICR | 针对对话式推荐的不完全知识图谱上的变分推理 | BERT-base (1.1亿) | 全量微调 | WSDM 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.11868) |\n| KECR | 对话式推荐中的显式知识图谱推理 | BERT-base (1.1亿) & GPT2 (1.1亿) | 冻结 | ACM TIST 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.00783) |\n| 无 | 
大型语言模型作为零样本对话式推荐系统 | GPT4 | 冻结 | CIKM 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.10053) |\n| MuseChat | MuseChat：一个面向视频的对话式音乐推荐系统 | Vicuna (70亿) | LoRA | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.06282) |\n| 无 | 电商售前对话中，对话式推荐系统与大型语言模型天生一对 | Chinese-Alpaca (70亿) | LoRA | EMNLP 2023 Findings | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.14626) |\n| 无 | ChatGPT 用于对话式推荐：通过反馈重新提示来优化推荐 | ChatGPT | 冻结 | Arxiv 2024 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.03605) |\n\n\u003Cb>1.4.2 开放式用户交互\u003C\u002Fb>\n    \n| **名称** | **论文** | **最大基础大模型** | **大模型微调策略** | **发表时间** | **链接** |\n|:---:|:---|:---:|:---:|:---:|:---:|\n| BARCOR | BARCOR：迈向对话式推荐系统的统一框架 | BART-base (1.39亿) | 选择性层微调 | Arxiv 2022 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.14257) |\n| RecInDial | RecInDial：基于预训练语言模型的对话式推荐统一框架 | DialoGPT (1.1亿) | 全量微调 | AACL 2022 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2110.07477) |\n| UniCRS | 通过知识增强的提示学习迈向统一的对话式推荐系统 | DialoGPT-small (1.76亿) | 冻结 | KDD 2022 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.09363) |\n| T5-CR | 多任务端到端训练提升对话式推荐效果 | T5-base (2.23亿) | 全量微调 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.06218) |\n| TtW | 言行一致：用于对话式音乐推荐的合成数据生成 | T5-base (2.23亿) & T5-XXL (110亿) | 全量微调和冻结 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.11489) |\n| 无 | 重新思考大型语言模型时代下对话式推荐的评估 | ChatGPT | 冻结 | EMNLP 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.13112) |\n| PECRS | 参数高效的对话式推荐系统作为语言处理任务 | GPT2-medium (3.55亿) | LoRA | EACL 2024 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.14194) |\n\n\u003C\u002Fp>\n\u003C\u002Fdetails>\n\n\u003Cdetails>\u003Csummary>\u003Cb>1.5 大模型用于推荐系统流水线控制器\u003C\u002Fb>\u003C\u002Fsummary>\n\u003Cp>\n    \n| **名称** | **论文** | **最大基础大模型** | **大模型微调策略** | **发表时间** | **链接** |\n|:---:|:---|:---:|:---:|:---:|:---:|\n| Chat-REC | Chat-REC：迈向交互式且可解释的大模型增强型推荐系统 | ChatGPT | 冻结 | Arxiv 2023 | 
[[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2303.14524) |\n| RecLLM | 在对话式推荐系统中利用大型语言模型 | LLaMA (70亿) | 全量微调 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.07961) |\n| RAH | RAH！推荐系统助手—人类：以人类为中心的大型语言模型推荐框架 | GPT4 | 冻结 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.09904) |\n| RecMind | RecMind：大型语言模型驱动的推荐智能体 | ChatGPT | 冻结 | NAACL 2024 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.14296) |\n| InteRecAgent | 推荐AI智能体：整合大型语言模型实现交互式推荐 | GPT4 | 冻结 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.16505) |\n| CORE | 为推荐系统插上对话智能体的翅膀 | 无 | 无 | NIPS 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.04230) |\n| LLMCRS | 一种大型语言模型增强的对话式推荐系统 | LLaMA (70亿) | 全量微调 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.06212) |\n\n\u003C\u002Fp>\n\u003C\u002Fdetails>\n\n\u003Cdetails>\u003Csummary>\u003Cb>1.6 相关综述论文\u003C\u002Fb>\u003C\u002Fsummary>\n\u003Cp>\n\n| **论文** | **发表刊物** | **链接** |\n|:---|:---:|:---:|\n| GR-LLMs：基于大语言模型的生成式推荐最新进展 | Arxiv 2025 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.06507) |\n| 未来是智能体驱动的：多智能体推荐系统的定义、视角与开放挑战 | Arxiv 2025 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.02097) |\n| 基础模型赋能的推荐系统综述：从特征型、生成式到智能体范式 | Arxiv 2025 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.16420) |\n| 个性化综述：从RAG到智能体 | Arxiv 2025 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.10147) |\n| 大语言模型赋能的推荐与搜索智能体综述：迈向下一代信息检索 | Arxiv 2025 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.05659) |\n| 以智能体为中心的信息获取 | Arxiv 2025 | [[链接]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.19298) |\n| 基于LLM的新闻推荐系统综述 | Arxiv 2025 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.09797) |\n| 推荐系统中LLM赋能的智能体综述 | Arxiv 2025 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.10050) |\n| 面向大语言模型时代的冷启动推荐：全面综述与路线图 | Arxiv 2025 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.01945) |\n| 大语言模型增强的推荐系统：分类、趋势、应用与未来 | Arxiv 2024 | 
[[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.13432) |\n| 大语言模型智能体时代的推荐系统：综述 | 预印本 | [[链接]](https:\u002F\u002Fwww.researchgate.net\u002Fpublication\u002F386342676_Recommender_Systems_in_the_Era_of_Large_Language_Model_Agents_A_Survey) |\n| 推荐领域大语言模型高效解决方案综述 | Arxiv 2024 | [[链接]](https:\u002F\u002Fwww.researchgate.net\u002Fpublication\u002F385863443_A_Survey_on_Efficient_Solutions_of_Large_Language_Models_for_Recommendation) |\n| 迈向新一代LLM驱动的推荐系统：综述及展望 | Arxiv 2024 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.19744) |\n| 信息检索系统中的偏见与不公平：LLM时代的新挑战 | KDD 2024 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.11457) |\n| 条条大路通罗马：揭示推荐系统在LLM时代的发展轨迹 | Arxiv 2024 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.10081) |\n| 社交和电商推荐系统中生成式AI落地的行业视角综述 | Arxiv 2024 | [[链接]](https:\u002F\u002Fwww.arxiv.org\u002Fabs\u002F2406.06475) |\n| 大语言模型时代生成式搜索与推荐综述 | Arxiv 2024 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.16924) |\n| 搜索引擎服务与大语言模型的结合：愿景与挑战 | Arxiv 2024 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.00128) |\n| 使用生成模型的现代推荐系统综述（Gen-RecSys） | Arxiv 2024 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.00579) |\n| 探讨大语言模型对推荐系统的影响：广泛综述 | Arxiv 2024 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.18590) |\n| 推荐系统的基础模型：综述与新视角 | Arxiv 2024 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.11143) |\n| 针对推荐系统的提示工程：综合框架与实证分析 | Arxiv 2024 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.04997) |\n| 大语言模型时代的用户建模：当前研究与未来方向 | IEEE数据工程简报 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.11518) |\n| 大语言模型用于个性化和可解释性推荐的综述 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2311.12338) |\n| 大语言模型用于生成式推荐：综述与前瞻性讨论 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.01157) |\n| 大语言模型用于信息检索：综述 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.07107) |\n| 当大语言模型与个性化相遇时：挑战与机遇的视角 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.16376) |\n| 
大语言模型时代的推荐系统 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.02046) |\n| 大语言模型用于推荐的综述 | Arxiv 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.19860) |\n| 预训练、提示与推荐：推荐系统中语言建模范式的适应性综合综述 | TACL 2023 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2302.03735) |\n| 推荐系统中的自监督学习：综述 | TKDE 2022 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.15876) |\n\n\u003C\u002Fp>\n\u003C\u002Fdetails>\n\n\u003Cdetails>\u003Csummary>\u003Cb>1.7 最新研究工作列表\u003C\u002Fb>\u003C\u002Fsummary>\n\u003Cp>\n\n| **Paper** | **Publication** | **Link** |\n|:---|:---:|:---:|\n| Large Language Model Can Interpret Latent Space of Sequential Recommender | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.20487) |\n| Zero-Shot Recommendations with Pre-Trained Large Language Models for Multimodal Nudging | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.01026) |\n| INTERS: Unlocking the Power of Large Language Models in Search with Instruction Tuning | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.06532) |\n| Evaluation of Synthetic Datasets for Conversational Recommender Systems | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.08167v1) |\n| Generative Recommendation: Towards Next-generation Recommender Paradigm | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.03516) |\n| Towards Personalized Prompt-Model Retrieval for Generative Recommendation | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.02205) |\n| Generative Next-Basket Recommendation | RecSys 2023 | [[Link]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3604915.3608823) |\n| Unlocking the Potential of Large Language Models for Explainable Recommendations | Arxiv 2023 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.15661) |\n| Logic-Scaffolding: Personalized Aspect-Instructed Recommendation Explanation Generation using LLMs 
| WSDM 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.14345) |\n| Improving Sequential Recommendations with LLMs | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.01339) |\n| A Multi-Agent Conversational Recommender System | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.01135) |\n| TransFR: Transferable Federated Recommendation with Pre-trained Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.01124) |\n| Large Language Model Distilling Medication Recommendation Model | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.02803) |\n| Uncertainty-Aware Explainable Recommendation with Large Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.03366) |\n| Natural Language User Profiles for Transparent and Scrutable Recommendations | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.05810) |\n| Leveraging LLMs for Unsupervised Dense Retriever Ranking | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.04853) |\n| RA-Rec: An Efficient ID Representation Alignment Framework for LLM-based Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.04527) |\n| A Multi-Agent Conversational Recommender System | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.01135) |\n| Fairly Evaluating Large Language Model-based Recommendation Needs Revisit the Cross-Entropy Loss | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.06216) |\n| SearchAgent: A Lightweight Collaborative Search Agent with Large Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.06360) |\n| Large Language Model Interaction Simulator for Cold-Start Item Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.09176) |\n| Enhancing ID and Text Fusion via Alternative Training in Session-based 
Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.08921) |\n| eCeLLM: Generalizing Large Language Models for E-commerce from Large-scale, High-quality Instruction Data | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.08831) |\n| LLM-Enhanced User-Item Interactions: Leveraging Edge Information for Optimized Recommendations | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.09617) |\n| LLM-based Federated Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.09959) |\n| Rethinking Large Language Model Architectures for Sequential Recommendations | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.09543) |\n| Large Language Model with Graph Convolution for Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.08859) |\n| Rec-GPT4V: Multimodal Recommendation with Large Vision-Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.08670) |\n| Enhancing Recommendation Diversity by Re-ranking with Large Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.11506) |\n| Are ID Embeddings Necessary? 
Whitening Pre-trained Text Embeddings for Effective Sequential Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.10602) |\n| SPAR: Personalized Content-Based Recommendation via Long Engagement Attention | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.10555) |\n| Cognitive Personalized Search Integrating Large Language Models with an Efficient Memory Mechanism | WWW 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.10548) |\n| Large Language Models as Data Augmenters for Cold-Start Item Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.11724) |\n| Explain then Rank: Scale Calibration of Neural Rankers Using Natural Language Explanations from Large Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.12276) |\n| LLM4SBR: A Lightweight and Effective Framework for Integrating Large Language Models in Session-based Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.13840) |\n| Breaking the Barrier: Utilizing Large Language Models for Industrial Recommendation Systems through an Inferential Knowledge Graph | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.13750) |\n| User-LLM: Efficient LLM Contextualization with User Embeddings | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.13598) |\n| Stealthy Attack on Large Language Model based Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.14836) |\n| Multi-Agent Collaboration Framework for Recommender Systems | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.15235) |\n| Item-side Fairness of Large Language Model-based Recommendation System | WWW 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.15215) |\n| Integrating Large Language Models with Graphical Session-Based Recommendation | Arxiv 2024 | 
[[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.16539) |\n| Language-Based User Profiles for Recommendation | LLM-IGS@WSDM2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.15623) |\n| BASES: Large-scale Web Search User Simulation with Large Language Model based Agents | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.17505) |\n| Prospect Personalized Recommendation on Large Language Model-based Agent Platform | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.18240) |\n| Sequence-level Semantic Representation Fusion for Recommender Systems | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.18166) |\n| Corpus-Steered Query Expansion with Large Language Models | EACL 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.18031) |\n| NoteLLM: A Retrievable Large Language Model for Note Recommendation | WWW 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.01744) |\n| An Interpretable Ensemble of Graph and Language Models for Improving Search Relevance in E-Commerce | WWW 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.00923) |\n| LLM-Ensemble: Optimal Large Language Model Ensemble Method for E-commerce Product Attribute Value Extraction | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.00863) |\n| Enhancing Long-Term Recommendation with Bi-level Learnable Large Language Model Planning | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.00843) |\n| InteraRec: Interactive Recommendations Using Multimodal Large Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.00822) |\n| ChatDiet: Empowering Personalized Nutrition-Oriented Food Recommender Chatbots through an LLM-Augmented Framework  | CHASE 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.00781) |\n| Towards Efficient and Effective Unlearning of Large Language Models for Recommendation | Arxiv 2024 | 
[[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.03536) |\n| Generative News Recommendation | WWW 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.03424) |\n| Bridging Language and Items for Retrieval and Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.03952) |\n| Can Small Language Models be Good Reasoners for Sequential Recommendation? | WWW 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.04260) |\n| Aligning Large Language Models for Controllable Recommendations | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.05063) |\n| Personalized Audiobook Recommendations at Spotify Through Graph Neural Networks | WWW 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.05185) |\n| Towards Graph Foundation Models for Personalization | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.07478) |\n| CFaiRLLM: Consumer Fairness Evaluation in Large-Language Model Recommender System | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.05668) |\n| CoRAL: Collaborative Retrieval-Augmented Large Language Models Improve Long-tail Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.06447) |\n| RecAI: Leveraging Large Language Models for Next-Generation Recommender Systems | WWW 2024 Demo | [[Link]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.06465.pdf) |\n| KELLMRec: Knowledge-Enhanced Large Language Models for Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.06642) |\n| USimAgent: Large Language Models for Simulating Search Users | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.09142) |\n| CALRec: Contrastive Alignment of Generative LLMs For Sequential Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.02429) |\n| Integrating Large Language Models with Graphical Session-Based Recommendation | Arxiv 2024 | 
[[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.16539) |\n| Language-Based User Profiles for Recommendation | LLM-IGS@WSDM2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.15623) |\n| BASES: Large-scale Web Search User Simulation with Large Language Model based Agents | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.17505) |\n| Prospect Personalized Recommendation on Large Language Model-based Agent Platform | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.18240) |\n| Sequence-level Semantic Representation Fusion for Recommender Systems | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.18166) |\n| Corpus-Steered Query Expansion with Large Language Models | EACL 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.18031) |\n| NoteLLM: A Retrievable Large Language Model for Note Recommendation | WWW 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.01744) |\n| An Interpretable Ensemble of Graph and Language Models for Improving Search Relevance in E-Commerce | WWW 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.00923) |\n| LLM-Ensemble: Optimal Large Language Model Ensemble Method for E-commerce Product Attribute Value Extraction | SIGIR 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.00863) |\n| Enhancing Long-Term Recommendation with Bi-level Learnable Large Language Model Planning | SIGIR 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.00843) |\n| Towards Efficient and Effective Unlearning of Large Language Models for Recommendation | FCS | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.03536) |\n| Generative News Recommendation | WWW 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.03424) |\n| Bridging Language and Items for Retrieval and Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.03952) |\n| Can Small Language Models be Good Reasoners for 
Sequential Recommendation? | WWW 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.04260) |\n| Aligning Large Language Models for Controllable Recommendations | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.05063) |\n| Personalized Audiobook Recommendations at Spotify Through Graph Neural Networks | WWW 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.05185) |\n| CFaiRLLM: Consumer Fairness Evaluation in Large-Language Model Recommender System | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.05668) |\n| CoRAL: Collaborative Retrieval-Augmented Large Language Models Improve Long-tail Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.06447) |\n| RecAI: Leveraging Large Language Models for Next-Generation Recommender Systems | WWW 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.06465) |\n| KELLMRec: Knowledge-Enhanced Large Language Models for Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.06642) |\n| Towards Graph Foundation Models for Personalization | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.07478) |\n| USimAgent: Large Language Models for Simulating Search Users | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.09142) |\n| The Whole is Better than the Sum: Using Aggregated Demonstrations in In-Context Learning for Sequential Recommendation | NAACL 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.10135) |\n| PPM : A Pre-trained Plug-in Model for Click-through Rate Prediction | WWW 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.10049) |\n| Evaluating Large Language Models as Generative User Simulators for Conversational Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.09738) |\n| Towards Unified Multi-Modal Personalization: Large Vision-Language Models for Generative Recommendation and Beyond 
| ICLR 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.10667) |\n| Harnessing Large Language Models for Text-Rich Sequential Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.13325) |\n| A Large Language Model Enhanced Sequential Recommender for Joint Video and Comment Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.13574) |\n| Could Small Language Models Serve as Recommenders? Towards Data-centric Cold-start Recommendations | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.17256) |\n| Play to Your Strengths: Collaborative Intelligence of Conventional Recommender Models and Large Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.16378) |\n| Reinforcement Learning-based Recommender Systems with Large Language Models for State Reward and Action Modeling | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.16948) |\n| Large Language Models Enhanced Collaborative Filtering | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.17688) |\n| Improving Content Recommendation: Knowledge Graph-Based Semantic Contrastive Learning for Diversity and Cold-Start Users | LREC-COLING 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.18667) |\n| Sequential Recommendation with Latent Relations based on Large Language Model | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.18348) |\n| Enhanced Generative Recommendation via Content and Collaboration Integration | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.18480) |\n| To Recommend or Not: Recommendability Identification in Conversations with Pre-trained Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.18628) |\n| IDGenRec: LLM-RecSys Alignment with Textual ID Learning | SIGIR 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.19021) |\n| Breaking the 
Length Barrier: LLM-Enhanced CTR Prediction in Long Textual User Behaviors | SIGIR 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.19347) |\n| Make Large Language Model a Better Ranker | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.19181) |\n| Do Large Language Models Rank Fairly? An Empirical Study on the Fairness of LLMs as Rankers | NAACL 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.03192) |\n| IISAN: Efficiently Adapting Multimodal Representation for Sequential Recommendation with Decoupled PEFT | SIGIR 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.02059) |\n| Where to Move Next: Zero-shot Generalization of LLMs for Next POI Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.01855) |\n| Tired of Plugins? Large Language Models Can Be End-To-End Recommender | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.00702) |\n| Aligning Large Language Models with Recommendation Knowledge | NAACL 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.00245) |\n| Enhancing Content-based Recommendation via Large Language Model | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.00236) |\n| DRE: Generating Recommendation Explanations by Aligning Large Language Models at Data-level | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.06311) |\n| Optimization Methods for Personalizing Large Language Models through Retrieval Augmentation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.05970) |\n| Q-PEFT: Query-dependent Parameter Efficient Fine-tuning for Text Reranking with Large Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.04522) |\n| JobFormer: Skill-Aware Job Recommendation with Semantic-Enhanced Transformer | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.04313) |\n| PMG : Personalized Multimodal Generation with Large 
Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.08677) |\n| The Elephant in the Room: Rethinking the Usage of Pre-trained Language Model in Sequential Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.08796) |\n| Exact and Efficient Unlearning for Large Language Model-based Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.10327) |\n| Large Language Models meet Collaborative Filtering: An Efficient All-round LLM-based Recommender System | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.11343) |\n| Behavior Alignment: A New Perspective of Evaluating LLM-based Conversational Recommendation Systems | SIGIR 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.11773) |\n| Generating Diverse Criteria On-the-Fly to Improve Point-wise LLM Rankers | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.11960) |\n| RecGPT: Generative Personalized Prompts for Sequential Recommendation via ChatGPT Training Paradigm | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.08675) |\n| MMGRec: Multimodal Generative Recommendation with Transformer Model | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.16555) |\n| Hi-Gen: Generative Retrieval For Large-Scale Personalized E-commerce Search | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.15675) |\n| Contrastive Quantization based Semantic Code for Generative Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.14774) |\n| ImplicitAVE: An Open-Source Dataset and Multimodal LLMs Benchmark for Implicit Attribute Value Extraction | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.15592) |\n| Large Language Models for Next Point-of-Interest Recommendation | SIGIR 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.17591) |\n| Ranked List Truncation for Large 
Language Model-based Re-Ranking | SIGIR 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.18185) |\n| Large Language Models as Conversational Movie Recommenders: A User Study | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.19093) |\n| Distillation Matters: Empowering Sequential Recommenders to Match the Performance of Large Language Model | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.00338) |\n| Efficient and Responsible Adaptation of Large Language Models for Robust Top-k Recommendations | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.00824) |\n| FairEvalLLM. A Comprehensive Framework for Benchmarking Fairness in Large Language Model Recommender Systems | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.02219) |\n| Improve Temporal Awareness of LLMs for Sequential Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.02778) |\n| CALRec: Contrastive Alignment of Generative LLMs For Sequential Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.02429) |\n| Knowledge Adaptation from Large Language Model to Recommendation for Practical Industrial Application | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.03988) |\n| DynLLM: When Large Language Models Meet Dynamic Graph Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.07580) |\n| Learnable Tokenizer for LLM-based Generative Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.07314) |\n| CELA: Cost-Efficient Language Model Alignment for CTR Prediction | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.10596) |\n| RDRec: Rationale Distillation for LLM-based Recommendation | ACL 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.10587) |\n| EmbSum: Leveraging the Summarization Capabilities of Large Language Models for Content-Based 
Recommendations | Arxiv 2024 | [[Link]](https:\u002F\u002Fwww.arxiv.org\u002Fabs\u002F2405.11441) |\n| Reindex-Then-Adapt: Improving Large Language Models for Conversational Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.12119) |\n| RecGPT: Generative Pre-training for Text-based Recommendation | ACL 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.12715) |\n| Let Me Do It For You: Towards LLM Empowered Recommendation via Tool Learning | SIGIR 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.15114) |\n| Finetuning Large Language Model for Personalized Ranking | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.16127) |\n| LLMs for User Interest Exploration: A Hybrid Approach | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.16363) |\n| NoteLLM-2: Multimodal Large Representation Models for Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.16789) |\n| Multimodality Invariant Learning for Multimedia-Based New Item Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.15783) |\n| SLMRec: Empowering Small Language Models for Sequential Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.17890) |\n| Keyword-driven Retrieval-Augmented Large Language Models for Cold-start User Recommendations | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.19612) |\n| Generating Query Recommendations via LLMs | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.19749) |\n| Large Language Models Enhanced Sequential Recommendation for Long-tail User and Item | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.20646) |\n| DisCo: Towards Harmonious Disentanglement and Collaboration between Tabular and Semantic Space for Recommendation | KDD 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.00011) |\n| LLM-RankFusion: 
Mitigating Intrinsic Inconsistency in LLM-based Ranking | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.00231) |\n| A Practice-Friendly Two-Stage LLM-Enhanced Paradigm in Sequential Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.00333) |\n| Large Language Models as Recommender Systems: A Study of Popularity Bias | Gen-IR@SIGIR24 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.01285) |\n| Privacy in LLM-based Recommendation: Recent Advances and Future Directions | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.01363) |\n| An LLM-based Recommender System Environment | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.01631) |\n| Robust Interaction-based Relevance Modeling for Online E-Commerce and LLM-based Retrieval | ECML-PKDD 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.02135) |\n| Large Language Models Make Sample-Efficient Recommender Systems | FCS | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.02368) |\n| XRec: Large Language Models for Explainable Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.02377) |\n| Exploring User Retrieval Integration towards Large Language Models for Cross-Domain Sequential Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.03085) |\n| Large Language Models as Evaluators for Recommendation Explanations | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.03248) |\n| Text-like Encoding of Collaborative Information in Large Language Models for Recommendation | ACL 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.03210) |\n| Item-Language Model for Conversational Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.02844) |\n| Improving LLMs for Recommendation with Out-Of-Vocabulary Tokens | Arxiv 2024 | 
[[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.08477) |\n| On Softmax Direct Preference Optimization for Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.09215) |\n| TokenRec: Learning to Tokenize ID for LLM-based Generative Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.10450) |\n| DELRec: Distilling Sequential Pattern to Enhance LLM-based Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.11156) |\n| TourRank: Utilizing Large Language Models for Documents Ranking with a Tournament-Inspired Strategy | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.11678) |\n| Multi-Layer Ranking with Large Language Models for News Source Recommendation | SIGIR 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.11745) |\n| Intermediate Distillation: Data-Efficient Distillation from Black-Box LLMs for Information Retrieval | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.12169) |\n| LLM-enhanced Reranking in Recommender Systems | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.12433) |\n| LLM4MSR: An LLM-Enhanced Paradigm for Multi-Scenario Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.12529) |\n| Taxonomy-Guided Zero-Shot Recommendations with LLMs | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.14043) |\n| EAGER: Two-Stream Generative Recommender with Behavior-Semantic Collaboration | KDD 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.14017) |\n| An Investigation of Prompt Variations for Zero-shot LLM-based Rankers | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.14117) |\n| Optimizing Novelty of Top-k Recommendations using Large Language Models and Reinforcement Learning | KDD 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.14169) |\n| Enhancing Collaborative Semantics of 
Language Model-Driven Recommendations via Graph-Aware Learning | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.13235) |\n| Decoding Matters: Addressing Amplification Bias and Homogeneity Issue for LLM-based Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.14900) |\n| FIRST: Faster Improved Listwise Reranking with Single Token Decoding | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.15657) |\n| LLM-Powered Explanations: Unraveling Recommendations Through Subgraph Reasoning | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.15859) |\n| DemoRank: Selecting Effective Demonstrations for Large Language Models in Ranking Task | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.16332) |\n| ELCoRec: Enhance Language Understanding with Co-Propagation of Numerical and Categorical Features for Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.18825) |\n| Generative Explore-Exploit: Training-free Optimization of Generative Recommender Systems using LLM Optimizers | ACL 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.05255) |\n| ProductAgent: Benchmarking Conversational Product Search Agent with Asking Clarification Questions | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.00942) |\n| MemoCRS: Memory-enhanced Sequential Conversational Recommender Systems with Large Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.04960) |\n| Preference Distillation for Personalized Generative Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.05033) |\n| Towards Bridging the Cross-modal Semantic Gap for Multi-modal Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.05420) |\n| Language Models Encode Collaborative Signals in Recommendation | Arxiv 2024 | 
[[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.05441) |\n| A Neural Matrix Decomposition Recommender System Model based on the Multimodal Large Language Model | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.08942) |\n| LLMGR: Large Language Model-based Generative Retrieval in Alipay Search | SIGIR 2024 | [[Link]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3626772.3661364) |\n| Enhancing Sequential Recommenders with Augmented Knowledge from Aligned Large Language Models | SIGIR 2024 | [[Link]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3626772.3657782) |\n| Reinforced Prompt Personalization for Recommendation with Large Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.17115) |\n| Improving Retrieval in Sponsored Search by Leveraging Query Context Signals | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.14346) |\n| Generative Retrieval with Preference Optimization for E-commerce Search | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.19829) |\n| GenRec: Generative Personalized Sequential Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.21191v1) |\n| Breaking the Hourglass Phenomenon of Residual Quantization: Enhancing the Upper Bound of Generative Retrieval | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.21488) |\n| Enhancing Taobao Display Advertising with Multimodal Representations: Challenges, Approaches and Insights | CIKM 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.19467) |\n| Leveraging LLM Reasoning Enhances Personalized Recommender Systems | ACL 2024 |[[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.00802) |\n| Multi-Aspect Reviewed-Item Retrieval via LLM Query Decomposition and Aspect Fusion | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.00878) |\n| Lifelong Personalized Low-Rank Adaptation of Large 
Language Models for Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Fwww.arxiv.org\u002Fabs\u002F2408.03533) |\n| Exploring Query Understanding for Amazon Product Search | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.02215) |\n| A Decoding Acceleration Framework for Industrial Deployable LLM-based Recommender Systems | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.05676) |\n| Prompt Tuning as User Inherent Profile Inference Machine | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.06577) |\n| Beyond Inter-Item Relations: Dynamic Adaptive Mixture-of-Experts for LLM-Based Sequential Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.07427) |\n| Review-driven Personalized Preference Reasoning with Large Language Models for Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.06276) |\n| DaRec: A Disentangled Alignment Framework for Large Language Model and Recommender System | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.08231) |\n| LLM4DSR: Leveraging Large Language Model for Denoising Sequential Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.08208) |\n| EasyRec: Simple yet Effective Language Models for Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.08821) |\n| Collaborative Cross-modal Fusion with Large Language Model for Recommendation | CIKM 2024 | [[Link]](https:\u002F\u002Fwww.arxiv.org\u002Fabs\u002F2408.08564) |\n| Customizing Language Models with Instance-wise LoRA for Sequential Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.10159) |\n| Efficient and Deployable Knowledge Infusion for Open-World Recommendations via Large Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Fwww.arxiv.org\u002Fabs\u002F2408.10520) |\n| CoRA: Collaborative Information Perception by Large 
Language Model's Weights for Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Fwww.arxiv.org\u002Fabs\u002F2408.10645) |\n| GANPrompt: Enhancing Robustness in LLM-Based Recommendations with GAN-Enhanced Diversity Prompts | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.09671) |\n| Harnessing Multimodal Large Language Models for Multimodal Sequential Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.09698) |\n| DLCRec: A Novel Approach for Managing Diversity in LLM-Based Recommender Systems | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.12470) |\n| LARR: Large Language Model Aided Real-time Scene Recommendation with Semantic Understanding | RecSys 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.11523) |\n| SC-Rec: Enhancing Generative Retrieval with Self-Consistent Reranking for Sequential Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.08686) |\n| Are LLM-based Recommenders Already the Best? 
Simple Scaled Cross-entropy Unleashes the Potential of Traditional Sequential Recommenders | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.14238) |\n| HRGraph: Leveraging LLMs for HR Data Knowledge Graphs with Information Propagation-based Job Recommendation | KaLLM 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.13521) |\n| An Extremely Data-efficient and Generative LLM-based Reinforcement Learning Agent for Recommenders | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.16032) |\n| CheatAgent: Attacking LLM-Empowered Recommender Systems via LLM Agent | KDD 2024 | [[Link]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3637528.3671837) |\n| Laser: Parameter-Efficient LLM Bi-Tuning for Sequential Recommendation with Collaborative Information | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.01605) |\n| MARS: Matching Attribute-aware Representations for Text-based Sequential Recommendation | CIKM 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.00702) |\n| End-to-End Learnable Item Tokenization for Generative Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.05546) |\n| Incorporate LLMs with Influential Recommender System | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.04827) |\n| Enhancing Sequential Recommendations through Multi-Perspective Reflections and Iteration | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.06377) |\n| STORE: Streamlining Semantic Tokenization and Generative Recommendation with A Single LLM | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.07276) |\n| Multilingual Prompts in LLM-Based Recommenders: Performance Across Languages | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.07604) |\n| Unleash LLMs Potential for Recommendation by Coordinating Twin-Tower Dynamic Semantic Token Generator | Arxiv 2024 | 
[[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.09253) |\n| Large Language Model Enhanced Hard Sample Identification for Denoising Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Fwww.arxiv.org\u002Fabs\u002F2409.10343) |\n| Chain-of-thought prompting empowered generative user modeling for personalized recommendation | Neural Computing and Applications | [[Link]](https:\u002F\u002Flink.springer.com\u002Farticle\u002F10.1007\u002Fs00521-024-10364-2) |\n| Challenging Fairness: A Comprehensive Exploration of Bias in LLM-Based Recommendations | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.10825) |\n| Decoding Style: Efficient Fine-Tuning of LLMs for Image-Guided Outfit Recommendation with Preference | CIKM 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.12150) |\n| LLM-Powered Text Simulation Attack Against ID-Free Recommender Systems | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.11690) |\n| FLARE: Fusing Language Models and Collaborative Architectures for Recommender Enhancement | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.11699) |\n| Retrieve, Annotate, Evaluate, Repeat: Leveraging Multimodal LLMs for Large-Scale Product Retrieval Evaluation | Arxiv 2024 | [[Link]](http:\u002F\u002Farxiv.org\u002Fabs\u002F2409.11860) |\n| HLLM: Enhancing Sequential Recommendations via Hierarchical Large Language Models for Item and User Modeling | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2409.12740) |\n| Large Language Model Ranker with Graph Reasoning for Zero-Shot Recommendation | ICANN 2024 | [[Link]](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-031-72344-5_24) |\n| User Knowledge Prompt for Sequential Recommendation | RecSys 2024 | [[Link]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3640457.3691714) |\n| RLRF4Rec: Reinforcement Learning from Recsys Feedback for Enhanced Recommendation Reranking | Arxiv 
2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.05939) |\n| FELLAS: Enhancing Federated Sequential Recommendation with LLM as External Services | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.04927) |\n| TLRec: A Transfer Learning Framework to Enhance Large Language Models for Sequential Recommendation Tasks | RecSys 2024 | [[Link]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3640457.3691710) |\n| SeCor: Aligning Semantic and Collaborative Representations by Large Language Models for Next-Point-of-Interest Recommendations | RecSys 2024 | [[Link]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3640457.3688124) |\n| Efficient Inference for Large Language Model-based Generative Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.05165) |\n| Instructing and Prompting Large Language Models for Explainable Cross-domain Recommendations | RecSys 2024 | [[Link]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3640457.3688137) |\n| ReLand: Integrating Large Language Models' Insights into Industrial Recommenders via a Controllable Reasoning Pool | RecSys 2024 | [[Link]](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3640457.3688131) |\n| Inductive Generative Recommendation via Retrieval-based Speculation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.02939) |\n| Constructing and Masking Preference Profile with LLMs for Filtering Discomforting Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.05411) |\n| Towards Scalable Semantic Representation for Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.09560) |\n| Large Language Models as Narrative-Driven Recommenders | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.13604) |\n| The Moral Case for Using Language Model Agents for Recommendation | Arxiv 2024 | 
[[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.12123) |\n| RosePO: Aligning LLM-based Recommenders with Human Values | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.12519) |\n| Comprehending Knowledge Graphs with Large Language Models for Recommender Systems | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.12229) |\n| Triple Modality Fusion: Aligning Visual, Textual, and Graph Data with Large Language Models for Multi-Behavior Recommendations | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.12228) |\n| Improving Pinterest Search Relevance Using Large Language Models | CIKM 2024 Workshop | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.17152) |\n| STAR: A Simple Training-free Approach for Recommendations using Large Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.16458) |\n| End-to-end Training for Recommendation with Language-based User Profiles | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.18870) |\n| Knowledge Graph Enhanced Language Agents for Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.19627) |\n| Collaborative Knowledge Fusion: A Novel Approach for Multi-task Recommender Systems via LLMs | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.20642) |\n| Real-Time Personalization for LLM-based Recommendation with Customized In-Context Learning | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.23136) |\n| ReasoningRec: Bridging Personalized Recommendations and Human-Interpretable Explanations through LLM Reasoning | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.23180) |\n| Beyond Utility: Evaluating LLM as Recommender | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.00331) |\n| Enhancing ID-based Recommendation with Large Language Models | Arxiv 2024 | 
[[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.02041) |\n| LLM4PR: Improving Post-Ranking in Search Engine with Large Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.01178) |\n| Proactive Detection and Calibration of Seasonal Advertisements with Multimodal Large Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.00780) |\n| Enhancing ID-based Recommendation with Large Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.02041) |\n| Transferable Sequential Recommendation via Vector Quantized Meta Learning | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.01785) |\n| Self-Calibrated Listwise Reranking with Large Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.04602) |\n| Enhancing Large Language Model Based Sequential Recommender Systems with Pseudo Labels Reconstruction | ACL Findings 2024 | [[Link]](https:\u002F\u002Faclanthology.org\u002F2024.findings-emnlp.423\u002F) |\n| Unleashing the Power of Large Language Models for Group POI Recommendations | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.13415) |\n| Scaling Laws for Online Advertisement Retrieval | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.13322) |\n| Explainable LLM-driven Multi-dimensional Distillation for E-Commerce Relevance Learning | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.13045) |\n| GOT4Rec: Graph of Thoughts for Sequential Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.14922) |\n| HARec: Hyperbolic Graph-LLM Alignment for Exploration and Exploitation in Recommender Systems | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.13865) |\n| Cross-Domain Recommendation Meets Large Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.19862) |\n| Explainable CTR 
Prediction via LLM Reasoning | WSDM 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.02588) |\n| Enabling Explainable Recommendation in E-commerce with LLM-powered Product Knowledge Graph | IJCAI Workshop 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.01837) |\n| Break the ID-Language Barrier: An Adaption Framework for Sequential Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.18262) |\n| LEADRE: Multi-Faceted Knowledge Enhanced LLM Empowered Display Advertisement Recommender System | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.13789) |\n| Pre-train, Align, and Disentangle: Empowering Sequential Recommendation with Large Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.04107) |\n| ULMRec: User-centric Large Language Model for Sequential Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.05543) |\n| AltFS: Agency-light Feature Selection with Large Language Models in Deep Recommender Systems | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.08516) |\n| MRP-LLM: Multitask Reflective Large Language Models for Privacy-Preserving Next POI Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.07796) |\n| MOPI-HFRS: A Multi-objective Personalized Health-aware Food Recommendation System with LLM-enhanced Interpretation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.08847) |\n| SPRec: Leveraging Self-Play to Debias Preference Alignment for Large Language Model-based Recommendations | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.09243) |\n| RecSys Arena: Pair-wise Recommender System Evaluation with Large Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.11068) |\n| CRS Arena: Crowdsourced Benchmarking of Conversational Recommender Systems | Arxiv 2024 | 
[[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.10514) |\n| Boosting LLM-based Relevance Modeling with Distribution-Aware Robust Learning | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.12504) |\n| LLM is Knowledge Graph Reasoner: LLM's Intuition-aware Knowledge Graph Reasoning for Cold-start Sequential Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.12464) |\n| Bridging the User-side Knowledge Gap in Knowledge-aware Recommendations with Large Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.13544) |\n| Sliding Windows Are Not the End: Exploring Full Ranking with Long-Context Large Language Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.14574) |\n| ChainRank-DPO: Chain Rank Direct Preference Optimization for LLM Rankers | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.14405) |\n| Are Longer Prompts Always Better? Prompt Selection in Large Language Models for Recommendation Systems | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.14454) |\n| Towards a Unified Paradigm: Integrating Recommendation Systems as a New Language in Large Models | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.16933) |\n| LLM-Powered User Simulator for Recommender System | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.16984) |\n| Enhancing Item Tokenization for Generative Recommendation through Self-Improvement | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.17171) |\n| Molar: Multimodal LLMs with Collaborative Filtering Alignment for Enhanced Sequential Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.18176) |\n| An Automatic Graph Construction Framework based on Large Language Models for Recommendation | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.18241) |\n| RecLM: Recommendation 
Instruction Tuning | Arxiv 2024 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.19302) |\n| The Efficiency vs. Accuracy Trade-off: Optimizing RAG-Enhanced LLM Recommender Systems Using Multi-Head Early Exit | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.02173) |\n| Knowledge Graph Retrieval-Augmented Generation for LLM-based Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.02226) |\n| Efficient and Responsible Adaptation of Large Language Models for Robust and Equitable Top-k Recommendations | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.04762) |\n| Collaboration of Large Language Models and Small Recommendation Models for Device-Cloud Recommendation | KDD 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.05647) |\n| Guiding Retrieval using LLM-based Listwise Rankers | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.09186) |\n| Generative Retrieval for Book search | KDD 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.11034) |\n| Full-Stack Optimized Large Language Models for Lifelong Sequential Behavior Comprehension in Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.13344) |\n| Large Language Model driven Policy Exploration for Recommender Systems | WSDM 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.13816) |\n| SampleLLM: Optimizing Tabular Data Synthesis in Recommendations | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.16125) |\n| PatchRec: Multi-Grained Patching for Efficient LLM-based Sequential Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.15087) |\n| Uncertainty Quantification and Decomposition for LLM-based Recommendation | WWW 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.17630) |\n| A Zero-Shot Generalization Framework for LLM-Driven Cross-Domain Sequential Recommendation | Arxiv 
2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2501.19232) |\n| RankFlow: A Multi-Role Collaborative Reranking Workflow Utilizing Large Language Models | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.00709) |\n| FACTER: Fairness-Aware Conformal Thresholding and Prompt Engineering for Enabling Fair LLM-Based Recommender Systems | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.02966) |\n| Large Language Models Are Universal Recommendation Learners | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.03041) |\n| Intent Representation Learning with Large Language Model for Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.03307) |\n| Boosting Knowledge Graph-based Recommendations through Confidence-Aware Augmentation with Large Language Models | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.03715) |\n| RALLRec: Improving Retrieval Augmented Large Language Model Recommendation with Representation Learning | WWW 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.06101) |\n| Solving the Content Gap in Roblox Game Recommendations: LLM-Based Profile Generation and Reranking | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.06802) |\n| MoLoRec: A Generalizable and Efficient Framework for LLM-Based Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.08271) |\n| Unleashing the Power of Large Language Model for Denoising Recommendation | WWW 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.09058) |\n| Semantic Ads Retrieval at Walmart eCommerce with Language Models Progressively Trained on Multiple Knowledge Domains | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.09089) |\n| Order-agnostic Identifier for Large Language Model-based Generative Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.10833) |\n| 
G-Refer: Graph Retrieval-Augmented Large Language Model for Explainable Recommendation | WWW 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.12586) |\n| LLM4Tag: Automatic Tagging System for Information Retrieval via Large Language Models | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.13481) |\n| Bursting Filter Bubble: Enhancing Serendipity Recommendations with Aligned Large Language Models | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.13539) |\n| ActionPiece: Contextually Tokenizing Action Sequences for Generative Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.13581) |\n| TALKPLAY: Multimodal Music Recommendation with Large Language Models | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.13713) |\n| Enhancing Cross-Domain Recommendations with Memory-Optimized LLM-Based User Agents | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.13843) |\n| Enhancing LLM-Based Recommendations Through Personalized Reasoning | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.13845) |\n| Lost in Sequence: Do Large Language Models Understand Sequential Recommendation? 
| Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.13909) |\n| InstructAgent: Building User Controllable Recommender via LLM Agent | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.14662) |\n| EAGER-LLM: Enhancing Large Language Models as Recommenders through Exogenous Behavior-Semantic Integration | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.14735) |\n| Efficient AI in Practice: Training and Deployment of Efficient LLMs for Industry Applications | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.14305) |\n| Collaborative Retrieval for Large Language Model-based Conversational Recommender Systems | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.14137) |\n| Active Large Language Model-based Knowledge Distillation for Session-based Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.15685) |\n| Training Large Recommendation Models via Graph-Language Token Alignment | WWW 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.18757) |\n| PCL: Prompt-based Continual Learning for User Modeling in Recommender Systems | WWW 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.19628) |\n| FilterLLM: Text-To-Distribution LLM for Billion-Scale Cold-Start Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.16924) |\n| Towards An Efficient LLM Training Paradigm for CTR Prediction | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.01001) |\n| LLMInit: A Free Lunch from Large Language Models for Selective Initialization of Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.01814) |\n| PersonaX: A Recommendation Agent Oriented User Modeling Framework for Long Behavior Sequence | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.02398) |\n| Towards Next-Generation Recommender Systems: A Benchmark for Personalized 
Recommendation Assistant with LLMs | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.09382) |\n| Uncovering Cross-Domain Recommendation Ability of Large Language Models | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.07761) |\n| LLM-Driven Usefulness Labeling for IR Evaluation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.08965) |\n| LREF: A Novel LLM-based Relevance Framework for E-commerce | WWW 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.09223) |\n| Process-Supervised LLM Recommenders via Flow-guided Tuning | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.07377) |\n| Image is All You Need: Towards Efficient and Effective Large Language Model-Based Recommender Systems | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.06238) |\n| Rank-R1: Enhancing Reasoning in LLM-based Document Rerankers via Reinforcement Learning | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.06034) |\n| Federated Cross-Domain Click-Through Rate Prediction With Large Language Model Augmentation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.16875) |\n| BeLightRec: A lightweight recommender system enhanced with BERT | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.20206) |\n| RALLRec+: Retrieval Augmented Large Language Model Recommendation with Reasoning | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.20430) |\n| Alleviating LLM-based Generative Retrieval Hallucination in Alipay Search | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.21098) |\n| RuleAgent: Discovering Rules for Recommendation Denoising with Autonomous Language Agents | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.23374) |\n| CoRanking: Collaborative Ranking with Small and Large Ranking Agents | Arxiv 2025 | 
[[Link]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.23427) |\n| Get the Agents Drunk: Memory Perturbations in Autonomous Agent-based Recommender Systems | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.23804) |\n| Rec-R1: Bridging Generative Large Language Models and User-Centric Recommendation Systems via Reinforcement Learning | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.24289) |\n| LLM-Augmented Graph Neural Recommenders: Integrating User Reviews | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.02195) |\n| Enhancing Embedding Representation Stability in Recommendation Systems with Semantic ID | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.02137) |\n| Retrieval-Augmented Purifier for Robust LLM-Empowered Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.02458) |\n| Pre-training Generative Recommender with Multi-Identifier Item Tokenization | SIGIR 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.04400) |\n| LLM-Alignment Live-Streaming Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.05217) |\n| Decoding Recommendation Behaviors of In-Context Learning LLMs Through Gradient Descent | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.04386) |\n| Automating Personalization: Prompt Optimization for Recommendation Reranking | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.03965) |\n| IterQR: An Iterative Framework for LLM-based Query Rewrite in e-Commercial Search System | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.05309) |\n| Multimodal Quantitative Language for Generative Recommendation | ICLR 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.05314) |\n| Coherency Improved Explainable Recommendation via Large Language Model | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.05315) |\n| 
VALUE: Value-Aware Large Language Model for Query Rewriting via Weighted Trie in Sponsored Search | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.05321) |\n| Large Language Models Enhanced Hyperbolic Space Recommender Systems | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.05694) |\n| Unified Generative Search and Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.05730) |\n| Retrieval Augmented Generation with Collaborative Filtering for Personalized Text Generation | SIGIR 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.05731) |\n| StealthRank: LLM Ranking Manipulation via Stealthy Prompt Optimization | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.05804) |\n| PathGPT: Leveraging Large Language Models for Personalized Route Generation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.05846) |\n| LLM4Ranking: An Easy-to-use Framework of Utilizing Large Language Models for Document Reranking | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.07439) |\n| How Good Are Large Language Models for Course Recommendation in MOOCs? 
| Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.08208) |\n| Large Language Model Empowered Recommendation Meets All-domain Continual Pre-Training | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.08949) |\n| Enhancing LLM-based Recommendation through Semantic-Aligned Collaborative Knowledge | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.10107) |\n| HistLLM: A Unified Framework for LLM-Based Multimodal Recommendation with User History Encoding and Compression | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.10150) |\n| CROSSAN: Towards Efficient and Effective Adaptation of Multiple Multimodal Foundation Models for Sequential Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.10307) |\n| PinRec: Outcome-Conditioned, Multi-Token Generative Retrieval for Industry-Scale Recommendation Systems | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.10507) |\n| Distilling Transitional Pattern to Large Language Models for Multimodal Session-based Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.10538) |\n| Multi-Modal Hypergraph Enhanced LLM Learning for Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.10541) |\n| Rethinking LLM-Based Recommendations: A Query Generation-Based, Training-Free Approach | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.11889) |\n| Generative Recommendation with Continuous-Token Diffusion | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.12007) |\n| From Reviews to Dialogues: Active Synthesis for Zero-Shot LLM-based Conversational Recommender System | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.15476) |\n| Killing Two Birds with One Stone: Unifying Retrieval and Ranking with a Single Generative Recommendation Model | SIGIR 2025 | 
[[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.16454) |\n| Bridge the Domains: Large Language Models Enhanced Cross-domain Sequential Recommendation | SIGIR 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.18383) |\n| Search-Based Interaction For Conversation Recommendation via Generative Reward Model Based Simulated User | SIGIR 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.20458) |\n| Preserving Privacy and Utility in LLM-Based Product Recommendations | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.00951) |\n| Multi-agents based User Values Mining for Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.00981) |\n| Who You Are Matters: Bridging Topics and Social Roles via LLM-Enhanced Logical Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.10940) |\n| Explain What You Mean: Intent Augmented Knowledge Graph Recommender Built With LLM | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.10900) |\n| ThinkRec: Thinking-based recommendation via LLM | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.15091) |\n| DeepRec: Towards a Deep Dive Into the Item Space with Large Language Model Based Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.16810) |\n| Bridging the Gap: Self-Optimized Fine-Tuning for LLM-based Recommender Systems | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.20771) |\n| What LLMs Miss in Recommendations: Bridging the Gap with Retrieval-Augmented Collaborative Signals | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.20730) |\n| Reinforced Latent Reasoning for LLM-based Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.19092) |\n| AgentRecBench: Benchmarking LLM Agent-based Personalized Recommender Systems | Arxiv 2025 | 
[[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.19623) |\n| Reason-to-Recommend: Using Interaction-of-Thought Reasoning to Enhance LLM Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2506.05069) |\n| Generating Long Semantic IDs in Parallel for Recommendation | KDD 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2506.05781) |\n| RecGPT: A Foundation Model for Sequential Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2506.06270) |\n| Serendipitous Recommendation with Multimodal LLM | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2506.08283) |\n| Research on E-Commerce Long-Tail Product Recommendation Mechanism Based on Large-Scale Language Models | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2506.06316) |\n| LettinGo: Explore User Profile Generation for Recommendation System | Arxiv 2025| [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2506.18309) |\n| CORONA: A Coarse-to-Fine Framework for Graph-based Recommendation with Large Language Models | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2506.17281) |\n| CoVE: Compressed Vocabulary Expansion Makes Better LLM-based Recommender Systems | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2506.19993) |\n| LLM2Rec: Large Language Models Are Powerful Embedding Models for Sequential Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2506.21579) |\n| Reinforcement Fine-Tuned Large Language Models for Next POI Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2506.21599) |\n| IRanker: Towards Ranking Foundation Model | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2506.21638) |\n| LLM2Rec: Large Language Models Are Powerful Embedding Models for Sequential Recommendation | KDD 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2506.21579) |\n| FindRec: Stein-Guided Entropic Flow for 
Multi-Modal Sequential Recommendation | KDD 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.04651) |\n| Heterogeneous User Modeling for LLM-based Recommendation | RecSys 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.04626) |\n| BiFair: A Fairness-aware Training Framework for LLM-enhanced Recommender Systems via Bi-level Optimization | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.04294) |\n| CTR-Guided Generative Query Suggestion in Conversational Search | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.04072) |\n| Boosting Parameter Efficiency in LLM-Based Recommendation through Sophisticated Pruning | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.07064) |\n| A Language-Driven Framework for Improving Personalized Recommendations: Merging LLMs with Traditional Algorithms | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.07251) |\n| LLM-Driven Dual-Level Multi-Interest Modeling for Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.10917) |\n| Revisiting Prompt Engineering: A Comprehensive Evaluation for LLM-based Personalized Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.13525) |\n| R4ec: A Reasoning, Reflection, and Refinement Framework for Recommendation Systems | RecSys 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.17249) |\n| Exploring the Potential of LLMs for Serendipity Evaluation in Recommender Systems | RecSys 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.17290) |\n| Improving the Performance of Sequential Recommendation Systems with an Extended Large Language Model | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.19990) |\n| Integrating LLM-Derived Multi-Semantic Intent into Graph Model for Session-based Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.20147) |\n| A 
Comprehensive Review on Harnessing Large Language Models to Overcome Recommender System Challenges | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2507.21117) |\n| End-to-End Personalization: Unifying Recommender Systems with Large Language Models | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2508.01514) |\n| Temporal User Profiling with LLMs: Balancing Short-Term and Long-Term Preferences for Recommendations | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2508.08454) |\n| LLM-Based Intelligent Agents for Music Recommendation: A Comparison with Classical Content-Based Filtering | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2508.11671) |\n| AdaptJobRec: Enhancing Conversational Career Recommendation through an LLM-Powered Agentic System | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2508.13423) |\n| LLM-Enhanced Linear Autoencoders for Recommendation | CIKM 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2508.13500) |\n| TrackRec: Iterative Alternating Feedback with Chain-of-Thought via Preference Alignment for Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2508.15388) |\n| MMQ: Multimodal Mixture-of-Quantization Tokenization for Semantic ID Generation and User Behavioral Adaptation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2508.15281) |\n| MLLMRec: Exploring the Potential of Multimodal Large Language Models in Recommender Systems | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2508.15304) |\n| Membership Inference Attacks on LLM-based Recommender Systems | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2508.18665) |\n| Revealing Potential Biases in LLM-Based Recommender Systems in the Cold Start Setting | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.20401) |\n| Efficient Item ID Generation for Large-Scale LLM-based Recommendation | Arxiv 2025 | 
[[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2509.03746) |\n| Knowledge-Augmented Relation Learning for Complementary Recommendation with Large Language Models | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2509.05564) |\n| Decoding in Latent Spaces for Efficient Inference in LLM-based Recommendation | EMNLP 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2509.11524) |\n| Learning Decomposed Contextual Token Representations from Pretrained and Collaborative Signals for Generative Recommendation | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2509.10468) |\n| LLM4Rec: Large Language Models for Multimodal Generative Recommendation with Causal Debiasing | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2510.01622) |\n| Empowering Denoising Sequential Recommendation with Large Language Model Embeddings | CIKM 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2510.04239) |\n| GRACE: Generative Representation Learning via Contrastive Policy Optimization | Arxiv 2025 | [[Link]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2510.04506) |\n\n\u003C\u002Fp >\n\u003C\u002Fdetails>\n\n\n\n## 2. 
数据集与基准测试\n\n针对大语言模型相关推荐系统的数据集和基准测试，应保留原始的语义\u002F文本特征，而非使用匿名化的特征ID。\n\n### 2.1 数据集\n\n| **数据集** | **推荐场景** | **链接** |\n|:---:|:---:|:---:|\n| RecSysLLMsP | 社交网络 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.00055) |\n| AmazonQAC | 查询自动补全 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.04129) |\n| NineRec | 9个领域 | [[链接]](https:\u002F\u002Fgithub.com\u002Fwestlake-repl\u002FNineRec) |\n| MicroLens | 视频流媒体 | [[链接]](https:\u002F\u002Fgithub.com\u002Fwestlake-repl\u002FMicroLens?tab=readme-ov-file) |\n| Amazon-Review 2023 | 电商 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.03952) |\n| Reddit-Movie | 对话式 & 电影 | [[链接]](https:\u002F\u002Fgithub.com\u002FAaronHeee\u002FLLMs-as-Zero-Shot-Conversational-RecSys#large-language-models-as-zero-shot-conversational-recommenders) |\n| Amazon-M2 | 电商 | [[链接]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.09688) |\n| MovieLens | 电影 | [[链接]](https:\u002F\u002Fgrouplens.org\u002Fdatasets\u002Fmovielens\u002F1m\u002F) |\n| Amazon | 电商 | [[链接]](https:\u002F\u002Fcseweb.ucsd.edu\u002F~jmcauley\u002Fdatasets.html#amazon_reviews) |\n| BookCrossing | 书籍 | [[链接]](http:\u002F\u002Fwww2.informatik.uni-freiburg.de\u002F~cziegler\u002FBX\u002F) |\n| GoodReads | 书籍 | [[链接]](https:\u002F\u002Fmengtingwan.github.io\u002Fdata\u002Fgoodreads.html) |\n| Anime | 动漫 | [[链接]](https:\u002F\u002Fwww.kaggle.com\u002Fdatasets\u002FCooperUnion\u002Fanime-recommendations-database) |\n| PixelRec | 短视频 | [[链接]](https:\u002F\u002Fgithub.com\u002Fwestlake-repl\u002FPixelRec) |\n| Netflix | 电影 | [[链接]](https:\u002F\u002Fgithub.com\u002FHKUDS\u002FLLMRec) |\n    \n### 2.2 基准测试\n\n| **基准测试** | **网页引用链接** | **论文** |\n|:---:|:---:|:---:|\n| InfoDeepSeek | [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.15872) |\n| RecBench | [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.05493) |\n| RecBench+ | [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.09382) |\n| Shopping MMLU | 
[[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.20745?) |\n| Amazon-M2 (KDD杯2023) | [[链接]](https:\u002F\u002Fwww.aicrowd.com\u002Fchallenges\u002Famazon-kdd-cup-23-multilingual-recommendation-challenge) | [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2307.09688) |\n| LLMRec | [[链接]](https:\u002F\u002Fgithub.com\u002Fwilliamliujl\u002FLLMRec) | [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2308.12241) |\n| OpenP5 | [[链接]](https:\u002F\u002Fgithub.com\u002Fagiresearch\u002FOpenP5) | [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.11134) |\n| TABLET | [[链接]](https:\u002F\u002Fdylanslacks.website\u002FTablet) | [[论文]](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.13188) |\n\n## 3. 相关仓库\n\n| **仓库名称** | **维护者** |\n|:---:|:---:|\n| [rs-llm-paper-list](https:\u002F\u002Fgithub.com\u002Fwwliu555\u002Frs-llm-paper-list) | [wwliu555](https:\u002F\u002Fgithub.com\u002Fwwliu555) |\n| [awesome-recommend-system-pretraining-papers](https:\u002F\u002Fgithub.com\u002Farchersama\u002Fawesome-recommend-system-pretraining-papers) | [archersama](https:\u002F\u002Fgithub.com\u002Farchersama) |\n| [LLM4Rec](https:\u002F\u002Fgithub.com\u002FWLiK\u002FLLM4Rec) | [WLiK](https:\u002F\u002Fgithub.com\u002FWLiK) |\n| [Awesome-LLM4RS-Papers](https:\u002F\u002Fgithub.com\u002Fnancheng58\u002FAwesome-LLM4RS-Papers) | [nancheng58](https:\u002F\u002Fgithub.com\u002Fnancheng58) |\n| [LLM4IR-Survey](https:\u002F\u002Fgithub.com\u002FRUC-NLPIR\u002FLLM4IR-Survey) | [RUC-NLPIR](https:\u002F\u002Fgithub.com\u002FRUC-NLPIR) |\n\n## 贡献\n👍 欢迎为本仓库贡献内容。\n\n如果您发现了相关资源或本仓库中存在错误，请随时提交问题或拉取请求。\n\n**联系方式**: chiangel [点] ljh [at] gmail [点] com\n\n## 引用\n```\n@article{10.1145\u002F3678004,\nauthor = {Lin, Jianghao and Dai, Xinyi and Xi, Yunjia and Liu, Weiwen and Chen, Bo and Zhang, Hao and Liu, Yong and Wu, Chuhan and Li, Xiangyang and Zhu, Chenxu and Guo, Huifeng and Yu, Yong and Tang, Ruiming and Zhang, Weinan},\ntitle = {推荐系统如何受益于大型语言模型：综述},\nyear = {2024},\npublisher = {计算机协会},\naddress = 
{纽约，纽约州，美国},\nissn = {1046-8188},\nurl = {https:\u002F\u002Fdoi.org\u002F10.1145\u002F3678004},\ndoi = {10.1145\u002F3678004},\njournal = {ACM信息系统汇刊},\nmonth = {7月}\n}\n```","# Awesome-LLM-for-RecSys 快速上手指南\n\n**Awesome-LLM-for-RecSys** 并非一个可直接安装运行的单一软件库，而是一个收录了大语言模型（LLM）在推荐系统（RecSys）领域应用的**论文、资源和代码实现的精选列表**。本指南将帮助开发者快速利用该资源找到适合的研究方向及对应的开源代码。\n\n## 1. 环境准备\n\n由于本项目是资源索引，具体的环境要求取决于你选择的某篇特定论文或其关联的代码仓库。但大多数 LLM 推荐系统项目通常具备以下通用前置依赖：\n\n*   **操作系统**: Linux (Ubuntu 20.04+) 或 macOS。Windows 用户建议使用 WSL2。\n*   **Python 版本**: 建议 Python 3.9 或更高版本。\n*   **深度学习框架**: PyTorch (通常为 2.0+) 或 TensorFlow。\n*   **硬件要求**: \n    *   **推理\u002F微调小模型**: 至少 16GB GPU 显存 (如 RTX 3090\u002F4090)。\n    *   **微调大模型 (7B+)**: 建议多卡环境或使用量化技术 (QLoRA)，单卡至少 24GB 显存。\n*   **必备工具**:\n    ```bash\n    pip install git-lfs\n    git lfs install\n    ```\n\n> **提示**: 在克隆具体论文的代码仓库前，请务必阅读该仓库独立的 `README.md` 以获取精确的环境配置。\n\n## 2. 安装步骤（获取资源列表）\n\n要使用此资源列表，只需将其克隆到本地即可浏览和检索。\n\n### 克隆仓库\n```bash\ngit clone https:\u002F\u002Fgithub.com\u002FCHIANGEL\u002FAwesome-LLM-for-RecSys.git\ncd Awesome-LLM-for-RecSys\n```\n\n### 国内加速方案\n如果访问 GitHub 速度较慢，可以使用国内镜像源或加速代理进行克隆：\n\n```bash\n# 使用 Gitee 镜像（如果有同步）或通用加速方式\ngit clone https:\u002F\u002Fghproxy.com\u002Fhttps:\u002F\u002Fgithub.com\u002FCHIANGEL\u002FAwesome-LLM-for-RecSys.git\ncd Awesome-LLM-for-RecSys\n```\n\n### 查看核心内容\n克隆完成后，主要关注以下文件：\n*   `README.md`: 包含分类详细的论文列表（特征工程、特征编码、生成式推荐等）。\n*   `where-framework-1.png`: LLM 在推荐系统流水线中应用位置的架构图。\n\n## 3. 基本使用\n\n本项目的核心用法是**根据需求查找论文，然后跳转至对应代码仓库**。\n\n### 步骤一：确定应用场景\n打开 `README.md`，根据 LLM 在推荐系统中的角色选择章节：\n*   **1.1 LLM for Feature Engineering**: 需要利用 LLM 增强用户\u002F物品特征或生成合成数据（例如：`LLM4KGC`, `TagGPT`）。\n*   **1.2 LLM as Feature Encoder**: 需要利用预训练语言模型提取文本表示（例如：`U-BERT`, `TIGER`）。\n*   **1.3 LLM as Recommender**: 直接使用 LLM 进行端到端推荐生成（通常在后续章节或最新列表中）。\n\n### 步骤二：定位目标论文与代码\n假设你需要一个**基于提示工程（Prompting）的个性化推荐**示例：\n1.  在 `1.1.1 User- and Item-level Feature Augmentation` 表格中找到 **LLM-Rec**。\n2.  点击表格中的 `[[Link]]` 跳转到 Arxiv 论文详情页阅读原理。\n3.  
通常在论文首页或本项目的详细列表中会提供 `Code` 链接（若未直接提供，可在 GitHub 搜索论文标题）。\n\n### 步骤三：运行示例代码（以找到的具体仓库为例）\n一旦进入具体论文的代码仓库（例如 `LLM-Rec`），通用的运行流程如下：\n\n```bash\n# 1. 创建虚拟环境\npython -m venv venv\nsource venv\u002Fbin\u002Factivate  # Windows: venv\\Scripts\\activate\n\n# 2. 安装依赖 (具体文件名视该仓库而定)\npip install -r requirements.txt\n\n# 3. 下载数据集 (通常需手动下载或运行脚本)\n# 示例：许多项目使用 Amazon Review 或 MovieLens 数据集\nmkdir data\n# ... (执行该仓库特定的数据预处理脚本)\n\n# 4. 运行训练或推理\n# 示例命令，具体参数需参考该仓库文档\npython main.py --model_name LLM-Rec --dataset amazon_book --prompt_template default\n```\n\n### 进阶：跟踪最新研究\n该项目维护者每周会在微信公众号更新笔记。\n*   **操作**: 扫描仓库根目录下的 `wechat_for_paper_notes.jpeg` 二维码，获取最新的 LLM+RecSys 论文解读和代码复现技巧。\n*   **最新更新**: 查看 `README.md` 中的 `1.7 Newest Research Work List` 部分，获取尚未归档的最新工作。","某电商平台的算法团队正致力于利用大语言模型（LLM）重构其商品推荐系统，以解决传统模型在理解用户复杂意图和冷启动商品特征提取上的瓶颈。\n\n### 没有 Awesome-LLM-for-RecSys 时\n- **文献检索如大海捞针**：团队成员需在 ArXiv 等平台上手动搜索分散的论文，难以系统性掌握 LLM 在特征工程、排序策略等全链路的应用现状。\n- **技术选型缺乏依据**：面对众多模型（如 LLaMA、ChatGLM）和微调策略（Full Finetuning、LoRA、Prompt Tuning），无法快速对比不同方案在特定场景（如少样本学习）下的优劣。\n- **重复造轮子风险高**：由于缺乏统一的资源索引，团队可能花费数周复现已被证明效果不佳的方法，或忽略了最新的 SOTA（最先进）成果。\n- **跨领域知识融合难**：难以发现如何将知识图谱增强、多模态标签生成等前沿技术与现有推荐架构有效结合的具体案例。\n\n### 使用 Awesome-LLM-for-RecSys 后\n- **一站式获取全景视野**：直接查阅按推荐系统流水线分类的论文列表，迅速定位到“特征增强”或“生成式推荐”等关键领域的最新研究。\n- **精准决策技术路线**：参考列表中详细的\"LLM 骨干网络”与“微调策略”对比表，快速选定适合电商场景的 LoRA 微调方案，大幅缩短验证周期。\n- **紧跟前沿避免滞后**：通过定期更新的“最新研究工作列表”，团队能即时采纳如 TOIS 收录的最新综述观点，确保技术方案始终处于行业领先。\n- **高效落地创新应用**：借鉴列表中如 TagGPT、KAR 等具体项目的实现思路，成功将 LLM 应用于商品标签自动生成和开放世界推荐，显著提升转化率。\n\nAwesome-LLM-for-RecSys 将原本碎片化的学术研究转化为结构化的工程指南，帮助团队从盲目探索转向高效落地，极大加速了下一代智能推荐系统的研发进程。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FCHIANGEL_Awesome-LLM-for-RecSys_2b647181.png","CHIANGEL","Jianghao Lin","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002FCHIANGEL_350dd7ee.jpg","Assistant Professor at 
SJTU",null,"chiangel.ljh@gmail.com","chiangel.github.io","https:\u002F\u002Fgithub.com\u002FCHIANGEL",1527,86,"2026-04-08T13:49:02","MIT",5,"","未说明",{"notes":88,"python":86,"dependencies":89},"该项目是一个论文和资源列表（Awesome List），并非一个可直接运行的软件工具或代码库。README 中列出了多篇研究论文及其使用的不同大模型骨干（如 PaLM, ChatGPT, LLaMA, BERT 等）和微调策略，但未提供统一的安装指南、环境配置或依赖包列表。具体的运行环境需求需参考列表中各篇论文对应的独立代码仓库。",[],[14,35],[92,93,94,95,96,97,98,99],"llm","rs","recsys","large-language-models","recommender-systems","awesome","llm4rec","llm4rs","2026-03-27T02:49:30.150509","2026-04-10T02:45:51.277835",[],[]]