[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-EgoAlpha--prompt-in-context-learning":3,"tool-EgoAlpha--prompt-in-context-learning":61},[4,18,26,36,44,53],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":17},4358,"openclaw","openclaw\u002Fopenclaw","OpenClaw 是一款专为个人打造的本地化 AI 助手，旨在让你在自己的设备上拥有完全可控的智能伙伴。它打破了传统 AI 助手局限于特定网页或应用的束缚，能够直接接入你日常使用的各类通讯渠道，包括微信、WhatsApp、Telegram、Discord、iMessage 等数十种平台。无论你在哪个聊天软件中发送消息，OpenClaw 都能即时响应，甚至支持在 macOS、iOS 和 Android 设备上进行语音交互，并提供实时的画布渲染功能供你操控。\n\n这款工具主要解决了用户对数据隐私、响应速度以及“始终在线”体验的需求。通过将 AI 部署在本地，用户无需依赖云端服务即可享受快速、私密的智能辅助，真正实现了“你的数据，你做主”。其独特的技术亮点在于强大的网关架构，将控制平面与核心助手分离，确保跨平台通信的流畅性与扩展性。\n\nOpenClaw 非常适合希望构建个性化工作流的技术爱好者、开发者，以及注重隐私保护且不愿被单一生态绑定的普通用户。只要具备基础的终端操作能力（支持 macOS、Linux 及 Windows WSL2），即可通过简单的命令行引导完成部署。如果你渴望拥有一个懂你",349277,3,"2026-04-06T06:32:30",[13,14,15,16],"Agent","开发框架","图像","数据工具","ready",{"id":19,"name":20,"github_repo":21,"description_zh":22,"stars":23,"difficulty_score":10,"last_commit_at":24,"category_tags":25,"status":17},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,"2026-04-05T11:01:52",[14,15,13],{"id":27,"name":28,"github_repo":29,"description_zh":30,"stars":31,"difficulty_score":32,"last_commit_at":33,"category_tags":34,"status":17},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",140436,2,"2026-04-05T23:32:43",[14,13,35],"语言模型",{"id":37,"name":38,"github_repo":39,"description_zh":40,"stars":41,"difficulty_score":32,"last_commit_at":42,"category_tags":43,"status":17},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",107662,"2026-04-03T11:11:01",[14,15,13],{"id":45,"name":46,"github_repo":47,"description_zh":48,"stars":49,"difficulty_score":10,"last_commit_at":50,"category_tags":51,"status":17},4292,"Deep-Live-Cam","hacksider\u002FDeep-Live-Cam","Deep-Live-Cam 
是一款专注于实时换脸与视频生成的开源工具，用户仅需一张静态照片，即可通过“一键操作”实现摄像头画面的即时变脸或制作深度伪造视频。它有效解决了传统换脸技术流程繁琐、对硬件配置要求极高以及难以实时预览的痛点，让高质量的数字内容创作变得触手可及。\n\n这款工具不仅适合开发者和技术研究人员探索算法边界，更因其极简的操作逻辑（仅需三步：选脸、选摄像头、启动），广泛适用于普通用户、内容创作者、设计师及直播主播。无论是为了动画角色定制、服装展示模特替换，还是制作趣味短视频和直播互动，Deep-Live-Cam 都能提供流畅的支持。\n\n其核心技术亮点在于强大的实时处理能力，支持口型遮罩（Mouth Mask）以保留使用者原始的嘴部动作，确保表情自然精准；同时具备“人脸映射”功能，可同时对画面中的多个主体应用不同面孔。此外，项目内置了严格的内容安全过滤机制，自动拦截涉及裸露、暴力等不当素材，并倡导用户在获得授权及明确标注的前提下合规使用，体现了技术发展与伦理责任的平衡。",88924,"2026-04-06T03:28:53",[14,15,13,52],"视频",{"id":54,"name":55,"github_repo":56,"description_zh":57,"stars":58,"difficulty_score":32,"last_commit_at":59,"category_tags":60,"status":17},3704,"NextChat","ChatGPTNextWeb\u002FNextChat","NextChat 是一款轻量且极速的 AI 助手，旨在为用户提供流畅、跨平台的大模型交互体验。它完美解决了用户在多设备间切换时难以保持对话连续性，以及面对众多 AI 模型不知如何统一管理的痛点。无论是日常办公、学习辅助还是创意激发，NextChat 都能让用户随时随地通过网页、iOS、Android、Windows、MacOS 或 Linux 端无缝接入智能服务。\n\n这款工具非常适合普通用户、学生、职场人士以及需要私有化部署的企业团队使用。对于开发者而言，它也提供了便捷的自托管方案，支持一键部署到 Vercel 或 Zeabur 等平台。\n\nNextChat 的核心亮点在于其广泛的模型兼容性，原生支持 Claude、DeepSeek、GPT-4 及 Gemini Pro 等主流大模型，让用户在一个界面即可自由切换不同 AI 能力。此外，它还率先支持 MCP（Model Context Protocol）协议，增强了上下文处理能力。针对企业用户，NextChat 提供专业版解决方案，具备品牌定制、细粒度权限控制、内部知识库整合及安全审计等功能，满足公司对数据隐私和个性化管理的高标准要求。",87618,"2026-04-05T07:20:52",[14,35],{"id":62,"github_repo":63,"name":64,"description_en":65,"description_zh":66,"ai_summary_zh":67,"readme_en":68,"readme_zh":69,"quickstart_zh":70,"use_case_zh":71,"hero_image_url":72,"owner_login":73,"owner_name":74,"owner_avatar_url":75,"owner_bio":76,"owner_company":74,"owner_location":77,"owner_email":74,"owner_twitter":74,"owner_website":74,"owner_url":78,"languages":79,"stars":92,"forks":93,"last_commit_at":94,"license":95,"difficulty_score":96,"env_os":97,"env_gpu":98,"env_ram":98,"env_deps":99,"category_tags":102,"github_topics":104,"view_count":32,"oss_zip_url":74,"oss_zip_packed_at":74,"status":17,"created_at":120,"updated_at":121,"faqs":122,"releases":123},4364,"EgoAlpha\u002Fprompt-in-context-learning","prompt-in-context-learning","Awesome resources for in-context learning and prompt engineering: Mastery of the LLMs such as ChatGPT, GPT-3, and FlanT5, with up-to-date and cutting-edge updates. 
","prompt-in-context-learning 是由 EgoAlpha 实验室维护的一份开源资源指南，旨在帮助开发者与研究者在大型语言模型（如 ChatGPT、GPT-3、FlanT5 等）时代掌握“上下文学习”与“提示工程”的核心技能。随着通用人工智能（AGI）的临近，如何高效地与 AI 协作已成为关键议题，这份指南正是为了解决用户在面对海量新技术时不知从何入手、缺乏系统性学习路径的痛点而生。\n\n它不仅仅是一个简单的链接集合，更是一个每日更新的动态知识库。内容涵盖了从基础的提示设计技巧、思维链（Chain of Thought）方法，到前沿的代理（Agent）构建、检索增强生成（RAG）以及多模态提示等深度主题。此外，项目还提供了实用的 Playground 实验环境和基于 LangChain 的快速上手教程，让用户能立即将理论转化为实践。\n\n无论是希望提升工作效率的普通职场人、寻求技术突破的 AI 开发者，还是追踪最新学术进展的研究人员，都能从中找到适合自己的学习模块。其独特的亮点在于将晦涩的学术论文与落地的工程实践紧密结合，并持续收录如\"Step 3.5 Flash\"等最新","prompt-in-context-learning 是由 EgoAlpha 实验室维护的一份开源资源指南，旨在帮助开发者与研究者在大型语言模型（如 ChatGPT、GPT-3、FlanT5 等）时代掌握“上下文学习”与“提示工程”的核心技能。随着通用人工智能（AGI）的临近，如何高效地与 AI 协作已成为关键议题，这份指南正是为了解决用户在面对海量新技术时不知从何入手、缺乏系统性学习路径的痛点而生。\n\n它不仅仅是一个简单的链接集合，更是一个每日更新的动态知识库。内容涵盖了从基础的提示设计技巧、思维链（Chain of Thought）方法，到前沿的代理（Agent）构建、检索增强生成（RAG）以及多模态提示等深度主题。此外，项目还提供了实用的 Playground 实验环境和基于 LangChain 的快速上手教程，让用户能立即将理论转化为实践。\n\n无论是希望提升工作效率的普通职场人、寻求技术突破的 AI 开发者，还是追踪最新学术进展的研究人员，都能从中找到适合自己的学习模块。其独特的亮点在于将晦涩的学术论文与落地的工程实践紧密结合，并持续收录如\"Step 3.5 Flash\"等最新的模型动态，帮助用户在快速迭代的 AI 浪潮中保持敏锐，避免被自动化趋势淘汰，转而成为驾驭 AI 的超级学习者。","\u003Cdiv align=\"center\">\n\n\n\u003Cimg src=\".\u002Ffigures\u002FPrompt-EgoAlpha_white.svg\" width=\"600px\">\n\n \u003Cdiv align=\"center\">\n\n [![Typing SVG](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEgoAlpha_prompt-in-context-learning_readme_2f2ed7549e70.png)]()\n \n \u003C\u002Fdiv>\n\n**An Open-Source Engineering Guide for Prompt-in-context-learning from EgoAlpha Lab.**\n\n\u003Cimg width=\"200%\" src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEgoAlpha_prompt-in-context-learning_readme_6d0edfd614be.gif\" \u002F>\n\n\u003C!-- \u003Ch3 align=\"center\">\n\n    \u003Cp>Resources for prompt learning and engineering; Mastery of LLMs like ChatGPT, GPT3, FlanT5, etc.\u003C\u002Fp>\n\n\u003C\u002Fh3> -->\n\n\u003C!-- \u003Ch4 align=\"center\">\n    \u003Cp>\n        \u003Ca href=\".\u002FREADME.md\">English\u003C\u002Fa> |\n        \u003Ca href=\".\u002Fchatgptprompt_zh.md\">简体中文\u003C\u002Fa>\n    \u003Cp>\n\u003C\u002Fh4> -->\n\n\u003Cp align=\"center\">\n\n  \u003Ca href=\"#📜-papers\">📝 Papers\u003C\u002Fa> |\n  \u003Ca href=\".\u002FPlayground.md\">⚡️  Playground\u003C\u002Fa> |\n  \u003Ca href=\".\u002FPromptEngineering.md\">🛠 Prompt Engineering\u003C\u002Fa> |\n  \u003Ca href=\".\u002Fchatgptprompt.md\">🌍 ChatGPT Prompt\u003C\u002Fa> ｜\n  \u003Ca href=\".\u002Flangchain_guide\u002FLangChainTutorial.ipynb\">⛳ LLMs Usage Guide\u003C\u002Fa> \n\n\u003C\u002Fp>\n\n\u003C\u002Fdiv>\n\n\u003Cdiv align=\"center\">\n\n\u003C!-- ![Build](https:\u002F\u002Fimg.shields.io\u002Fappveyor\u002Fbuild\u002Fgruntjs\u002Fgrunt) -->\n\n![version](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Fversion-v3.0.0-green)\n![Awesome](https:\u002F\u002Fcdn.rawgit.com\u002Fsindresorhus\u002Fawesome\u002Fd7305f38d29fed78fa85652e3a63e154dd8e8829\u002Fmedia\u002Fbadge.svg)\n\n\u003C!-- ![license](https:\u002F\u002Fimg.shields.io\u002Fbower\u002Fl\u002Fbootstrap?style=plastic) -->\n\n\u003C\u002Fdiv>\n\n> **⭐️ Shining ⭐️:** This is fresh, daily-updated resources for in-context learning and prompt engineering. As Artificial General Intelligence (AGI) is approaching, let's take action and become a super learner so as to position ourselves at the forefront of this exciting era and strive for personal and professional greatness.\n\nThe resources include:\n\n*🎉[Papers](#📜-papers)🎉*:  The latest papers about *In-Context Learning*, *Prompt Engineering*, *Agent*, and *Foundation Models*. 
\n\n*🎉[Playground](.\u002FPlayground.md)🎉*:  Large language models（LLMs）that enable prompt experimentation. \n\n*🎉[Prompt Engineering](.\u002FPromptEngineering.md)🎉*: Prompt techniques for leveraging large language models. \n\n*🎉[ChatGPT Prompt](.\u002Fchatgptprompt.md)🎉*: Prompt examples that can be applied in our work and daily lives. \n\n*🎉[LLMs Usage Guide](.\u002Fchatgptprompt.md)🎉*: The method for quickly getting started with large language models by using LangChain.\n\nIn the future, there will likely be two types of people on Earth (perhaps even on Mars, but that's a question for Musk): \n- Those who enhance their abilities through the use of AIGC; \n- Those whose jobs are replaced by AI automation.\n\n```\n\n💎EgoAlpha: Hello! human👤, are you ready?\n\n```  \n\n\u003Cimg width=\"200%\" src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEgoAlpha_prompt-in-context-learning_readme_6d0edfd614be.gif\" \u002F>\n\n# Table of Contents\n- [🔥 AI Spotlight](#-ai-spotlight-trending-research-papers)\n- [📜 Papers](#-papers)\n  - [Survey](#survey)\n  - [Prompt Engineering](#prompt-engineering)\n    - [Prompt Design](#prompt-design)\n    - [Chain of Thought](#chain-of-thought)\n    - [In-context Learning](#in-context-learning)\n    - [Retrieval Augmented Generation](#retrieval-augmented-generation)\n    - [Evaluation \\& Reliability](#evaluation--reliability)\n  - [Agent](#agent)\n  - [Multimodal Prompt](#multimodal-prompt)\n  - [Prompt Application](#prompt-application)\n  - [Foundation Models](#foundation-models)\n- [👨‍💻 LLM Usage](#-llm-usage)\n- [✉️ Contact](#️-contact)\n- [🙏 Acknowledgements](#-acknowledgements)\n\n\u003Cimg width=\"200%\" src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEgoAlpha_prompt-in-context-learning_readme_6d0edfd614be.gif\" \u002F>\n\n# 🔥 AI Spotlight: Trending Research Papers\n\u003C!-- 🔥🔥🔥 -->\n\u003C!-- ☄️ **May 1, 2025** *– Buzzing papers everyone’s talking about* -->\n\n\n\n### **[2026-02-13]**\n\n[**Step 3.5 Flash: Open Frontier-Level Intelligence with 11B Active Parameters**](https:\u002F\u002Fhuggingface.co\u002Fpapers\u002F2602.10604) （**New**）\n\n*Published: 2026-02-11*\n\n\u003Cfont color=\"gray\">Ailin Huang, Ang Li, Aobo Kong, Bin Wang, Binxing Jiao, Bo Dong, Bojun Wang, Boyu Chen, Brian Li, Buyun Ma, Chang Su, Changxin Miao, Changyi Wan, Chao Lou, Chen Hu, Chen Xu, Chenfeng Yu, Chengting Fen - [arXiv]\u003C\u002Ffont>\n\n[![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGitHub%20Stars-1,245-blue)](https:\u002F\u002Fgithub.com\u002Fstepfun-ai\u002FStep-3.5-Flash)\n\n---\n\n\n[**QuantaAlpha: An Evolutionary Framework for LLM-Driven Alpha Mining**](https:\u002F\u002Fhuggingface.co\u002Fpapers\u002F2602.07085) （**New**）\n\n*Published: 2026-02-06*\n\n\u003Cfont color=\"gray\">Jun Han, Shuo Zhang, Wei Li, Zhi Yang, Yifan Dong, Tu Hu, Jialuo Yuan, Xiaomin Yu, Yumo Zhu, Fangqi Lou, Xin Guo, Zhaowei Liu, Tianyi Jiang, Ruichuan An, Jingping Liu, Biao Wu, Rongze Chen, Kunyi Wang - [arXiv]\u003C\u002Ffont>\n\n[![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGitHub%20Stars-248-blue)](https:\u002F\u002Fgithub.com\u002FQuantaAlpha\u002FQuantaAlpha)\n\n---\n\n\n[**UMEM: Unified Memory Extraction and Management Framework for Generalizable Memory**](https:\u002F\u002Fhuggingface.co\u002Fpapers\u002F2602.10652) （**New**）\n\n*Published: 2026-02-11*\n\n\u003Cfont color=\"gray\">Yongshi Ye, Hui Jiang, Feihu Jiang, Tian Lan, Yichao Du, Biao Fu, Xiaodong Shi, Qianghuai Jia, Longyue Wang, Weihua Luo - 
[arXiv]\u003C\u002Ffont>\n\n[![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGitHub%20Stars-247-blue)](https:\u002F\u002Fgithub.com\u002FAIDC-AI\u002FMarco-DeepResearch)\n\n---\n\n\n[**Masked Depth Modeling for Spatial Perception**](https:\u002F\u002Fhuggingface.co\u002Fpapers\u002F2601.17895) （**New**）\n\n*Published: 2026-01-25*\n\n\u003Cfont color=\"gray\">Bin Tan, Changjiang Sun, Xiage Qin, Hanat Adai, Zelin Fu, Tianxiang Zhou, Han Zhang, Yinghao Xu, Xing Zhu, Yujun Shen, Nan Xue - [arXiv]\u003C\u002Ffont>\n\n[![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGitHub%20Stars-862-blue)](https:\u002F\u002Fgithub.com\u002FRobbyant\u002Flingbot-depth) ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-2-9cf)\n\n---\n\n\n[**Idea2Story: An Automated Pipeline for Transforming Research Concepts into Complete Scientific Narratives**](https:\u002F\u002Fhuggingface.co\u002Fpapers\u002F2601.20833) （**New**）\n\n*Published: 2026-01-28*\n\n\u003Cfont color=\"gray\">Tengyue Xu, Zhuoyang Qian, Gaoge Liu, Li Ling, Zhentao Zhang, Biao Wu, Shuo Zhang, Ke Lu, Wei Shi, Ziqi Wang, Zheng Feng, Yan Luo, Shu Xu, Yongjin Chen, Zhibo Feng, Zhuo Chen, Bruce Yuan, Harry Wang,  - [arXiv]\u003C\u002Ffont>\n\n[![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGitHub%20Stars-930-blue)](https:\u002F\u002Fgithub.com\u002FAgentAlphaAGI\u002FIdea2Paper) ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-4-red)\n\n---\n\n\n\n\n[👉 Complete history news 👈](.\u002Fhistorynews.md)\n\n\u003Cimg width=\"200%\" src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEgoAlpha_prompt-in-context-learning_readme_6d0edfd614be.gif\" \u002F>\n\n---\n\n# 📜 Papers\n\n> You can directly click on the title to jump to the corresponding PDF link location\n\n## Survey\n\n\u003Cdiv style=\"line-height:0.2em;\">\n\n\n\u003Cdiv style=\"line-height:0.2em;\">\n\n\n[**Motion meets Attention: Video Motion Prompts**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.03179) （**2024.07.03**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**Towards a Personal Health Large Language Model**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.06474) （**2024.06.10**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-2-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-6-red)\n\n[**Husky: A Unified, Open-Source Language Agent for Multi-Step Reasoning**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.06469) （**2024.06.10**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-14-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-228-blue)](https:\u002F\u002Fgithub.com\u002Fagent-husky\u002Fhusky-v1)\n\n[**Towards Lifelong Learning of Large Language Models: A Survey**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.06391) （**2024.06.10**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-1-green)\n\n[**Towards Semantic Equivalence of Tokenization in Multimodal LLM**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.05127) （**2024.06.07**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-4-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-6-red)\n\n[**LLMs Meet Multimodal Generation and Editing: A Survey**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.19334) （**2024.05.29**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-2-green)  
[![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-206-blue)](https:\u002F\u002Fgithub.com\u002Fyingqinghe\u002Fawesome-llms-meet-multimodal-generation)\n\n[**Tool Learning with Large Language Models: A Survey**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.17935) （**2024.05.28**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-3-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-106-blue)](https:\u002F\u002Fgithub.com\u002Fquchangle1\u002Fllm-tool-survey)\n\n[**When LLMs step into the 3D World: A Meta-Analysis of 3D Tasks via Multi-modal Large Language Models**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.10255) （**2024.05.16**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-1-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-833-blue)](https:\u002F\u002Fgithub.com\u002Factivevisionlab\u002Fawesome-llm-3d)\n\n[**Uncertainty Estimation and Quantification for LLMs: A Simple Supervised Approach**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2404.15993) （**2024.04.24**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-1-green)\n\n[**A Survey on the Memory Mechanism of Large Language Model based Agents**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2404.13501) （**2024.04.21**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-4-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-79-blue)](https:\u002F\u002Fgithub.com\u002Fnuster1128\u002Fllm_agent_memory_survey)\n\n\n\u003C\u002Fdiv>\n\n👉[Complete paper list 🔗 for \"Survey\"](.\u002FPaperList\u002Fsurvey.md)👈\n\n## Prompt Engineering\n\n### Prompt Design\n\n\u003Cdiv style=\"line-height:0.2em;\">\n\n\n\n[**LLaRA: Supercharging Robot Learning Data for Vision-Language Policy**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.20095) （**2024.06.28**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-3-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-77-blue)](https:\u002F\u002Fgithub.com\u002Flostxine\u002Fllara)\n\n[**Dataset Size Recovery from LoRA Weights**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.19395) （**2024.06.27**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**Dual-Phase Accelerated Prompt Optimization**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.13443) （**2024.06.19**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**From RAGs to rich parameters: Probing how language models utilize external knowledge over parametric information for factual queries**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.12824) （**2024.06.18**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-8-red)\n\n[**VoCo-LLaMA: Towards Vision Compression with Large Language Models**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.12275) （**2024.06.18**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-10-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-58-blue)](https:\u002F\u002Fgithub.com\u002FYxxxb\u002FVoCo-LLaMA)\n\n[**LaMDA: Large Model Fine-Tuning via Spectrally Decomposed Low-Dimensional 
Adaptation**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.12832) （**2024.06.18**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**The Impact of Initialization on LoRA Finetuning Dynamics**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.08447) （**2024.06.12**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-3-red)\n\n[**An Empirical Study on Parameter-Efficient Fine-Tuning for MultiModal Large Language Models**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.05130) （**2024.06.07**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-1-red)\n\n[**Cross-Context Backdoor Attacks against Graph Prompt Learning**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.17984) （**2024.05.28**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-1-green)\n\n[**Yuan 2.0-M32: Mixture of Experts with Attention Router**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.17976) （**2024.05.28**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-160-blue)](https:\u002F\u002Fgithub.com\u002Fieit-yuan\u002Fyuan2.0-m32)\n\n\n\u003C\u002Fdiv>\n\n👉[Complete paper list 🔗 for \"Prompt Design\"](.\u002FPaperList\u002FPromptDesignList.md)👈\n\n### Chain of Thought\n\n\u003Cdiv style=\"line-height:0.2em;\">\n\n\n\n[**An Empirical Study on Parameter-Efficient Fine-Tuning for MultiModal Large Language Models**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.05130) （**2024.06.07**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-1-red)\n\n[**Cantor: Inspiring Multimodal Chain-of-Thought of MLLM**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2404.16033) （**2024.04.24**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-1-green)\n\n[**nicolay-r at SemEval-2024 Task 3: Using Flan-T5 for Reasoning Emotion Cause in Conversations with Chain-of-Thought on Emotion States**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.03361) （**2024.04.04**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-5-blue)](https:\u002F\u002Fgithub.com\u002Fnicolay-r\u002Fthor-ecac)\n\n[**Visualization-of-Thought Elicits Spatial Reasoning in Large Language Models**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.03622) （**2024.04.04**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-25-red)\n\n[**Can Small Language Models Help Large Language Models Reason Better?: LM-Guided Chain-of-Thought**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.03414) （**2024.04.04**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-17-red)\n\n[**Visual CoT: Unleashing Chain-of-Thought Reasoning in Multi-Modal Language Models**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.16999) （**2024.03.25**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-16-red)  
[![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-63-blue)](https:\u002F\u002Fgithub.com\u002Fdeepcs233\u002Fvisual-cot)\n\n[**A Chain-of-Thought Prompting Approach with LLMs for Evaluating Students' Formative Assessment Responses in Science**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.14565) （**2024.03.21**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**NavCoT: Boosting LLM-Based Vision-and-Language Navigation via Learning Disentangled Reasoning**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.07376) （**2024.03.12**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-5-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-18-blue)](https:\u002F\u002Fgithub.com\u002Fexpectorlin\u002Fnavcot)\n\n[**ERA-CoT: Improving Chain-of-Thought through Entity Relationship Analysis**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.06932) （**2024.03.11**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-1-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-6-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-27-blue)](https:\u002F\u002Fgithub.com\u002Foceanntwt\u002Fera-cot)\n\n[**Bias-Augmented Consistency Training Reduces Biased Reasoning in Chain-of-Thought**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.05518) （**2024.03.08**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n\n\u003C\u002Fdiv>\n\n👉[Complete paper list 🔗 for \"Chain of Thought\"](.\u002FPaperList\u002FChainofThoughtList.md)👈\n\n### In-context Learning\n\n\u003Cdiv style=\"line-height:0.2em;\">\n\n\n\n[**LaMDA: Large Model Fine-Tuning via Spectrally Decomposed Low-Dimensional Adaptation**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.12832) （**2024.06.18**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**The Impact of Initialization on LoRA Finetuning Dynamics**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.08447) （**2024.06.12**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-3-red)\n\n[**An Empirical Study on Parameter-Efficient Fine-Tuning for MultiModal Large Language Models**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.05130) （**2024.06.07**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-1-red)\n\n[**Leveraging Visual Tokens for Extended Text Contexts in Multi-Modal Learning**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.02547) （**2024.06.04**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-6-blue)](https:\u002F\u002Fgithub.com\u002Fshowlab\u002FVisInContext)\n\n[**Learning to grok: Emergence of in-context learning and skill composition in modular arithmetic tasks**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.02550) （**2024.06.04**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-2-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-3-blue)](https:\u002F\u002Fgithub.com\u002Fablghtianyi\u002FICL_Modular_Arithmetic)\n\n[**Long Context is Not Long at All: A Prospector of Long-Dependency Data for 
Large Language Models**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.17915) （**2024.05.28**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-1-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-39-blue)](https:\u002F\u002Fgithub.com\u002FOctober2001\u002FProLong)\n\n[**Efficient Prompt Tuning by Multi-Space Projection and Prompt Fusion**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.11464) （**2024.05.19**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**MAML-en-LLM: Model Agnostic Meta-Training of LLMs for Improved In-Context Learning**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.11446) （**2024.05.19**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**Improving Diversity of Commonsense Generation by Large Language Models via In-Context Learning**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2404.16807) （**2024.04.25**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-1-green)\n\n[**Stronger Random Baselines for In-Context Learning**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2404.13020) （**2024.04.19**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-1-blue)](https:\u002F\u002Fgithub.com\u002Fgyauney\u002Fmax-random-baseline)\n\n\n\u003C\u002Fdiv>\n\n👉[Complete paper list 🔗 for \"In-context Learning\"](.\u002FPaperList\u002FInContextLearningList.md)👈\n\n### Retrieval Augmented Generation\n\n\u003Cdiv style=\"line-height:0.2em;\">\n\n\n\n[**Retrieval-Augmented Mixture of LoRA Experts for Uploadable Machine Learning**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.16989) （**2024.06.24**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**Enhancing RAG Systems: A Survey of Optimization Strategies for Performance and Scalability**](https:\u002F\u002Fdoi.org\u002F10.55041\u002Fijsrem35402) （**2024.06.04**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**Enhancing Noise Robustness of Retrieval-Augmented Language Models with Adaptive Adversarial Training**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.20978) （**2024.05.31**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-1-green)\n\n[**Accelerating Inference of Retrieval-Augmented Generation via Sparse Context Selection**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.16178) （**2024.05.25**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**DocReLM: Mastering Document Retrieval with Language Model**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.11461) （**2024.05.19**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**UniRAG: Universal Retrieval Augmentation for Multi-Modal Large Language Models**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.10311) （**2024.05.16**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**ChatHuman: Language-driven 3D Human Understanding with Retrieval-Augmented Tool Reasoning**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.04533) （**2024.05.07**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**REASONS: A benchmark for REtrieval and Automated citationS Of scieNtific Sentences using Public and Proprietary LLMs**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.02228) 
（**2024.05.03**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-1-green)\n\n[**Superposition Prompting: Improving and Accelerating Retrieval-Augmented Generation**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.06910) （**2024.04.10**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-14-red)\n\n[**Untangle the KNOT: Interweaving Conflicting Knowledge and Reasoning Skills in Large Language Models**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.03577) （**2024.04.04**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-1-blue)](https:\u002F\u002Fgithub.com\u002Fthu-keg\u002Fknot)\n\n\n\u003C\u002Fdiv>\n\n👉[Complete paper list 🔗 for \"Retrieval Augmented Generation\"](.\u002FPaperList\u002FKnowledgeAugmentedPromptList.md)👈\n\n\n### Evaluation & Reliability\n\n\u003Cdiv style=\"line-height:0.2em;\">\n\n\n\n[**CELLO: Causal Evaluation of Large Vision-Language Models**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.19131) （**2024.06.27**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-4-blue)](https:\u002F\u002Fgithub.com\u002Fopencausalab\u002Fcello)\n\n[**PrExMe! Large Scale Prompt Exploration of Open Source LLMs for Machine Translation and Summarization Evaluation**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.18528) （**2024.06.26**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**Revisiting Referring Expression Comprehension Evaluation in the Era of Large Multimodal Models**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.16866) （**2024.06.24**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-5-blue)](https:\u002F\u002Fgithub.com\u002Fjierunchen\u002Fref-l4)\n\n[**OR-Bench: An Over-Refusal Benchmark for Large Language Models**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.20947) （**2024.05.31**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**TimeChara: Evaluating Point-in-Time Character Hallucination of Role-Playing Large Language Models**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.18027) （**2024.05.28**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**Subtle Biases Need Subtler Measures: Dual Metrics for Evaluating Representative and Affinity Bias in Large Language Models**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.14555) （**2024.05.23**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**HW-GPT-Bench: Hardware-Aware Architecture Benchmark for Language Models**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.10299) （**2024.05.16**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-1-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-9-blue)](https:\u002F\u002Fgithub.com\u002Fautoml\u002Fhw-gpt-bench)\n\n[**Multimodal LLMs Struggle with Basic Visual Network Analysis: a VNA Benchmark**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.06634) （**2024.05.10**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  
[![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-1-blue)](https:\u002F\u002Fgithub.com\u002Fevanup\u002Fvna_benchmark)\n\n[**Vibe-Eval: A hard evaluation suite for measuring progress of multimodal language models**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.02287) （**2024.05.03**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-6-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-139-blue)](https:\u002F\u002Fgithub.com\u002Freka-ai\u002Freka-vibe-eval)\n\n[**Causal Evaluation of Language Models**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.00622) （**2024.05.01**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-1-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-42-blue)](https:\u002F\u002Fgithub.com\u002FOpenCausaLab\u002FCaLM)\n\n\n\u003C\u002Fdiv>\n\n👉[Complete paper list 🔗 for \"Evaluation & Reliability\"](.\u002FPaperList\u002FEvaluationReliabilityList.md)👈\n\n## Agent\n\n\u003Cdiv style=\"line-height:0.2em;\">\n\n\n\n[**Cooperative Multi-Agent Deep Reinforcement Learning Methods for UAV-aided Mobile Edge Computing Networks**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.03280) （**2024.07.03**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**Symbolic Learning Enables Self-Evolving Agents**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.18532) （**2024.06.26**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-8-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-4.8k-blue)](https:\u002F\u002Fgithub.com\u002Faiwaves-cn\u002Fagents)\n\n[**Adversarial Attacks on Multimodal Agents**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.12814) （**2024.06.18**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-1-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-1-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-24-blue)](https:\u002F\u002Fgithub.com\u002Fchenwu98\u002Fagent-attack)\n\n[**DigiRL: Training In-The-Wild Device-Control Agents with Autonomous Reinforcement Learning**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.11896) （**2024.06.14**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-5-red)\n\n[**Transforming Wearable Data into Health Insights using Large Language Model Agents**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.06464) （**2024.06.10**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-1-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-2-red)\n\n[**Neuromorphic dreaming: A pathway to efficient learning in artificial agents**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.15616) （**2024.05.24**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**Fine-Tuning Large Vision-Language Models as Decision-Making Agents via Reinforcement Learning**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.10292) （**2024.05.16**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-3-green)\n\n[**Learning Multi-Agent Communication from Graph Modeling Perspective**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.08550) （**2024.05.14**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-3-green)  
[![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-7-blue)](https:\u002F\u002Fgithub.com\u002Fcharleshsc\u002FCommFormer)\n\n[**Smurfs: Leveraging Multiple Proficiency Agents with Context-Efficiency for Tool Planning**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.05955) （**2024.05.09**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-10-blue)](https:\u002F\u002Fgithub.com\u002Ffreedomintelligence\u002Fsmurfs)\n\n[**Unveiling Disparities in Web Task Handling Between Human and Web Agent**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.04497) （**2024.05.07**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n\n\u003C\u002Fdiv>\n\n👉[Complete paper list 🔗 for \"Agent\"](.\u002FPaperList\u002FAgentList.md)👈\n\n## Multimodal Prompt\n\n\u003Cdiv style=\"line-height:0.2em;\">\n\n\n\n[**InternLM-XComposer-2.5: A Versatile Large Vision Language Model Supporting Long-Contextual Input and Output**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.03320) （**2024.07.03**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-4-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-2.0k-blue)](https:\u002F\u002Fgithub.com\u002Finternlm\u002Finternlm-xcomposer)\n\n[**LLaRA: Supercharging Robot Learning Data for Vision-Language Policy**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.20095) （**2024.06.28**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-3-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-77-blue)](https:\u002F\u002Fgithub.com\u002Flostxine\u002Fllara)\n\n[**Web2Code: A Large-scale Webpage-to-Code Dataset and Evaluation Framework for Multimodal LLMs**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.20098) （**2024.06.28**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-33-blue)](https:\u002F\u002Fgithub.com\u002Fmbzuai-llm\u002Fweb2code)\n\n[**LLaVolta: Efficient Multi-modal Models via Stage-wise Visual Context Compression**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.20092) （**2024.06.28**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-20-blue)](https:\u002F\u002Fgithub.com\u002Fbeckschen\u002Fllavolta)\n\n[**Cambrian-1: A Fully Open, Vision-Centric Exploration of Multimodal LLMs**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.16860) （**2024.06.24**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-67-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-1.4k-blue)](https:\u002F\u002Fgithub.com\u002Fcambrian-mllm\u002Fcambrian)\n\n[**VoCo-LLaMA: Towards Vision Compression with Large Language Models**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.12275) （**2024.06.18**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-10-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-58-blue)](https:\u002F\u002Fgithub.com\u002FYxxxb\u002FVoCo-LLaMA)\n\n[**Beyond 
LLaVA-HD: Diving into High-Resolution Large Multimodal Models**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.08487) （**2024.06.12**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-1-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-11-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-90-blue)](https:\u002F\u002Fgithub.com\u002Fyfzhang114\u002Fslime)\n\n[**An Empirical Study on Parameter-Efficient Fine-Tuning for MultiModal Large Language Models**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.05130) （**2024.06.07**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-1-red)\n\n[**Leveraging Visual Tokens for Extended Text Contexts in Multi-Modal Learning**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.02547) （**2024.06.04**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-6-blue)](https:\u002F\u002Fgithub.com\u002Fshowlab\u002FVisInContext)\n\n[**DeCo: Decoupling Token Compression from Semantic Abstraction in Multimodal Large Language Models**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.20985) （**2024.05.31**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n\u003C\u002Fdiv>\n\n👉[Complete paper list 🔗 for \"Multimodal Prompt\"](.\u002FPaperList\u002Fmultimodalprompt.md)👈\n\n## Prompt Application\n\n\u003Cdiv style=\"line-height:0.2em;\">\n\n\n\n[**IncogniText: Privacy-enhancing Conditional Text Anonymization via LLM-based Private Attribute Randomization**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.02956) （**2024.07.03**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**Web2Code: A Large-scale Webpage-to-Code Dataset and Evaluation Framework for Multimodal LLMs**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.20098) （**2024.06.28**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-33-blue)](https:\u002F\u002Fgithub.com\u002Fmbzuai-llm\u002Fweb2code)\n\n[**OMG-LLaVA: Bridging Image-level, Object-level, Pixel-level Reasoning and Understanding**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.19389) （**2024.06.27**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-2-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-7-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-934-blue)](https:\u002F\u002Fgithub.com\u002Flxtgh\u002Fomg-seg)\n\n[**Adversarial Search Engine Optimization for Large Language Models**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.18382) （**2024.06.26**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**VideoLLM-online: Online Video Large Language Model for Streaming Video**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.11816) （**2024.06.17**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**Regularizing Hidden States Enables Learning Generalized Reward Model for LLMs**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.10216) （**2024.06.14**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**Autoregressive Model Beats Diffusion: Llama for Scalable Image Generation**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.06525) 
（**2024.06.10**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-1-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-43-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-957-blue)](https:\u002F\u002Fgithub.com\u002Ffoundationvision\u002Fllamagen)\n\n[**Language models emulate certain cognitive profiles: An investigation of how predictability measures interact with individual differences**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.04988) （**2024.06.07**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**PaCE: Parsimonious Concept Engineering for Large Language Models**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.04331) （**2024.06.06**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-1-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-15-blue)](https:\u002F\u002Fgithub.com\u002Fpeterljq\u002Fparsimonious-concept-engineering)\n\n[**Yuan 2.0-M32: Mixture of Experts with Attention Router**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.17976) （**2024.05.28**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-160-blue)](https:\u002F\u002Fgithub.com\u002Fieit-yuan\u002Fyuan2.0-m32)\n\n\n\u003C\u002Fdiv>\n\n👉[Complete paper list 🔗 for \"Prompt Application\"](.\u002FPaperList\u002Fpromptapplication.md)👈\n\n## Foundation Models\n\n\u003Cdiv style=\"line-height:0.2em;\">\n\n\n\n[**TheoremLlama: Transforming General-Purpose LLMs into Lean4 Experts**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.03203) （**2024.07.03**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**Pedestrian 3D Shape Understanding for Person Re-Identification via Multi-View Learning**](https:\u002F\u002Fdoi.org\u002F10.1109\u002FTCSVT.2024.3358850) （**2024.07.01**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-3-green)\n\n[**Token Erasure as a Footprint of Implicit Vocabulary Items in LLMs**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.20086) （**2024.06.28**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**OMG-LLaVA: Bridging Image-level, Object-level, Pixel-level Reasoning and Understanding**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.19389) （**2024.06.27**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-2-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-7-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-934-blue)](https:\u002F\u002Fgithub.com\u002Flxtgh\u002Fomg-seg)\n\n[**Fundamental Problems With Model Editing: How Should Rational Belief Revision Work in LLMs?**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.19354) （**2024.06.27**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-2-red)\n\n[**Efficient World Models with Context-Aware Tokenization**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.19320) （**2024.06.27**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-1-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-49-blue)](https:\u002F\u002Fgithub.com\u002Fvmicheli\u002Fdelta-iris)\n\n[**The 
Remarkable Robustness of LLMs: Stages of Inference?**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.19384) （**2024.06.27**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-7-red)\n\n[**ResumeAtlas: Revisiting Resume Classification with Large-Scale Datasets and Large Language Models**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.18125) （**2024.06.26**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-1-blue)](https:\u002F\u002Fgithub.com\u002Fnoran-mohamed\u002FResume-Classification-Dataset)\n\n[**AITTI: Learning Adaptive Inclusive Token for Text-to-Image Generation**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.12805) （**2024.06.18**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-4-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-5-blue)](https:\u002F\u002Fgithub.com\u002Fitsmag11\u002Faitti)\n\n[**Unveiling Encoder-Free Vision-Language Models**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.11832) （**2024.06.17**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-6-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-75-blue)](https:\u002F\u002Fgithub.com\u002Fbaaivision\u002Feve)\n\n\n\u003C\u002Fdiv>\n\n👉[Complete paper list 🔗 for \"Foundation Models\"](.\u002FPaperList\u002Ffoundationmodels.md)👈\n\n\u003C!-- ### 📌 Hard Prompt\u002F Discrete Prompt\n\n\u003Cdiv style=\"line-height:0.2em;\">\n\n\n\n[**Winner-Take-All Column Row Sampling for Memory Efficient Adaptation of Language Model**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.15265) （**2023.05.24**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-4-blue)](https:\u002F\u002Fgithub.com\u002Fzirui-ray-liu\u002Fwtacrs)\n\n[**How to Distill your BERT: An Empirical Study on the Impact of Weight Initialisation and Distillation Objectives**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.15032) （**2023.05.24**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-14-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-9-blue)](https:\u002F\u002Fgithub.com\u002Fmainlp\u002Fhow-to-distill-your-bert)\n\n[**ChatAgri: Exploring Potentials of ChatGPT on Cross-linguistic Agricultural Text Classification**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.15024) （**2023.05.24**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-93-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-38-blue)](https:\u002F\u002Fgithub.com\u002Falbert-jin\u002Fagricultural_textual_classification_chatgpt)\n\n[**Cheap and Quick: Efficient Vision-Language Instruction Tuning for Large Language Models**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.15023) （**2023.05.24**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-63-red)  
[![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-491-blue)](https:\u002F\u002Fgithub.com\u002Fluogen1996\u002Flavin)\n\n[**LLMDet: A Large Language Models Detection Tool**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.15004) （**2023.05.24**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-11-red)\n\n[**OverPrompt: Enhancing ChatGPT Capabilities through an Efficient In-Context Learning Approach**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.14973) （**2023.05.24**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-1-red)\n\n[**Interpretable by Design Visual Question Answering**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.14882) （**2023.05.24**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-4-red)\n\n[**In-Context Demonstration Selection with Cross Entropy Difference**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.14726) （**2023.05.24**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-10-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-3.4k-blue)](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002Flmops)\n\n[**LogicLLM: Exploring Self-supervised Logic-enhanced Training for Large Language Models**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.13718) （**2023.05.23**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-14-red)\n\n[**Enhancing Cross-lingual Natural Language Inference by Soft Prompting with Multilingual Verbalizer**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.12761) （**2023.05.22**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-12-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-2-blue)](https:\u002F\u002Fgithub.com\u002Fthu-bpm\u002Fsoftmv)\n\n\n\u003C\u002Fdiv>\n\n👉[Complete paper list 🔗 for \"Hard Prompt\"](.\u002FPaperList\u002FHardPromptList.md)👈\n\n### 📌 Soft Prompt\u002F Continuous Prompt\n\n\u003Cdiv style=\"line-height:0.2em;\">\n\n\n\n\n\u003C\u002Fdiv>\n\n👉[Complete paper list 🔗 for \"Soft Prompt\"](.\u002FPaperList\u002FSoftPromptList.md)👈 -->\n\n\u003C!-- ## Prompt for Knowledge Graph\n\n\u002F\u002F __PAPER_LIST__:{field:'Prompt Design',size:10,state:'corrected',type:'lite'}\n\n👉[Complete paper list 🔗 for \"Prompt for Knowledge Graph\"](.\u002FPaperList\u002FPromptKnowledgeGraphList.md)👈 --> \n\n\u003Cimg width=\"200%\" src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEgoAlpha_prompt-in-context-learning_readme_6d0edfd614be.gif\" \u002F>\n\n\u003C!-- # 🎓 Citation\n\nIf you find our work helps, please star our project and cite our paper. Thanks a lot!\n\n```\n\n综述论文可以放在这个位置\n\n``` -->\n\n\u003C!-- \u003Cimg width=\"200%\" src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEgoAlpha_prompt-in-context-learning_readme_6d0edfd614be.gif\" \u002F> -->\n\n# 👨‍💻 LLM Usage\nLarge language models (LLMs) are becoming a revolutionary technology that is shaping the development of our era. 
Developers can build applications on top of LLMs that were previously only possible in our imaginations. However, using these LLMs often comes with technical barriers, and even at the introductory stage people may be intimidated by the cutting-edge technology. Do you have questions like the following?\n\n- ❓ *How can an LLM-powered application be built programmatically?* \n- ❓ *How can it be used and deployed in your own programs?* \n\n💡 A tutorial accessible to all audiences, not just computer science professionals, would provide detailed and comprehensive guidance for getting started quickly, so that anyone can use LLMs flexibly and creatively to build the programs they envision. And now, just for you: the most detailed and comprehensive LangChain beginner's guide, sourced from the official LangChain website with further adjustments to the content, accompanied by thoroughly annotated code examples that walk through the code line by line for all audiences.\n\n**Click 👉[here](.\u002Flangchain_guide\u002FLangChainTutorial.ipynb)👈 to take a quick tour of getting started with LLMs.**\n\n\u003Cimg width=\"200%\" src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEgoAlpha_prompt-in-context-learning_readme_6d0edfd614be.gif\" \u002F>\n\n# ✉️ Contact\n\nThis repo is maintained by [EgoAlpha Lab](https:\u002F\u002Fgithub.com\u002FEgoAlpha). Questions and discussions are welcome via `helloegoalpha@gmail.com`.\n\nWe are willing to engage in discussions with friends from the academic and industrial communities and to explore the latest developments in prompt engineering and in-context learning together.\n\n\u003Cimg width=\"200%\" src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEgoAlpha_prompt-in-context-learning_readme_6d0edfd614be.gif\" \u002F>\n\n# 🙏 Acknowledgements\n\nThanks to the PhD students from [EgoAlpha Lab](https:\u002F\u002Fgithub.com\u002FEgoAlpha) and the other contributors who participated in this repo. We will keep improving the project and maintaining this community. We also would like to express our sincere gratitude to the authors of the relevant resources. 
Your efforts have broadened our horizons and enabled us to perceive a more wonderful world.\n\n\n\u003C!-- \u003Cimg width=\"200%\" src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEgoAlpha_prompt-in-context-learning_readme_6d0edfd614be.gif\" \u002F> -->\n\n\u003C!-- # 👨‍👩‍👧‍👦 Contributors\n\n## Main Contributors\n* [Yu Liu]()\n* [Yifei Cao](https:\u002F\u002Fgithub.com\u002Fcyfedu1024)\n* [Jizhe Yu]()\n* [Yuan Yao]()\n* [He Qi]() -->\n\n\n\u003C!-- ## Guest Contributors\n* [No] -->\n\n\u003C!-- \u003Cimg width=\"200%\" src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEgoAlpha_prompt-in-context-learning_readme_6d0edfd614be.gif\" \u002F> -->\n\n\u003C!-- # 📔 License\n\nThis project is open source and available under the MIT\n\n\u003Cdiv align=\"center\">\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEgoAlpha_prompt-in-context-learning_readme_e3d5f5142734.png\"\u002F>\n\u003C\u002Fdiv> -->","\u003Cdiv align=\"center\">\n\n\n\u003Cimg src=\".\u002Ffigures\u002FPrompt-EgoAlpha_white.svg\" width=\"600px\">\n\n \u003Cdiv align=\"center\">\n\n [![Typing SVG](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEgoAlpha_prompt-in-context-learning_readme_2f2ed7549e70.png)]()\n \n \u003C\u002Fdiv>\n\n**EgoAlpha实验室提供的关于上下文提示学习的开源工程指南。**\n\n\u003Cimg width=\"200%\" src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEgoAlpha_prompt-in-context-learning_readme_6d0edfd614be.gif\" \u002F>\n\n\u003C!-- \u003Ch3 align=\"center\">\n\n    \u003Cp>用于提示学习与工程的资源；掌握如ChatGPT、GPT3、FlanT5等大型语言模型。\u003C\u002Fp>\n\n\u003C\u002Fh3> -->\n\n\u003C!-- \u003Ch4 align=\"center\">\n    \u003Cp>\n        \u003Ca href=\".\u002FREADME.md\">英语\u003C\u002Fa> |\n        \u003Ca href=\".\u002Fchatgptprompt_zh.md\">简体中文\u003C\u002Fa>\n    \u003Cp>\n\u003C\u002Fh4> -->\n\n\u003Cp align=\"center\">\n\n  \u003Ca href=\"#📜-papers\">📝 论文\u003C\u002Fa> |\n  \u003Ca href=\".\u002FPlayground.md\">⚡️ 实验平台\u003C\u002Fa> |\n  \u003Ca href=\".\u002FPromptEngineering.md\">🛠 提示工程\u003C\u002Fa> |\n  \u003Ca href=\".\u002Fchatgptprompt.md\">🌍 ChatGPT提示词\u003C\u002Fa> ｜\n  \u003Ca href=\".\u002Flangchain_guide\u002FLangChainTutorial.ipynb\">⛳ LLMs使用指南\u003C\u002Fa> \n\n\u003C\u002Fp>\n\n\u003C\u002Fdiv>\n\n\u003Cdiv align=\"center\">\n\n\u003C!-- ![Build](https:\u002F\u002Fimg.shields.io\u002Fappveyor\u002Fbuild\u002Fgruntjs\u002Fgrunt) -->\n\n![version](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Fversion-v3.0.0-green)\n![Awesome](https:\u002F\u002Fcdn.rawgit.com\u002Fsindresorhus\u002Fawesome\u002Fd7305f38d29fed78fa85652e3a63e154dd8e8829\u002Fmedia\u002Fbadge.svg)\n\n\u003C!-- ![license](https:\u002F\u002Fimg.shields.io\u002Fbower\u002Fl\u002Fbootstrap?style=plastic) -->\n\n\u003C\u002Fdiv>\n\n> **⭐️ 闪耀⭐️：** 这是每日更新的最新资源，专注于上下文学习和提示工程。随着通用人工智能（AGI）的临近，让我们立即行动起来，成为超级学习者，从而站在这一激动人心时代的最前沿，追求个人与职业上的卓越。\n\n资源包括：\n\n*🎉[论文](#📜-papers)🎉*: 关于*上下文学习*、*提示工程*、*智能体*和*基础模型*的最新研究论文。\n\n*🎉[实验平台](.\u002FPlayground.md)🎉*: 支持提示实验的大规模语言模型。\n\n*🎉[提示工程](.\u002FPromptEngineering.md)🎉*: 利用大规模语言模型的提示技巧。\n\n*🎉[ChatGPT提示词](.\u002Fchatgptprompt.md)🎉*: 可应用于工作和日常生活的提示词示例。\n\n*🎉[LLMs使用指南](.\u002Fchatgptprompt.md)🎉*: 使用LangChain快速上手大型语言模型的方法。\n\n未来，地球上可能会出现两类人（也许在火星上也会如此，不过那得问马斯克）：\n- 那些通过AIGC提升自身能力的人；\n- 那些被AI自动化取代工作的人。\n\n```\n\n💎EgoAlpha: 你好！人类👤，你准备好了吗？\n\n```  \n\n\u003Cimg width=\"200%\" src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEgoAlpha_prompt-in-context-learning_readme_6d0edfd614be.gif\" \u002F>\n\n# 目录\n- [🔥 AI聚焦](#-ai-spotlight-trending-research-papers)\n- [📜 论文](#-papers)\n  - [综述](#survey)\n  
- [提示工程](#prompt-engineering)\n    - [提示设计](#prompt-design)\n    - [思维链](#chain-of-thought)\n    - [上下文学习](#in-context-learning)\n    - [检索增强生成](#retrieval-augmented-generation)\n    - [评估与可靠性](#evaluation--reliability)\n  - [智能体](#agent)\n  - [多模态提示](#multimodal-prompt)\n  - [提示应用](#prompt-application)\n  - [基础模型](#foundation-models)\n- [👨‍💻 LLMs使用](#-llm-usage)\n- [✉️ 联系方式](#️-contact)\n- [🙏 致谢](#-acknowledgements)\n\n\u003Cimg width=\"200%\" src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEgoAlpha_prompt-in-context-learning_readme_6d0edfd614be.gif\" \u002F>\n\n# 🔥 AI聚焦：热门研究论文\n\u003C!-- 🔥🔥🔥 -->\n\u003C!-- ☄️ **2025年5月1日** *– 大家都在热议的论文* -->\n\n\n\n### **[2026-02-13]**\n\n[**Step 3.5 Flash：以110亿活跃参数开启前沿级智能**](https:\u002F\u002Fhuggingface.co\u002Fpapers\u002F2602.10604) （**新**）\n\n*发表日期：2026年2月11日*\n\n\u003Cfont color=\"gray\">黄艾琳、李昂、孔阿波、王斌、焦彬星、董博、王博俊、陈博宇、李布莱恩、马步云、苏昌、苗昌欣、万昌义、楼超、胡晨、徐晨、余晨峰、分程婷 - [arXiv]\u003C\u002Ffont>\n\n[![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGitHub%20Stars-1,245-blue)](https:\u002F\u002Fgithub.com\u002Fstepfun-ai\u002FStep-3.5-Flash)\n\n---\n\n\n[**QuantaAlpha：面向可泛化的记忆的进化式框架**](https:\u002F\u002Fhuggingface.co\u002Fpapers\u002F2602.07085) （**新**）\n\n*发表日期：2026年2月6日*\n\n\u003Cfont color=\"gray\">韩军、张硕、李伟、杨志、董一凡、胡图、袁嘉洛、于晓敏、朱宇墨、娄方奇、郭鑫、刘兆威、江天毅、安瑞川、刘京平、吴彪、陈荣泽、王坤义 - [arXiv]\u003C\u002Ffont>\n\n[![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGitHub%20Stars-248-blue)](https:\u002F\u002Fgithub.com\u002FQuantaAlpha\u002FQuantaAlpha)\n\n---\n\n\n[**UMEM：面向通用化记忆的统一内存提取与管理框架**](https:\u002F\u002Fhuggingface.co\u002Fpapers\u002F2602.10652) （**新**）\n\n*发表日期：2026年2月11日*\n\n\u003Cfont color=\"gray\">叶永世、江辉、江飞虎、兰田、杜一超、傅彪、石晓东、贾强怀、王龙跃、罗伟华 - [arXiv]\u003C\u002Ffont>\n\n[![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGitHub%20Stars-247-blue)](https:\u002F\u002Fgithub.com\u002FAIDC-AI\u002FMarco-DeepResearch)\n\n---\n\n\n[**基于掩码深度建模的空间感知**](https:\u002F\u002Fhuggingface.co\u002Fpapers\u002F2601.17895) （**新**）\n\n*发表日期：2026年1月25日*\n\n\u003Cfont color=\"gray\">谭斌、孙长江、秦夏格、阿代哈纳特、富泽林、周天翔、张汉、许英浩、朱兴、沈宇君、薛楠 - [arXiv]\u003C\u002Ffont>\n\n[![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGitHub%20Stars-862-blue)](https:\u002F\u002Fgithub.com\u002FRobbyant\u002Flingbot-depth) ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-2-9cf)\n\n---\n\n\n[**Idea2Story：将科研概念自动转化为完整科学叙事的流水线**](https:\u002F\u002Fhuggingface.co\u002Fpapers\u002F2601.20833) （**新**）\n\n*发表日期：2026年1月28日*\n\n\u003Cfont color=\"gray\">许腾越、钱卓阳、刘高歌、凌莉、张振涛、吴彪、张硕、陆科、史伟、王子琪、冯正、罗燕、徐淑、陈勇进、冯志博、陈卓、袁布鲁斯、王哈里 - [arXiv]\u003C\u002Ffont>\n\n[![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGitHub%20Stars-930-blue)](https:\u002F\u002Fgithub.com\u002FAgentAlphaAGI\u002FIdea2Paper) ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-4-red)\n\n---\n\n\n\n\n[👉 完整历史新闻 👈](.\u002Fhistorynews.md)\n\n\u003Cimg width=\"200%\" src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEgoAlpha_prompt-in-context-learning_readme_6d0edfd614be.gif\" \u002F>\n\n---\n\n# 📜 论文\n\n> 您可以直接点击标题跳转到相应的PDF链接位置\n\n## 调查\n\n\u003Cdiv style=\"line-height:0.2em;\">\n\n\n\u003Cdiv style=\"line-height:0.2em;\">\n\n\n[**运动遇见注意力：视频运动提示**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.03179) （**2024.07.03**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**迈向个人健康大型语言模型**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.06474) （**2024.06.10**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-2-green)  
![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-6-red)\n\n[**哈士奇：用于多步推理的统一开源语言代理**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.06469) （**2024.06.10**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-14-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-228-blue)](https:\u002F\u002Fgithub.com\u002Fagent-husky\u002Fhusky-v1)\n\n[**迈向大型语言模型的终身学习：一项调查**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.06391) （**2024.06.10**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-1-green)\n\n[**迈向多模态LLM中分词的语义等价性**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.05127) （**2024.06.07**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-4-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-6-red)\n\n[**LLMs与多模态生成和编辑：一项调查**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.19334) （**2024.05.29**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-2-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-206-blue)](https:\u002F\u002Fgithub.com\u002Fyingqinghe\u002Fawesome-llms-meet-multimodal-generation)\n\n[**使用大型语言模型进行工具学习：一项调查**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.17935) （**2024.05.28**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-3-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-106-blue)](https:\u002F\u002Fgithub.com\u002Fquchangle1\u002Fllm-tool-survey)\n\n[**当LLMs步入3D世界：通过多模态大型语言模型对3D任务的元分析**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.10255) （**2024.05.16**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-1-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-833-blue)](https:\u002F\u002Fgithub.com\u002Factivevisionlab\u002Fawesome-llm-3d)\n\n[**LLMs的不确定性估计与量化：一种简单的监督方法**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2404.15993) （**2024.04.24**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-1-green)\n\n[**基于大型语言模型的代理记忆机制调查**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2404.13501) （**2024.04.21**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-4-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-79-blue)](https:\u002F\u002Fgithub.com\u002Fnuster1128\u002Fllm_agent_memory_survey)\n\n\n\u003C\u002Fdiv>\n\n👉[“调查”完整论文列表 🔗](.\u002FPaperList\u002Fsurvey.md)👈\n\n## 提示工程\n\n### 提示设计\n\n\u003Cdiv style=\"line-height:0.2em;\">\n\n\n\n[**LLaRA：为视觉-语言策略增强机器人学习数据**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.20095) （**2024.06.28**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-3-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-77-blue)](https:\u002F\u002Fgithub.com\u002Flostxine\u002Fllara)\n\n[**从LoRA权重中恢复数据集大小**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.19395) （**2024.06.27**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**双阶段加速提示优化**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.13443) （**2024.06.19**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**从RAG到丰富参数：探究语言模型如何在事实性查询中利用外部知识而非参数信息**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.12824) （**2024.06.18**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  
![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-8-red)\n\n[**VoCo-LLaMA：迈向利用大型语言模型进行视觉压缩**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.12275) （**2024.06.18**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-10-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-58-blue)](https:\u002F\u002Fgithub.com\u002FYxxxb\u002FVoCo-LLaMA)\n\n[**LaMDA：通过频谱分解的低维适应进行大型模型微调**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.12832) （**2024.06.18**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**初始化对LoRA微调动态的影响**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.08447) （**2024.06.12**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-3-red)\n\n[**关于多模态大型语言模型参数高效微调的实证研究**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.05130) （**2024.06.07**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-1-red)\n\n[**针对图提示学习的跨上下文后门攻击**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.17984) （**2024.05.28**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-1-green)\n\n[**Yuan 2.0-M32：带有注意力路由器的专家混合模型**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.17976) （**2024.05.28**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-160-blue)](https:\u002F\u002Fgithub.com\u002Fieit-yuan\u002Fyuan2.0-m32)\n\n\n\u003C\u002Fdiv>\n\n👉[“提示设计”完整论文列表 🔗](.\u002FPaperList\u002FPromptDesignList.md)👈\n\n### 思维链\n\n\u003Cdiv style=\"line-height:0.2em;\">\n\n\n\n[**多模态大型语言模型的参数高效微调的实证研究**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.05130) （**2024.06.07**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-1-red)\n\n[**Cantor：激发MLLM的多模态思维链**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2404.16033) （**2024.04.24**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-1-green)\n\n[**nicolay-r 在SemEval-2024任务3中的工作：使用Flan-T5结合情绪状态的思维链推理对话中的情感原因**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.03361) （**2024.04.04**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-5-blue)](https:\u002F\u002Fgithub.com\u002Fnicolay-r\u002Fthor-ecac)\n\n[**思维可视化激发大型语言模型的空间推理能力**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.03622) （**2024.04.04**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-25-red)\n\n[**小型语言模型能否帮助大型语言模型更好地进行推理？：LM引导的思维链**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.03414) （**2024.04.04**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-17-red)\n\n[**视觉CoT：释放多模态语言模型中的思维链推理能力**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.16999) （**2024.03.25**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-16-red)  
[![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-63-blue)](https:\u002F\u002Fgithub.com\u002Fdeepcs233\u002Fvisual-cot)\n\n[**利用LLM进行思维链提示的方法，用于评估学生在科学学科中的形成性评价回答**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.14565) （**2024.03.21**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**NavCoT：通过学习解耦推理提升基于LLM的视觉-语言导航能力**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.07376) （**2024.03.12**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-5-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-18-blue)](https:\u002F\u002Fgithub.com\u002Fexpectorlin\u002Fnavcot)\n\n[**ERA-CoT：通过实体关系分析改进思维链**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.06932) （**2024.03.11**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-1-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-6-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-27-blue)](https:\u002F\u002Fgithub.com\u002Foceanntwt\u002Fera-cot)\n\n[**偏见增强的一致性训练可减少思维链中的偏见推理**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2403.05518) （**2024.03.08**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n\n\u003C\u002Fdiv>\n\n👉[“思维链”的完整论文列表 🔗](.\u002FPaperList\u002FChainofThoughtList.md)👈\n\n### 上下文学习\n\n\u003Cdiv style=\"line-height:0.2em;\">\n\n\n\n[**LaMDA：基于谱分解低维适应的大模型微调**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.12832) （**2024.06.18**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**初始化对LoRA微调动态的影响**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.08447) （**2024.06.12**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-3-red)\n\n[**多模态大型语言模型的参数高效微调的实证研究**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.05130) （**2024.06.07**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-1-red)\n\n[**在多模态学习中利用视觉标记扩展文本上下文**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.02547) （**2024.06.04**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-6-blue)](https:\u002F\u002Fgithub.com\u002Fshowlab\u002FVisInContext)\n\n[**学会领悟：模块化算术任务中上下文学习与技能组合的涌现**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.02550) （**2024.06.04**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-2-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-3-blue)](https:\u002F\u002Fgithub.com\u002Fablghtianyi\u002FICL_Modular_Arithmetic)\n\n[**长上下文其实并不长：大型语言模型的长依赖数据勘探者**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.17915) （**2024.05.28**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-1-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-39-blue)](https:\u002F\u002Fgithub.com\u002FOctober2001\u002FProLong)\n\n[**通过多空间投影与提示融合实现高效的提示调优**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.11464) （**2024.05.19**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**MAML-en-LLM：面向改进上下文学习的LLM无模型元训练**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.11446) 
（**2024.05.19**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**通过上下文学习提升大型语言模型常识生成的多样性**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2404.16807) （**2024.04.25**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-1-green)\n\n[**更强的上下文学习随机基线**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2404.13020) （**2024.04.19**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-1-blue)](https:\u002F\u002Fgithub.com\u002Fgyauney\u002Fmax-random-baseline)\n\n\n\u003C\u002Fdiv>\n\n👉[“上下文学习”的完整论文列表 🔗](.\u002FPaperList\u002FInContextLearningList.md)👈\n\n### 检索增强生成\n\n\u003Cdiv style=\"line-height:0.2em;\">\n\n\n\n[**用于可上传机器学习的检索增强LoRA专家混合模型**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.16989) （**2024.06.24**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**增强RAG系统：性能与可扩展性的优化策略综述**](https:\u002F\u002Fdoi.org\u002F10.55041\u002Fijsrem35402) （**2024.06.04**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**通过自适应对抗训练提升检索增强语言模型的噪声鲁棒性**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.20978) （**2024.05.31**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-1-green)\n\n[**基于稀疏上下文选择加速检索增强生成的推理**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.16178) （**2024.05.25**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**DocReLM：利用语言模型掌握文档检索**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.11461) （**2024.05.19**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**UniRAG：面向多模态大型语言模型的通用检索增强**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.10311) （**2024.05.16**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**ChatHuman：基于检索增强工具推理的语言驱动三维人体理解**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.04533) （**2024.05.07**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**REASONS：使用公共和专有LLM对科学语句进行检索与自动引用的基准测试**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.02228) （**2024.05.03**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-1-green)\n\n[**叠加提示：改进并加速检索增强生成**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.06910) （**2024.04.10**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-14-red)\n\n[**解开KNOT：在大型语言模型中交织矛盾的知识与推理能力**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.03577) （**2024.04.04**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-1-blue)](https:\u002F\u002Fgithub.com\u002Fthu-keg\u002Fknot)\n\n\n\u003C\u002Fdiv>\n\n👉[“检索增强生成”完整论文列表 🔗](.\u002FPaperList\u002FKnowledgeAugmentedPromptList.md)👈\n\n\n### 评估与可靠性\n\n\u003Cdiv style=\"line-height:0.2em;\">\n\n\n\n[**CELLO：大型视觉-语言模型的因果评估**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.19131) （**2024.06.27**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-4-blue)](https:\u002F\u002Fgithub.com\u002Fopencausalab\u002Fcello)\n\n[**PrExMe！开源LLM的大规模提示探索：用于机器翻译和摘要评估**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.18528) 
（**2024.06.26**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**在大型多模态模型时代重新审视指代表达理解评估**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.16866) （**2024.06.24**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-5-blue)](https:\u002F\u002Fgithub.com\u002Fjierunchen\u002Fref-l4)\n\n[**OR-Bench：大型语言模型的过度拒绝基准测试**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.20947) （**2024.05.31**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**TimeChara：评估角色扮演型大型语言模型的时间点人物幻觉**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.18027) （**2024.05.28**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**细微偏差需要更精细的衡量：用于评估大型语言模型代表性偏差与亲和力偏差的双重指标**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.14555) （**2024.05.23**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**HW-GPT-Bench：面向语言模型的硬件感知架构基准测试**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.10299) （**2024.05.16**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-1-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-9-blue)](https:\u002F\u002Fgithub.com\u002Fautoml\u002Fhw-gpt-bench)\n\n[**多模态LLM在基础视觉网络分析方面表现不佳：VNA基准测试**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.06634) （**2024.05.10**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-1-blue)](https:\u002F\u002Fgithub.com\u002Fevanup\u002Fvna_benchmark)\n\n[**Vibe-Eval：一套用于衡量多模态语言模型进展的严格评估套件**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.02287) （**2024.05.03**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-6-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-139-blue)](https:\u002F\u002Fgithub.com\u002Freka-ai\u002Freka-vibe-eval)\n\n[**语言模型的因果评估**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.00622) （**2024.05.01**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-1-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-42-blue)](https:\u002F\u002Fgithub.com\u002FOpenCausaLab\u002FCaLM)\n\n\n\u003C\u002Fdiv>\n\n👉[“评估与可靠性”完整论文列表 🔗](.\u002FPaperList\u002FEvaluationReliabilityList.md)👈\n\n## 代理\n\n\u003Cdiv style=\"line-height:0.2em;\">\n\n\n\n[**用于无人机辅助移动边缘计算网络的协作式多智能体深度强化学习方法**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.03280) （**2024.07.03**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**符号学习赋能自我进化的智能体**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.18532) （**2024.06.26**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-8-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-4.8k-blue)](https:\u002F\u002Fgithub.com\u002Faiwaves-cn\u002Fagents)\n\n[**多模态智能体的对抗攻击**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.12814) （**2024.06.18**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-1-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-1-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-24-blue)](https:\u002F\u002Fgithub.com\u002Fchenwu98\u002Fagent-attack)\n\n[**DigiRL：通过自主强化学习训练野外设备控制智能体**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.11896) 
（**2024.06.14**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-5-red)\n\n[**利用大型语言模型智能体将可穿戴数据转化为健康洞察**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.06464) （**2024.06.10**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-1-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-2-red)\n\n[**神经形态梦境：通往人工智能体高效学习的途径**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.15616) （**2024.05.24**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**通过强化学习将大型视觉-语言模型微调为决策智能体**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.10292) （**2024.05.16**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-3-green)\n\n[**从图建模视角学习多智能体通信**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.08550) （**2024.05.14**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-3-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-7-blue)](https:\u002F\u002Fgithub.com\u002Fcharleshsc\u002FCommFormer)\n\n[**史努比：利用上下文高效的多能力智能体进行工具规划**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.05955) （**2024.05.09**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-10-blue)](https:\u002F\u002Fgithub.com\u002Ffreedomintelligence\u002Fsmurfs)\n\n[**揭示人类与网络智能体在网页任务处理上的差异**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.04497) （**2024.05.07**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n\n\u003C\u002Fdiv>\n\n👉[“代理”完整论文列表 🔗](.\u002FPaperList\u002FAgentList.md)👈\n\n## 多模态提示\n\n\u003Cdiv style=\"line-height:0.2em;\">\n\n\n\n[**InternLM-XComposer-2.5：支持长上下文输入输出的多功能大型视觉语言模型**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.03320) （**2024.07.03**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-4-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-2.0k-blue)](https:\u002F\u002Fgithub.com\u002Finternlm\u002Finternlm-xcomposer)\n\n[**LLaRA：为视觉-语言策略增强机器人学习数据**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.20095) （**2024.06.28**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-3-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-77-blue)](https:\u002F\u002Fgithub.com\u002Flostxine\u002Fllara)\n\n[**Web2Code：面向多模态大模型的大规模网页转代码数据集及评估框架**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.20098) （**2024.06.28**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-33-blue)](https:\u002F\u002Fgithub.com\u002Fmbzuai-llm\u002Fweb2code)\n\n[**LLaVolta：通过分阶段视觉上下文压缩实现高效多模态模型**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.20092) （**2024.06.28**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-20-blue)](https:\u002F\u002Fgithub.com\u002Fbeckschen\u002Fllavolta)\n\n[**寒武纪-1：完全开放、以视觉为中心的多模态大模型探索**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.16860) （**2024.06.24**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-67-red)  
[![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-1.4k-blue)](https:\u002F\u002Fgithub.com\u002Fcambrian-mllm\u002Fcambrian)\n\n[**VoCo-LLaMA：迈向基于大型语言模型的视觉压缩**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.12275) （**2024.06.18**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-10-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-58-blue)](https:\u002F\u002Fgithub.com\u002FYxxxb\u002FVoCo-LLaMA)\n\n[**超越LLaVA-HD：深入高分辨率大型多模态模型**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.08487) （**2024.06.12**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-1-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-11-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-90-blue)](https:\u002F\u002Fgithub.com\u002Fyfzhang114\u002Fslime)\n\n[**关于多模态大型语言模型参数高效微调的实证研究**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.05130) （**2024.06.07**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-1-red)\n\n[**在多模态学习中利用视觉标记扩展文本上下文**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.02547) （**2024.06.04**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-6-blue)](https:\u002F\u002Fgithub.com\u002Fshowlab\u002FVisInContext)\n\n[**DeCo：在多模态大型语言模型中解耦标记压缩与语义抽象**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.20985) （**2024.05.31**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n\u003C\u002Fdiv>\n\n👉[“多模态提示”完整论文列表 🔗](.\u002FPaperList\u002Fmultimodalprompt.md)👈\n\n## 提示应用\n\n\u003Cdiv style=\"line-height:0.2em;\">\n\n\n\n[**IncogniText：基于大模型的隐私保护型条件文本匿名化——通过私有属性随机化实现**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.02956) （**2024年7月3日**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**Web2Code：面向多模态大模型的大规模网页转代码数据集与评估框架**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.20098) （**2024年6月28日**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-33-blue)](https:\u002F\u002Fgithub.com\u002Fmbzuai-llm\u002Fweb2code)\n\n[**OMG-LLaVA：打通图像级、目标级与像素级的推理与理解**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.19389) （**2024年6月27日**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-2-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-7-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-934-blue)](https:\u002F\u002Fgithub.com\u002Flxtgh\u002Fomg-seg)\n\n[**针对大型语言模型的对抗性搜索引擎优化**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.18382) （**2024年6月26日**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**VideoLLM-online：用于流式视频的在线视频大语言模型**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.11816) （**2024年6月17日**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**对隐藏状态进行正则化可使大语言模型学习到通用奖励模型**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.10216) （**2024年6月14日**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**自回归模型胜过扩散模型：Llama用于可扩展的图像生成**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.06525) （**2024年6月10日**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-1-green)  
![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-43-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-957-blue)](https:\u002F\u002Fgithub.com\u002Ffoundationvision\u002Fllamagen)\n\n[**语言模型可模拟特定的认知特征：探究预测性指标如何与个体差异相互作用**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.04988) （**2024年6月7日**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**PaCE：面向大型语言模型的简约概念工程**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.04331) （**2024年6月6日**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-1-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-15-blue)](https:\u002F\u002Fgithub.com\u002Fpeterljq\u002Fparsimonious-concept-engineering)\n\n[**Yuan 2.0-M32：带有注意力路由机制的专家混合模型**](https:\u002F\u002Fdoi.org\u002F10.48550\u002FarXiv.2405.17976) （**2024年5月28日**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-160-blue)](https:\u002F\u002Fgithub.com\u002Fieit-yuan\u002Fyuan2.0-m32)\n\n\n\u003C\u002Fdiv>\n\n👉[“提示应用”的完整论文列表 🔗](.\u002FPaperList\u002Fpromptapplication.md)👈\n\n## 基础模型\n\n\u003Cdiv style=\"line-height:0.2em;\">\n\n\n\n[**TheoremLlama：将通用大语言模型转化为Lean4专家**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.03203) （**2024.07.03**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**基于多视角学习的行人三维形状理解用于人员重识别**](https:\u002F\u002Fdoi.org\u002F10.1109\u002FTCSVT.2024.3358850) （**2024.07.01**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-3-green)\n\n[**令牌擦除作为大语言模型中隐式词汇项的足迹**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.20086) （**2024.06.28**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)\n\n[**OMG-LLaVA：连接图像级、物体级和像素级推理与理解**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.19389) （**2024.06.27**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-2-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-7-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-934-blue)](https:\u002F\u002Fgithub.com\u002Flxtgh\u002Fomg-seg)\n\n[**模型编辑的基本问题：大语言模型中的理性信念修正应如何运作？**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.19354) （**2024.06.27**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-2-red)\n\n[**具有上下文感知分词的高效世界模型**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.19320) （**2024.06.27**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-1-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-49-blue)](https:\u002F\u002Fgithub.com\u002Fvmicheli\u002Fdelta-iris)\n\n[**大语言模型的惊人鲁棒性：推理阶段吗？**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.19384) （**2024.06.27**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-7-red)\n\n[**ResumeAtlas：利用大规模数据集和大型语言模型重新审视简历分类**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.18125) （**2024.06.26**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  
[![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-1-blue)](https:\u002F\u002Fgithub.com\u002Fnoran-mohamed\u002FResume-Classification-Dataset)\n\n[**AITTI：学习用于文本到图像生成的自适应包容性标记**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.12805) （**2024.06.18**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-4-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-5-blue)](https:\u002F\u002Fgithub.com\u002Fitsmag11\u002Faitti)\n\n[**揭秘无编码器视觉-语言模型**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.11832) （**2024.06.17**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-6-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-75-blue)](https:\u002F\u002Fgithub.com\u002Fbaaivision\u002Feve)\n\n\n\u003C\u002Fdiv>\n\n👉[“基础模型”完整论文列表🔗](.\u002FPaperList\u002Ffoundationmodels.md)👈\n\n\u003C!-- ### 📌 硬提示\u002F离散提示\n\n\u003Cdiv style=\"line-height:0.2em;\">\n\n\n\n[**赢者通吃列行采样用于语言模型的内存高效适配**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.15265) （**2023.05.24**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-4-blue)](https:\u002F\u002Fgithub.com\u002Fzirui-ray-liu\u002Fwtacrs)\n\n[**如何蒸馏你的BERT：关于权重初始化和蒸馏目标影响的实证研究**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.15032) （**2023.05.24**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-14-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-9-blue)](https:\u002F\u002Fgithub.com\u002Fmainlp\u002Fhow-to-distill-your-bert)\n\n[**ChatAgri：探索ChatGPT在跨语言农业文本分类中的潜力**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.15024) （**2023.05.24**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-93-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-38-blue)](https:\u002F\u002Fgithub.com\u002Falbert-jin\u002Fagricultural_textual_classification_chatgpt)\n\n[**廉价且快速：大型语言模型的高效视觉-语言指令微调**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.15023) （**2023.05.24**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-63-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-491-blue)](https:\u002F\u002Fgithub.com\u002Fluogen1996\u002Flavin)\n\n[**LLMDet：一款大型语言模型检测工具**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.15004) （**2023.05.24**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-11-red)\n\n[**OverPrompt：通过高效的上下文内学习方法增强ChatGPT能力**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.14973) （**2023.05.24**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-1-red)\n\n[**设计之初即具可解释性的视觉问答**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.14882) （**2023.05.24**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-4-red)\n\n[**基于交叉熵差值的上下文示范选择**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.14726) 
（**2023.05.24**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-10-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-3.4k-blue)](https:\u002F\u002Fgithub.com\u002Fmicrosoft\u002Flmops)\n\n[**LogicLLM：探索大型语言模型的自监督逻辑增强训练**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.13718) （**2023.05.23**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-14-red)\n\n[**通过多语言口译器的软提示增强跨语言自然语言推理**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2305.12761) （**2023.05.22**）\n\n![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCitations-0-green)  ![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMendeley%20Readers-12-red)  [![](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FGithub%20Stars-2-blue)](https:\u002F\u002Fgithub.com\u002Fthu-bpm\u002Fsoftmv)\n\n\n\u003C\u002Fdiv>\n\n👉[“硬提示”完整论文列表🔗](.\u002FPaperList\u002FHardPromptList.md)👈\n\n### 📌 软提示\u002F连续提示\n\n\u003Cdiv style=\"line-height:0.2em;\">\n\n\n\n\n\u003C\u002Fdiv>\n\n👉[“软提示”完整论文列表🔗](.\u002FPaperList\u002FSoftPromptList.md)👈 -->\n\n\u003C!-- ## 针对知识图谱的提示\n\n\u002F\u002F __PAPER_LIST__:{field:'Prompt Design',size:10,state:'corrected',type:'lite'}\n\n👉[针对“知识图谱提示”的完整论文列表🔗](.\u002FPaperList\u002FPromptKnowledgeGraphList.md)👈 --> \n\n\u003Cimg width=\"200%\" src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEgoAlpha_prompt-in-context-learning_readme_6d0edfd614be.gif\" \u002F>\n\n\u003C!-- # 🎓 引用\n\n如果您觉得我们的工作有所帮助，请为我们的项目点赞并引用我们的论文。非常感谢！\n\n```\n\n综述论文可以放在这个位置\n\n``` -->\n\n\u003C!-- \u003Cimg width=\"200%\" src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEgoAlpha_prompt-in-context-learning_readme_6d0edfd614be.gif\" \u002F> -->\n\n# 👨‍💻 大型语言模型的使用\n大型语言模型（LLMs）正成为推动我们这个时代发展的革命性技术。通过构建这些模型，开发者能够创造出过去只能存在于想象中的应用。然而，使用这些模型往往伴随着一定的技术门槛，即便是初学者，也可能会被这项前沿技术吓倒：你是否有如下疑问呢？\n\n- ❓ *如何通过编程来构建一个大型语言模型？*\n- ❓ *如何在自己的程序中使用并部署它？*\n\n💡 如果有一份面向所有人群、而不仅仅是计算机专业人士的教程，能够提供详细且全面的指导，帮助大家在短时间内快速上手并熟练操作，最终实现灵活、创造性地运用大型语言模型来构建自己心目中的应用，那该有多好！而现在，这份最详尽、最全面的LangChain入门指南就在这里——内容源自LangChain官方文档，并经过进一步优化调整，同时配有最细致、带注释的代码示例，逐行逐句讲解，适合所有读者。\n\n**点击👉[这里](.\u002Flangchain_guide\u002FLangChainTutorial.ipynb)👈，即可快速开启你的LLM入门之旅。**\n\n\u003Cimg width=\"200%\" src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEgoAlpha_prompt-in-context-learning_readme_6d0edfd614be.gif\" \u002F>\n\n# ✉️ 联系方式\n\n本仓库由[EgoAlpha Lab](https:\u002F\u002Fgithub.com\u002FEgoAlpha)维护。欢迎通过`helloegoalpha@gmail.com`与我们交流讨论。\n\n我们非常乐意与学术界和工业界的朋友们展开对话，共同探索提示工程和上下文学习领域的最新进展。\n\n\u003Cimg width=\"200%\" src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEgoAlpha_prompt-in-context-learning_readme_6d0edfd614be.gif\" \u002F>\n\n# 🙏 致谢\n\n感谢[EgoAlpha Lab](https:\u002F\u002Fgithub.com\u002FEgoAlpha)的博士生们以及其他参与本项目的工作人员。我们将在后续阶段不断完善该项目，并持续维护好这个社区。同时，我们也向相关资源的作者们致以诚挚的谢意。正是你们的努力拓宽了我们的视野，让我们得以感知一个更加精彩的世界。\n\n\n\u003C!-- \u003Cimg width=\"200%\" src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEgoAlpha_prompt-in-context-learning_readme_6d0edfd614be.gif\" \u002F> -->\n\n\u003C!-- # 👨‍👩‍👧‍👦 贡献者\n\n## 主要贡献者\n* [刘宇]()\n* [曹一飞](https:\u002F\u002Fgithub.com\u002Fcyfedu1024)\n* [于继哲]()\n* [姚源]()\n* [齐鹤]() -->\n\n\n\u003C!-- ## 特邀贡献者\n* [无] -->\n\n\u003C!-- \u003Cimg width=\"200%\" src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEgoAlpha_prompt-in-context-learning_readme_6d0edfd614be.gif\" \u002F> -->\n\n\u003C!-- # 📔 许可证\n\n本项目为开源项目，采用MIT许可证。 \n\n\u003Cdiv 
align=\"center\">\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEgoAlpha_prompt-in-context-learning_readme_e3d5f5142734.png\"\u002F>\n\u003C\u002Fdiv> -->","# prompt-in-context-learning 快速上手指南\n\n`prompt-in-context-learning` 是由 EgoAlpha Lab 开源的工程指南资源库，旨在帮助开发者掌握大语言模型（LLM）的上下文学习（In-Context Learning）与提示工程（Prompt Engineering）。本项目主要包含前沿论文汇总、实验游乐场（Playground）、提示工程技术文档及 LangChain 使用指南。\n\n> **注意**：本项目主要为**资源索引与文档集合**，而非单一的 Python 安装包。以下指南将指导你如何获取资源并在本地运行相关的实验代码（如 LangChain 教程）。\n\n## 环境准备\n\n在开始之前，请确保你的开发环境满足以下要求：\n\n*   **操作系统**：Linux, macOS 或 Windows (推荐 WSL2)\n*   **Python 版本**：Python 3.8 或更高版本\n*   **包管理工具**：pip 或 conda\n*   **前置依赖**：\n    *   Git (用于克隆仓库)\n    *   Jupyter Notebook \u002F JupyterLab (用于运行 `.ipynb` 示例代码)\n    *   基础 AI 库：`langchain`, `openai`, `transformers` 等（将在安装步骤中配置）\n\n## 安装步骤\n\n### 1. 克隆项目仓库\n\n首先，将项目代码克隆到本地。国内用户建议使用 Gitee 镜像（如有）或通过代理加速 GitHub 访问。\n\n```bash\ngit clone https:\u002F\u002Fgithub.com\u002FEgoAlpha\u002Fprompt-in-context-learning.git\ncd prompt-in-context-learning\n```\n\n### 2. 创建虚拟环境并安装依赖\n\n为了隔离依赖，建议创建独立的虚拟环境。本项目中的可运行代码主要集中在 `langchain_guide` 目录下。\n\n```bash\n# 创建虚拟环境\npython -m venv venv\n\n# 激活环境\n# Linux\u002FmacOS:\nsource venv\u002Fbin\u002Factivate\n# Windows:\nvenv\\Scripts\\activate\n\n# 安装基础依赖 (根据 langchain_guide 中的具体要求)\npip install langchain openai transformers torch jupyterlab pandas\n```\n\n> **💡 国内加速建议**：\n> 推荐使用清华或阿里镜像源加速 pip 安装：\n> ```bash\n> pip install -i https:\u002F\u002Fpypi.tuna.tsinghua.edu.cn\u002Fsimple langchain openai transformers torch jupyterlab pandas\n> ```\n\n### 3. 配置 API Key\n\n大多数示例（特别是涉及 ChatGPT 或其他商业模型的部分）需要配置 API Key。请在运行前设置环境变量：\n\n```bash\n# Linux\u002FmacOS\nexport OPENAI_API_KEY=\"your-api-key-here\"\n\n# Windows (PowerShell)\n$env:OPENAI_API_KEY=\"your-api-key-here\"\n```\n\n## 基本使用\n\n本项目核心用法是阅读整理的论文资源以及在 Jupyter 中运行实践教程。\n\n### 1. 浏览资源文档\n\n你可以直接在浏览器中打开克隆下来的 Markdown 文件，查看分类整理的论文和提示词技巧：\n\n*   **提示工程技巧**：打开 `PromptEngineering.md`\n*   **ChatGPT 实战案例**：打开 `chatgptprompt.md`\n*   **最新论文列表**：打开 `README.md` 查看 \"AI Spotlight\" 和 \"Papers\" 章节\n\n### 2. 运行 LangChain 入门教程\n\n项目提供了基于 LangChain 的快速上手指南，这是最直接的代码实践方式。\n\n**启动 Jupyter Lab：**\n\n```bash\njupyter lab\n```\n\n**打开示例笔记：**\n\n在 Jupyter 界面中，导航至 `langchain_guide` 文件夹，打开 `LangChainTutorial.ipynb`。\n\n**最简单的代码示例：**\n\n在 Notebook 单元格中，你可以尝试以下基础流程（基于 LangChain 框架）：\n\n```python\nfrom langchain.llms import OpenAI\nfrom langchain.prompts import PromptTemplate\n\n# 初始化 LLM (确保已设置 OPENAI_API_KEY)\nllm = OpenAI(temperature=0.7)\n\n# 定义提示模板\ntemplate = \"\"\"\nYou are an expert in prompt engineering. \nExplain the concept of {concept} in simple terms with an example.\n\"\"\"\nprompt = PromptTemplate(input_variables=[\"concept\"], template=template)\n\n# 生成内容\nchain = prompt | llm\nresponse = chain.invoke({\"concept\": \"In-Context Learning\"})\n\nprint(response)\n```\n\n### 3. 
探索 Playground\n\n访问项目中的 `Playground.md` 文件，其中列出了支持提示词实验的在线大模型平台链接。你可以直接点击链接跳转至相应平台进行零代码的提示词测试。\n\n---\n\n**下一步建议**：\n深入阅读 `PromptEngineering.md` 中的 \"Chain of Thought\" 和 \"Retrieval Augmented Generation\" 章节，结合代码示例进阶学习高级提示技巧。","某电商公司的数据分析师需要每天从成千上万条杂乱的用户评论中提取情感倾向和关键产品问题，并生成结构化报告供产品团队参考。\n\n### 没有 prompt-in-context-learning 时\n- 每次更换分析维度（如从“物流速度”切换到“包装质量”）都需要重新编写复杂的提示词，试错成本极高。\n- 模型经常忽略特定的输出格式要求（如 JSON 结构），导致后续自动化脚本频繁报错，需人工反复清洗数据。\n- 面对新出现的专业术语或网络 slang，模型理解偏差大，且无法通过简单示例快速纠正，只能依赖昂贵的微调训练。\n- 缺乏系统的最佳实践指引，团队成员各自为战，提示词质量参差不齐，难以复用优秀案例。\n\n### 使用 prompt-in-context-learning 后\n- 直接利用库中成熟的 In-Context Learning 论文技巧和模板，仅需在提示词中插入几条典型示例，即可让模型瞬间掌握新的分析维度。\n- 参考 Playground 和 Prompt Engineering 章节中的格式化范例，模型能稳定输出标准 JSON 数据，实现了从提取到报表生成的全链路自动化。\n- 遇到新术语时，运用检索增强生成（RAG）和少样本学习策略，无需重新训练模型，仅靠更新上下文示例即可大幅提升识别准确率。\n- 团队依托该资源库建立了统一的提示词规范，成员可快速复用经过验证的 ChatGPT Prompt 案例，协作效率显著提升。\n\nprompt-in-context-learning 将原本依赖直觉和运气的提示词编写，转化为了一套可复用、可迭代的标准工程方法，让普通开发者也能轻松驾驭大模型的核心能力。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEgoAlpha_prompt-in-context-learning_c3cdd4ed.png","EgoAlpha",null,"https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002FEgoAlpha_89d23931.jpg","AGI: Advancing AI towards human-like intelligence: exploring perception, cognition, and evolution. --- My name is alpha, a robot. Hello world. Are you ready? ","Cambridge","https:\u002F\u002Fgithub.com\u002FEgoAlpha",[80,84,88],{"name":81,"color":82,"percentage":83},"Jupyter Notebook","#DA5B0B",98.6,{"name":85,"color":86,"percentage":87},"HTML","#e34c26",1.4,{"name":89,"color":90,"percentage":91},"CSS","#663399",0,2018,156,"2026-04-06T06:35:56","MIT",1,"","未说明",{"notes":100,"python":98,"dependencies":101},"该项目是一个开源的工程指南和资源列表（Awesome List），主要包含论文链接、提示词工程教程、Playground 指引和 LangChain 使用指南。它不是一个可直接安装运行的软件包或模型库，因此 README 中未提供具体的操作系统、GPU、内存、Python 版本或依赖库等运行环境需求。用户主要是阅读文档、访问外部链接或在自己的环境中参考其提供的代码示例（如 LangChain 教程）。",[],[103,35,14,13],"插件",[105,106,107,108,109,110,111,112,113,114,115,116,117,118,119],"chain-of-thought","chatbot","chatgpt","chatgpt-api","cot","in-context-learning","language-modeling","large-language-model","llm","pre-training","prompt","prompt-based-learning","prompt-engineering","prompt-learning","ai-agent","2026-03-27T02:49:30.150509","2026-04-06T18:54:35.449456",[],[]]
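**补充：少样本上下文学习的示意代码（对应上文评论分析用例）**

下面的 Python 片段演示上文用例提到的做法：在提示词中插入少量输入输出示例（few-shot in-context learning），让模型按固定的 JSON 结构输出评论分析结果。这只是一个基于假设条件的最小示意，并非本仓库自带的实现：模型名 `gpt-4o-mini`、示例评论、字段名（sentiment、aspect、issue）以及函数 `analyze_review` 均为举例用的占位，请按实际业务替换；代码基于 openai 官方 Python SDK（1.x 接口），运行前需 `pip install openai` 并设置 `OPENAI_API_KEY`。

```python
import json

from openai import OpenAI  # openai 官方 SDK 1.x 客户端

client = OpenAI()  # 默认从环境变量 OPENAI_API_KEY 读取密钥

# few-shot 示例：向模型演示期望的输入与 JSON 输出格式（内容为虚构示例）
FEW_SHOT = [
    {"role": "user", "content": "评论：快递三天才到，纸箱还被压扁了"},
    {"role": "assistant", "content": '{"sentiment": "负面", "aspect": "物流速度", "issue": "配送慢且包装受损"}'},
    {"role": "user", "content": "评论：包装很结实，完全没有破损，好评"},
    {"role": "assistant", "content": '{"sentiment": "正面", "aspect": "包装质量", "issue": null}'},
]

def analyze_review(review: str, model: str = "gpt-4o-mini") -> dict:
    """借助 few-shot 上下文示例分析一条评论，返回解析后的 dict（模型名仅为假设值）。"""
    messages = [
        {
            "role": "system",
            "content": "你是电商评论分析助手，只输出一个 JSON 对象，字段为 sentiment、aspect、issue，不要输出其他文字。",
        },
        *FEW_SHOT,
        {"role": "user", "content": f"评论：{review}"},
    ]
    resp = client.chat.completions.create(model=model, messages=messages, temperature=0)
    return json.loads(resp.choices[0].message.content)

if __name__ == "__main__":
    print(analyze_review("客服回复很快，但商品和图片色差很大"))
```

切换分析维度时，只需替换 FEW_SHOT 中的示例，其余代码无需改动，也无需微调模型；遇到新术语时，可按上文提到的检索增强思路，把术语释义作为额外上下文拼入 messages 即可。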