[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-EnnengYang--Awesome-Model-Merging-Methods-Theories-Applications":3,"tool-EnnengYang--Awesome-Model-Merging-Methods-Theories-Applications":61},[4,18,28,37,45,53],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":17},4358,"openclaw","openclaw\u002Fopenclaw","OpenClaw 是一款专为个人打造的本地化 AI 助手，旨在让你在自己的设备上拥有完全可控的智能伙伴。它打破了传统 AI 助手局限于特定网页或应用的束缚，能够直接接入你日常使用的各类通讯渠道，包括微信、WhatsApp、Telegram、Discord、iMessage 等数十种平台。无论你在哪个聊天软件中发送消息，OpenClaw 都能即时响应，甚至支持在 macOS、iOS 和 Android 设备上进行语音交互，并提供实时的画布渲染功能供你操控。\n\n这款工具主要解决了用户对数据隐私、响应速度以及“始终在线”体验的需求。通过将 AI 部署在本地，用户无需依赖云端服务即可享受快速、私密的智能辅助，真正实现了“你的数据，你做主”。其独特的技术亮点在于强大的网关架构，将控制平面与核心助手分离，确保跨平台通信的流畅性与扩展性。\n\nOpenClaw 非常适合希望构建个性化工作流的技术爱好者、开发者，以及注重隐私保护且不愿被单一生态绑定的普通用户。只要具备基础的终端操作能力（支持 macOS、Linux 及 Windows WSL2），即可通过简单的命令行引导完成部署。如果你渴望拥有一个懂你",349277,3,"2026-04-06T06:32:30",[13,14,15,16],"Agent","开发框架","图像","数据工具","ready",{"id":19,"name":20,"github_repo":21,"description_zh":22,"stars":23,"difficulty_score":24,"last_commit_at":25,"category_tags":26,"status":17},9989,"n8n","n8n-io\u002Fn8n","n8n 是一款面向技术团队的公平代码（fair-code）工作流自动化平台，旨在让用户在享受低代码快速构建便利的同时，保留编写自定义代码的灵活性。它主要解决了传统自动化工具要么过于封闭难以扩展、要么完全依赖手写代码效率低下的痛点，帮助用户轻松连接 400 多种应用与服务，实现复杂业务流程的自动化。\n\nn8n 特别适合开发者、工程师以及具备一定技术背景的业务人员使用。其核心亮点在于“按需编码”：既可以通过直观的可视化界面拖拽节点搭建流程，也能随时插入 JavaScript 或 Python 代码、调用 npm 包来处理复杂逻辑。此外，n8n 原生集成了基于 LangChain 的 AI 能力，支持用户利用自有数据和模型构建智能体工作流。在部署方面，n8n 提供极高的自由度，支持完全自托管以保障数据隐私和控制权，也提供云端服务选项。凭借活跃的社区生态和数百个现成模板，n8n 让构建强大且可控的自动化系统变得简单高效。",184740,2,"2026-04-19T23:22:26",[16,14,13,15,27],"插件",{"id":29,"name":30,"github_repo":31,"description_zh":32,"stars":33,"difficulty_score":10,"last_commit_at":34,"category_tags":35,"status":17},10095,"AutoGPT","Significant-Gravitas\u002FAutoGPT","AutoGPT 是一个旨在让每个人都能轻松使用和构建 AI 的强大平台，核心功能是帮助用户创建、部署和管理能够自动执行复杂任务的连续型 AI 智能体。它解决了传统 AI 应用中需要频繁人工干预、难以自动化长流程工作的痛点，让用户只需设定目标，AI 即可自主规划步骤、调用工具并持续运行直至完成任务。\n\n无论是开发者、研究人员，还是希望提升工作效率的普通用户，都能从 AutoGPT 中受益。开发者可利用其低代码界面快速定制专属智能体；研究人员能基于开源架构探索多智能体协作机制；而非技术背景用户也可直接选用预置的智能体模板，立即投入实际工作场景。\n\nAutoGPT 的技术亮点在于其模块化“积木式”工作流设计——用户通过连接功能块即可构建复杂逻辑，每个块负责单一动作，灵活且易于调试。同时，平台支持本地自托管与云端部署两种模式，兼顾数据隐私与使用便捷性。配合完善的文档和一键安装脚本，即使是初次接触的用户也能在几分钟内启动自己的第一个 AI 智能体。AutoGPT 正致力于降低 AI 应用门槛，让人人都能成为 AI 的创造者与受益者。",183572,"2026-04-20T04:47:55",[13,36,27,14,15],"语言模型",{"id":38,"name":39,"github_repo":40,"description_zh":41,"stars":42,"difficulty_score":10,"last_commit_at":43,"category_tags":44,"status":17},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,"2026-04-05T11:01:52",[14,15,13],{"id":46,"name":47,"github_repo":48,"description_zh":49,"stars":50,"difficulty_score":24,"last_commit_at":51,"category_tags":52,"status":17},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",161147,"2026-04-19T23:31:47",[14,13,36],{"id":54,"name":55,"github_repo":56,"description_zh":57,"stars":58,"difficulty_score":24,"last_commit_at":59,"category_tags":60,"status":17},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",109154,"2026-04-18T11:18:24",[14,15,13],{"id":62,"github_repo":63,"name":64,"description_en":65,"description_zh":66,"ai_summary_zh":67,"readme_en":68,"readme_zh":69,"quickstart_zh":70,"use_case_zh":71,"hero_image_url":72,"owner_login":73,"owner_name":74,"owner_avatar_url":75,"owner_bio":74,"owner_company":74,"owner_location":74,"owner_email":74,"owner_twitter":74,"owner_website":76,"owner_url":77,"languages":74,"stars":78,"forks":79,"last_commit_at":80,"license":74,"difficulty_score":81,"env_os":82,"env_gpu":83,"env_ram":83,"env_deps":84,"category_tags":87,"github_topics":88,"view_count":24,"oss_zip_url":74,"oss_zip_packed_at":74,"status":17,"created_at":109,"updated_at":110,"faqs":111,"releases":112},10034,"EnnengYang\u002FAwesome-Model-Merging-Methods-Theories-Applications","Awesome-Model-Merging-Methods-Theories-Applications","Model Merging in LLMs, MLLMs, and Beyond: Methods, Theories, Applications and Opportunities. ACM Computing Surveys, 2026.","Awesome-Model-Merging-Methods-Theories-Applications 是一个专注于大语言模型（LLM）、多模态大模型及更广泛机器学习领域的“模型合并”技术资源库。它系统性地整理了相关的前沿论文、理论方法与应用案例，旨在填补该领域缺乏全面综述的空白。\n\n在人工智能开发中，训练或微调大型模型往往需要昂贵的计算资源和原始数据。模型合并技术提供了一种高效的替代方案：无需重新训练或访问原始数据，仅通过整合多个现有模型的参数，即可创造出性能更强或功能更多样的新模型。本资源库正是为了帮助从业者深入理解并应用这一技术而生。\n\n这里特别适合 AI 研究人员、算法工程师以及对模型优化感兴趣的开发者使用。其独特亮点在于构建了一套全新的分类体系，将合并方法细致划分为“合并前优化”（如权重对齐、子空间微调）、“合并中策略”（如动态路由、基于权重的合并）以及“理论基础分析”等多个维度。此外，资源库还特别标注了那些在 70 亿参数及以上规模模型中进行过实验验证的研究，为用户筛选高价值方案提供了直观参考。无论是希望降低算力成本，还是探索多任务学习、持续学习等应用场景，都能在这里找到系","Awesome-Model-Merging-Methods-Theories-Applications 是一个专注于大语言模型（LLM）、多模态大模型及更广泛机器学习领域的“模型合并”技术资源库。它系统性地整理了相关的前沿论文、理论方法与应用案例，旨在填补该领域缺乏全面综述的空白。\n\n在人工智能开发中，训练或微调大型模型往往需要昂贵的计算资源和原始数据。模型合并技术提供了一种高效的替代方案：无需重新训练或访问原始数据，仅通过整合多个现有模型的参数，即可创造出性能更强或功能更多样的新模型。本资源库正是为了帮助从业者深入理解并应用这一技术而生。\n\n这里特别适合 AI 研究人员、算法工程师以及对模型优化感兴趣的开发者使用。其独特亮点在于构建了一套全新的分类体系，将合并方法细致划分为“合并前优化”（如权重对齐、子空间微调）、“合并中策略”（如动态路由、基于权重的合并）以及“理论基础分析”等多个维度。此外，资源库还特别标注了那些在 70 亿参数及以上规模模型中进行过实验验证的研究，为用户筛选高价值方案提供了直观参考。无论是希望降低算力成本，还是探索多任务学习、持续学习等应用场景，都能在这里找到系统的理论支持与实战指引。","\nA comprehensive list of papers about **'[Model Merging in LLMs, MLLMs, and Beyond: Methods, Theories, Applications and Opportunities. ACM Computing Surveys, 2026.](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.07666)'**.\n\n---\n\n> [!IMPORTANT]\n> Contributions welcome:\n> \n> [Contact us](#contact) or submit a pull request for unlisted relevant papers, content clarifications, or categorization adjustments, and update relevant information once your paper is accepted. Thank you!\n\n---\n\n## 💥 News 💥\n\n- 🔥🔥🔥 Our [survey](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3787849), accepted by ACM Computing Surveys, please [cite](#citation) it or the library if helpful.\n- 🔥🔥🔥 We flagged papers using models of size **$\\geq$ 7B** (or small-sized mainstream LLMs) in their experiments.\n\n---\n\n## Abstract\n>\n> Model merging is an efficient empowerment technique in the machine learning community that does not require the collection of raw training data and does not require expensive computation. As model merging becomes increasingly prevalent across various fields, it is crucial to understand the available model merging techniques comprehensively. However, there is a significant gap in the literature regarding a systematic and thorough review of these techniques. To address this gap, this survey provides a comprehensive overview of model merging methods and theories, their applications in various domains and settings, and future research directions. Specifically, we first propose a new taxonomic approach that exhaustively discusses existing model merging methods. Secondly, we discuss the application of model merging techniques in large language models, multimodal large language models, and 10+ machine learning subfields, including continual learning, multi-task learning, few-shot learning, etc. Finally, we highlight the remaining challenges of model merging and discuss future research directions.\n\n\n\u003Ccenter>\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEnnengYang_Awesome-Model-Merging-Methods-Theories-Applications_readme_e4c2e8fe2346.png\" alt=\"Model Merging\" width=\"800\"\u002F>\n\u003C\u002Fcenter>\n\n## Citation\n\nIf you find our paper or this resource helpful, please consider cite:\n\n```\n@article{yang2026ModelMergingSurvey,\n  author = {Yang, Enneng and Shen, Li and Guo, Guibing and Wang, Xingwei and Cao, Xiaochun and Zhang, Jie and Tao, Dacheng},\n  title = {Model Merging in LLMs, MLLMs, and Beyond: Methods, Theories, Applications, and Opportunities},\n  year = {2026},\n  issue_date = {June 2026},\n  publisher = {Association for Computing Machinery},\n  address = {New York, NY, USA},\n  volume = {58},\n  number = {8},\n  issn = {0360-0300},\n  url = {https:\u002F\u002Fdoi.org\u002F10.1145\u002F3787849},\n  doi = {10.1145\u002F3787849},\n  journal = {ACM Comput. Surv.},\n  month = feb,\n  articleno = {216},\n  numpages = {41}\n}\n```\n\nThanks!\n\n******\n\n## Framework\n\n- [💥 News 💥](#-news-)\n- [Abstract](#abstract)\n- [Citation](#citation)\n- [Framework](#framework)\n- [Survey](#survey)\n- [Benchmark\u002FEvaluation](#benchmarkevaluation)\n- [Advanced Methods](#advanced-methods)\n  - [Pre-Merging Methods](#pre-merging-methods)\n    - [Better Fine-tuning](#better-fine-tuning)\n      - [Linearization Fine-tuning](#linearization-fine-tuning)\n      - [Subspace Fine-tuning](#subspace-fine-tuning)\n      - [Sharpness-aware Fine-tuning](#sharpness-aware-fine-tuning)\n      - [Others](#others)\n    - [Architecture Transformation](#architecture-transformation)\n    - [Weight Alignment](#weight-alignment)\n  - [During Merging Methods](#during-merging-methods)\n    - [Basic Merging Methods](#basic-merging-methods)\n    - [Weighted-based Merging Methods](#weighted-based-merging-methods)\n    - [Subspace-based Merging Method (Sparse or Low-rank Subspace)](#subspace-based-merging-method-sparse-or-low-rank-subspace)\n    - [Routing-based Merging Methods (Dynamic Merging)](#routing-based-merging-methods-dynamic-merging)\n    - [Post-calibration based Methods](#post-calibration-based-methods)\n  - [Other Merging Methods](#other-merging-methods)\n  - [Theories or Analysis of Model Merging](#theories-or-analysis-of-model-merging)\n- [Application of Model Merging in Foundation Models](#application-of-model-merging-in-foundation-models)\n  - [Model Merging in Large Language Models](#model-merging-in-large-language-models)\n    - [Human Preference Alignment for LLMs](#human-preference-alignment-for-llms)\n    - [Detoxification of LLMs](#detoxification-of-llms)\n    - [Knowledge Editing\u002FUnlearning of LLMs](#knowledge-editingunlearning-of-llms)\n    - [Faster Training of LLMs](#faster-training-of-llms)\n    - [Faster Reasoning of LLMs](#faster-reasoning-of-llms)\n    - [Improving Computational Efficiency of MoE-based LLM](#improving-computational-efficiency-of-moe-based-llm)\n    - [Mixing Datasets via Model Merging](#mixing-datasets-via-model-merging)\n    - [LLM Agent Merging](#llm-agent-merging)\n    - [Combine the Capabilities of Expert LLMs](#combine-the-capabilities-of-expert-llms)\n  - [Model Merging in Multimodal Large Language Models](#model-merging-in-multimodal-large-language-models)\n    - [Model Merging for Multimodal Fusion](#model-merging-for-multimodal-fusion)\n    - [Model Merging for Cross-Modal Knowledge Transfer](#model-merging-for-cross-modal-knowledge-transfer)\n    - [Combine the Capabilities of Expert MLLMs](#combine-the-capabilities-of-expert-mllms)\n  - [Model Merging in Image Generative Models](#model-merging-in-image-generative-models)\n    - [Style Mixing in Generative Models](#style-mixing-in-generative-models)\n    - [Reducing Training Cost of Generative Models](#reducing-training-cost-of-generative-models)\n    - [Enhancing the Faithfulness (or Generation Quality) of Diffusion Models](#enhancing-the-faithfulness-or-generation-quality-of-diffusion-models)\n    - [Deepfake Detection](#deepfake-detection)\n  - [Model Merging in Video Generative Models](#model-merging-in-video-generative-models)\n    - [Enhancing Motion Modeling](#enhancing-motion-modeling)\n- [Application of Model Merging in Different Machine Learning Subfields](#application-of-model-merging-in-different-machine-learning-subfields)\n  - [Model Merging in Continual Learning](#model-merging-in-continual-learning)\n    - [Model Merging to Mitigate Catastrophic Forgetting](#model-merging-to-mitigate-catastrophic-forgetting)\n  - [Model Merging in Multi-Task\u002FMulti-Objective\u002FMulti-Domain\u002FAuxiliary Learning](#model-merging-in-multi-taskmulti-objectivemulti-domainauxiliary-learning)\n    - [Model Merging for Knowledge Transfer in Multi-Task Learning](#model-merging-for-knowledge-transfer-in-multi-task-learning)\n    - [Model Merging for Knowledge Transfer in Multi-Objective Optimization](#model-merging-for-knowledge-transfer-in-multi-objective-optimization)\n    - [Model Merging for Knowledge Transfer in Multi-Domain Learning](#model-merging-for-knowledge-transfer-in-multi-domain-learning)\n    - [Model Merging for Knowledge Transfer in Auxiliary Learning](#model-merging-for-knowledge-transfer-in-auxiliary-learning)\n  - [Model Merging in Out-of-Distribution\u002FDomain Generalization](#model-merging-in-out-of-distributiondomain-generalization)\n    - [Model Merging for Better Out-of-Distribution Generalization](#model-merging-for-better-out-of-distribution-generalization)\n    - [Model Merging for Better Domain Generalization or Domain Adaptation](#model-merging-for-better-domain-generalization-or-domain-adaptation)\n  - [Model Merging in Federated Learning](#model-merging-in-federated-learning)\n    - [Model Merging for Local Knowledge Aggregation](#model-merging-for-local-knowledge-aggregation)\n  - [Model Merging in Zero-shot\u002FFew-shot Learning](#model-merging-in-zero-shotfew-shot-learning)\n    - [Model Merging for Cross-task Generalization in Zero-shot Learning](#model-merging-for-cross-task-generalization-in-zero-shot-learning)\n    - [Model Merging for Cross-task Generalization in Few-shot Learning](#model-merging-for-cross-task-generalization-in-few-shot-learning)\n  - [Model Merging in Adversarial Learning](#model-merging-in-adversarial-learning)\n    - [Model Merging as an Attack](#model-merging-as-an-attack)\n    - [Model Merging as a Defense or Intellectual Property Protection](#model-merging-as-a-defense-or-intellectual-property-protection)\n- [Other Applications](#other-applications)\n- [Contact](#contact)\n\n\n\n----------\n\n## Survey\n\n| **Paper Title** | **Year** | **Conference\u002FJournal** |\n| --------------- | :----: | :----: |\n| [Model Merging in the Era of Large Language Models: Methods, Applications, and Future Directions](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.09938)| 2026 | Arxiv |\n| [Scaling Intelligence Through Model Merging: A Comprehensive Survey](https:\u002F\u002Fd197for5662m48.cloudfront.net\u002Fdocuments\u002Fpublicationstatus\u002F290780\u002Fpreprint_pdf\u002F716bd23c7315eead7ee9fd24fa7b4290.pdf)| 2025 | Arxiv |\n| [Democratizing AI Through Model Fusion: A Comprehensive Review and Future Directions](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS295016012500049X)| 2025 | Arxiv |\n| [From Task-Specific Models to Unified Systems: A Review of Model Merging Approaches](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.08998)| 2025 | Arxiv |\n| [SoK: On Finding Common Ground in Loss Landscapes Using Deep Model Merging Techniques](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.12927)| 2024 | Arxiv |\n| [Model Merging in LLMs, MLLMs, and Beyond: Methods, Theories, Applications and Opportunities](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.07666)| 2024 | Arxiv |\n| [A Survey on Model MoErging: Recycling and Routing Among Specialized Experts for Collaborative Learning](https:\u002F\u002Fwww.arxiv.org\u002Fpdf\u002F2408.07057)| 2024 | Arxiv |\n| [Merge, Ensemble, and Cooperate! A Survey on Collaborative Strategies in the Era of Large Language Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.06089)| 2024 | Arxiv |\n| [Learn From Model Beyond Fine-Tuning: A Survey](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.08184)| 2023 | Arxiv |\n| [Deep Model Fusion: A Survey](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2309.15698)| 2023 | Arxiv |\n\n## Benchmark\u002FEvaluation\n\n| **Paper Title** | **Year** | **Conference\u002FJournal** | **Remark** |\n| --------------- | :----: | :----: | :----: |\n| [crdt-merge](https:\u002F\u002Fgithub.com\u002Fmgillr\u002Fcrdt-merge)| 2026 | Github | CRDT-based distributed model merging with formal convergence guarantees. 25 strategies (SLERP, TIES, DARE, Fisher, evolutionary). Two-layer OR-Set architecture enabling conflict-free multi-node merge.\n| [An Empirical Survey of Model Merging Algorithms for Social Bias Mitigation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2512.02689)| 2025 | Arxiv | LLAMA-2-7B, LLAMA-3-8B, LLAMA-3.1-8B, QWEN2-7B\n| [A Systematic Study of Model Merging Techniques in Large Language Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2511.21437)| 2025 | Arxiv | Llama-3.2-3B-Instruct, Llama-3.1-8B-Instruct, Qwen3-4B, Qwen3-8B\n| [FusionBench: A Comprehensive Benchmark of Deep Model Fusion](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.03280)| 2025 | JMLR | Mistral-7B-v0.1, MetaMath-Mistral-7B, dolphin-2.1-mistral-7b, speechless-code-mistral-7b-v1.0\n| [Towards Performance Consistency in Multi-Level Model Collaboration](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2025\u002Fpapers\u002FLi_Towards_Performance_Consistency_in_Multi-Level_Model_Collaboration_ICCV_2025_paper.pdf)| 2025 | ICCV |\n| [Model Merging Scaling Laws in Large Language Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.24244)| 2025 | Arxiv | Qwen2.5 0.5, 1.5, 3, 7, 14, 32, 72B\n| [FBMS: An R Package for Flexible Bayesian Model Selection and Model Averaging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.00753)| 2025 | Arxiv |\n| [Unifying Multimodal Large Language Model Capabilities and Modalities via Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.19892)| 2025 | Arxiv | Qwen2-VL-7B-Base, Vicuna-7B-v1.5 |\n| [MergeBench: A Benchmark for Merging Domain-Specialized LLMs](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.10833)| 2025 | Arxiv |Llama-3.2-3B, Llama3.1-8B, Gemma-2-2B and Gemma-2-9B |\n| [Mergenetic: a Simple Evolutionary Model Merging Library](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.11427)| 2025 | System Demonstrations |Mistral-7B\n| [RobustMerge: Parameter-Efficient Model Merging for MLLMs with Direction Robustness](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.17159)| 2025 | NeurIPS | LLaVA-v1.5-7B\n| [Mix Data or Merge Models? Balancing the Helpfulness, Honesty, and  Harmlessness of Large Language Model via Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.06876v1)| 2025 | Arxiv | Llama-3-8B-Instruct, Mistral-7B-Instruct-v0.2 |\n| [How to Merge Your Multimodal Models Over Time?](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.06712)| 2024 | Arxiv |  \n| [Mix Data or Merge Models? Optimizing for Diverse Multi-Task Learning](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.10801)| 2024 | Arxiv |  Aya 23 8B\n| [A Unified View of Delta Parameter Editing in Post-Trained Large-Scale Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.13841)| 2024 | Arxiv | LLaMA3-8B-Instruct,  Qwen2-7B-Instruct, Mistral-7B-Instruct-v0.3,\n| [Model-GLUE: Democratized LLM Scaling for A Large Model Zoo in the Wild](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.05357) |2024 | NeurIPS Track on Datasets and Benchmarks | Synthia-7B-v1.2, Llama-2-7b-evolcodealpaca, OpenHermes-7B, pygmalion-2-7b, Llama-2-7b-chat-hf, BeingWell_llama2_7b, MetaMath-7B-V1.0, vicuna-7b-v1.5, Platypus2-7B, GOAT-7B-Community, Llama-2-7b-WikiChat-fused, dolphin-llama2-7b, MetaMath-Llemma-7B, CodeLlama-7b-Instruct-hf, Magicoder-S-CL-7B, CrystalChat|\n| [What Matters for Model Merging at Scale?](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.03617)| 2024 | Arxiv | PaLM-2 (1B, 8B, 24B, 64B), PaLM-2-IT (1B, 8B, 24B, 64B)|\n| [Realistic Evaluation of Model Merging for Compositional Generalization](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2409.18314)| 2024 | Arxiv |\n| [Fine-tuning large language models for domain adaptation: Exploration of training strategies, scaling, model merging and synergistic capabilities](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2409.03444) | 2024 | Arxiv |Llama-3.1-8B, Mistral-7B-v0.3|\n| [Arcee's MergeKit: A Toolkit for Merging Large Language Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.13257)| 2024 | Arxiv | Llama2-7B-Chat, Meditron-7B|\n\n## Advanced Methods\n\n\n\u003Ccenter>\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEnnengYang_Awesome-Model-Merging-Methods-Theories-Applications_readme_bca0e6ebfb95.png\" alt=\"Model Merging\" width=\"800\"\u002F>\n\u003C\u002Fcenter>\n\n\n### Pre-Merging Methods\n\n\u003Ccenter>\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEnnengYang_Awesome-Model-Merging-Methods-Theories-Applications_readme_3ce879c270fa.png\" alt=\"Model Merging\" width=\"800\"\u002F>\n\u003C\u002Fcenter>\n\n\n#### Better Fine-tuning\n\n##### Linearization Fine-tuning\n\n| **Paper Title** | **Year** | **Conference\u002FJournal** | **Remark** |\n| --------------- | :----: | :----: | :----: |\n| [Dataless Weight Disentanglement in Task Arithmetic via Kronecker-Factored Approximate Curvature](https:\u002F\u002Fopenreview.net\u002Fpdf?id=32mrjmaeMP) | 2026 | ICLR |\n| [Fine-Tuning Attention Modules Only: Enhancing Weight Disentanglement in Task Arithmetic](https:\u002F\u002Fopenreview.net\u002Fpdf?id=dj0TktJcVI) | 2025 |  ICLR |\n| [Tangent Transformers for Composition,Privacy and Removal](https:\u002F\u002Fopenreview.net\u002Fpdf?id=VLFhbOCz5D) | 2024 |ICLR  |\n| [Parameter Efficient Multi-task Model Fusion with Partial Linearization](https:\u002F\u002Fopenreview.net\u002Fpdf?id=iynRvVVAmH) |  2024 |ICLR  |\n| [Task Arithmetic in the Tangent Space: Improved Editing of Pre-Trained Models](https:\u002F\u002Fopenreview.net\u002Fpdf?id=0A9f2jZDGW) | 2023 | NeurIPS |\n\n\u003C!-- | [Fine-Tuning Linear Layers Only Is a Simple yet Effective Way for Task Arithmetic](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.07089) | 2024 |  Arxiv | -->\n\n\n##### Subspace Fine-tuning\n\n| **Paper Title** | **Year** | **Conference\u002FJournal** | **Remark** |\n| --------------- | :----: | :----: | :----: |\n| [Unraveling LoRA Interference: Orthogonal Subspaces for Robust Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.22934) | 2025 |  Arxiv | Llama3-8B |\n| [Efficient Model Editing with Task-Localized Sparse Fine-tuning](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.02620) | 2025 | ICLR |\n\n##### Sharpness-aware Fine-tuning\n\n| **Paper Title** | **Year** | **Conference\u002FJournal** | **Remark** |\n| --------------- | :----: | :----: | :----: |\n| [Mitigating Parameter Interference in Model Merging via Sharpness-Aware Fine-Tuning](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.14662) | 2025 | ICLR |\n\n##### Others\n| **Paper Title** | **Year** | **Conference\u002FJournal** | **Remark** |\n| --------------- | :----: | :----: | :----: |\n| [MergOPT: A Merge-Aware Optimizer for Robust Model Merging](https:\u002F\u002Fopenreview.net\u002Fforum?id=C21rz8mo65) | 2026 | ICLR | Llama3.1-8B-Instruct\n\n\n#### Architecture Transformation\n\n| **Paper Title** | **Year** | **Conference\u002FJournal** | **Remark** |\n| --------------- | :----: | :----: | :----: |\n| [Model Assembly Learning with Heterogeneous Layer Weight Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.21657)| 2025 | ICLR Workshop |\n| [Training-free Heterogeneous Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2501.00061)| 2025 |Arxiv\n| [Knowledge fusion of large language models](https:\u002F\u002Fopenreview.net\u002Fpdf?id=jiDsk12qcz) | 2024 |  ICLR | Llama-2 7B, OpenLLaMA 7B, MPT 7B |\n| [Knowledge Fusion of Chat LLMs: A Preliminary Technical Report](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.16107) | 2024 |Arxiv  | NH2-Mixtral-8x7B, NH2-Solar-10.7B, and OpenChat-3.5-7B |\n| [On Cross-Layer Alignment for Model Fusion of Heterogeneous Neural Networks](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2110.15538) |2023  |ICASSP   |\n| [GAN Cocktail: mixing GANs without dataset access](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2022\u002Fpapers_ECCV\u002Fpapers\u002F136830207.pdf) | 2022 | ECCV |\n\n#### Weight Alignment\n\n| **Paper Title** | **Year** | **Conference\u002FJournal** | **Remark** |\n| --------------- | :----: | :----: | :----: |\n| [Transport and Merge: Cross-Architecture Merging for Large Language Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.05495) | 2026 |  Arxiv | LLaMA-3 8B\n| [Symmetry-Aware Graph Metanetwork Autoencoders: Model Merging through Parameter Canonicalization](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2511.12601) | 2025 | TAG-DS |\n| [Understanding Mode Connectivity via Parameter Space Symmetry](https:\u002F\u002Fopenreview.net\u002Fpdf?id=E8dMQGsKZv) | 2025 | ICML |\n| [Update Your Transformer to the Latest Release: Re-Basin of Task Vectors](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.22697)| 2025 | ICML  |\n| [Model Assembly Learning with Heterogeneous Layer Weight Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.21657)| 2025 | ICLR Workshop |\n| [Beyond the Permutation Symmetry of Transformers: The Role of Rotation for Model Fusion](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.00264)| 2025 | Arxiv |\n| [The Non-Local Model Merging Problem: Permutation Symmetries and Variance Collapse](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.12766)| 2024 | Arxiv |\n| [Equivariant Deep Weight Space Alignment](https:\u002F\u002Fopenreview.net\u002Fpdf\u002F6d437eeb362255b4b2d75a5c6847880fb4a00e3c.pdf) | 2024 | ICML  |\n| [Harmony in diversity: Merging neural networks with canonical correlation analysis](https:\u002F\u002Fopenreview.net\u002Fpdf?id=XTr8vwAr2D) | 2024 | ICML |\n| [Transformer fusion with optimal transport](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.05719) | 2024 | ICLR  |\n| [Layerwise linear mode connectivity](https:\u002F\u002Fopenreview.net\u002Fpdf?id=LfmZh91tDI) | 2024 | ICLR |\n| [ZipIt! Merging Models from Different Tasks without Training](https:\u002F\u002Fopenreview.net\u002Fpdf?id=LEYUkvdUhq) | 2024 |ICLR  |\n| [Proving linear mode connectivity of neural networks via optimal transport](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.19103) | 2024 | AISTATS |\n| [Training-Free Pretrained Model Merging](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fpapers\u002FXu_Training-Free_Pretrained_Model_Merging_CVPR_2024_paper.pdf) | 2024 |CVPR  |\n| [Merging LoRAs like Playing LEGO: Pushing the Modularity of LoRA to Extremes Through Rank-Wise Clustering](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2409.16167)| 2024 | Arxiv | Llama2-7b, Llama2-13b |\n| [C2M3: Cycle-Consistent Multi Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.17897) | 2024 | NeurIPS |\n| [PLeaS--Merging Models with Permutations and Least Squares](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.02447)| 2024 | Arxiv |\n| [Rethink Model Re-Basin and the Linear Mode Connectivity](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.05966) | 2024 | Arxiv |\n| [Git Re-Basin: Merging Models modulo Permutation Symmetries](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2209.04836) | 2023 | ICLR |\n| [Re-basin via implicit Sinkhorn differentiation](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fpapers\u002FPena_Re-Basin_via_Implicit_Sinkhorn_Differentiation_CVPR_2023_paper.pdf) | 2023 | CVPR |\n| [Plateau in Monotonic Linear Interpolation--A \"Biased\" View of Loss Landscape for Deep Networks](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2210.01019)| 2023 | ICLR |\n| [Linear Mode Connectivity of Deep Neural Networks via Permutation Invariance and Renormalization](https:\u002F\u002Fopenreview.net\u002Fpdf?id=gU5sJ6ZggcX)| 2023 | ICLR |\n| [REPAIR: REnormalizing Permuted Activations for Interpolation Repair](https:\u002F\u002Fopenreview.net\u002Fpdf?id=gU5sJ6ZggcX) |2023  | ICLR |\n| [Going beyond linear mode connectivity: The layerwise linear feature connectivity](https:\u002F\u002Fpapers.nips.cc\u002Fpaper_files\u002Fpaper\u002F2023\u002Ffile\u002Fbf3ee5a5422b0e2a88b0c9c6ed3b6144-Paper-Conference.pdf) |  2023 |NeurIPS |\n| [The role of permutation invariance in linear mode connectivity of neural networks](https:\u002F\u002Fopenreview.net\u002Fpdf?id=dNigytemkL) | 2022 | ICLR |\n| [What can linear interpolation of neural network loss landscapes tell us?](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2106.16004) |2022 | ICML |\n| [Loss Surface Simplexes for Mode Connecting Volumes and Fast Ensembling](https:\u002F\u002Fproceedings.mlr.press\u002Fv139\u002Fbenton21a\u002Fbenton21a.pdf) | 2021 | ICML |\n| [Analyzing Monotonic Linear Interpolation in Neural Network Loss Landscapes](https:\u002F\u002Fproceedings.mlr.press\u002Fv139\u002Flucas21a\u002Flucas21a.pdf) | 2021 | ICML |\n| [Geometry of the Loss Landscape in Overparameterized Neural Networks: Symmetries and Invariances](https:\u002F\u002Fproceedings.mlr.press\u002Fv139\u002Fsimsek21a\u002Fsimsek21a.pdf)| 2021 | ICML |\n| [Linear Mode Connectivity and the Lottery Ticket Hypothesis](https:\u002F\u002Fproceedings.mlr.press\u002Fv119\u002Ffrankle20a\u002Ffrankle20a.pdf) | 2020 | ICML |\n| [Optimizing mode connectivity via neuron alignment](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2009.02439) | 2020 | NeurIPS |\n| [Model fusion via optimal transport](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2020\u002Ffile\u002Ffb2697869f56484404c8ceee2985b01d-Paper.pdf) | 2020  | NeurIPS |\n| [Uniform convergence may be unable to explain generalization in deep learning](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper_files\u002Fpaper\u002F2019\u002Ffile\u002F05e97c207235d63ceb1db43c60db7bbb-Paper.pdf) |  2019 | NeurIPS |\n| [Explaining landscape connectivity of low-cost solutions for multilayer nets](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper_files\u002Fpaper\u002F2019\u002Ffile\u002F46a4378f835dc8040c8057beb6a2da52-Paper.pdf)|  2019 | NeurIPS |\n| [Essentially no barriers in neural network energy landscape](https:\u002F\u002Fproceedings.mlr.press\u002Fv80\u002Fdraxler18a\u002Fdraxler18a.pdf) | 2018 | ICML  |\n| [Loss Surfaces, Mode Connectivity, and Fast Ensembling of DNNs](https:\u002F\u002Fpapers.nips.cc\u002Fpaper_files\u002Fpaper\u002F2018\u002Ffile\u002Fbe3087e74e9100d4bc4c6268cdbe8456-Paper.pdf)|  2018 | NeurIPS |\n\n### During Merging Methods\n\n\n\u003Ccenter>\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEnnengYang_Awesome-Model-Merging-Methods-Theories-Applications_readme_5e853bb8e4c4.png\" alt=\"Model Merging\" width=\"800\"\u002F>\n\u003C\u002Fcenter>\n\n\n\n#### Basic Merging Methods\n\n| **Paper Title** | **Year** | **Conference\u002FJournal** | **Remark** |\n| --------------- | :----: | :----: | :----: |\n| [Composing parameter-efficient modules with arithmetic operation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.14870) | 2023 | NeurIPS |\n| [Editing models with task arithmetic](https:\u002F\u002Fopenreview.net\u002Fpdf?id=6t0Kwf8-jrj) | 2023 | ICLR |\n| [Model fusion via optimal transport](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2020\u002Ffile\u002Ffb2697869f56484404c8ceee2985b01d-Paper.pdf) |2020  | NeurIPS |\n| [Weight averaging for neural networks and local resampling schemes](https:\u002F\u002Fciteseerx.ist.psu.edu\u002Fdocument?repid=rep1&type=pdf&doi=a34e789c0f76b860b6e3bc1b7fa04054ccb75c3b) | 1996 | AAAI Workshop  |\n| [Acceleration of stochastic approximation by averaging](https:\u002F\u002Fepubs.siam.org\u002Fdoi\u002Fabs\u002F10.1137\u002F0330046?journalCode=sjcodc)| 1992 | IAM Journal on Control and Optimization\n| [Animating rotation with quaternion curves (Spherical Linear Interpolation (SLERP) Model Merging)](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F325165.325242) | 1985 | SIGGRAPH Computer Graphics |\n\n#### Weighted-based Merging Methods\n\n| **Paper Title** | **Year** | **Conference\u002FJournal** | **Remark** |\n| --------------- | :----: | :----: | :----: |\n| [Label-Free Cross-Task LoRA Merging with Null-Space Compression](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.26317)| 2026 | Arxiv | LLAMA-3 8B, LLAVA-1.5-7B\n| [The Mean is the Mirage: Entropy-Adaptive Model Merging under Heterogeneous Domain Shifts in Medical Imaging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.21372)| 2026 | Arxiv | \n| [LARV: Data-Free Layer-wise Adaptive Rescaling Veneer for Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.09413)| 2026 | Arxiv | \n| [Souper-Model: How Simple Arithmetic Unlocks State-of-the-Art LLM Performance](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2511.13254v1)| 2025 | Arxiv | xLAM-2-70b, CoALM-70B, watt-tool-70B, functionary-medium-70B, xLAM-2-8b, ToolACE-2-8B, watt-tool-8B, BitAgent-8B, CoALM-8B | \n| [Superpose Task-specific Features for Model Merging](https:\u002F\u002Faclanthology.org\u002F2025.emnlp-main.210.pdf)| 2025 | EMNLP | Llama-2-7B\n| [T3: Test-Time Model Merging in VLMs for Zero-Shot Medical Imaging Analysis](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.27265)| 2025 | Arxiv |\n| [Weight Weaving: Parameter Pooling for Data-Free Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.13921)| 2025 | Arxiv |\n| [Expert Merging: Model Merging with Unsupervised Expert Alignment and Importance-Guided Layer Chunking](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.25712)| 2025 | Arxiv |Mistral-7B, InternVL, Qwen2-VL\n| [Variational Task Vector Composition](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.18208)| 2025 |NeurIPS  |\n| [RegMean++: Enhancing Effectiveness and Generalization of Regression Mean for Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.03121)| 2025 |Arxiv  |\n| [StatsMerging: Statistics-Guided Model Merging via Task-Specific Teacher Distillation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.04567)| 2025 |Arxiv  |\n| [SeMe: Training-Free Language Model Merging via Semantic Alignment](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.20144)| 2025 |Arxiv  |\n| [NAN: A Training-Free Solution to Coefficient Estimation in Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.16148)| 2025 |Arxiv  |LLaMA2-13B, WizardLM-13B, WizardMath-13B, LLaVA-v1.5-13B, LLaVA-1.6-13B, Math-LLaVA|\n| [Leveraging Submodule Linearity Enhances Task Arithmetic Performance in LLMs](https:\u002F\u002Fopenreview.net\u002Fpdf?id=irPcM6X5FV)| 2025 |ICLR  | Llama-2-7B and Llama-2-13B\n| [Layer-Aware Task Arithmetic: Disentangling Task-Specific and Instruction-Following Knowledge](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.20186)| 2025 |Arxiv  | Gemma-2-9B, Llama-3-8B |\n| [Sens-Merging: Sensitivity-Guided Parameter Balancing for Merging Large Language Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.12420v1)| 2025 |Arxiv  | LLaMA-2 7B series, Mistral 7B series, LLaMA-2 13B series |\n| [RankMean: Module-Level Importance Score for Merging Fine-tuned Large Language Models](https:\u002F\u002Faclanthology.org\u002F2024.findings-acl.104.pdf)| 2024 | ACL  |\n| [Non-Uniform Parameter-Wise Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.15467)| 2024 |Arxiv  |\n| [How to Weight Multitask Finetuning? Fast Previews via Bayesian Model-Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.08147)| 2024 |Arxiv  |\n| [LiNeS: Post-training Layer Scaling Prevents Forgetting and Enhances Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.17146)| 2024 |Arxiv  |\n| [Merging in a Bottle: Differentiable Adaptive Merging (DAM) and the Path from Averaging to Automation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.08371)| 2024 |Arxiv  |shisa-gamma-7b, WizardMath-7B-V1.1, Abel-7B-002, Llama-3-SauerkrautLM-8b-Instruct, Llama-3-Open-Ko-8B, llama-3-sqlcoder-8b, Meta-Llama-3-8B |\n| [Knowledge Composition using Task Vectors with Learned Anisotropic Scaling](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.02880) | 2024 |Arxiv  |\n| [MetaGPT: Merging Large Language Models Using Model Exclusive Task Arithmetic](https:\u002F\u002Faclanthology.org\u002F2024.emnlp-main.102.pdf) | 2024 |EMNLP  | LLaMA-2-7B, Mistral-7B, LLaMA-2-13B |\n| [Checkpoint Merging via Bayesian Optimization in LLM Pretraining](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.19390) |  2024 |Arxiv  | Baichuan2-220B, Baichuan2-440B, Baichuan2-660B, Baichuan2-1540B, Baichuan2-1760B, Baichuan2-1980B, Baichuan2-2200B, Baichuan2-2420B, DeepSeek-1400B, DeepSeek-1600B, DeepSeek-1800B, DeepSeek-2000B |\n| [Arcee’s MergeKit: A Toolkit for Merging Large Language Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.13257) | 2024 |Arxiv  | Llama2-7B-Chat, Meditron-7B|\n| [Evolutionary optimization of model merging recipes](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.13187) | 2024 |Arxiv  | shisa-gamma-7b-v1, WizardMath-7B-V1.1, Arithmo2-Mistral-7B, Abel-7B-002, Mistral-7B-v0.1, LLaVA-1.6-Mistral-7B|\n| [XFT: Unlocking the Power of Code Instruction Tuning by Simply Merging Upcycled Mixture-of-Experts](https:\u002F\u002Faclanthology.org\u002F2024.acl-long.699.pdf)| 2024 | ACL |\n| [AdaMerging: Adaptive Model Merging for Multi-Task Learning](https:\u002F\u002Fopenreview.net\u002Fpdf?id=nZP6NgD3QY) | 2024  | ICLR |\n| [Model Merging by Uncertainty-Based Gradient Matching](https:\u002F\u002Fopenreview.net\u002Fpdf?id=D7KJmfEDQP) | 2024  | ICLR |\n| [Merging by Matching Models in Task Subspaces](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.04339) | 2024  | TMLR |\n| [Fisher Mask Nodes for Language Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.09891) | 2024 | LREC-COLING |\n| [Erasure Coded Neural Network Inference via Fisher Averaging](https:\u002F\u002Fshiqiang.wang\u002Fpapers\u002FDJ_ISIT2024.pdf)| 2024 | ISIT |\n| [Dataless Knowledge Fusion by Merging Weights of Language Models](https:\u002F\u002Fopenreview.net\u002Fpdf?id=FCnohuR6AnM) | 2023  | ICLR |\n| [Merging models with fisher-weighted averaging](https:\u002F\u002Fopenreview.net\u002Fpdf?id=LSKlp_aceOC) | 2022  | NeurIPS |\n\n#### Subspace-based Merging Method (Sparse or Low-rank Subspace)\n\n| **Paper Title** | **Year** | **Conference\u002FJournal** | **Remark** |\n| --------------- | :----: | :----: |:----: |\n| [Diet Your LLM: Dimension-wise Global Pruning of LLMs via Merging Task-specific Importance Score](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.23985)| 2026 | Arxiv | Gemma-2 9B, Qwen2.5-7B, Phi-4-mini\n| [DC-Merge: Improving Model Merging with Directional Consistency](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.06242)| 2026 | CVPR | LLaVA\n| [CoMoL: Efficient Mixture of LoRA Experts via Dynamic Core Space Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.00573)| 2026 | Arxiv |Qwen3-8B and Llama3.1-8B\n| [Model Merging in the Essential Subspace](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.20208)| 2026 | Arxiv |\n| [Beyond Parameter Arithmetic: Sparse Complementary Fusion for Distribution-Aware Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.11717)| 2026 | Arxiv | Mistral-7B, Qwen2.5-14B, and Qwen2.5-32B\n| [Orthogonal Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.05943)| 2026 | Arxiv |  Llama-3.1-8B, Qwen2.5-VL-7B-Instruct, Llama-3.2-3B\n| [When Shared Knowledge Hurts: Spectral Over-Accumulation in Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.05536)| 2026 | Arxiv |\n| [Merging Beyond: Streaming LLM Updates via Activation-Guided Rotations](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.03237)| 2026 | Arxiv |  Qwen2.5-7B, Qwen2.5-14B\n| [AdaRank: Adaptive Rank Pruning for Enhanced Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.22178)| 2026 | ICLR |\n| [Decomposing Task Vectors for Refined Model Editing](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2512.22511)| 2025 | Arxiv | \n| [Stay Unique, Stay Efficient: Preserving Model Personality in Multi-Task Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2512.01461) | 2025 | Arxiv |  Qwen-14B\n| [Towards Reversible Model Merging For Low-rank Weights](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.14163) | 2025 | Arxiv |\n| [Purifying Task Vectors in Knowledge-Aware Subspace for Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.14697)| 2025 | Arxiv | LLaMA-2-7B\n| [RobustMerge: Parameter-Efficient Model Merging for MLLMs with Direction Robustness](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.17159v3)| 2025 | NeurIPS | LLaVA\n| [Accurate and Efficient Low-Rank Model Merging in Core Space](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.17786)| 2025 | NeurIPS |\n| [Efficient Multi-Source Knowledge Transfer by Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.19353)| 2025 | Arxiv |\n| [One Size Does Not Fit All: A Distribution-Aware Sparsification for More Precise Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.06163)| 2025 | Arxiv |\n| [NegMerge: Sign-Consensual Weight Merging for Machine Unlearning](https:\u002F\u002Fopenreview.net\u002Fpdf?id=ZbWXovStjD)| 2025 | ICML |\n| [Subspace-Boosted Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.16506)| 2025 | Arxiv |\n| [Training-free LLM Merging for Multi-task Learning](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.12379)| 2025 | Arxiv |\n| [Merging Smarter, Generalizing Better: Enhancing Model Merging on OOD Data](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.09093)| 2025 | Arxiv |\n| [Locate-then-Merge: Neuron-Level Parameter Fusion for Mitigating Catastrophic Forgetting in Multimodal LLMs](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.16703)| 2025 | Arxiv | Mistral-7B, Llama3-8B |\n| [CALM: Consensus-Aware Localized Merging for Multi-Task Learning](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.13406)| 2025 |ICML  |\n| [Merge-Friendly Post-Training Quantization for Multi-Target Domain Adaptation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.23651)| 2025 | ICML |\n| [Adaptive LoRA Merge with Parameter Pruning for Low-Resource Generation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.24174)| 2025 | ACL | Llama-3-8B-Instruct\n| [Decom-Renorm-Merge: Model Merging on the Right Space Improves Multitasking](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.23117)| 2025 | Arxiv | LLaMA3.1-8B\n| [CAT Merging: A Training-Free Approach for Resolving Conflicts in Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.06977)| 2025 | Arxiv |\n| [LoRI: Reducing Cross-Task Interference in Multi-Task LowRank Adaptation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.07448)| 2025 | Arxiv | Llama-3-8B and Mistral-7B |\n| [Task Vector Quantization for Memory-Efficient Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.06921)| 2025 | Arxiv |\n| [Disentangling Task Interference within Neurons: Model Merging in Alignment with Neuronal Mechanisms](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.05320)| 2025 | Arxiv | Llama-2-7b |\n| [Exploring Sparse Adapters for Scalable Merging of Parameter Efficient Experts](https:\u002F\u002Fopenreview.net\u002Fpdf?id=8wt2eKkVe6) | 2025 | ICLR 2025 Workshop |\n| [LEWIS (LayEr WIse Sparsity) -- A Training Free Guided Model Merging Approach](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.03874)| 2025 | ICLR 2025 Workshop |Gemma-9b, LLaMA 3.1 8b |\n| [CABS: Conflict-Aware and Balanced Sparsification for Enhancing Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.01874)| 2025 | Arxiv |Mistral-7b-v0.1, WildMarcoroni-Variant1-7B and WestSeverus-7B-DPO-v2 |\n| [Low-Rank and Sparse Model Merging for Multi-Lingual Speech Recognition and Translation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.17380)| 2025 | Arxiv |\n| [LED-Merging: Mitigating Safety-Utility Conflicts in Model Merging with Location-Election-Disjoint](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.16770)| 2025 | Arxiv |Llama-3- 8B, Mistral-7B, and Llama2-13B |\n| [Parameter Efficient Merging for Multimodal Large Language Models with Complementary Parameter Adaptation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.17159)| 2025 | Arxiv |\n| [Optimal Brain Iterative Merging: Mitigating Interference in LLM Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.12217)| 2025 | Arxiv | Llama-2-13b, WizardMath-13B-V1.0, WizardLM13B-V1.2, llama-2-13b-codealpaca\n| [Superpose Singular Features for Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.10698)| 2025 | Arxiv | Llama-2-7B\n| [STAR: Spectral Truncation and Rescale for Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.10339)| 2025 | NAACL |  Mistral-7B-Instruct|\n| [No Task Left Behind: Isotropic Model Merging with Common and Task-Specific Subspaces](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.04959)| 2025 | Arxiv |  \n| [Merging Models on the Fly Without Retraining: A Sequential Approach to Scalable Continual Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2501.09522)| 2025  |NeurIPS  | |\n| [Modeling Multi-Task Model Merging as Adaptive Projective Gradient Descent](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2501.01230)| 2025 | Arxiv |  \n| [Revisiting Weight Averaging for Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.12153)| 2024  |Arxiv  | |\n| [Task Singular Vectors: Reducing Task Interference in Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.00081) | 2025  |CVPR  | |\n| [Less is More: Efficient Model Merging with Binary Task Switch](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.00054)|  2024 |Arxiv  |\n| [FREE-Merging: Fourier Transform for Model Merging with Lightweight Experts](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.16815)|2024  |Arxiv  | Qwen-14B (LoRA),  LLaMa2-13B, WizardLM-13B, WizardMath-13B, WizardCoderPython-13B |\n| [Beyond Task Vectors: Selective Task Arithmetic Based on Importance Metrics](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.16139)|2024  |Arxiv  | |\n| [Parameter Competition Balancing for Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.02396v1)| 2024 | NeurIPS  | Llama-2-7b |\n| [Language Models are Super Mario: Absorbing Abilities from Homologous Models as a Free Lunch](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.03099) | 2024 | ICML  | WizardLM-13B, WizardMath-13B, and llama-2-13b-codealpaca, Mistral-7B|\n| [Localizing Task Information for Improved Model Merging and Compression](https:\u002F\u002Fopenreview.net\u002Fattachment?id=DWT9uiGjxT&name=pdf) | 2024 | ICML | |\n| [Sparse Model Soups: A Recipe for Improved Pruning via Model Averaging](https:\u002F\u002Fopenreview.net\u002Fpdf?id=xx0ITyHp3u) |2024  |ICLR  | |\n| [Model merging with svd to tie the knots](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.19735)|2024  |Arxiv  |Llama3-8B |\n| [NegMerge: Consensual Weight Negation for Strong Machine Unlearning](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.05583)|2024  |Arxiv  | |\n| [Localize-and-Stitch: Efficient Model Merging via Sparse Task Arithmetic](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.13656)|2024  |Arxiv  | |\n| [Activated Parameter Locating via Causal Intervention for Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.09485)|2024  |Arxiv  | Llama-2-chat-7B|\n| [PAFT: A Parallel Training Paradigm for Effective LLM Fine-Tuning](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.17923)| 2024 | Arxiv  |Mistral-7B-v0.1, Llama-3-8B, Neurotic-7B, MoMo-70B|\n| [DELLA-Merging: Reducing Interference in Model Merging through Magnitude-Based Sampling](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.11617)|2024  |Arxiv  |Llama-2-13b-code-alpaca, WizardLM, Wizard-Math, WizardCoder-Python|\n| [EMR-Merging: Tuning-Free High-Performance Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.17461) |2024  |NeurIPS  | |\n| [DPPA: Pruning Method for Large Language Model to Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.02799) |2024  |Arxiv  | LLaMa 2 |\n| [Model breadcrumbs: Scaling multi-task model merging with sparse masks](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.06795) |2023  |Arxiv  | |\n| [Concrete Subspace Learning based Interference Elimination for Multi-task Model Fusion](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.06173) | 2023  |Arxiv  | |\n| [ComPEFT: Compression for Communicating Parameter Efficient Updates via Sparsification and Quantization](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.13171) | 2023  |Arxiv  | LLaMA 7B, 13B, 33B, and 65B|\n| [Effective and ParameterEfficient Reusing Fine-Tuned Models](https:\u002F\u002Fopenreview.net\u002Fpdf?id=13D1zn0mpd) | 2023 | Openreview |\n| [Resolving Interference When Merging Models](https:\u002F\u002Fopenreview.net\u002Fpdf?id=xtaX3WyCj1) | 2023  |  NeurIPS | |\n| [Task-Specific Skill Localization in Fine-tuned Language Model](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2302.06600)|  2023| ICML | |\n\n#### Routing-based Merging Methods (Dynamic Merging)\n\n| **Paper Title** | **Year** | **Conference\u002FJournal** | **Remark** |\n| --------------- | :----: | :----: | :----: |\n| [TECS-L (Golden MoE): Dense-to-MoE Expert Splitting Framework](https:\u002F\u002Fgithub.com\u002Fneed-singularity\u002FTECS-L)| 2026 | GitHub | Mistral-7B |\n| [Fine-Grained Model Merging via Modular Expert Recombination](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.06552)| 2026 | Arxiv |\n| [MIN-Merging: Merge the Important Neurons for Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.17890) | 2025 | Arxiv |\n| [SE-Merging: A Self-Enhanced Approach for Dynamic Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.18135) | 2025 | Arxiv |\n| [Adaptive Task Vectors for Large Language Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.03426) | 2025 | Arxiv |LLaMA3-8B and Mistral-7B |\n| [Dynamic Fisher-weighted Model Merging via Bayesian Optimization](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.18992)| 2025 | Arxiv |\n| [Data-Adaptive Weight-Ensembling for Multi-task Model Fusion](https:\u002F\u002Flink.springer.com\u002Farticle\u002F10.1007\u002Fs11263-025-02434-2)| 2025 | IJCV |\n| [MASS: MoErging through Adaptive Subspace Selection](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.05342)| 2025 | Arxiv |\n| [Dynamic Model Merging with Mixture of Weights](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10900479\u002F)| 2025 | TCSVT |\n| [CAMEx: Curvature-aware Merging of Experts](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.18821)| 2025 | ICLR |\n| [1bit-Merging: Dynamic Quantized Merging for Large Language Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.10743)| 2025 | Arxiv | LLaMA-2 7B, Mistral 7B, and LLaMA-2 13B |\n| [MergeME: Model Merging Techniques for Homogeneous and Heterogeneous MoEs](https:\u002F\u002Fpapers-pdfs.assets.alphaxiv.org\u002F2502.00997v3.pdf)| 2025 | Arxiv |\n| [Mediator: Memory-efficient LLM Merging with Less Parameter Conflicts and Uncertainty Based Routing](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.04411)|  2025 |Arxiv  | Qwen-2.5-7B, LLaMA-3.2-8B |\n| [Adapting Foundation Models via Training-free Dynamic Weight Interpolation](https:\u002F\u002Fopenreview.net\u002Fpdf?id=yyv54uPM0z)|  2024 | NeurIPS 2024 Workshop  |\n| [Efficient and Effective Weight-Ensembling Mixture of Experts for Multi-Task Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.21804) |  2024 |Arxiv  |\n| [DaWin: Training-free Dynamic Weight Interpolation for Robust Adaptation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.03782) | 2024 | NeurIPS 2024 Workshop |\n| [Merging Multi-Task Models via Weight-Ensembling Mixture of Experts](https:\u002F\u002Fopenreview.net\u002Fpdf\u002F2aee8072945cd0485e619dd88c35566610cd5042.pdf) |  2024| ICML |\n| [Learning to Route Among Specialized Experts for Zero-Shot Generalization](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.05859)|2024  | ICML  |\n| [Merge, Then Compress: Demystify Efficient SMoE with Hints from Its Routing Policy](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.01334) |2024  | ICLR |\n| [Soft merging of experts with adaptive routing](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.03745) | 2024 | TMLR |\n| [SMILE: Zero-Shot Sparse Mixture of Low-Rank Experts Construction From Pre-Trained Foundation Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.10174) |2024 |Arxiv  | Mistral-7B-v0.1, MetaMath-Mistral-7B, dolphin-2.1-mistral-7b, speechless-code-mistral-7b-v1.0|\n| [Twin-Merging: Dynamic Integration of Modular Expertise in Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.15479) |  2024 | NeurIPS  |Qwen-14B|\n| [Self-MoE: Towards Compositional Large Language Models with Self-Specialized Experts](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.12034) |2024 |Arxiv  |Gemma-7B, LLaMA-2 7B & 13B, Mistral 7B,  LLaMA-3 8B|\n| [Towards Efficient Pareto Set Approximation via Mixture of Experts Based Model Fusion](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.09770) |  2024 |Arxiv  |\n| [Sparse Upcycling: Training Mixture-of-Experts from Dense Checkpoints](http:\u002F\u002Farxiv.org\u002Fabs\u002F2212.05055) | 2023 | ICLR |\n\n\u003C!-- | [Branch-Train-MiX: Mixing Expert LLMs into a Mixture-of-Experts LLM](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.07816) |  2024 |Arxiv  | -->\n\n#### Post-calibration based Methods\n\n| **Paper Title** | **Year** | **Conference\u002FJournal** | **Remark** |\n| --------------- | :----: | :----: | :----: |\n| [MAGIC: Achieving Superior Model Merging via Magnitude Calibration](https:\u002F\u002Fgithub.com\u002Flyymuwu\u002FMAGIC)| 2025 | Arxiv  |OLMo-3-7B\n| [Towards Minimizing Feature Drift in Model Merging: Layer-wise Task Vector Fusion for Adaptive Knowledge Integration](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.23859)| 2025 |NeurIPS  |\n| [Multi-Task Model Fusion via Adaptive Merging](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10887718)|2025  | ICASSP |\n| [Representation Surgery in Model Merging with Probabilistic Modeling](https:\u002F\u002Fopenreview.net\u002Fpdf?id=a02CH43z1G)|2025  | ICML |\n| [Parameter-Efficient Interventions for Enhanced Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.17023)|2024  | Arxiv |\n| [Tint Your Models Task-wise for Improved Multi-task Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.13526)|2024  | Arxiv |\n| [SurgeryV2: Bridging the Gap Between Model Merging and Multi-Task Learning with Deep Representation Surgery](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.14389)|2024  | Arxiv |\n| [Representation Surgery for Multi-Task Model Merging](https:\u002F\u002Fopenreview.net\u002Fpdf\u002F602906ec02919eb95d78d634321fcba1b68a2f03.pdf) |2024  | ICML |\n\n\n### Other Merging Methods\n\n| **Paper Title** | **Year** | **Conference\u002FJournal** | **Remark** |\n| --------------- | :----: | :----: | :----: |\n| [Task Alignment: A simple and effective proxy for model merging in computer vision](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2604.12935)| 2026 | Arxiv | \n| [Model Merging via Data-Free Covariance Estimation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2604.01329)| 2026 | Arxiv | \n| [Resolving Interference (RI): Disentangling Models for Improved Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.13467)| 2026 | Arxiv | \n| [BD-Merging: Bias-Aware Dynamic Model Merging with Evidence-Guided Contrastive Learning](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.03920)| 2026 | Arxiv | \n| [ACE-Merging: Data-Free Model Merging with Adaptive Covariance Estimation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.02945)| 2026 | Arxiv | \n| [Training-Free Cross-Architecture Merging for Graph Neural Networks](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.19332)| 2026 | Arxiv | \n| [Gradient-Sign Masking for Task Vector Transport Across Pre-Trained Models](https:\u002F\u002Farxiv.org\u002Fabs\u002F2510.09658)| 2026 | ICLR | Flan-T5 |\n| [Transporting Task Vectors across Different Architectures without Training](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.12952)| 2026 | Arxiv | \n| [MergePipe: A Budget-Aware Parameter Management System for Scalable LLM Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.13273)| 2026 | Arxiv | Llama3.1-8B, Llama-3.2-3B, Qwen3-0.6B, Qwen3-1.7B, and Qwen3-8B\n| [DisTaC: Conditioning Task Vectors via Distillation for Robust Model Merging](https:\u002F\u002Fopenreview.net\u002Fpdf?id=W70w5JCzdq)| 2026 | ICLR |\n| [Sparsity-Aware Evolution for Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.08218)| 2026 | Arxiv |\n| [AutoMerge: Search-Based Model Merging Framework for Effective Model Reuse](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.22748)| 2026 | Arxiv  | Llama2-7B-Chat, Llama2-7B-Code\n| [Model Merging via Multi-Teacher Knowledge Distillation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2512.21288)| 2025 | Arxiv  |\n| [Bridging Training and Merging Through Momentum-Aware Optimization](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2512.17109)| 2025 | Arxiv  |\n| [From Coefficients to Directions: Rethinking Model Merging with Directional Alignment](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2512.00391)| 2025 | Arxiv  |\n| [Escaping Optimization Stagnation: Taking Steps Beyond Task Arithmetic via Difference Vectors](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2511.17987)| 2025 | Arxiv  |\n| [Model Merging with Functional Dual Anchors](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.21223)| 2025 | Arxiv  |\n| [Black-box Model Merging for Language-Model-as-a-Service with Massive Model Repositories](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.12951)| 2025 | Arxiv  |\n| [Rethinking Layer-wise Model Merging through Chain of Merges](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.21421v1)| 2025 | Arxiv  |Llama 3-8B\n| [Competition and Attraction Improve Model Fusion](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.16204)| 2025 | Arxiv  | WizardMath 7B v1.0, AgentEvol 7B\n| [PSO-Merging: Merging Models Based on Particle Swarm Optimization](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.19839)| 2025 | Arxiv  | Llama-3-8B, Llama-2-13B, and Mistral-7B-v0.3 | \n| [DisTaC: Conditioning Task Vectors via Distillation for Robust Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.01148)| 2025 | Arxiv  |\n| [Navigating the Accuracy-Size Trade-Off with Flexible Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.23209)| 2025 | Arxiv  |\n| [Efficient Multi-Task Inferencing: Model Merging with Gromov-Wasserstein Feature Alignment](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.09774?)| 2025 | Arxiv  |\n| [Reinforced Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.21272)| 2025 | Arxiv  |\n| [FW-Merging: Scaling Model Merging with Frank-Wolfe Optimization](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.12649)| 2025 | Arxiv  | LLaMA2-7B\n| [Whoever Started the Interference Should End It: Guiding Data-Free Model Merging via Task Vectors](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.08099)| 2025 | Arxiv  | WizardLM-13B (LM), WizardMath-13B (Math), and llama-2-13bcodealpaca (Code) |\n| [GNNMERGE: Merging of GNN Models Without Accessing Training Data](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.03384)| 2025 | Arxiv  |\n| [MERGE3: Efficient Evolutionary Merging on Consumer-grade GPUs](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.10436)| 2025 | ICML  | Mistral-7B\n| [Activation-Informed Merging of Large Language Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.02421?)| 2025 | Arxiv | Llama-2-13b, WizardLM-13B, WizardMath-13B, llama-2-13b-code-alpaca\n| [Scalable Model Merging with Progressive Layer-wise Distillation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.12706)| 2025 | Arxiv | WizardLM-13B, WizardMath-13B and llama-2-13b-code-alpaca\n| [Fine, I’ll Merge It Myself: A Multi-Fidelity Framework for Automated Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.04030)| 2025 | Arxiv |  Llama-2-13, WizardLM13B, WizardMath-13, llama-2-13b-code-alpaca |\n| [Task Arithmetic in Trust Region: A Training-Free Model Merging Approach to Navigate Knowledge Conflicts](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2501.15065)| 2025 | ICLR |  \n| [Fine-tuning Aligned Classifiers for Merging Outputs: Towards a Superior Evaluation Protocol in Model Merging](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.13526)| 2024 | Arxiv |\n| [Multi-Task Model Merging via Adaptive Weight Disentanglement](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.18729)| 2024 | Arxiv |\n| [Rethinking Weight-Averaged Model-merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.09263)| 2024 | Arxiv |\n| [ATM: Improving Model Merging by Alternating Tuning and Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.03055)| 2024 | Arxiv |\n| [HM3: Hierarchical Multi-Objective Model Merging for Pretrained Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2409.18893) | 2024 | Arxiv | Llama-2-7B-Chat, WizardMath-7B, CodeLlama-7B|\n| [Weight Scope Alignment: A Frustratingly Easy Method for Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.12237) | 2024 | Arxiv |\n| [It’s Morphing Time: Unleashing the Potential of Multiple LLMs via Multi-objective Optimization](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.00487) | 2024 | Arxiv | Qwen1.5-7B-Chat, Liberated-Qwen1.5-7B, firefly-qwen1.5-en-7B |\n| [Toward Data Efficient Model Merging between Different Datasets without Performance Degradation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.05641v2)| 2024 | JMLR |\n| [SOLAR 10.7B: Scaling Large Language Models with Simple yet Effective Depth Up-Scaling](http:\u002F\u002Farxiv.org\u002Fabs\u002F2312.15166) | 2023 | Arxiv |SOLAR 10.7B, SOLAR 10.7B-Instruct|\n\n### Theories or Analysis of Model Merging\n\n| **Paper Title** | **Year** | **Conference\u002FJournal** | **Remark** |\n| --------------- | :----: | :----: | :----: |\n| [An Empirical Study and Theoretical Explanation on Task-Level Model-Merging Collapse](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.09463)| 2026 | Arxiv | Qwen2.5-3B, 7B, and 14B, Llama3.1-8B\n| [Trade-offs in Ensembling, Merging and Routing Among Parameter-Efficient Experts](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.03535)| 2026 | Arxiv |\n| [Enough is as good as a feast: A Comprehensive Analysis of How Reinforcement Learning Mitigates Task Conflicts in LLMs](https:\u002F\u002Fopenreview.net\u002Fpdf?id=N4l4Jp50R4)| 2026 | ICLR | Llama-3.2-3B, Llama-3.1-8B, and Mistral-Small-3-24B\n| [M-Loss: Quantifying Model Merging Compatibility with Limited Unlabeled Data](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.08564)| 2026 | Arxiv |\n| [WSM: Decay-Free Learning Rate Schedule via Checkpoint Merging for LLM Pre-training](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.17634) | 2026 | ICLR | Ling-mini-16B\n| [Demystifying Mergeability: Interpretable Properties to Predict Model Merging Success](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.22285)| 2026 | Arxiv |\n| [Understanding Model Merging: A Unified Generalization Framework for Heterogeneous Experts](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.21690)| 2026 | Arxiv |\n| [Will it Merge? On The Causes of Model Mergeability](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.06672)| 2026 | Arxiv | Llama-3.2-3B、Qwen-2.5-3B、Mistral-7B-Instruct-v0.2\n| [How does the optimizer implicitly bias the model merging loss landscape?](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.04686)| 2025 | Arxiv |\n| [On Task Vectors and Gradients](https:\u002F\u002Fwww.arxiv.org\u002Fpdf\u002F2508.16082)| 2025 | Arxiv |\n| [Why Do More Experts Fail? A Theoretical Analysis of Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.21226) | 2025 | Arxiv |\n| [When is Task Vector Provably Effective for Model Editing? A Generalization Analysis of Nonlinear Transformers](https:\u002F\u002Fopenreview.net\u002Fpdf?id=iX7eHHE5Tx) | 2025 | ICLR |\n| [Multi-Level Collaboration in Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.01268) | 2025 | Arxiv |\n| [Low-rank bias, weight decay, and model merging in neural networks](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.17340)| 2025 | Arxiv |\n| [Understanding SGD with Exponential Moving Average: A Case Study in Linear Regression](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.14123)| 2025 | Arxiv |\n| [SeWA: Selective Weight Average via Probabilistic Masking](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.10119)| 2025 | Arxiv |\n| [Efficient Model Editing with Task Vector Bases: A Theoretical Framework and Scalable Approach](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.01015)|  2025 |Arxiv |\n| [Task Arithmetic Through The Lens Of One-Shot Federated Learning](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.18607)|  2024 |Arxiv | WizardLM-13B, WizardMath-13B, Llama-2-13B-Code-Alpaca, Llama2-13B|\n| [A Unified Analysis for Finite Weight Averaging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.13169v1)|  2024 |Arxiv |\n| [WASH: Train your Ensemble with Communication-Efficient Weight Shuffling, then Average](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.17517) |  2024 |Arxiv |\n| [On the Emergence of Cross-Task Linearity in Pretraining-Finetuning Paradigm](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.03660)| 2024 | ICML |\n| [Generalization Analysis of Stochastic Weight Averaging with General Sampling](https:\u002F\u002Fproceedings.mlr.press\u002Fv235\u002Fwang24bl.html)| 2024 | ICML |\n| [Diverse weight averaging for out-of-distribution generalization](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper_files\u002Fpaper\u002F2022\u002Ffile\u002F46108d807b50ad4144eb353b5d0e8851-Paper-Conference.pdf) | 2022 | NeurIPS |\n| [Ensemble of averages: Improving model selection and boosting performance in domain generalization](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2110.10832) | 2022 | NeurIPS |\n| [Stability analysis and generalization bounds of adversarial training](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2210.00960)| 2022 | NeurIPS |\n| [The role of permutation invariance in linear mode connectivity of neural networks](https:\u002F\u002Fopenreview.net\u002Fpdf?id=dNigytemkL) | 2022 | ICLR |\n| [Swad: Domain generalization by seeking flat minima](https:\u002F\u002Fopenreview.net\u002Fpdf?id=zkHlu_3sJYU) | 2021 |  NeurIPS|\n| [Linear Mode Connectivity and the Lottery Ticket Hypothesis](https:\u002F\u002Fproceedings.mlr.press\u002Fv119\u002Ffrankle20a\u002Ffrankle20a.pdf) | 2020 | ICML |\n| [Stochastic Weight Averaging in Parallel: Large-Batch Training That Generalizes](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2001.02312) |  2020 |  ICLR |\n| [Optimizing mode connectivity via neuron alignment](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2009.02439) | 2020 |  NeurIPS |\n| [Uniform convergence may be unable to explain generalization in deep learning](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper_files\u002Fpaper\u002F2019\u002Ffile\u002F05e97c207235d63ceb1db43c60db7bbb-Paper.pdf) |  2019 | NeurIPS |\n| [Parallelizing stochastic gradient descent for least squares regression: mini-batching, averaging, and model misspecification](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1610.03774) | 2018 | JMLR |\n| [Iterate averaging as regularization for stochastic gradient descent](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1802.08009) | 2018 | Arxiv |\n| [Essentially no barriers in neural network energy landscape](https:\u002F\u002Fproceedings.mlr.press\u002Fv80\u002Fdraxler18a\u002Fdraxler18a.pdf) | 2018 | ICML |\n| [Averaging weights leads to wider optima and better generalization](https:\u002F\u002Fauai.org\u002Fuai2018\u002Fproceedings\u002Fpapers\u002F313.pdf) | 2018 | UAI |\n| [Train faster, generalize better: Stability of stochastic gradient descent](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1509.01240) | 2016 | ICML  |\n\n----------\n\n## Application of Model Merging in Foundation Models\n\n\u003Ccenter>\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEnnengYang_Awesome-Model-Merging-Methods-Theories-Applications_readme_130cf2922167.png\" alt=\"Model Merging\" width=\"800\"\u002F>\n\u003C\u002Fcenter>\n\n\n### Model Merging in Large Language Models\n\n\u003Ccenter>\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEnnengYang_Awesome-Model-Merging-Methods-Theories-Applications_readme_91bbbb3fbbac.png\" alt=\"Model Merging\" width=\"800\"\u002F>\n\u003C\u002Fcenter>\n\n#### Human Preference Alignment for LLMs\n\n| **Paper Title** | **Year** | **Conference\u002FJournal** | **Remark** |\n| --------------- | :----: | :----: | :----: |\n| [Navigating the Alignment-Calibration Trade-off: A Pareto-Superior Frontier via Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.17426)| 2025 | Arxiv  | Gemma-3-12B, Gemma-3-27B, Qwen2.5-7B |\n| [BILLY: Steering Large Language Models via Merging Persona Vectors for Creative Generation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.10157)| 2025 | Arxiv  |Qwen-2.5-7B-Instruct, Llama-3.1-8B-Instruct |\n| [Personality Vector: Modulating Personality of Large Language Models by Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.19727)| 2025 | EMNLP  | Llama-3.1-8B-Instruct, Qwen2.5-7B-Instruct |\n| [SafeMERGE: Preserving Safety Alignment in Fine-Tuned LLMs via Selective Layer-Wise Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.17239v1)| 2025 | Arxiv  |Llama-2-7B-Chat, Qwen-2-7B-Instruct |\n| [Bone Soups: A Seek-and-Soup Model Merging Approach for Controllable Multi-Objective Generation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.10762)| 2025 | Arxiv  |LLaMA-2 7B\n| [Model soup for better rlhf: Weight space averaging to improve alignment in llms](https:\u002F\u002Fopenreview.net\u002Fforum?id=QNW3Z3f5SD)| 2024 | NeurIPS 2024 Workshop  | Llama2-7B, Mistral-7B, Gemma-2B |\n| [Safeguard Fine-Tuned LLMs Through Pre- and Post-Tuning Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.19512)| 2024 | Arxiv  | Llama-3-8B-Instruct\n| [SafetyDPO: Scalable Safety Alignment for Text-to-Image Generation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.10493)| 2024 | Arxiv  |\n| [H3Fusion: Helpful, Harmless, Honest Fusion of Aligned LLMs](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.17792)| 2024 | Arxiv  |LLaMA-2 7B\n| [Baichuan Alignment Technical Report](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.14940)| 2024 | Arxiv  | Qwen2-Nova-72B, Llama3-PBM-Nova-70B |\n| [Conditioned Language Policy: A General Framework for Steerable Multi-Objective Finetuning](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.15762)| 2024 | Arxiv  |\n| [DogeRM: Equipping Reward Models with Domain Knowledge through Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.01470)| 2024 | Arxiv  | MetaMath-7B, MAmmoTH-7B, LLaMA2-7B|\n| [PAFT: A Parallel Training Paradigm for Effective LLM Fine-Tuning](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.17923)| 2024 | Arxiv  |Mistral-7B-v0.1, Llama-3-8B|\n| [Model Merging and Safety Alignment: One Bad Model Spoils the Bunch](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.14563) |  2024 | Arxiv  | Mistral-0.2-7B-Instruct, LLaMA-3-8B-Instruct, OpenBioLLM-8B, MAmmoTH2-7B, WizardMath-1.1-7B|\n| [Towards Comprehensive Post Safety Alignment of Large Language Models via Safety Patching](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.13820)|  2024 | Arxiv  |LLaMA-2-7B-Chat, LLaMA-3-8B-Instruct, Mistral7B-Instruct-v0.1 and Gemma1.1-7B-it|\n| [Disperse-Then-Merge: Pushing the Limits of Instruction Tuning via Alignment Tax Reduction](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.13432)| 2024 | Arxiv  | Llama-2-7b |\n| [Online Merging Optimizers for Boosting Rewards and Mitigating Tax in Alignment](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.17931) |  2024 | Arxiv  | Qwen1.5-7B, LLaMa3-8B |\n| [A safety realignment framework via subspace-oriented model fusion for large language models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.09055) |  2024 | Arxiv  | WizardLM-7B |\n| [Weak-to-strong extrapolation expedites alignment](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2404.16792) | 2024 | Arxiv  | zephyr-7b, starling-7b, snorkel-7b, llama3-8b, internlm2-7b, internlm2-20b, tulu-2-dpo-7b, tulu-2-dpo-13b, tulu-2-dpo-70b|\n| [Language Models are Homer Simpson! Safety Re-Alignment of Fine-tuned Language Models through Task Arithmetic](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.11746) | 2024 | Arxiv  | Llama-2-7BChat |\n| [Rewarded soups: towards pareto-optimal alignment by interpolating weights fine-tuned on diverse rewards](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.04488) |2023  | NeurIPS |  LLaMA-7b|\n| [Personalized soups: Personalized large language model alignment via post-hoc parameter merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.11564) | 2023 | Arxiv  |Tulu-7B LM|\n\n\u003C!-- | [Safety Arithmetic: A Framework for Test-time Safety Alignment of Language Models by Steering Parameters and Activations](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.11801) | 2024 | Arxiv  | llama2-7b-chat-hf, mistral-7b-instruct-v0.2, WIZARDMATH-7B, Llama Math, Llama-2-7b-evolcodealpaca|-->\n\n#### Detoxification of LLMs\n\n| **Paper Title** | **Year** | **Conference\u002FJournal** | **Remark** |\n| --------------- | :----: | :----: | :----: |\n| [Surgical, Cheap, and Flexible: Mitigating False Refusal in Language Models via Single Vector Ablation](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.03415) | 2025 |  ICLR | GEMMA-7B-IT, LLAMA2-7B\u002F13B\u002F70B-CHAT, LLAMA3-8B-INST | \n| [3DM: Distill, Dynamic Drop, and Merge for Debiasing Multi-modal Large Language Models](https:\u002F\u002Faclanthology.org\u002F2025.findings-acl.722.pdf) | 2025 |  ACL | LLaVA-1.5-7b, InternVL-2.5-8b, LLaVA-1.5-7b and ChatGLM4-9b |\n| [Expanding before Inferring: Enhancing Factuality in Large Language Models through Premature Layers Interpolation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.02973) | 2025 |  Arxiv | LLAMA3-8B-Instruct, Mistral-7B-Instruct-v0.2 |\n| [Bias Vector: Mitigating Biases in Language Models with Task Arithmetic Approach](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.11679) | 2024 |  Arxiv |\n| [Separate the Wheat from the Chaff: Model Deficiency Unlearning via Parameter-Efficient Module Operation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2308.08090) | 2024 |  AAAI | LLaMA-7B  |\n| [Mitigating Social Biases in Language Models through Unlearning](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.13551) | 2024 |  Arxiv | LLaMA-2 7B |\n| [Fine-Grained Detoxification via Instance-Level Prefixes for Large Language Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.15202) | 2024 |  Arxiv | Llama-2-7B, Llama-2-chat-7B, Vicuna-7B, Llama-2-13B|\n| [Composing Parameter-Efficient Modules with Arithmetic Operation](https:\u002F\u002Fopenreview.net\u002Fpdf?id=5r3e27I9Gy) | 2023 | NeurIPS  |\n| [Editing models with task arithmetic](https:\u002F\u002Fopenreview.net\u002Fpdf?id=6t0Kwf8-jrj) | 2023 | ICLR |\n| [Elastic Weight Removal for Faithful and Abstractive Dialogue Generation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2303.17574) | 2023 |  Arxiv |\n\n#### Knowledge Editing\u002FUnlearning of LLMs\n\n| **Paper Title** | **Year** | **Conference\u002FJournal** | **Remark** |\n| --------------- | :----: | :----: | :----: |\n| [Per-parameter Task Arithmetic for Unlearning in Large Language Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.22030) | 2026  | Arxiv | Llama3.2 1B Instruct\n| [Model Merging for Knowledge Editing](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.12384)| 2025  | ACL | Qwen2.5-7B-Instruct\n| [Exact Unlearning of Finetuning Data via Model Merging at Scale](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.04626) | 2025  | Arxiv |\n| [ZJUKLAB at SemEval-2025 Task 4: Unlearning via Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.21088) | 2025  | Arxiv | OLMo-7B-0724-Instruct\n| [Exact Unlearning of Finetuning Data via Model Merging at Scale](https:\u002F\u002Fopenreview.net\u002Fpdf?id=u89LDBIyDe)|2025  |ICLR 2025 Workshop MCDC  | |\n| [NegMerge: Consensual Weight Negation for Strong Machine Unlearning](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.05583)|2024  |Arxiv  | |\n| [Split, Unlearn, Merge: Leveraging Data Attributes for More Effective Unlearning in LLMs](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.11780)|2024  |Arxiv  | ZEPHYR-7B-BETA, LLAMA2-7B|\n| [Towards Safer Large Language Models through Machine Unlearning](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.10058) | 2024 | ACL | LLAMA2-7B, LLAMA2-13B |\n| [Editing models with task arithmetic](https:\u002F\u002Fopenreview.net\u002Fpdf?id=6t0Kwf8-jrj) | 2023 | ICLR |\n| [Forgetting before Learning: Utilizing Parametric Arithmetic for Knowledge Updating in Large Language Model](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.08011) | 2023 | Arxiv | LLAMA2-7B, LLAMA-7B, BLOOM-7B|\n| [Fuse to Forget: Bias Reduction and Selective Memorization through Model Fusion](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.07682) | 2023 | Arxiv |\n\n#### Faster Training of LLMs\n\n | **Paper Title** | **Year** | **Conference\u002FJournal** | **Remark** |\n | --------------- | :----: | :----: | :----: |\n | [Mashup Learning: Faster Finetuning by Remixing Past Checkpoints](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.10156)| 2026 |  Arxiv |\n | [GTR-Turbo: Merged Checkpoint is Secretly a Free Teacher for Agentic VLM Training](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2512.13043)| 2025 |  Arxiv |Qwen2.5-VL-7B \n | [Soup-of-Experts: Pretraining Specialist Models via Parameters Averaging](https:\u002F\u002Fopenreview.net\u002Fforum?id=MFNIka7nx0)| 2025 |  ICML |\n | [Local Mixtures of Experts: Essentially Free Test-Time Training via Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.14136)| 2025 |  Arxiv |\n | [Merge to Mix: Mixing Datasets via Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.16066)| 2025 |  Arxiv | Llama-3-8B-Instruct\n | [Model Merging in Pre-training of Large Language Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.12082)| 2025 |  Arxiv |Seed-MoE-1.3B\u002F13B, SeedMoE-10B\u002F100B, Seed-MoE-15B\u002F150B |\n | [Parameter-Efficient Checkpoint Merging via Metrics-Weighted Averaging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.18580) | 2025 |  Arxiv |\n | [DEM: Distribution Edited Model for Training with Mixed Data Distributions](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.15570) | 2024 |  Arxiv |  OpenLLaMA  7B and 13B|\n | [Checkpoint Merging via Bayesian Optimization in LLM Pretraining](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.19390) | 2024 |  Arxiv | Baichuan2-220B, Baichuan2-440B, Baichuan2-660B, Baichuan2-1540B, Baichuan2-1760B, Baichuan2-1980B, Baichuan2-2200B, Baichuan2-2420B, DeepSeek-1400B, DeepSeek-1600B, DeepSeek-1800B, DeepSeek-2000B|\n | [ColD Fusion: Collaborative Descent for Distributed Multitask Finetuning](https:\u002F\u002Faclanthology.org\u002F2023.acl-long.46.pdf) |2023  |  ACL|\n | [Early Weight Averaging meets High Learning Rates for LLM Pre-training](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.03241) |  2023 |NeurIPS Workshop  |\n | [Stop wasting my time! saving days of imagenet and bert training with latest weight averaging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2209.14981) | 2022 |NeurIPS Workshop  |\n | [Fusing finetuned models for better pretraining](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2204.03044) | 2022 |Arxiv  |\n\n#### Faster Reasoning of LLMs\n | **Paper Title** | **Year** | **Conference\u002FJournal** | **Remark** |\n | --------------- | :----: | :----: | :----: |\n | [Multi-objective Evolutionary Merging Enables Efficient Reasoning Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2604.06465) | 2026 | Arxiv | DeepSeek-R1-Distill-Qwen 1.5B, 7B, and 14B\n | [Data-Free Layer-Adaptive Merging via Fisher Information for Long-to-Short Reasoning LLMs](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.21705) | 2026 | Arxiv | Qwen2.5-Math-7B,DeepSeek-R1-Distill-Qwen-7B\n | [RAIN-Merging: A Gradient-Free Method to Enhance Instruction Following in Large Reasoning Models with Preserved Thinking Format](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.22538) | 2026 | ICLR |  Qwen2.5-1.5B\u002F14B\u002F32B, and Llama-3.1-8B\n | [Reasoning Pattern Alignment Merging for Adaptive Reasoning](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.03506) | 2026 | Arxiv | (i) Qwen3-4B-Thinking (Long-CoT) and Qwen3-4B-Instruct (Short-CoT); (ii) DeepSeekR1-Distill-Qwen-1.5B (Long-CoT) and Qwen2.5- Math-1.5B (Short-CoT)\n | [Revisiting Model Interpolation for Efficient Reasoning](https:\u002F\u002Farxiv.org\u002Fabs\u002F2510.10977) | 2025 | Arxiv | Qwen3-4B\n | [Unlocking Efficient Long-to-Short LLM Reasoning with Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.20641)| 2025 |Arxiv  | Qwen2.5-32B, DeepSeek-R1-32B |\n | [Kimi k1.5: Scaling Reinforcement Learning with LLMs](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2501.12599?)| 2025 |Arxiv  | Kimi k1.5\n  \n#### Improving Computational Efficiency of MoE-based LLM\n\n | **Paper Title** | **Year** | **Conference\u002FJournal** | **Remark** |\n | --------------- | :----: | :----: | :----: |\n | [REAM: Merging Improves Pruning of Experts in LLMs](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2604.04356)| 2026 | Arxiv  |  Qwen3-30B-A3B-Instruct-2507,  Qwen3-Coder-Next, GLM-4.5-Air\n | [Upcycled and Merged MoE Reward Model for Mitigating Reward Hacking](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2512.00724)| 2025 | Arxiv  | \n | [PuzzleMoE: Efficient Compression of Large Mixture-of-Experts Models via Sparse Expert Merging and Bit-packed inference](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2511.04805)| 2025 | Arxiv  | Mixtral-8x7B, Deepseek-MoE\n | [Enhanced Expert Merging for Mixture-of-Experts in Graph Foundation Models](https:\u002F\u002Fopenreview.net\u002Fpdf?id=fyqqd1lHDb) | 2025 | Arxiv  | LLaMA-3.1-8B\n | [Expert Merging in Sparse Mixture of Experts with Nash Bargaining](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.16138)| 2025 |Arxiv  |Qwen1.5-MoE-14B, DeepSeek-MoE-16B |\n | [MergeMoE: Efficient Compression of MoE Models via Expert Output Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.14436)| 2025 |Arxiv  | DeepSeekMoE, Qwen1.5-MoE-A2.7B, and Qwen3-30B-A3B |\n | [Faster, Smaller, and Smarter: Task-Aware Expert Merging for Online MoE Inference](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.21966)| 2025 |Arxiv  | \n | [Sub-MoE: Efficient Mixture-of-Expert LLMs Compression via Subspace Expert Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.23266) | 2025 |Arxiv  | Mixtral 8x7B, Qwen3- 235B-A22B, Qwen1.5-MoE-A2.7B, and DeepSeekMoE-16B-Base\n | [On Linear Mode Connectivity of Mixture-of-Experts Architectures](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.11348)| 2025 |NeurIPS  |  \n | [Merge, then compress: Demystify efficient SMoe with hints from its routing policy](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.01334)|2024|ICLR|fairseq-moe15b SMoE\n | [Merging Experts into One: Improving Computational Efficiency of Mixture of Experts](https:\u002F\u002Faclanthology.org\u002F2023.emnlp-main.907.pdf) | 2023 |EMNLP  | \n\n#### Mixing Datasets via Model Merging\n | **Paper Title** | **Year** | **Conference\u002FJournal** | **Remark** |\n | --------------- | :----: | :----: | :----: |\n | [OPTIMER: Optimal Distribution Vector Merging Is Better than Data Mixing for Continual Pre-Training](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.28858)| 2026 |  Arxiv |Gemma 3 27B\n | [Linear Model Merging Unlocks Simple and Scalable Multimodal Data Mixture Optimization](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.04937)| 2026 |  Arxiv | Qwen2-VL-2B and Intern3.5-VL-2B\n | [Decouple Searching from Training: Scaling Data Mixing via Model Merging for Large Language Model Pre-training](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.00747)| 2026 |  Arxiv | Qwen3-1.7B\n | [Multi-task Code LLMs: Data Mix or Model Merge?](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.21115)| 2026 | Arxiv  | Qwen Coder 2.5 7B, DeepSeek 7B\n | [MergeMix: Optimizing Mid-Training Data Mixtures via Learnable Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.17858)| 2026 |  Arxiv | 8B and 16B MoE\n | [Merge to Mix: Mixing Datasets via Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.16066)| 2025 |  Arxiv | Llama-3-8B-Instruct\n\n \n#### LLM Agent Merging\n\n  | **Paper Title** | **Year** | **Conference\u002FJournal** | **Remark** |\n  | --------------- | :----: | :----: | :----: |\n  | [Behavior Knowledge Merge in Reinforced Agentic Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.13572)| 2026 | Arxiv | RL-trained agentic models\n  | [ARM: Role-Conditioned Neuron Transplantation for Training-Free Generalist LLM Agent Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.07309)| 2026 | Arxiv | Simia-Tau-SFT-Qwen3-8B, SimiaOfficeBench-SFT-Qwen3-8B, and Simia-AgentBench-SFT-Qwen3-8B\n  | [Divide, Optimize, Merge: Scalable Fine-Grained Generative Optimization for LLM Agents](https:\u002F\u002Faclanthology.org\u002F2025.findings-emnlp.1034\u002F)| 2025 | EMNLP | o3-mini\n  | [AgentMerge: Enhancing Generalization in Fine-Tuned LLM Agents](https:\u002F\u002Fopenreview.net\u002Fpdf?id=nZmAwmi2gr))| 2024 | NeurIPS | Llama3.1-8B \n  | [Agent Skill Acquisition for Large Language Models via CycleQD](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.14735) |2024 |Arxiv | Llama3-8B-Instruct|\n \n#### Combine the Capabilities of Expert LLMs\n\n  | **Paper Title** | **Year** | **Conference\u002FJournal** | **Remark** |\n  | --------------- | :----: | :----: | :----: |\n  | [Merge and Conquer: Instructing Multilingual Models by Adding Target Language Weights](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.28263)| 2026 | Arxiv | Llama 3.1 8B, Qwen3 8B, Qwen3 14B\n  | [Preference-Aligned LoRA Merging: Preserving Subspace Coverage and Addressing Directional Anisotropy](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.26299)| 2026 | Arxiv | LLaMA-3-8B\n  | [Label-Free Cross-Task LoRA Merging with Null-Space Compression](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.26317)| 2026 | Arxiv | LLAMA-3 8B, LLAVA-1.5-7B\n  | [AdaLTM: Adaptive Layer-wise Task Vector Merging for Categorical Speech Emotion Recognition with ASR Knowledge Integration](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.25041)| 2026 | Arxiv | \n  | [Functionality-Oriented LLM Merging on the Fisher–Rao Manifold](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.04972)| 2026 | Arxiv | Qwen2.5-14B, Qwen2.5-14B-Instruct-1M, Qwen2.5-Coder-14B-Instruct, DeepSeek-R1-Distill-Qwen-14B, OpenReasoning-Nemotron-14B\n  | [The Appeal and Reality of Recycling LoRAs with Adaptive Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.12323)| 2026 | Arxiv |   Llama3.1 8B-Instruct\n  | [LS-Merge: Merging Language Models in Latent Space](https:\u002F\u002Fopenreview.net\u002Fpdf?id=VSDV0SWwOC)| 2026 | ICLR | Gemma-3-1B-it, Gemma-3-4B-it, Llama-3-1B-instruct, Llama-2-7b\n  | [Bagging-Based Model Merging for Robust General Text Embeddings](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.05787)| 2026 | Arxiv |  Qwen3-4B\n  | [Data-driven Clustering and Merging of Adapters for On-device Large Language Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.17441)| 2026 | Arxiv |  Llama 3.2 3B, Qwen 2.5 1.5B and StableLM 2 1.6B\n  | [Improving Training Efficiency and Reducing Maintenance Costs via Language Specific Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.16127)| 2026 | Arxiv | Llama-3.1-8b-Instruct\n  | [SimMerge: Learning to Select Merge Operators from Similarity Signals](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.09473)| 2026 | Arxiv | 7B to 111B\n  | [Multi-Stage Evolutionary Model Merging with Meta Data Driven Curriculum Learning for Sentiment-Specialized Large Language Modeling](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.06780)| 2026 | Arxiv |\n  | [ReasonAny: Incorporating Reasoning Capability to Any Model via Simple and Effective Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.05560)| 2026 | Arxiv | QwQ-32B-Preview, Meditron3-Qwen2.5-7B and MMed-Llama3-8B, WiroAIFinance-Qwen-7B and WiroAI-Finance-Llama8B\n  | [Reliable Cultural Knowledge Preservation in Multilingual LLMs through Model Merging](https:\u002F\u002Fraw.githubusercontent.com\u002Fmlresearch\u002Fv310\u002Fmain\u002Fassets\u002Fnguyen25b\u002Fnguyen25b.pdf)| 2025 | Arxiv |Qwen-2.5-3B\n  | [AlignMerge - Alignment-Preserving Large Language Model Merging via Fisher-Guided Geometric Constraints](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2512.16245)| 2025 | Arxiv | LLaMA-3 8B, Mistral 7B, Qwen 2, Phi-3.5, Gemma 2\n  | [Grow Up and Merge: Scaling Strategies for Efficient Language Adaptation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2512.10772)| 2025 | Arxiv | \n  | [Adapting Chat Language Models Using Only Target Unlabeled Language Data](https:\u002F\u002Fopenreview.net\u002Fpdf?id=6IdoIKowfe)| 2025 | TMLR | Qwen2.5 7B, Llama 3.1 8B, Qwen3 14B\n  | [RCP-Merging: Merging Long Chain-of-Thought Models with Domain-Specific Models by Considering Reasoning Capability as Prior](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.03140) | 2026 |  AAAI | Qwen2.5-7B, Llama3.1-8B\n  | [Souper-Model: How Simple Arithmetic Unlocks State-of-the-Art LLM Performance](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2511.13254v1)| 2025 | Arxiv | xLAM-2-70b, CoALM-70B, watt-tool-70B, functionary-medium-70B, xLAM-2-8b, ToolACE-2-8B, watt-tool-8B, BitAgent-8B, CoALM-8B | \n  | [SPEAR-MM: Selective Parameter Evaluation and Restoration via Model Merging for Efficient Financial LLM Adaptation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2511.08500)| 2025 | Arxiv |\n  | [Merging Continual Pretraining Models for Domain-Specialized LLMs: A Case Study in Finance](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2511.02451)| 2025 | Arxiv | Llama-3-8B, Llama-2-7B \n  | [Extracting and Combining Abilities For Building Multi-lingual Ability-enhanced Large Language Models](https:\u002F\u002Faclanthology.org\u002F2025.emnlp-main.887.pdf)| 2025 | EMNLP | LLaMA-3 8B\n  | [Bridging Dialectal Gaps in Arabic Medical LLMs through Model Merging](https:\u002F\u002Faclanthology.org\u002F2025.arabicnlp-main.27.pdf)| 2025 | arabicnlp | \n  | [Adapting Multilingual Models to Code-Mixed Tasks via Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.19782)| 2025 | Arxiv |\n  | [Harmonizing Diverse Models: A Layer-wise Merging Strategy for Consistent Generation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.14915)| 2025 | Arxiv | Llama-3.1-8B-Instruct and Gemma-3-12B-Instruct\n  | [ABC: Towards a Universal Code Styler through Model Merging](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3763104)| 2025 | ACM on Programming Languages | Qwen2.5-Coder, Deepseek-Coder |\n  | [Family Matters: Language Transfer and Merging for Adapting Small LLMs to Faroese](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.00810)| 2025 | Arxiv |\n  | [Expert Merging: Model Merging with Unsupervised Expert Alignment and Importance-Guided Layer Chunking](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.25712)| 2025 | Arxiv |Mistral-7B, InternVL, Qwen2-VL\n  | [The Thinking Spectrum: An Empirical Study of Tunable Reasoning in LLMs through Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.22034)| 2025 | Arxiv | Qwen3-30B-A3B-Thinking-2507, Qwen3-30B-A3B-Instruct-2507 |\n  | [MLM: Multi-linguistic LoRA Merging](https:\u002F\u002Fopenreview.net\u002Fattachment?id=VAnFWVbYxG&name=pdf) 2025 | NeurIPS WorkShop | LLaMA-3.2 (1B and 3B)\n  | [Model Merging Scaling Laws in Large Language Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.24244)| 2025 | Arxiv | Qwen2.5 0.5, 1.5, 3, 7, 14, 32, 72B\n  | [Harnessing Optimization Dynamics for Curvature-Informed Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.11167)| 2025 |  Arxiv | Llama-3.1-8B\n  | [Kwai Keye-VL 1.5 Technical Report](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.01563)| 2025 |  Arxiv |Keye-VL-8B\n  | [Reasoning Vectors: Transferring Chain-of-Thought Capabilities via Task Arithmetic](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.01363)| 2025 |  Arxiv | QWEN2.5-7B|\n  | [Surrogate Benchmarks for Model Merging Optimization](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.02555)| 2025 |  Arxiv| EvoLLM-JP-v1-7B, shisa-gamma-7b-v1 |\n  | [Tensorized Clustered LoRA Merging for Multi-Task Interference](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.03999)| 2025 |  Arxiv| Mistral-7B\n  | [Efficient Compositional Multi-tasking for On-device Large Language Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.16083)| 2025 |  Arxiv|  Llama 3.1 70B\n  | [HydraOpt: Navigating the Efficiency-Performance Trade-off of Adapter Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.17706)| 2025 |  Arxiv|\n  | [Exploring Sparse Adapters for Scalable Merging of Parameter Efficient Experts](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.07140)| 2025 |  Arxiv|\n  | [Merging Large Language Models for Enhanced Code Generation: A Comparative Study of Model Merging Techniques Across Programming Languages](https:\u002F\u002Fwww.diva-portal.org\u002Fsmash\u002Fget\u002Fdiva2:1973270\u002FFULLTEXT01.pdf)| 2025 |  Open Access in DiVA |CodeQwen1.5-7B, DeepSeek-Coder-6.7b-Base, CodeLlama-34B |\n  | [On Fairness of Task Arithmetic: The Role of Task Vectors](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.24262)| 2025 |  Arxiv| LLaMA2-7B\n  | [The Unreasonable Effectiveness of Model Merging for Cross-Lingual Transfer in LLMs](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.18356)| 2025 |  Arxiv|FALCON 3 7B, QWEN2.5 7B Instruct, LLAMA 3.1 8B Instruct, AYA Expanse 8B\n  | [Model Merging is Secretly Certifiable: Non-Vacuous Generalisation Bounds for Low-Shot Learning](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.15798)| 2025 |  Arxiv|MetaMath-Mistral-7B, Dolphin-2.1-Mistral-7B and Speechless-Code-Mistral-7Bv1.0\n  | [Training-free LLM Merging for Multi-task Learning](https:\u002F\u002Fopenreview.net\u002Fpdf?id=m6A6HoCKvt)| 2025 |ACL  | Echelon-AI\u002FMed-Qwen2-7B, shtdbb\u002Fqwen2-7b-med, Qwen2-Instruct |\n  | [ParamΔ for Direct Weight Mixing: Post-Train Large Language Model at Zero Cost](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.21023)| 2025 |Arxiv  | Llama3-inst-70B, Llama3-base-70B, Llama3.1-base-70B |\n  | [Beyond ‘Aha!’: Toward Systematic Meta-Abilities Alignment in Large Reasoning Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.10554)| 2025 |Arxiv  |Qwen2.5-7B, Qwen2.5-32B|\n  | [Unified Multi-Task Learning & Model Fusion for Efficient Language Model Guardrailing](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.19333)| 2025 |Arxiv  |\n  | [Adapting Language-Specific LLMs to a Reasoning Model in One Day via Model Merging -- An Open Recipe](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.09056)| 2025 |Arxiv  | Typhoon2 R1 70B, Deepseek R1 70B |\n  | [Efficient Model Development through Fine-tuning Transfer](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.20110v1)| 2025 |Arxiv  | Llama 3.1 8B\n  | [Command A: An Enterprise-Ready Large Language Model](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.00698v1)| 2025 |Arxiv  | Command R7B\n  | [Extrapolation Merging: Keep Improving With Extrapolation and Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.04834)| 2025 |Arxiv  |Qwen2-7B, Meta-Llama-3-8B, Mistral-Nemo-Base-2407-12B, Qwen1.5-14B |\n  | [Light-R1: Curriculum SFT, DPO and RL for Long COT from Scratch and Beyond](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.10460)| 2025 |Arxiv  |Light-R1-32B|\n  | [FuseChat-3.0: Preference Optimization Meets Heterogeneous Model Fusion](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.04222v1)| 2025 |Arxiv  |Gemma-2-27B-it, Mistral-Large-Instruct-2407, Qwen-2.5-72B-Instruct, and Llama-3.1-70B-Instruct |\n  | [Superficial Self-Improved Reasoners Benefit from Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.02103)| 2025 |Arxiv  |Llama2-7B\n  | [Nature-Inspired Population-Based Evolution of Large Language Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.01155)| 2025 |Arxiv  |\n  | [Layer-Aware Task Arithmetic: Disentangling Task-Specific and Instruction-Following Knowledge](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.20186)| 2025 |Arxiv  | Gemma-2-9B, Llama-3-8B |\n  | [Mixup Model Merge: Enhancing Model Merging Performance through Randomized Linear Interpolation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.15434)| 2025 | Arxiv | WizardLM-13B, WizardMath-13B, llama-2-13b-code-alpaca\n  | [LoRE-Merging: Exploring Low-Rank Estimation For Large Language Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.10749)| 2025 | Arxiv | NuminaMath-7B, DeepSeek-Math-7B-Base, LLaMA-series models, WizardMath-13B\n  | [Merging Language and Domain Specific Models: The Impact on Technical Vocabulary Acquisition](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.12001)| 2025 | Arxiv | ContactDoctor-8B\n  | [Transferring Textual Preferences to Vision-Language Understanding through Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.13487)| 2025 | Arxiv | Llama-3.2-11B-Vision -Instruct, Llama-3.1-Tulu-2-8B-uf-mean-rm, Llama-3.1-Tulu-3-8B-RM\n  | [Optimal Brain Iterative Merging: Mitigating Interference in LLM Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.12217)| 2025 | Arxiv | Llama-2-13b, WizardMath-13B-V1.0, WizardLM13B-V1.2, llama-2-13b-codealpaca\n  | [An Open Recipe: Adapting Language-Specific LLMs to a Reasoning Model in One Day via Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.09056)| 2025 | Arxiv |Typhoon2 70B Instruct, DeepSeek R1 70B Distill, Llama 3.1 70B, Llama 3.3 70B |\n  | [Fine, I’ll Merge It Myself: A Multi-Fidelity Framework for Automated Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.04030)| 2025 | Arxiv | WizardLM-13B, WizardMath-13B, and llama-2-13b-code-alpaca |\n  | [Skill Expansion and Composition in Parameter Space](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.05932)| 2025 | Arxiv\n  | [InfiFusion: A Unified Framework for Enhanced Cross-Model Reasoning via LLM Fusion](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2501.02795)| 2025 | Arxiv | Qwen2.5-Coder-14B-Instruct, Qwen2.5-14B-Instruct, and Mistral-Small-24B-Instruct-2501 |\n  | [Channel Merging: Preserving Specialization for Merged Experts](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.15283)|2025 |AAAI | Dolphin-2.2.1-Mistral-7B, Speechless-Code-Mistral-7B, MetaMathMistral-7B, Chinese-Mistral-7BInstruct-v0.1 |\n  | [Weighted-reward preference optimization for implicit model fusion](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.03187)| 2025 | ICLR  | LLaMA3-8B-Instruct |\n  | [Enhancing Perception Capabilities of Multimodal LLMs with Training-Free Fusion](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.01289)|2024 |Arxiv | MiniGemini-8B and SLIME-8B |\n  | [AgentMerge: Enhancing Generalization in Fine-Tuned LLM Agents](https:\u002F\u002Fopenreview.net\u002Fpdf?id=nZmAwmi2gr)|2024 |Arxiv | Llama3.1-8B |\n  | [JRadiEvo: A Japanese Radiology Report Generation Model Enhanced by Evolutionary Optimization of Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.09933) |2024 |Arxiv |Bunny-v1_1-Llama-3-8B-V, MMed-Llama-3-8B-EnIns, OpenBioLLM-Llama3-8B, Llama-3-Swallow-8B-Instruct-v0.1|\n  | [If You Can’t Use Them, Recycle Them: Optimizing Merging at Scale Mitigates Performance Tradeoffs](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.04144)|2024 |Arxiv | Command R+ 104B |\n  | [Agent Skill Acquisition for Large Language Models via CycleQD](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.14735) |2024 |Arxiv | Llama3-8B-Instruct|\n  | [Collaboratively adding new knowledge to an LLM](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.14753)|2024 |Arxiv | Meta-Llama-3-8B|\n  | [Unconstrained Model Merging for Enhanced LLM Reasoning](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.13699)|2024 |Arxiv |CodeLlama-7B-Ins, CodeLlama-70B-Ins, Deepseek-Coder-Ins-v1.5, Qwen2.5-Math-7B-Ins, WizardMath-7B-V1.1, OpenMath-Mistral 7B, MetaMath-7B, MetaMath-70B |\n  | [LoRA Soups: Merging LoRAs for Practical Skill Composition Tasks](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.13025)|2024 |Arxiv | Llama-7b, Llama2-7b-chat |\n  | [Merge to Learn: Efficiently Adding Skills to Language Models with Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.12937)|2024 |Arxiv | Llama 2 7B |\n  | [Exploring Model Kinship for Merging Large Language Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.12613) |2024 |Arxiv |Mistral-7B, Mistral-7b-instruct-v0.2, MetaMath-mistral-7b, Open-chat-3.5-1210 |\n  | [Merging in a Bottle: Differentiable Adaptive Merging (DAM) and the Path from Averaging to Automation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.08371)| 2024 |Arxiv  |shisa-gamma-7b, WizardMath-7B-V1.1, Abel-7B-002, Llama-3-SauerkrautLM-8b-Instruct, Llama-3-Open-Ko-8B, llama-3-sqlcoder-8b, Meta-Llama-3-8B |\n  | [Layer Swapping for Zero-Shot Cross-Lingual Transfer in Large Language Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.01335) |2024 |Arxiv | LLAMA 3.1 8B |\n  | [What Matters for Model Merging at Scale?](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.03617)| 2024 | Arxiv | PaLM-2 (1B, 8B, 24B, 64B), PaLM-2-IT (1B, 8B, 24B, 64B)|\n  | [HM3: Hierarchical Multi-Objective Model Merging for Pretrained Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2409.18893) | 2024 | Arxiv | Llama-2-7B-Chat, WizardMath-7B, CodeLlama-7B|\n  | [FUSECHAT: Knowledge Fusion of Chat Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.07990) | 2024 |  Arxiv| OpenChat-3.5-7B, Starling-LM-7B-alpha, NH2-SOLAR-10.7B, InternLM2-Chat-20B, Mixtral-8x7B-Instruct, and Qwen-1.5-Chat-72B|\n  | [SQL-GEN: Bridging the Dialect Gap for Text-to-SQL Via Synthetic Data And Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.12733) | 2024 |  Arxiv|CodeLlama 7B|\n  | [It’s Morphing Time: Unleashing the Potential of Multiple LLMs via Multi-objective Optimization](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.00487) | 2024 | Arxiv | Qwen1.5-7B-Chat, Liberated-Qwen1.5-7B, firefly-qwen1.5-en-7B |\n  | [Knowledge Fusion By Evolving Weights of Language Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.12208) | 2024 | ACL |\n  | [LLM Merging: Building LLMs Efficiently through Merging](https:\u002F\u002Fopenreview.net\u002Fpdf?id=TiRQ4Gl4Ir)| 2024 |  NeurIPS 2024 Competition Track | LLaMA-7B, Mistral-7B, Gemma-7B |\n  | [Extend Model Merging from Fine-Tuned to Pre-Trained Large Language Models via Weight Disentanglement](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.03092)| 2024 |  Arxiv | Qwen1.5-7B, Qwen1.5-Chat-7B, Sailor-7B, Qwen1.5-14B, Qwen1.5-Chat-14B, Sailor-14B, WizardLM-13B, WizardMath-13B, llama-2-13b-code-alpaca |\n  | [It’s Morphing Time: Unleashing the Potential of Multiple LLMs via Multi-objective Optimization](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.00487)| 2024 |  Arxiv | Qwen1.5-7B-Chat, Liberated-Qwen1.5-7B,  firefly-qwen1.5-en-7B|\n  | [MetaGPT: Merging Large Language Models Using Model Exclusive Task Arithmetic](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.11385) | 2024 | Arxiv | LLaMA-2-7B, Mistral-7B, LLaMA-2-13B |\n  | [PROMETHEUS 2: An Open Source Language Model Specialized in Evaluating Other Language Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.01535)| 2024 | Arxiv | Mistral-Instruct-7B, Mixtral-Instruct-8x7B|\n  | [Knowledge fusion of large language models](https:\u002F\u002Fopenreview.net\u002Fpdf?id=jiDsk12qcz) | 2024 |  ICLR | Llama-2 7B, OpenLLaMA 7B, MPT 7B |\n  | [Language models are super mario: Absorbing abilities from homologous models as a free lunch](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.03099) | 2024 | ICML | WizardLM-13B, WizardMath-13B, and llama-2-13b-codealpaca, Mistral-7B|\n  | [Controlled Text Generation via Language Model Arithmetic](https:\u002F\u002Fopenreview.net\u002Fpdf?id=SLw9fp4yI6) | 2024 | ICML | MPT-7B, Pythia-12B, Llama-2-Chat-13B |\n  | [MeteoRA: Multiple-tasks Embedded LoRA for Large Language Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.13053v2)|  2024 |Arxiv  | LlaMA2-13B and LlaMA3-8B (LoRA) |\n  | [Evolutionary optimization of model merging recipes](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.13187) | 2024 | Arxiv | shisa-gamma-7b-v1, WizardMath-7B-V1.1, Arithmo2-Mistral-7B, Abel-7B-002, Mistral-7B-v0.1, LLaVA-1.6-Mistral-7B|\n  | [Branch-Train-MiX: Mixing Expert LLMs into a Mixture-of-Experts LLM](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.07816) |  2024 |Arxiv  | Llama-2-7B |\n  | [Knowledge Fusion of Chat LLMs: A Preliminary Technical Report](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.16107) | 2024 |Arxiv  | NH2-Mixtral-8x7B, NH2-Solar-10.7B, OpenChat-3.5-7B|\n\n**Note: The following papers are from: [LLM Merging Competition at NeurIPS 2024](https:\u002F\u002Fllm-merging.github.io\u002F)**\n\n| **Paper Title** | **Year** | **Conference\u002FJournal** | **Models** |\n| --------------- | :----: | :----: | :----: |\n| [Llm merging: Building llms efficiently through merging](https:\u002F\u002Fopenreview.net\u002Fforum?id=TiRQ4Gl4Ir)| 2024 | LLM Merging Competition at NeurIPS | - |\n| [Towards an approach combining Knowledge Graphs and Prompt Engineering for Merging Large Language Models](https:\u002F\u002Fopenreview.net\u002Fattachment?id=0I0yYOxHxV&name=pdf)| 2024 | LLM Merging Competition at NeurIPS | meta-llama\u002FLlama-2-7b; microsoft_phi1\u002F2\u002F3 |\n| [Model Merging using Geometric Median of Task Vectors](https:\u002F\u002Fopenreview.net\u002Fpdf?id=4VD2jMqJbN)| 2024 | LLM Merging Competition at NeurIPS | flan_t5_xl |\n| [Interpolated Layer-Wise Merging for NeurIPS 2024 LLM Merging Competition](https:\u002F\u002Fopenreview.net\u002Fattachment?id=taHV1M0KlB&name=pdf)| 2024 | LLM Merging Competition at NeurIPS | suzume-llama-3-8B-multilingual-orpo-borda-top75, Barcenas-Llama3-8bORPO, Llama-3-8B-Ultra-Instruct-SaltSprinkle, MAmmoTH2-8B-Plus, Daredevil-8B|\n| [A Model Merging Method](https:\u002F\u002Fopenreview.net\u002Fpdf?id=zcnDi0i23y)| 2024 | LLM Merging Competition at NeurIPS | - |\n| [Differentiable DARE-TIES for NeurIPS 2024 LLM Merging Competition](https:\u002F\u002Fopenreview.net\u002Fattachment?id=4jqff9QeUD&name=pdf)| 2024 | LLM Merging Competition at NeurIPS | suzume-llama-3-8B-multilingualorpo-borda-top75, MAmmoTH2-8B-Plus and Llama-3-Refueled  |\n| [LLM Merging Competition Technical Report: Efficient Model Merging with Strategic Model Selection, Merging, and Hyperparameter Optimization](https:\u002F\u002Fopenreview.net\u002Fattachment?id=Xl8uuaNj1X&name=pdf)| 2024 | LLM Merging Competition at NeurIPS | MaziyarPanahi\u002FLlama3-8B-Instruct-v0.8, MaziyarPanahi\u002FLlama-3-8B-Instruct-v0.9, shenzhiwang\u002FLlama3-8B-Chinese-Chat,  lightblue\u002Fsuzume-llama-3-8B-multilingual |\n| [Simple Llama Merge: What Kind of LLM Do We Need?](https:\u002F\u002Fopenreview.net\u002Fattachment?id=VndTgXbAgz&name=pdf)| 2024 | LLM Merging Competition at NeurIPS | Hermes-2-Pro-Llama-3-8B, and Daredevil-8B |\n| [LLM Merging Competition Technical Report for NeurIPS 2024: Efficiently Building Large Language Models through Merging](https:\u002F\u002Fopenreview.net\u002Fattachment?id=rJ1miae6PJ&name=pdf) | 2024 | LLM Merging Competition at NeurIPS | Mistral-7B-Instruct94 v2, Llama3-8B-Instruct, Flan-T5-large, Gemma-7B-Instruct, and WizardLM-2-7B |\n| [MoD: A Distribution-Based Approach for Merging Large Language Models](https:\u002F\u002Fopenreview.net\u002Fattachment?id=v2tZ9bNcS5&name=pdf) | 2024 | LLM Merging Competition at NeurIPS |  Qwen2.5-1.5B and Qwen2.5-7B |\n\n### Model Merging in Multimodal Large Language Models\n\n\u003Ccenter>\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEnnengYang_Awesome-Model-Merging-Methods-Theories-Applications_readme_022a63f4c846.png\" alt=\"Model Merging\" width=\"800\"\u002F>\n\u003C\u002Fcenter>\n\n#### Model Merging for Multimodal Fusion\n\n  | **Paper Title** | **Year** | **Conference\u002FJournal** | **Remark** |\n  | --------------- | :----: | :----: | :----: |\n  | [Jointly training large autoregressive multimodal models](https:\u002F\u002Fopenreview.net\u002Fpdf?id=5jcav5RcKw) | 2024 | ICLR |\n  | [Model Composition for Multimodal Large Language Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.12750) | 2024 |ACL  | Vicuna-7B-v1.5|\n  | [π-Tuning: Transferring Multimodal Foundation Models with Optimal Multi-task Interpolation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2304.14381) | 2023 | ICML |\n  | [An Empirical Study of Multimodal Model Merging](https:\u002F\u002Faclanthology.org\u002F2023.findings-emnlp.105.pdf) | 2023  | EMNLP |\n  | [UnIVAL: Unified Model for Image, Video, Audio and Language Tasks](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2307.16184) | 2023 |  TMLR |\n\n#### Model Merging for Cross-Modal Knowledge Transfer\n\n  | **Paper Title** | **Year** | **Conference\u002FJournal** | **Remark** |\n  | --------------- | :----: | :----: | :----: |\n  | [Multimodal Attention Merging for Improved Speech Recognition and Audio Event Classification](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.14378) |  2024 | ICASSP Workshop  |\n\n#### Combine the Capabilities of Expert MLLMs\n\n  | **Paper Title** | **Year** | **Conference\u002FJournal** | **Remark** |\n  | --------------- | :----: | :----: | :----: |\n  | [Reasoning Resides in Layers: Restoring Temporal Reasoning in Video-Language Models with Layer-Selective Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2604.11399)| 2026 | Arxiv |  LongVA-7B, InternVL3-8B, Qwen3-VL-4B\n  | [One Model to Translate Them All? A Journey to Mount Doom for Multilingual Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2604.02881)| 2026 | Arxiv | Qwen-2.5-3B-Instruct\n  | [Tug-of-War No More: Harmonizing Accuracy and Robustness in Vision-Language Models via Stability-Aware Task Vector Merging](https:\u002F\u002Fopenreview.net\u002Fforum?id=KOO1cDm2bt)| 2026 | ICLR | LLaVA-1.5-7B, OpenFlamingo-9B\n  | [SSAM: Singular Subspace Alignment for Merging Multimodal Large Language Models](https:\u002F\u002Farxiv.org\u002Fabs\u002F2603.21584)| 2026 | Arxiv |\n  | [ES-Merging: Biological MLLM Merging via Embedding Space Signals](https:\u002F\u002Farxiv.org\u002Fabs\u002F2603.14405)| 2026 | Arxiv |\n  | [VisCodex: Unified Multimodal Code Generation via Merging Vision and Coding Models](https:\u002F\u002Fopenreview.net\u002Fpdf?id=RU76KTF1Da)| 2026 | ICLR | VisCodex-8B, VisCodex-33B\n  | [FRISM: Fine-Grained Reasoning Injection via Subspace-Level Model Merging for Vision–Language Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.21187)| 2026 | Arxiv | Qwen2.5-VL-7B-Instruct, DeepSeekR1-Distill-Qwen-7B, Qwen2.5-VL-32B-Instruct, QwQ-32B\n  | [PlaM: Training-Free Plateau-Guided Model Merging for Better Visual Grounding in MLLMs](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.07645)| 2026 | Arxiv |LLaVA-v1.5-7B, Qwen2.5-VL-7B-Instruct, Qwen3-VL-8B-Instruct\n  | [Where and What Matters: Sensitivity-Aware Task Vectors for Many-Shot Multimodal In-Context Learning](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2511.08246)| 2026 | AAAI | Qwen-VL-7B, Idefics2-8B\n  | [MergeVLA: Cross-Skill Model Merging Toward a Generalist Vision-Language-Action Agent](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2511.19434)| 2025 | Arxiv | Qwen2.5-0.5B\n  | [Tiny-R1V: Lightweight Multimodal Unified Reasoning Model via Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.08987)| 2025 | Arxiv |\n  | [Model Merging to Maintain Language-Only Performance in Developmentally Plausible Multimodal Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.01845)| 2025 | Arxiv |\n  | [Expert Merging: Model Merging with Unsupervised Expert Alignment and Importance-Guided Layer Chunking](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.25712)| 2025 | Arxiv |Mistral-7B, InternVL, Qwen2-VL\n  | [UQ-Merge: Uncertainty Guided Multimodal Large Language Model Merging](https:\u002F\u002Faclanthology.org\u002F2025.findings-acl.73.pdf)| 2025 | ACL |  LLaVA-v1.5-7B |\n  | [Graft: Integrating the Domain Knowledge via Efficient Parameter Synergy for MLLMs](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.23940)| 2025 | Arxiv | Qwen2-VL-2B |\n  | [Unifying Multimodal Large Language Model Capabilities and Modalities via Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.19892)| 2025 | Arxiv | Qwen2-VL-7B-Base, Vicuna-7B-v1.5 |\n  | [Bring Reason to Vision: Understanding Perception and Reasoning through Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.05464)| 2025 |ICML  | LLaVA-NeXT-8B, Idefics2-8B, InternVL2-76B |\n  | [REMEDY: Recipe Merging Dynamics in Large Vision-Language Models](https:\u002F\u002Fopenreview.net\u002Fpdf?id=iX7eHHE5Tx)| 2025 | ICLR | LLaVA-1.5 (Vicuna-7B)\n  | [RobustMerge: Parameter-Efficient Model Merging for MLLMs with Direction Robustness](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.17159)| 2025 | NeurIPS | LLaVA-v1.5-7B\n  | [Parameter Efficient Merging for Multimodal Large Language Models with Complementary Parameter Adaptation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.17159)| 2025 | Arxiv | LLaVA\n  | [AdaMMS: Model Merging for Heterogeneous Multimodal Large Language Models with Unsupervised Coefficient Optimization](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.23733)| 2025 | Arxiv | LLaVA-OneVision-7B, Qwen2-VL-7B, LLaVA-v1.5-7B, CogVLM-chat-7B|\n  | [Transferring Textual Preferences to Vision-Language Understanding through Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.13487v1)| 2025 | Arxiv | Llama-3.2-11B-Vision-Instruct, Llama-3.1-Tulu-2-8B-uf-meanrm, Llama-3.1-Tulu-3- 8B-RM, Llama-3.1-8B|\n\n### Model Merging in Image Generative Models\n\n\u003Ccenter>\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEnnengYang_Awesome-Model-Merging-Methods-Theories-Applications_readme_022a63f4c846.png\" alt=\"Model Merging\" width=\"800\"\u002F>\n\u003C\u002Fcenter>\n\n#### Style Mixing in Generative Models\n\n| **Paper Title** | **Year** | **Conference\u002FJournal** | **Remark** |\n| --------------- | :----: | :----: | :----: |\n| [DiffGraph: An Automated Agent-driven Model Merging Framework for In-the-Wild Text-to-Image Generation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.20470)| 2026 | Arxiv |Stable Diffusion v1.5, FLUX.1 Dev\n| [GimmBO: Interactive Generative Image Model Merging via Bayesian Optimization](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.18585)| 2026 | Arxiv |\n| [Rethinking Inter-LoRA Orthogonality in Adapter Merging: Insights from Orthogonal Monte Carlo Dropout](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.03262)| 2025 | Arxiv |\n| [BlockLoRA: Modular Customization of Diffusion Models via Blockwise-Parameterized Low-Rank Adaptation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.08575v1)| 2025 | Arxiv |\n| [LoRA.rar: Learning to Merge LoRAs via Hypernetworks for Subject-Style Conditioned Image Generation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.05148)| 2024 | Arxiv | LLaVA-Critic 7b |\n| [IterIS: Iterative Inference-Solving Alignment for LoRA Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.15231) | 2024 | Arxiv |\n| [Diffusion Soup: Model Merging for Text-to-Image Diffusion Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.08431) | 2024 | ECCV |\n| [MaxFusion: Plug&Play Multi-Modal Generation in Text-to-Image Diffusion Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2404.09977) |  2024 | Arxiv |\n| [MoLE: Mixture of LoRA Experts](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2404.13628) |  2024| ICLR |\n| [LoRA-Composer: Leveraging Low-Rank Adaptation for Multi-Concept Customization in Training-Free Diffusion Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.11627) |  2024 | Arxiv |\n| [Multi-LoRA Composition for Image Generation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.16843) |  2024 | Arxiv |\n| [Mix-of-Show: Decentralized Low-Rank Adaptation for Multi-Concept Customization of Diffusion Models](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper_files\u002Fpaper\u002F2023\u002Ffile\u002F3340ee1e4a8bad8d32c35721712b4d0a-Paper-Conference.pdf) |  2023 | NeurIPS |\n| [Merging loras](https:\u002F\u002Fgithub.com\u002Fcloneofsimo\u002Flora) | 2023  | (github) |\n| [ZipLoRA: Any Subject in Any Style by Effectively Merging LoRAs](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.13600) | 2023 | Arxiv |\n| [GAN Cocktail: mixing GANs without dataset access](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2022\u002Fpapers_ECCV\u002Fpapers\u002F136830207.pdf) | 2022 | ECCV |\n\n\u003C!-- | [Merging Improves Self-Critique Against Jailbreak Attacks]() |  2024 | Arxiv | -->\n\n#### Reducing Training Cost of Generative Models\n\n| **Paper Title** | **Year** | **Conference\u002FJournal** | **Remark** |\n| --------------- | :----: | :----: | :----: |\n| [Linear Combination of Saved Checkpoints Makes Consistency and Diffusion Models Better](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2404.02241) | 2024 | Arxiv |\n| [A Unified Module for Accelerating STABLE-DIFFUSION: LCM-LORA](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.16024)| 2024 | Arxiv |\n\n#### Enhancing the Faithfulness (or Generation Quality) of Diffusion Models\n\n| **Paper Title** | **Year** | **Conference\u002FJournal** | **Remark** |\n| --------------- | :----: | :----: | :----: |\n| [Decouple-Then-Merge: Towards Better Training for Diffusion Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.06664)|  2024 | Arxiv |\n| [SELMA: Learning and Merging Skill-Specific Text-to-Image Experts with Auto-Generated Data](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.06952) |  2024 | Arxiv |\n\n#### Deepfake Detection\n\n| **Paper Title** | **Year** | **Conference\u002FJournal** | **Remark** |\n| --------------- | :----: | :----: | :----: |\n[Real-Aware Residual Model Merging for Deepfake Detection](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.24367)| 2025 | Arxiv |\n\n### Model Merging in Video Generative Models\n\n#### Enhancing Motion Modeling\n\n| **Paper Title** | **Year** | **Conference\u002FJournal** | **Remark** |\n| --------------- | :----: | :----: | :----: |\n| [Extrapolating and Decoupling Image-to-Video Generation Models: Motion Modeling is Easier Than You Think](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.00948)|  2025 | CVPR | Dynamicrafter，SVD |\n\n----------\n\n## Application of Model Merging in Different Machine Learning Subfields\n\n\u003Ccenter>\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEnnengYang_Awesome-Model-Merging-Methods-Theories-Applications_readme_cbfafcf2092b.png\" alt=\"Model Merging\" width=\"800\"\u002F>\n\u003C\u002Fcenter>\n\n### Model Merging in Continual Learning\n\n#### Model Merging to Mitigate Catastrophic Forgetting\n\n  | **Paper Title** | **Year** | **Conference\u002FJournal** | **Remark** |\n  | --------------- | :----: | :----: | :----: |\n  | [MAny: Merge Anything for Multimodal Continual Instruction Tuning](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2604.14016)| 2026  |Arxiv  | LLaVA-1.5-7B and InternVL-Chat7B\n  | [BidirLM: From Text to Omnimodal Bidirectional Encoders by Adapting and Composing Causal LLMs](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2604.02045)| 2026  |Arxiv  | Qwen3-1.7B and Qwen3-0.6B\n  | [Countering Catastrophic Forgetting of Large Language Models for Better Instruction Following via Weight-Space Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2604.01538)| 2026  |Arxiv  | Llama-3.1-8B-Base\n  | [Mapping Post-Training Forgetting in Language Models at Scale](https:\u002F\u002Fopenreview.net\u002Fpdf?id=qCIg2WGudx)| 2026  |ICLR  |\n  | [LCA: Local Classifier Alignment for Continual Learning](https:\u002F\u002Fopenreview.net\u002Fpdf?id=3uINmRldVW)| 2026  |ICLR  |\n  | [MERGETUNE: Continued fine-tuning of vision-language models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.10497)| 2026  |Arxiv  |\n  | [Merge before Forget: A Single LoRA Continual Learning via Continual Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2512.23017)| 2025  |Arxiv  |Llama-2-7B-chat, Llama-2-13B-chat, Qwen2.5-7B\n  | [Robust Finetuning of Vision-Language-Action Robot Policies via Parameter Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2512.08333)| 2025  |Arxiv  |\n  | [Merging without Forgetting: Continual Fusion of Task-Specific Models via Optimal Transport](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2511.19561)| 2025  |Arxiv  |\n  | [MergeSlide: Continual Model Merging and Task-to-Class Prompt-Aligned Inference for Lifelong Learning on Whole Slide Images](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2511.13099)| 2025  |Arxiv  |\n  | [RECALL: REpresentation-aligned Catastrophic-forgetting ALLeviation via Hierarchical Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.20479)| 2025  |Arxiv  |Qwen2-7B-Instruct, Llama-2-7B-chat |\n  | [DitHub: A Modular Framework for Incremental Open-Vocabulary Object Detection](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.09271)| 2025 | NeurIPS |\n  | [K-Merge: Online Continual Merging of Adapters for On-device Large Language Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.13537)| 2025  |Arxiv  |\n  | [Toward a Holistic Approach to Continual Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.23592)| 2025  |Arxiv  |\n  | [Null-Space Filtering for Data-Free Continual Model Merging: Preserving Stability, Promoting Plasticity](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.21413)| 2026  |ICLR  |\n  | [AIMMerging: Adaptive Iterative Model Merging Using Training Trajectories for Language Model Continual Learning](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.17348)| 2025 | EMNLP | LLaMA2-7B, LLaMA2-13B |\n  | [HAM: Hierarchical Adapter Merging for Scalable Continual Learning](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.13211)| 2025 | Arxiv |\n  | [Learn from Downstream and Be Yourself in Multimodal Large Language Models Fine-Tuning](https:\u002F\u002Fopenreview.net\u002Fpdf?id=FKqmIAnkrb)| 2025 | ICML |LLaVA-1.5-7B \n  | [DuET: Dual Incremental Object Detection via Exemplar-Free Task Arithmetic](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.21260)| 2025 | Arxiv |\n  | [Integrating Task-Specific and Universal Adapters for Pre-Trained Model-based Class-Incremental Learning](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.08165)| 2025 | ICCV |\n  | [Forgetting of task-specific knowledge in model merging-based continual learning](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.23311)| 2025 | Arxiv |\n  | [Modular Delta Merging with Orthogonal Constraints: A Scalable Framework for Continual and Reversible Model Composition](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.20997) | 2025 | Arxiv |\n  | [RegCL: Continual Adaptation of Segment Anything Model via Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.12297) | 2025 | Arxiv |\n  | [Continual Learning in Vision-Language Models via Aligned Model Merging](http:\u002F\u002Farxiv.org\u002Fabs\u002F2506.03189) | 2025 | Arxiv |\n  | [Train with Perturbation, Infer after Merging: A Two-Stage Framework for Continual Learning](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.22389)| 2025  |Arxiv  |\n  | [MINGLE: Mixture of Null-Space Gated Low-Rank Experts for Test-Time Continual Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.11883)| 2025  |NeurIPS  |\n  | [Analysis of Model Merging Methods for Continual Updating of Foundation Models in Distributed Data Settings](https:\u002F\u002Fwww.mdpi.com\u002F2076-3417\u002F15\u002F9\u002F5196)| 2025  |Arxiv  |  Applied Sciences\n  | [BECAME: BayEsian Continual Learning with Adaptive Model MErging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.02666v1)| 2025  |Arxiv  |\n  | [Merge then Realign: Simple and Effective Modality-Incremental Continual Learning for Multimodal LLMs](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.07663)| 2025  |Arxiv  | Llama-3-8B-Instruct |\n  | [Cost-Efficient Continual Learning with Sufficient Exemplar Memory](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.07274)| 2025  |Arxiv  | |\n  | [Continual Model Merging without Data: Dual Projections for Balancing Stability and Plasticity](https:\u002F\u002Fopenreview.net\u002Fpdf?id=zD5cUX67b9)| 2025  |NeurIPS  | |\n  | [Merging Models on the Fly Without Retraining: A Sequential Approach to Scalable Continual Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2501.09522)| 2025  |NeurIPS  | |\n  | [Soup to go: mitigating forgetting during continual learning with model averaging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2501.05559) | 2025 |Arxiv  | Llama 2 (7B) |\n  | [Adapter Merging with Centroid Prototype Mapping for Scalable Class-Incremental Learning](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.18219)| 2024 |Arxiv  |\n  | [Parameter Averaging is All You Need to Prevent Forgetting](https:\u002F\u002Fpoonehmousavi.github.io\u002Fassets\u002Fpublications\u002F2010_machine_readable_dictionaries\u002FPARAMETER_AVERAGING_IS_ALL_YOU_NEED_TO_PREVENT_FORGETTING.pdf)| 2024 | SLT Workshop |\n  | [DESIRE: Dynamic Knowledge Consolidation for Rehearsal-Free Continual Learning](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.19154)| 2024 |Arxiv  |\n  | [Adaptive LoRA Merging for Efficient Domain Incremental Learning](https:\u002F\u002Fopenreview.net\u002Fpdf?id=tlB5eonGEk)| 2024 | NeurIPS Workshop  |\n  | [LiNeS: Post-training Layer Scaling Prevents Forgetting and Enhances Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.17146)| 2024 |Arxiv  |\n  | [Model Tailor: Mitigating Catastrophic Forgetting in Multi-modal Large Language Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.12048) | 2024 |ICML  | InstructBLIP (Vicuna-7B), LLaVA-1.5 (Vicuna7B) |\n  | [Adaptive Discovering and Merging for Incremental Novel Class Discovery](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.03382) | 2024 |AAAI  |\n  | [MagMax: Leveraging Model Merging for Seamless Continual Learning](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.06322) | 2024 |  ECCV |\n  | [Lm-cocktail: Resilient tuning of language models via model merging](https:\u002F\u002Faclanthology.org\u002F2024.findings-acl.145.pdf) |  2024 | ACL Findings | Llama-2-chat-7b |\n  | [Backward Compatibility During Data Updates by Weight Interpolation](https:\u002F\u002Faclanthology.org\u002F2024.eacl-long.174.pdf)|  2024 | EACL |\n  | [Learning to Route for Dynamic Adapter Composition in Continual Learning with Language Models](https:\u002F\u002Faclanthology.org\u002F2024.findings-emnlp.38.pdf) |  2024 |  EMNLP Findings |\n  | [Mitigating Catastrophic Forgetting in Language Transfer via Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.08699) |  2024 |  Arxiv | MISTRAL-7B, LLAMA-3-8B|\n  | [Domain Adaptation of Llama3-70B-Instruct through Continual Pre-Training and Model Merging: A Comprehensive Evaluation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.14971) |  2024 |  Arxiv | Llama3-70B|\n  | [Lottery Ticket Adaptation: Mitigating Destructive Interference in LLMs](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.16797) |  2024 |  Arxiv | Mistral-7B, Llama-3-8B |\n  | [WARP: On the Benefits of Weight Averaged Rewarded Policies](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.16768) | 2024 | Arxiv | Gemma-7B|\n  | [A Second-Order perspective on Compositionality and Incremental Learning](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.16350) | 2024 |  Arxiv |\n  | [DynaMMo: Dynamic Model Merging for Efficient Class Incremental Learning for Medical Images](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2404.14099) |  2024|  Arxiv |\n  | [DAM: Dynamic Adapter Merging for Continual Video QA Learning](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.08755) |  2024 | Arxiv |\n  | [Task-Specific Skill Localization in Fine-tuned Language Model](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2302.06600)|  2023| ICML |\n  | [Tangent model composition for ensembling and continual fine-tuning](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FLiu_Tangent_Model_Composition_for_Ensembling_and_Continual_Fine-tuning_ICCV_2023_paper.pdf) |  2023| ICCV |\n  | [A Unified Continual Learning Framework with General Parameter-Efficient Tuning](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FGao_A_Unified_Continual_Learning_Framework_with_General_Parameter-Efficient_Tuning_ICCV_2023_paper.pdf) |  2023| ICCV |\n  | [Task Arithmetic with LoRA for Continual Learning](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.02428) |  2023 | NeurIPS Workshop |\n  | [Mitigating the Alignment Tax of RLHF](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2309.06256)|  2023 | Arxiv | Mistral-7B|\n  | [PAINT: Patching open-vocabulary models by interpolating weights](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2208.05592) |2022  |NeurIPS   |\n  | [Robust fine-tuning of zero-shot models](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FWortsman_Robust_Fine-Tuning_of_Zero-Shot_Models_CVPR_2022_paper.pdf) |2022  |CVPR  |\n\n### Model Merging in Multi-Task\u002FMulti-Objective\u002FMulti-Domain\u002FAuxiliary Learning\n\n#### Model Merging for Knowledge Transfer in Multi-Task Learning\n\n  | **Paper Title** | **Year** | **Conference\u002FJournal** | **Remark** |\n  | --------------- | :----: | :----: | :----: |\n  | [G-Merging: Graph Models Merging for Parameter-Efficient Multi-Task Knowledge Consolidation](https:\u002F\u002Fopenreview.net\u002Fpdf?id=FoTtvLkkfU)| 2026 | ICLR  |\n  | [Multi-task Code LLMs: Data Mix or Model Merge?](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.21115)| 2026 | Arxiv  | Qwen Coder 2.5 7B, DeepSeek 7B\n  | [DivMerge: A divergence-based model merging method for multi-tasking](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.02108)| 2025 | Arxiv  |\n  | [Single-Input Multi-Output Model Merging: Leveraging Foundation Models for Dense Multi-Task Learning](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.11268)| 2025 | Arxiv  |\n  | [Improving General Text Embedding Model: Tackling Task Conflict and Data Imbalance through Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.15035)| 2024 |Arxiv  |\n  | [LiNeS: Post-training Layer Scaling Prevents Forgetting and Enhances Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.17146)| 2024 |Arxiv  |\n  | [Mix Data or Merge Models? Optimizing for Diverse Multi-Task Learning](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.10801)| 2024 |Arxiv  |Aya 23 8B|\n  | [Foldable SuperNets: Scalable Merging of Transformers with Different Initializations and Tasks](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.01483v1) | 2024 |Arxiv  |\n  | [Task Prompt Vectors: Effective Initialization through Multi-Task Soft-Prompt Transfer](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.01119) | 2024 |Arxiv  |\n  | [Evolutionary optimization of model merging recipes](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.13187) | 2024 |Arxiv  | shisa-gamma-7b-v1, WizardMath-7B-V1.1, Arithmo2-Mistral-7B, Abel-7B-002, Mistral-7B-v0.1, LLaVA-1.6-Mistral-7B|\n  | [Language Models are Super Mario: Absorbing Abilities from Homologous Models as a Free Lunch](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.03099) | 2024 | ICML  | WizardLM-13B, WizardMath-13B, and llama-2-13b-codealpaca, Mistral-7B|\n  | [Representation Surgery for Multi-Task Model Merging](https:\u002F\u002Fopenreview.net\u002Fpdf\u002F602906ec02919eb95d78d634321fcba1b68a2f03.pdf) |2024  | ICML |\n  | [Merging Multi-Task Models via Weight-Ensembling Mixture of Experts](https:\u002F\u002Fopenreview.net\u002Fpdf\u002F2aee8072945cd0485e619dd88c35566610cd5042.pdf) |  2024| ICML |\n  | [ZipIt! Merging Models from Different Tasks without Training](https:\u002F\u002Fopenreview.net\u002Fpdf?id=LEYUkvdUhq) | 2024 |ICLR  |\n  | [AdaMerging: Adaptive Model Merging for Multi-Task Learning](https:\u002F\u002Fopenreview.net\u002Fpdf?id=nZP6NgD3QY) | 2024  | ICLR |\n  | [Merging Decision Transformers: Weight Averaging for Forming Multi-Task Policies](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2303.07551)| 2023 |Arxiv  |\n  | [Resolving Interference When Merging Models](https:\u002F\u002Fopenreview.net\u002Fpdf?id=xtaX3WyCj1) | 2023  |  NeurIPS |\n  | [Editing models with task arithmetic](https:\u002F\u002Fopenreview.net\u002Fpdf?id=6t0Kwf8-jrj) | 2023 | ICLR |\n\n#### Model Merging for Knowledge Transfer in Multi-Objective Optimization\n\n  | **Paper Title** | **Year** | **Conference\u002FJournal** | **Remark** |\n  | --------------- | :----: | :----: | :----: |\n  | [From Parameter to Representation: A Closed-Form Approach for Controllable Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2511.10943) | 2026 | AAAI  |\n  | [Merge and Guide: Unifying Model Merging and Guided Decoding for Controllable Multi-Objective Generation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.03782) | 2025 | Arxiv  | LLaMA-2-7B\n  | [Pareto Merging: Multi-Objective Optimization for Preference-Aware Model Merging](https:\u002F\u002Fopenreview.net\u002Fpdf?id=D7qRwx6BOS)| 2025 | ICML  |\n  | [Bone Soups: A Seek-and-Soup Model Merging Approach for Controllable Multi-Objective Generation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.10762)| 2025 | Arxiv  |LLaMA-2 7B\n  | [You Only Merge Once: Learning the Pareto Set of Preference-Aware Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.12105) |  2024 |Arxiv  |\n  | [Towards Efficient Pareto Set Approximation via Mixture of Experts Based Model Fusion](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.09770) |  2024 |Arxiv  |\n  | [MAP: Low-compute Model Merging with Amortized Pareto Fronts via Quadratic Approximation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.07529) | 2024 |Arxiv  | Llama3-8B|\n\n#### Model Merging for Knowledge Transfer in Multi-Domain Learning\n\n  | **Paper Title** | **Year** | **Conference\u002FJournal** | **Remark** |\n  | --------------- | :----: | :----: | :----: |\n  | [Domain-Adaptive Model Merging across Disconnected Modes](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.05957) | 2026 | Arxiv  | \n  | [Bridging Domains through Subspace-Aware Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.05768) | 2026 | Arxiv  | \n  | [Exploring the potential and limitations of Model Merging for Multi-Domain Adaptation in ASR](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.05354) | 2026 | Arxiv  | \n  | [To Mix or To Merge: Toward Multi-Domain Reinforcement Learning for Large Language Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.12566) | 2026 | Arxiv  | Qwen3-4B-Base\n  | [MMGRid: Navigating Temporal-aware and Cross-domain Generative Recommendation via Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.15930) | 2026 | Arxiv  | Qwen3-0.6B \n  | [MergeRec: Model Merging for Data-Isolated Cross-Domain Sequential Recommendation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.01753)| 2026 | KDD |\n  | [DEM: Distribution Edited Model for Training with Mixed Data Distributions](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.15570) | 2024 |  Arxiv |  OpenLLaMA-7B, OpenLLaMA-13B |\n  | [Merging Vision Transformers from Different Tasks and Domains](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.16240) | 2023 |Arxiv  |\n\n#### Model Merging for Knowledge Transfer in Auxiliary Learning\n\n  | **Paper Title** | **Year** | **Conference\u002FJournal** | **Remark** |\n  | --------------- | :----: | :----: | :----: |\n  | [ForkMerge: Mitigating Negative Transfer in Auxiliary-Task Learning](https:\u002F\u002Fopenreview.net\u002Fpdf?id=vZHk1QlBQW) | 2023 | NeurIPS |\n\n### Model Merging in Out-of-Distribution\u002FDomain Generalization\n\n#### Model Merging for Better Out-of-Distribution Generalization\n\n| **Paper Title** | **Year** | **Conference\u002FJournal** | **Remark** |\n| --------------- | :----: | :----: | :----: |\n| [Exploring the potential and limitations of Model Merging for Multi-Domain Adaptation in ASR](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.05354) | 2026 | Arxiv  | \n| [Model soups need only one ingredient](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.09689)| 2026 | Arxiv |\n| [System Report for CCL25-Eval Task 10: Prompt-Driven Large Language Model Merge for Fine-Grained Chinese Hate Speech Detection](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2512.09563)| 2025 | Arxiv |Qwen2.5-7B-Instruct\n| [Merging Smarter, Generalizing Better: Enhancing Model Merging on OOD Data](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.09093)| 2025 | Arxiv |\n| [Out-of-Distribution Graph Models Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.03674)| 2025 | Arxiv |\n| [SeWA: Selective Weight Average via Probabilistic Masking](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.10119)| 2025 | Arxiv |\n| [When, Where and Why to Average Weights?](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.06761)| 2025 | Arxiv |\n| [DaWin: Training-free Dynamic Weight Interpolation for Robust Adaptation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.03782) | 2024 | NeurIPS 2024 Workshop |\n| [Mitigating Training Imbalance in LLM Fine-Tuning via Selective Parameter Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.03743) | 2024 | Arxiv | Llama-2-7b|\n| [ReVLA: Reverting Visual Domain Limitation of Robotic Foundation Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2409.15250)| 2024 | Arxiv |\n| [Sparse Model Soups: A Recipe for Improved Pruning via Model Averaging](https:\u002F\u002Fopenreview.net\u002Fpdf?id=xx0ITyHp3u) |2024  |ICLR  |\n| [Warm: On the benefits of weight averaged reward models](https:\u002F\u002Fopenreview.net\u002Fpdf?id=s7RDnNUJy6) |2024  | ICML  |\n| [Scalable Learned Model Soup on a Single GPU: An Efficient Subspace Training Strategy](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.03641) |2024  | ECCV |\n| [Adaptive Stochastic Weight Averaging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.19092) | 2024 | JMLR\n| [Population parameter averaging (papa)](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2304.03094) | 2024 | TMLR |\n| [WARP: On the Benefits of Weight Averaged Rewarded Policies](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.16768) | 2024 | Arxiv | Mistral 7B, Mixtral 8x7B|\n| [WASH: Train your Ensemble with Communication-Efficient Weight Shuffling, then Average](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.17517) | 2024 | Arxiv |\n| [Model Stock: All we need is just a few fine-tuned models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.19522) |2024  | Arxiv |\n| [Lookaround Optimizer: 𝑘 steps around, 1 step average](https:\u002F\u002Fopenreview.net\u002Fpdf?id=k1Xy5zCNOJ) | 2023 | NeurIPS |\n| [Model ratatouille: Recycling diverse models for out-of-distribution generalization](https:\u002F\u002Fproceedings.mlr.press\u002Fv202\u002Frame23a\u002Frame23a.pdf) | 2023 | ICML |\n| [Trainable Weight Averaging: Efficient Training by Optimizing Historical Solutions](https:\u002F\u002Fopenreview.net\u002Fpdf?id=8wbnpOJY-f) | 2023 | ICLR |\n| [Lookaround Optimizer: k steps around, 1 step average](https:\u002F\u002Fopenreview.net\u002Fpdf?id=k1Xy5zCNOJ) |2023  |  NeurIPS|\n| [AdapterSoup: Weight Averaging to Improve Generalization of Pretrained Language Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2302.07027) |  2023 |EACL\n| [Dart: Diversify aggregate-repeat training improves generalization of neural networks](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fpapers\u002FJain_DART_Diversify-Aggregate-Repeat_Training_Improves_Generalization_of_Neural_Networks_CVPR_2023_paper.pdf) |2023  | CVPR |\n| [When do flat minima optimizers work?](https:\u002F\u002Fopenreview.net\u002Fpdf?id=vDeh2yxTvuh) |  2022| NeurIPS |\n| [Model soups: averaging weights of multiple fine-tuned models improves accuracy without increasing inference time](https:\u002F\u002Fproceedings.mlr.press\u002Fv162\u002Fwortsman22a\u002Fwortsman22a.pdf) | 2022 | ICML |\n| [Diverse weight averaging for out-of-distribution generalization](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper_files\u002Fpaper\u002F2022\u002Ffile\u002F46108d807b50ad4144eb353b5d0e8851-Paper-Conference.pdf) |  2022|NeurIPS  |\n| [Robust fine-tuning of zero-shot models](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FWortsman_Robust_Fine-Tuning_of_Zero-Shot_Models_CVPR_2022_paper.pdf) |2022  |CVPR  |\n| [Neural networks with late-phase weights](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2007.12927) |  2021 | ICLR |\n| [Stochastic Weight Averaging in Parallel: Large-Batch Training That Generalizes Well](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2001.02312) | 2020 | ICLR |\n| [SWALP: Stochastic weight averaging in low precision training](https:\u002F\u002Fproceedings.mlr.press\u002Fv97\u002Fyang19d\u002Fyang19d.pdf) |2019  | ICML |\n| [Averaging weights leads to wider optima and better generalization](https:\u002F\u002Fauai.org\u002Fuai2018\u002Fproceedings\u002Fpapers\u002F313.pdf) | 2018 | UAI |\n| [Mean teachers are better role models: Weight-averaged consistency targets improve semi-supervised deep learning results](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper_files\u002Fpaper\u002F2017\u002Ffile\u002F68053af2923e00204c3ca7c6a3150cf7-Paper.pdf) |2017  | NeurIPS |\n\n#### Model Merging for Better Domain Generalization or Domain Adaptation\n\n| **Paper Title** | **Year** | **Conference\u002FJournal** | **Remark** |\n| --------------- | :----: | :----: | :----: |\n| [Selecting and Merging: Towards Adaptable and Scalable Named Entity Recognition with Large Language Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.22813)| 2025 | Arxiv | Qwen2.5-7B, Llama3.1-8B |\n| [Harmonizing and Merging Source Models for CLIP-based Domain Generalization](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.09446)| 2025 | Arxiv |\n| [Realistic Evaluation of Model Merging for Compositional Generalization](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2409.18314)| 2024 | Arxiv |\n| [Layer-wise Model Merging for Unsupervised Domain Adaptation in Segmentation Tasks](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2409.15813)| 2024 | Arxiv |\n| [Training-Free Model Merging for Multi-target Domain Adaptation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.13771)| 2024 | Arxiv |\n| [Domain Adaptation of Llama3-70B-Instruct through Continual Pre-Training and Model Merging: A Comprehensive Evaluation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.14971) |  2024 |  Arxiv | Llama3-70B|\n| [Ensemble of averages: Improving model selection and boosting performance in domain generalization](https:\u002F\u002Fopenreview.net\u002Fpdf?id=peZSbfNnBp4) | 2022 | NeurIPS |\n| [Swad: Domain generalization by seeking flat minima](https:\u002F\u002Fopenreview.net\u002Fpdf?id=zkHlu_3sJYU) |  2021| NeurIPS |\n\n### Model Merging in Federated Learning\n\n#### Model Merging for Local Knowledge Aggregation\n\n| **Paper Title** | **Year** | **Conference\u002FJournal** | **Remark** |\n| --------------- | :----: | :----: | :----: |\n| [FedMerge: Federated Model Merging for Personalization](https:\u002F\u002Fscholar.google.com\u002Fscholar_url?url=https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F39113\u002F43075&hl=en&sa=X&d=1003636543056490924&ei=dcC-afi-LoKq6rQPo6Gk2As&scisig=AFtJQiw8mqG2DDhpKAJPVgWDpcKq&oi=scholaralrt&hist=vWBd1VsAAAAJ:1360192736361724487:AFtJQix2MBKNfqG8ZHqg__7tkl0l&html=&pos=0&folt=art)| 2026 | AAAI | \n| [Communication-Efficient Personalized Adaptation via Federated-Local Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.18658)| 2026 | Arxiv | LLaMA-3.2-3B-Instruct\n| [On The Surprising Effectiveness of a Single Global Merging in Decentralized Learning](https:\u002F\u002Fopenreview.net\u002Fpdf?id=zrFnwRHuQo)| 2026 | ICLR |\n| [Bi-level Personalization for Federated Foundation Models: A Task-vector Aggregation Approach](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.12697)| 2025 | Arxiv | LLaMA-7B\n| [Intrinsic Training Signals for Federated Learning Aggregation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.06813)| 2025 | ICIAP |\n| [Breaking the Aggregation Bottleneck in Federated Recommendation: A Personalized Model Merging Approach](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.12386)| 2025 | Arxiv |\n| [A Single Merging Suffices: Recovering Server-based Learning Performance in Decentralized Learning](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.06542)| 2025 | Arxiv |\n| [Closed-form merging of parameter-efficient modules for Federated Continual Learning](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.17961) | 2025 | ICLR |\n| [Never Start from Scratch: Expediting On-Device LLM Personalization via Explainable Model Selection](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.13938)| 2025 | Arxiv |\n| [FedMerge: Federated Personalization via Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.06768)| 2025 | Arxiv |\n| [Personalized Language Models via Privacy-Preserving Evolutionary Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.18008v1)| 2025 | Arxiv | Llama-2-7b, Mistral-7B-Instruct v0.2 |\n| [FedAWA: Adaptive Optimization of Aggregation Weights in Federated Learning Using Client Vectors](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.15842)| 2025 | Arxiv |\n| [Many-Task Federated Fine-Tuning via Unified Task Vectors](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.06376)| 2025 | Arxiv |\n| [PrivFusion: Privacy-Preserving Model Fusion via Decentralized Federated Graph Matching](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F10643309\u002F)| 2024 | TKDE |\n| [Model Trip: Enhancing Privacy and Fairness in Model Fusion Across Multi-Federations for Trustworthy Global Healthcare](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F10597838\u002F)| 2024 | ICDE |\n| [DapperFL: Domain Adaptive Federated Learning with Model Fusion Pruning for Edge Devices](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.05823)| 2024 | NeurIPS |\n| [FuseFL: One-Shot Federated Learning through the Lens of Causality with Progressive Model Fusion](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.20380)| 2024 | Arxiv |\n| [Local Superior Soups: A Catalyst for Model Merging in Cross-Silo Federated Learning](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.23660)| 2024 | Arxiv |\n| [DIMAT: Decentralized Iterative Merging-And-Training for Deep Learning Models](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fpapers\u002FSaadati_DIMAT_Decentralized_Iterative_Merging-And-Training_for_Deep_Learning_Models_CVPR_2024_paper.pdf) | 2024 | CVPR |\n| [FedFisher: Leveraging Fisher Information for One-Shot Federated Learning](https:\u002F\u002Fproceedings.mlr.press\u002Fv238\u002Fjhunjhunwala24a\u002Fjhunjhunwala24a.pdf) | 2024 | AISTATS |\n| [lo-fi: distributed fine-tuning without communication](https:\u002F\u002Fopenreview.net\u002Fpdf?id=1U0aPkBVz0)| 2023 | TMLR |\n| [Revisiting Weighted Aggregation in Federated Learning with Neural Networks](https:\u002F\u002Fproceedings.mlr.press\u002Fv202\u002Fli23s\u002Fli23s.pdf)| 2023|  ICML |\n| [Deep neural network fusion via graph matching with applications to model ensemble and federated learning](https:\u002F\u002Fproceedings.mlr.press\u002Fv162\u002Fliu22k\u002Fliu22k.pdf) | 2022 |  ICML |\n| [Federated Learning with Matched Averaging](https:\u002F\u002Fopenreview.net\u002Fpdf?id=BkluqlSFDS) |  2020 |  ICLR|\n| [Tackling the objective inconsistency problem in heterogeneous federated optimization](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2007.07481) |2020  | NeurIPS |\n| [Model fusion via optimal transport](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2020\u002Ffile\u002Ffb2697869f56484404c8ceee2985b01d-Paper.pdf) |2020  |  NeurIPS|\n| [Bayesian nonparametric federated learning of neural networks](https:\u002F\u002Fproceedings.mlr.press\u002Fv97\u002Fyurochkin19a\u002Fyurochkin19a.pdf) | 2019 | ICML |\n| [Learning private neural language modeling with attentive aggregation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1812.07108) |2019  | IJCNN |\n| [Communication-Efficient Learning of Deep Networks from Decentralized Data](https:\u002F\u002Fproceedings.mlr.press\u002Fv54\u002Fmcmahan17a\u002Fmcmahan17a.pdf) | 2017 |  AISTATS |\n\n### Model Merging in Zero-shot\u002FFew-shot Learning\n\n#### Model Merging for Cross-task Generalization in Zero-shot Learning\n\n| **Paper Title** | **Year** | **Conference\u002FJournal** | **Remark** |\n| --------------- | :----: | :----: | :----: |\n| [Task Vector in TTS: Toward Emotionally Expressive Dialectal Speech Synthesis](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2512.18699) |   2026 |Arxiv | \n| [Model Merging Improves Zero-Shot Generalization in Bioacoustic Foundation Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2511.05171) |2025 |NeurIPS Workshop | LLAMA-3.1-8B-INSTRUCT\n| [Investigating Task Arithmetic for Zero-Shot Information Retrieval](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.00649) |2025 |SIGIR |  LLama-2-7b\n| [Retraining-Free Merging of Sparse Mixture-of-Experts via Hierarchical Clustering](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.08589) |2024 |Arxiv | Qwen 60x2.7B, Qwen 45x2.7B, Qwen 30x2.7B, Mixtral 8x7B, Mixtral 6x7B, Mixtral 4x7B|\n| [Layer Swapping for Zero-Shot Cross-Lingual Transfer in Large Language Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.01335) |2024 |Arxiv | LLAMA 3.1 8B |\n| [Learning to Route Among Specialized Experts for Zero-Shot Generalization](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.05859)|2024  | ICML  |\n| [Towards Modular LLMs by Building and Reusing a Library of LoRAs](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.11157) |2024  | ICML  | Mistral-7B |\n| [Chat Vector: A Simple Approach to Equip LLMs With New Language Chat Capabilities](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.04799) |2024  |ACL  | LLaMA-2 13B, Chinese-LLaMA-13B, Chinese-Alpaca-13B, Mistral-7B, llama-2-ko-7b|\n| [Unlocking the Potential of Model Merging for Low-Resource Languages](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.03994)|   2024 |Arxiv | Llama-2-7B|\n| [Diffusion Soup: Model Merging for Text-to-Image Diffusion Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.08431) | 2024 | Arxiv |\n| [No Train but Gain: Language Arithmetic for training-free Language Adapters enhancement](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2404.15737) | 2024 |Arxiv |\n| [MaxFusion: Plug&Play Multi-Modal Generation in Text-to-Image Diffusion Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2404.09977) | 2024 |Arxiv |\n| [AdaMergeX: Cross-Lingual Transfer with Large Language Models via Adaptive Adapter Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.18913) | 2024 |Arxiv | Llama2-7b|\n| [Model Composition for Multimodal Large Language Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.12750) |  2024 | Arxiv | Vicuna-7B-v1.5|\n| [Exploring the Benefits of Training Expert Language Models over Instruction Tuning](https:\u002F\u002Fopenreview.net\u002Fpdf?id=VAA1itvsNQ)|  2023 | ICML |\n| [Token-Level Adaptation of LoRA Adapters for Downstream Task Generalization](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.10847)|  2023 | Arxiv | Llama-2-7b|\n| [Language and Task Arithmetic with Parameter-Efficient Layers for Zero-Shot Summarization](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.09344) |  2023 | Arxiv | PaLM 2-S|\n\n#### Model Merging for Cross-task Generalization in Few-shot Learning\n\n| **Paper Title** | **Year** | **Conference\u002FJournal** | **Remark** |\n| --------------- | :----: | :----: | :----: |\n| [Task Arithmetic with Support Languages for Low-Resource ASR](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.07038) |   2026 |Arxiv | \n| [Unlocking Tuning-Free Few-Shot Adaptability in Visual Foundation Models by Recycling Pre-Tuned LoRAs](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.02220) | 2025 | CVPR |\n| [LoRA-Flow: Dynamic LoRA Fusion for Large Language Models in Generative Tasks](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.11455) | 2024 | ACL | Llama-2- 7B|\n| [LoraHub: Efficient Cross-Task Generalization via Dynamic LoRA Composition](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2307.13269) | 2024 |  COLM | Llama-2-7B, Llama-2-13B |\n| [LoraRetriever: Input-Aware LoRA Retrieval and Composition for Mixed Tasks in the Wild](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.09997)  | 2024 | ACL |\n| [Does Combining Parameter-efficient Modules Improve Few-shot Transfer Accuracy?](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.15414) |   2024 |Arxiv |\n| [MerA: Merging pretrained adapters for few-shot learning](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2308.15982) |2023  | Arxiv |\n| [Multi-Head Adapter Routing for Cross-Task Generalization](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2211.03831)|2023  | NeurIPS |\n\n### Model Merging in Adversarial Learning\n\n#### Model Merging as an Attack\n\n| **Paper Title** | **Year** | **Conference\u002FJournal** | **Remark** |\n| --------------- | :----: | :----: | :----: |\n| [When Safe Models Merge into Danger: Exploiting Latent Vulnerabilities in LLM Fusion](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2604.00627)| 2026  | Arxiv | Tulu-2-7b, Llama-3.1-Tulu-3-8B-DPO, OpenChat-3.5-0106\n| [Backdoor Vectors: a Task Arithmetic View on Backdoor Attacks and Defenses](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.08016)| 2025  | Arxiv |\n| [Merge Now, Regret Later: The Hidden Cost of Model Merging is Adversarial Transferability](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.23689)| 2025  | Arxiv |\n| [Be Cautious When Merging Unfamiliar LLMs: A Phishing Model Capable of Stealing Privacy](https:\u002F\u002Faclanthology.org\u002F2025.findings-acl.713.pdf)| 2025  | ACL | Llama-3.2-3b-it, Gemma-2-2b-it, Qwen-2.5-3b-it, and Phi-3.5-mini-it |\n| [Merge Hijacking: Backdoor Attacks to Model Merging of Large Language Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.23561)| 2025  | Arxiv | LLaMA3.1-8B\n| [From Purity to Peril: Backdooring Merged Models From “Harmless” Benign Components](https:\u002F\u002Fwww.usenix.org\u002Fsystem\u002Ffiles\u002Fconference\u002Fusenixsecurity25\u002Fsec25cycle1-prepub-702-wang-lijin.pdf)| 2025  | Arxiv | LLaMA2-7B-chat, Mistral-7B-v0.1\n| [Merger-as-a-Stealer: Stealing Targeted PII from Aligned LLMs with Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.16094)| 2025  | Arxiv |\n| [Be Cautious When Merging Unfamiliar LLMs: A Phishing Model Capable of Stealing Privacy](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.11533) | 2025  | Arxiv |\n| [LoBAM: LoRA-Based Backdoor Attack on Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.16746) | 2024  | Arxiv |\n| [BadMerging: Backdoor Attacks Against Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.07362) | 2024  | CCS |\n| [LoRA-as-an-Attack! Piercing LLM Safety Under The Share-and-Play Scenario](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.00108) | 2024  | ACL | Llama-2-7B|\n\n#### Model Merging as a Defense or Intellectual Property Protection\n\n| **Paper Title** | **Year** | **Conference\u002FJournal** | **Remark** |\n| --------------- | :----: | :----: | :----: |\n| [Defending against Backdoor Attacks via Module Switching](https:\u002F\u002Fopenreview.net\u002Fpdf?id=ieCOL2YAqv)| 2026 |  ICLR | \n| [Making Models Unmergeable via Scaling-Sensitive Loss Landscape](https:\u002F\u002Farxiv.org\u002Fabs\u002F2601.21898)| 2026 |  Arxiv | \n| [Merging Triggers, Breaking Backdoors: Defensive Poisoning for Instruction-Tuned Language Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.04448)| 2026 |  Arxiv | Llama2-7B and Qwen3-8B \n| [Do Not Merge My Model! Safeguarding Open-Source LLMs Against Unauthorized Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2511.10712)| 2026 |  AAAI|  LLaMA-2-13B, WizardLM-13B, WizardMath-13B, LLaMA-2-13B-Code Alpaca |\n| [Defending Unauthorized Model Merging via Dual-Stage Weight Protection](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2511.11851)| 2025 |  Arxiv|\n| [Model Unmerging: Making Your Models Unmergeable for Secure Model Sharing](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.01548)| 2025 |  Arxiv|\n| [POSTER: Investigating Transferability of Adversarial Examples in Model Merging](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3708821.3735347)| 2025 |  ASIA CCS |\n| [RouteMark: A Fingerprint for Intellectual Property Attribution in Routing-based Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.01784)| 2025 |  Arxiv|\n| [MergeGuard: Efficient Thwarting of Trojan Attacks in Machine Learning Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.04015)| 2025 |  Arxiv|\n| [BadJudge: Backdoor Vulnerabilities of LLM-As-A-Judge](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.00596v1)| 2025 |  Arxiv| Mistral-7B-Instruct-v0.2, Meta-Llama3-8B |\n| [Disrupting Model Merging: A Parameter-Level Defense Without Sacrificing Accuracy](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.07661)| 2025 |  ICCV |\n| [Large Language Models Merging for Enhancing the Link Stealing Attack on Graph Neural Networks](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.05830)| 2024 |  Arxiv| Vicuna-7B, Vicuna-13B|\n| [Strong Copyright Protection for Language Models via Adaptive Model Fusion](https:\u002F\u002Fopenreview.net\u002Fpdf?id=vAG7GrZZUF) | 2024 |  ICML| LLaMa2 7B, StarCoder 7B |\n| [Hyper Adversarial Tuning for Boosting Adversarial Robustness of Pretrained Large Vision Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.05951)| 2024 |  Arxiv|\n| [REEF: Representation Encoding Fingerprints for Large Language Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.14273)| 2024 |  Arxiv| Evollm-jp-7b, Shisa-gamma-7b-v1, Wizardmath-7b-1.1, Abel-7b-002, Llama-2-7b, Openllama-2-7b, Mpt-7b,  Internlm2-chat-20b, Mixtral-8x7b-instruct, Qwen-1.5-chat-72b |\n| [Mitigating the Backdoor Effect for Multi-Task Model Merging via Safety-Aware Subspace](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.13910)| 2024 |  Arxiv|\n| [MergePrint: Robust Fingerprinting against Merging Large Language Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.08604)| 2024 |  Arxiv| LLaMA-2-7B, WizardMath-7B-V1.0, LLaMA-2-7B-CHAT |\n| [Avoiding Copyright Infringement via Machine Unlearning](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.10952v1) | 2024 |  Arxiv | Llama3-8B |\n| [Merging Improves Self-Critique Against Jailbreak Attacks](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.07188) | 2024 |  Arxiv| Mistral-7B, Mixtral-8x7B|\n| [Have You Merged My Model? On The Robustness of Large Language Model IP Protection Methods Against Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2404.05188) |  2024 |  Arxiv| LLaMA-2-7B, LLaMA-2-7B-CHAT, WizardMath-7B-V1.0|\n| [Here’s a Free Lunch: Sanitizing Backdoored Models with Model Merge](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.19334) | 2024  | ACL |\n| [Revisiting adapters with adversarial training](https:\u002F\u002Fopenreview.net\u002Fpdf?id=HPdxC1THU8T) |2023 |ICLR |\n| [Seasoning model soups for robustness to adversarial and natural distribution shifts](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fpapers\u002FCroce_Seasoning_Model_Soups_for_Robustness_to_Adversarial_and_Natural_Distribution_CVPR_2023_paper.pdf) |2023 | CVPR |\n\n## Other Applications\n\n| **Paper Title** | **Year** | **Conference\u002FJournal** | **Remark** |\n| --------------- | :----: | :----: | :----: |\n| [Securing the Floor and Raising the Ceiling: A Merging-based Paradigm for Multi-modal Search Agents](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.01416v1)| 2026 | Arxiv |\n| [ACE-Brain-0: Spatial Intelligence as a Shared Scaffold for Universal Embodiments](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.03198v1)| 2026 | Arxiv |\n| [Sparse Task Vector Mixup with Hypernetworks for Efficient Knowledge Transfer in Whole-Slide Image Prognosis](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.10526)| 2026 | Arxiv \n| [Less Finetuning, Better Retrieval: Rethinking LLM Adaptation for Biomedical Retrievers via Synthetic Data and Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.04731)| 2026 | Arxiv |  Qwen3-0.6B, Gemma-2B, Phi4-3.8B\n| [When Domain Pretraining Interferes with Instruction Alignment: An Empirical Study of Adapter Merging in Medical LLMs](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.18350)| 2026 | Arxiv | 14B-parameter LLM\n| [MergeRec: Model Merging for Data-Isolated Cross-Domain Sequential Recommendation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.01753)| 2026 | KDD |\n| [Rare Word Recognition and Translation Without Fine-Tuning via Task Vector in Speech Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2512.21894)| 2025 | Arxiv | \n| [System Report for CCL25-Eval Task 10: Prompt-Driven Large Language Model Merge for Fine-Grained Chinese Hate Speech Detection](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2512.09563)| 2025 | Arxiv |Qwen2.5-7B-Instruct\n| [Group-Aware Partial Model Merging for Children’s Automatic Speech Recognition](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2511.23098)| 2025 |  Arxiv | \n| [Subtract the Corruption: Training-Data-Free Corrective Machine Unlearning using Task Arithmetic](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2511.18660)| 2025 |  Arxiv | \n| [RecCocktail: A Generalizable and Efficient Framework for LLM-Based Recommendation](https:\u002F\u002Fle-wu.com\u002Ffiles\u002FPublications\u002FCONFERENCES\u002FAAAI26-RecCocktail.pdf)| 2025 |  AAAI | Llama-3.1-8B\n| [A Novel Hierarchical Integration Method for Efficient Model Merging in Medical LLMs](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2511.13373)| 2025 |  Arxiv | Mistral-7B\n| [WeaveRec: An LLM-Based Cross-Domain Sequential Recommendation Framework with Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.26546)| 2025 |  Arxiv | Qwen2-7B\n| [Effect of Model Merging in Domain-Specific Ad-hoc Retrieval](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.21966)| 2025 |  Arxiv|\n| [Look the Other Way: Designing ‘Positive’ Molecules with Negative Data via Task Arithmetic](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.17876)| 2025 |  Arxiv|\n| [Transferring Visual Explainability of Self-Explaining Models through Task Arithmetic](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.04380)| 2025 |  Arxiv|\n| [Distilling a speech and music encoder with task arithmetic](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.13270)| 2025 |  Arxiv|\n| [MedSAMix: A Training-Free Model Merging Approach for Medical Image Segmentation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.11032)| 2025 |  Arxiv|\n| [Oscillation-Reduced MXFP4 Training for Vision Transformers](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.20853v2)| 2025 |  ICML |\n| [Transferring Visual Explainability of Self-Explaining Models through Task Arithmetic](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.04380)| 2025 |  Arxiv|\n| [Temporal Information Retrieval via Time-Specifier Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.06782)| 2025 |  Arxiv|\n| [Generative Representational Learning of Foundation Models for Recommendation](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.11999)| 2025 |  Arxiv|\n| [Towards Model Merging for Tabular Telecommunications Data](https:\u002F\u002Fwww.diva-portal.org\u002Fsmash\u002Fget\u002Fdiva2:1968615\u002FFULLTEXT01.pdf)| 2025 |  Arxiv|\n| [CultureMERT: Continual Pre-Training for Cross-Cultural Music Representation Learning](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.17818)| 2025 |  Arxiv|\n| [U-Net Transplant: The Role of Pre-training for Model Merging in 3D Medical Segmentation](https:\u002F\u002Firis.unimore.it\u002Fhandle\u002F11380\u002F1380716)| 2025 |  International Conference on Medical Image Computing and Computer Assisted Intervention|\n| [CodeMerge: Codebook-Guided Model Merging for Robust Test-Time Adaptation in Autonomous Driving](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.16524)| 2025 |  Arxiv|\n| [Mixture of Latent Experts Using Tensor Products](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.16671)|  2024| TMLR |\n| [In-Model Merging for Enhancing the Robustness of Medical Imaging Classification Models](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.20516v1)| 2025 |  Arxiv|\n| [Self-supervised Normality Learning and Divergence Vector-guided Model Merging for Zero-shot Congenital Heart Disease Detection in Fetal Ultrasound Videos](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.07799v1)| 2025 |  Arxiv|\n| [A Sliding Layer Merging Method for Efficient Depth-Wise Pruning in LLMs](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.19159v1)| 2025 |  Arxiv| LLaMA-2-7B\n| [Self-supervised Normality Learning and Divergence Vector-guided Model Merging for Zero-shot Congenital Heart Disease Detection in Fetal Ultrasound Videos](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.07799)| 2025 |  Arxiv|\n| [MedForge: Building Medical Foundation Models Like Open Source Software Development](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.16055)| 2025 |  Arxiv|\n| [Cultural Palette: Pluralising Culture Alignment via Multi-agent Palette](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.11167)| 2024 |  Arxiv|\n| [Mitigating Training Imbalance in LLM Fine-Tuning via Selective Parameter Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.03743)| 2024 |  EMNLP | Llama-2-7b|\n| [Is Multiple Object Tracking a Matter of Specialization?](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.00553)| 2024 |  NeurIPS|\n| [Tracking Universal Features Through Fine-Tuning and Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.12391)| 2024 |  Arxiv|\n| [HM3: Heterogeneous Multi-Class Model Merging](https:\u002F\u002Fwww.arxiv.org\u002Fpdf\u002F2409.19173) | 2024 |  Arxiv|\n| [Emotion Arithmetic: Emotional Speech Synthesis via Weight Space Interpolation](https:\u002F\u002Fwww.ee.iitb.ac.in\u002Fcourse\u002F~daplab\u002Fpublications\u002F2024\u002Fkalyan24_interspeech.pdf) | 2024 | Interspeech\n| [Erasure Coded Neural Network Inference via Fisher Averaging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2409.01420) | 2024 |  Arxiv|\n| [MergeRepair: An Exploratory Study on Merging Task-Specific Adapters in Code LLMs for Automated Program Repair](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.09568) | 2024 |  Arxiv|\n| [Model Tells You Where to Merge: Adaptive KV Cache Merging for LLMs on Long-Context Tasks](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.08454v2) | 2024 |  Arxiv| Llama2-7B, Llama2-13B-chat, Mistral-7B-instruct|\n| [Scaling Up Personalized Image Aesthetic Assessment via Task Vector Customization](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.07176)| 2024 | Arxiv |\n| [An Attribute Interpolation Method in Speech Synthesis by Model Merging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.00766) | 2024 |  Arxiv|\n| [Task Arithmetic can Mitigate Synthetic-to-Real Gap in Automatic Speech Recognition](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.02925) | 2024 |  Arxiv|\n| [MedMerge: Merging Models for Effective Transfer Learning to Medical Imaging Tasks](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.11646) | 2024 |  Arxiv|\n| [Experts Weights Averaging: A New General Training Scheme for Vision Transformers](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2308.06093)| 2023 | Arxiv |\n| [One Student Knows All Experts Know: From Sparse to Dense](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2201.10890)| 2022 | Arxiv |\n| [Meta-Learning PAC-Bayes Priors in Model Averaging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1912.11252)| 2019 | AAAI |\n\n----------\n\n**Star History**\n\n[![Star History Chart](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEnnengYang_Awesome-Model-Merging-Methods-Theories-Applications_readme_d83ade9f22e4.png)](https:\u002F\u002Fstar-history.com\u002F#EnnengYang\u002FAwesome-Model-Merging-Methods-Theories-Applications&Date)\n\n----------\n\n## Contact\n\u003C!-- **Contact** -->\n\nWe welcome all researchers to contribute to this repository **'model merging in foundation models or machine learning'**.\n\nIf you have a related paper that was not added to the library, please contact us.\n\nEmail: \u003Cennengyang@qq.com> \u002F \u003Cennengyang@gmail.com>\n","关于 **“LLMs、MLLMs 及其扩展领域的模型合并：方法、理论、应用与机遇。ACM 计算综述，2026 年。”** 的全面论文列表。\n\n---\n\n> [!IMPORTANT]\n> 欢迎贡献：\n> \n> 请通过 [联系我们](#contact) 或提交拉取请求，添加未列出的相关论文、内容澄清或分类调整；待您的论文被接收后，请及时更新相关信息。感谢！\n\n---\n\n## 💥 新闻 💥\n\n- 🔥🔥🔥 我们的 [综述](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3787849) 已被 ACM 计算综述期刊接收，请在有帮助时 [引用](#citation) 该文或本资源库。\n- 🔥🔥🔥 我们已标记出实验中使用了规模 **$\\geq$ 7B**（或小型主流 LLM）模型的论文。\n\n---\n\n## 摘要\n>\n> 模型合并是机器学习领域中一种高效的赋能技术，它无需收集原始训练数据，也无需高昂的计算成本。随着模型合并在各个领域的日益普及，全面理解现有的模型合并技术至关重要。然而，目前文献中缺乏对这些技术进行系统性、深入梳理的综述。为此，本综述全面概述了模型合并的方法与理论、其在不同领域和场景中的应用，以及未来的研究方向。具体而言，我们首先提出了一种新的分类方法，详尽地讨论了现有模型合并技术；其次，探讨了模型合并技术在大型语言模型、多模态大型语言模型以及持续学习、多任务学习、少样本学习等十余个机器学习子领域的应用；最后，我们指出了模型合并仍面临的挑战，并展望了未来的研究方向。\n\n\n\u003Ccenter>\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEnnengYang_Awesome-Model-Merging-Methods-Theories-Applications_readme_e4c2e8fe2346.png\" alt=\"模型合并\" width=\"800\"\u002F>\n\u003C\u002Fcenter>\n\n## 引用\n\n如果您认为我们的论文或本资源有所帮助，请考虑引用以下内容：\n\n```\n@article{yang2026ModelMergingSurvey,\n  author = {Yang, Enneng and Shen, Li and Guo, Guibing and Wang, Xingwei and Cao, Xiaochun and Zhang, Jie and Tao, Dacheng},\n  title = {LLMs、MLLMs 及其扩展领域的模型合并：方法、理论、应用与机遇},\n  year = {2026},\n  issue_date = {2026年6月},\n  publisher = {计算机协会},\n  address = {美国纽约州纽约市},\n  volume = {58},\n  number = {8},\n  issn = {0360-0300},\n  url = {https:\u002F\u002Fdoi.org\u002F10.1145\u002F3787849},\n  doi = {10.1145\u002F3787849},\n  journal = {ACM 计算综述},\n  month = feb,\n  articleno = {216},\n  numpages = {41}\n}\n```\n\n谢谢！\n\n******\n\n## 框架\n\n- [💥 新闻 💥](#-news-)\n- [摘要](#abstract)\n- [引用](#citation)\n- [框架](#framework)\n- [综述](#survey)\n- [基准测试\u002F评估](#benchmarkevaluation)\n- [高级方法](#advanced-methods)\n  - [合并前方法](#pre-merging-methods)\n    - [更好的微调](#better-fine-tuning)\n      - [线性化微调](#linearization-fine-tuning)\n      - [子空间微调](#subspace-fine-tuning)\n      - [尖锐度感知微调](#sharpness-aware-fine-tuning)\n      - [其他](#others)\n    - [架构转换](#architecture-transformation)\n    - [权重对齐](#weight-alignment)\n  - [合并中方法](#during-merging-methods)\n    - [基础合并方法](#basic-merging-methods)\n    - [基于权重的合并方法](#weighted-based-merging-methods)\n    - [基于子空间的合并方法（稀疏或低秩子空间）](#subspace-based-merging-method-sparse-or-low-rank-subspace)\n    - [基于路由的合并方法（动态合并）](#routing-based-merging-methods-dynamic-merging)\n    - [后校准方法](#post-calibration-based-methods)\n  - [其他合并方法](#other-merging-methods)\n  - [模型合并的理论或分析](#theories-or-analysis-of-model-merging)\n- [模型合并在基础模型中的应用](#application-of-model-merging-in-foundation-models)\n  - [大型语言模型中的模型合并](#model-merging-in-large-language-models)\n    - [LLM的人类偏好对齐](#human-preference-alignment-for-llms)\n    - [LLM的去毒化](#detoxification-of-llms)\n    - [LLM的知识编辑\u002F遗忘](#knowledge-editingunlearning-of-llms)\n    - [加速LLM的训练](#faster-training-of-llms)\n    - [加速LLM的推理](#faster-reasoning-of-llms)\n    - [提升基于MoE的LLM的计算效率](#improving-computational-efficiency-of-moe-based-llm)\n    - [通过模型合并混合数据集](#mixing-datasets-via-model-merging)\n    - [LLM代理合并](#llm-agent-merging)\n    - [整合专家LLM的能力](#combine-the-capabilities-of-expert-llms)\n  - [多模态大型语言模型中的模型合并](#model-merging-in-multimodal-large-language-models)\n    - [用于多模态融合的模型合并](#model-merging-for-multimodal-fusion)\n    - [用于跨模态知识迁移的模型合并](#model-merging-for-cross-modal-knowledge-transfer)\n    - [整合专家MLLM的能力](#combine-the-capabilities-of-expert-mllms)\n  - [图像生成模型中的模型合并](#model-merging-in-image-generative-models)\n    - [生成模型中的风格混合](#style-mixing-in-generative-models)\n    - [降低生成模型的训练成本](#reducing-training-cost-of-generative-models)\n    - [提升扩散模型的真实性（或生成质量）](#enhancing-the-faithfulness-or-generation-quality-of-diffusion-models)\n    - [深度伪造检测](#deepfake-detection)\n  - [视频生成模型中的模型合并](#model-merging-in-video-generative-models)\n    - [增强运动建模](#enhancing-motion-modeling)\n- [模型合并在不同机器学习子领域的应用](#application-of-model-merging-in-different-machine-learning-subfields)\n  - [持续学习中的模型合并](#model-merging-in-continual-learning)\n    - [通过模型合并缓解灾难性遗忘](#model-merging-to-mitigate-catastrophic-forgetting)\n  - [多任务\u002F多目标\u002F多领域\u002F辅助学习中的模型合并](#model-merging-in-multi-taskmulti-objectivemulti-domainauxiliary-learning)\n    - [用于多任务学习中知识迁移的模型合并](#model-merging-for-knowledge-transfer-in-multi-task-learning)\n    - [用于多目标优化中知识迁移的模型合并](#model-merging-for-knowledge-transfer-in-multi-objective-optimization)\n    - [用于多领域学习中知识迁移的模型合并](#model-merging-for-knowledge-transfer-in-multi-domain-learning)\n    - [用于辅助学习中知识迁移的模型合并](#model-merging-for-knowledge-transfer-in-auxiliary-learning)\n  - [分布外\u002F领域泛化中的模型合并](#model-merging-in-out-of-distributiondomain-generalization)\n    - [用于更好分布外泛化的模型合并](#model-merging-for-better-out-of-distribution-generalization)\n    - [用于更好领域泛化或领域适应的模型合并](#model-merging-for-better-domain-generalization-or-domain-adaptation)\n  - [联邦学习中的模型合并](#model-merging-in-federated-learning)\n    - [用于本地知识聚合的模型合并](#model-merging-for-local-knowledge-aggregation)\n  - [零样本\u002F少样本学习中的模型合并](#model-merging-in-zero-shotfew-shot-learning)\n    - [用于零样本学习中跨任务泛化的模型合并](#model-merging-for-cross-task-generalization-in-zero-shot-learning)\n    - [用于少样本学习中跨任务泛化的模型合并](#model-merging-for-cross-task-generalization-in-few-shot-learning)\n  - [对抗学习中的模型合并](#model-merging-in-adversarial-learning)\n    - [作为攻击的模型合并](#model-merging-as-an-attack)\n    - [作为防御或知识产权保护的模型合并](#model-merging-as-a-defense-or-intellectual-property-protection)\n- [其他应用](#other-applications)\n- [联系方式](#contact)\n\n\n\n----------\n\n## 调查研究\n\n| **论文标题** | **年份** | **会议\u002F期刊** |\n| --------------- | :----: | :----: |\n| [大语言模型时代的模型合并：方法、应用与未来方向](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.09938)| 2026 | Arxiv |\n| [通过模型合并扩展智能：综合综述](https:\u002F\u002Fd197for5662m48.cloudfront.net\u002Fdocuments\u002Fpublicationstatus\u002F290780\u002Fpreprint_pdf\u002F716bd23c7315eead7ee9fd24fa7b4290.pdf)| 2025 | Arxiv |\n| [通过模型融合 democratize AI：全面回顾与未来方向](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS295016012500049X)| 2025 | Arxiv |\n| [从任务特定模型到统一系统：模型合并方法综述](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.08998)| 2025 | Arxiv |\n| [SoK：利用深度模型合并技术在损失景观中寻找共同点](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.12927)| 2024 | Arxiv |\n| [LLMs、MLLMs 及其以外的模型合并：方法、理论、应用与机遇](https:\u002F\u002Farxiv.org\u002Fabs\u002F2408.07666)| 2024 | Arxiv |\n| [模型 MoErging 综述：为协作学习在专业专家之间进行回收与路由](https:\u002F\u002Fwww.arxiv.org\u002Fpdf\u002F2408.07057)| 2024 | Arxiv |\n| [合并、集成与合作！大语言模型时代协作策略综述](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.06089)| 2024 | Arxiv |\n| [超越微调的学习：综述](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.08184)| 2023 | Arxiv |\n| [深度模型融合：综述](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2309.15698)| 2023 | Arxiv |\n\n## 基准测试\u002F评估\n\n| **论文标题** | **年份** | **会议\u002F期刊** | **备注** |\n| --------------- | :----: | :----: | :----: |\n| [crdt-merge](https:\u002F\u002Fgithub.com\u002Fmgillr\u002Fcrdt-merge)| 2026 | Github | 基于 CRDT 的分布式模型合并，具有形式化的收敛保证。包含 25 种策略（SLERP、TIES、DARE、Fisher、进化等）。采用两层 OR-Set 架构，实现无冲突的多节点合并。\n| [用于缓解社会偏见的模型合并算法实证调查](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2512.02689)| 2025 | Arxiv | LLAMA-2-7B、LLAMA-3-8B、LLAMA-3.1-8B、QWEN2-7B\n| [大型语言模型中模型合并技术的系统性研究](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2511.21437)| 2025 | Arxiv | Llama-3.2-3B-Instruct、Llama-3.1-8B-Instruct、Qwen3-4B、Qwen3-8B\n| [FusionBench：深度模型融合的全面基准测试](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.03280)| 2025 | JMLR | Mistral-7B-v0.1、MetaMath-Mistral-7B、dolphin-2.1-mistral-7b、speechless-code-mistral-7b-v1.0\n| [迈向多层次模型协作中的性能一致性](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2025\u002Fpapers\u002FLi_Towards_Performance_Consistency_in_Multi-Level_Model_Collaboration_ICCV_2025_paper.pdf)| 2025 | ICCV |\n| [大型语言模型中的模型合并缩放规律](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.24244)| 2025 | Arxiv | Qwen2.5 0.5、1.5、3、7、14、32、72B\n| [FBMS：用于灵活贝叶斯模型选择和模型平均的 R 包](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.00753)| 2025 | Arxiv |\n| [通过模型合并统一多模态大语言模型的能力与模态](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.19892)| 2025 | Arxiv | Qwen2-VL-7B-Base、Vicuna-7B-v1.5 |\n| [MergeBench：领域专用 LLM 合并的基准测试](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.10833)| 2025 | Arxiv | Llama-3.2-3B、Llama3.1-8B、Gemma-2-2B 和 Gemma-2-9B |\n| [Mergenetic：一个简单的进化式模型合并库](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.11427)| 2025 | 系统演示 | Mistral-7B\n| [RobustMerge：面向 MLLMs 的参数高效模型合并，具备方向鲁棒性](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.17159)| 2025 | NeurIPS | LLaVA-v1.5-7B\n| [混合数据还是合并模型？通过模型合并平衡大型语言模型的有用性、诚实性和无害性](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.06876v1)| 2025 | Arxiv | Llama-3-8B-Instruct、Mistral-7B-Instruct-v0.2 |\n| [如何随时间合并您的多模态模型？](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.06712)| 2024 | Arxiv |\n| [混合数据还是合并模型？优化多样化的多任务学习](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.10801)| 2024 | Arxiv | Aya 23 8B\n| [对大规模预训练模型中 Delta 参数编辑的统一视角](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.13841)| 2024 | Arxiv | LLaMA3-8B-Instruct、Qwen2-7B-Instruct、Mistral-7B-Instruct-v0.3，\n| [Model-GLUE：为野外大型模型动物园提供民主化的 LLM 扩展](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.05357) |2024 | NeurIPS 数据集与基准测试赛道 | Synthia-7B-v1.2、Llama-2-7b-evolcodealpaca、OpenHermes-7B、pygmalion-2-7b、Llama-2-7b-chat-hf、BeingWell_llama2_7b、MetaMath-7B-V1.0、vicuna-7b-v1.5、Platypus2-7B、GOAT-7B-Community、Llama-2-7b-WikiChat-fused、dolphin-llama2-7b、MetaMath-Llemma-7B、CodeLlama-7b-Instruct-hf、Magicoder-S-CL-7B、CrystalChat|\n| [大规模模型合并的关键是什么？](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.03617)| 2024 | Arxiv | PaLM-2（1B、8B、24B、64B）、PaLM-2-IT（1B、8B、24B、64B）|\n| [针对组合泛化能力的模型合并现实评估](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2409.18314)| 2024 | Arxiv |\n| [为领域适应而微调大型语言模型：探索训练策略、缩放、模型合并及协同能力](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2409.03444) | 2024 | Arxiv | Llama-3.1-8B、Mistral-7B-v0.3 |\n| [Arcee's MergeKit：大型语言模型合并工具包](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.13257)| 2024 | Arxiv | Llama2-7B-Chat、Meditron-7B|\n\n## 高级方法\n\n\n\u003Ccenter>\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEnnengYang_Awesome-Model-Merging-Methods-Theories-Applications_readme_bca0e6ebfb95.png\" alt=\"模型合并\" width=\"800\"\u002F>\n\u003C\u002Fcenter>\n\n\n### 合并前方法\n\n\u003Ccenter>\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEnnengYang_Awesome-Model-Merging-Methods-Theories-Applications_readme_3ce879c270fa.png\" alt=\"模型合并\" width=\"800\"\u002F>\n\u003C\u002Fcenter>\n\n\n#### 更好的微调\n\n##### 线性化微调\n\n| **论文标题** | **年份** | **会议\u002F期刊** | **备注** |\n| --------------- | :----: | :----: | :----: |\n| [通过克罗内克分解近似曲率实现任务算术中的无数据权重解耦](https:\u002F\u002Fopenreview.net\u002Fpdf?id=32mrjmaeMP) | 2026 | ICLR |\n| [仅微调注意力模块：提升任务算术中的权重解耦](https:\u002F\u002Fopenreview.net\u002Fpdf?id=dj0TktJcVI) | 2025 | ICLR |\n| [切空间变换器用于组合、隐私和移除](https:\u002F\u002Fopenreview.net\u002Fpdf?id=VLFhbOCz5D) | 2024 | ICLR |\n| [通过部分线性化实现参数高效的多任务模型融合](https:\u002F\u002Fopenreview.net\u002Fpdf?id=iynRvVVAmH) | 2024 | ICLR |\n| [切空间中的任务算术：改进预训练模型的编辑](https:\u002F\u002Fopenreview.net\u002Fpdf?id=0A9f2jZDGW) | 2023 | NeurIPS |\n\n\u003C!-- | [仅微调线性层是进行任务算术的简单而有效的方法](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.07089) | 2024 | Arxiv | -->\n\n\n##### 子空间微调\n\n| **论文标题** | **年份** | **会议\u002F期刊** | **备注** |\n| --------------- | :----: | :----: | :----: |\n| [解析LoRA干扰：用于稳健模型合并的正交子空间](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.22934) | 2025 | Arxiv | Llama3-8B |\n| [基于任务局部化稀疏微调的高效模型编辑](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.02620) | 2025 | ICLR |\n\n##### 锐度感知微调\n\n| **论文标题** | **年份** | **会议\u002F期刊** | **备注** |\n| --------------- | :----: | :----: | :----: |\n| [通过锐度感知微调缓解模型合并中的参数干扰](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.14662) | 2025 | ICLR |\n\n##### 其他\n| **论文标题** | **年份** | **会议\u002F期刊** | **备注** |\n| --------------- | :----: | :----: | :----: |\n| [MergOPT：一种面向稳健模型合并的合并感知优化器](https:\u002F\u002Fopenreview.net\u002Fforum?id=C21rz8mo65) | 2026 | ICLR | Llama3.1-8B-Instruct\n\n\n#### 架构转换\n\n| **论文标题** | **年份** | **会议\u002F期刊** | **备注** |\n| --------------- | :----: | :----: | :----: |\n| [异构层权重融合的模型组装学习](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.21657)| 2025 | ICLR研讨会 |\n| [无需训练的异构模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2501.00061)| 2025 |Arxiv\n| [大型语言模型的知识融合](https:\u002F\u002Fopenreview.net\u002Fpdf?id=jiDsk12qcz) | 2024 |  ICLR | Llama-2 7B、OpenLLaMA 7B、MPT 7B |\n| [聊天型LLM的知识融合：初步技术报告](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.16107) | 2024 |Arxiv  | NH2-Mixtral-8x7B、NH2-Solar-10.7B以及OpenChat-3.5-7B |\n| [关于异构神经网络模型融合的跨层对齐](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2110.15538) |2023  |ICASSP   |\n| [GAN鸡尾酒：无需数据集即可混合GAN](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2022\u002Fpapers_ECCV\u002Fpapers\u002F136830207.pdf) | 2022 | ECCV |\n\n#### 权重对齐\n\n| **论文标题** | **年份** | **会议\u002F期刊** | **备注** |\n| --------------- | :----: | :----: | :----: |\n| [运输与合并：面向大型语言模型的跨架构合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.05495) | 2026 | Arxiv | LLaMA-3 8B\n| [对称感知图元网络自编码器：通过参数规范化实现模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2511.12601) | 2025 | TAG-DS |\n| [通过参数空间对称性理解模式连通性](https:\u002F\u002Fopenreview.net\u002Fpdf?id=E8dMQGsKZv) | 2025 | ICML |\n| [将你的Transformer更新到最新版本：任务向量的再盆地化](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.22697)| 2025 | ICML  |\n| [基于异构层权重合并的模型组装学习](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.21657)| 2025 | ICLR Workshop |\n| [超越Transformer的置换对称性：旋转在模型融合中的作用](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.00264)| 2025 | Arxiv |\n| [非局部模型合并问题：置换对称性与方差坍缩](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.12766)| 2024 | Arxiv |\n| [等变深度权重空间对齐](https:\u002F\u002Fopenreview.net\u002Fpdf\u002F6d437eeb362255b4b2d75a5c6847880fb4a00e3c.pdf) | 2024 | ICML  |\n| [多样性中的和谐：利用典型相关分析合并神经网络](https:\u002F\u002Fopenreview.net\u002Fpdf?id=XTr8vwAr2D) | 2024 | ICML |\n| [基于最优传输的Transformer融合](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.05719) | 2024 | ICLR  |\n| [逐层线性模式连通性](https:\u002F\u002Fopenreview.net\u002Fpdf?id=LfmZh91tDI) | 2024 | ICLR |\n| [ZipIt! 在无需训练的情况下合并不同任务的模型](https:\u002F\u002Fopenreview.net\u002Fpdf?id=LEYUkvdUhq) | 2024 |ICLR  |\n| [通过最优传输证明神经网络的线性模式连通性](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.19103) | 2024 | AISTATS |\n| [无需训练的预训练模型合并](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fpapers\u002FXu_Training-Free_Pretrained_Model_Merging_CVPR_2024_paper.pdf) | 2024 |CVPR  |\n| [像玩乐高一样合并LoRA：通过秩级聚类将LoRA的模块化推向极致](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2409.16167)| 2024 | Arxiv | Llama2-7b, Llama2-13b |\n| [C2M3：循环一致的多模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.17897) | 2024 | NeurIPS |\n| [PLeaS--利用置换和最小二乘法合并模型](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.02447)| 2024 | Arxiv |\n| [重新思考模型再盆地化与线性模式连通性](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.05966) | 2024 | Arxiv |\n| [Git再盆地化：在置换对称性模下合并模型](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2209.04836) | 2023 | ICLR |\n| [通过隐式Sinkhorn微分进行再盆地化](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fpapers\u002FPena_Re-Basin_via_Implicit_Sinkhorn_Differentiation_CVPR_2023_paper.pdf) | 2023 | CVPR |\n| [单调线性插值中的平台期——对深度网络损失景观的“偏颇”视角](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2210.01019)| 2023 | ICLR |\n| [基于置换不变性和归一化的深度神经网络线性模式连通性](https:\u002F\u002Fopenreview.net\u002Fpdf?id=gU5sJ6ZggcX)| 2023 | ICLR |\n| [REPAIR：为修复插值而对置换激活进行重新归一化](https:\u002F\u002Fopenreview.net\u002Fpdf?id=gU5sJ6ZggcX) |2023  | ICLR |\n| [超越线性模式连通性：逐层线性特征连通性](https:\u002F\u002Fpapers.nips.cc\u002Fpaper_files\u002Fpaper\u002F2023\u002Ffile\u002Fbf3ee5a5422b0e2a88b0c9c6ed3b6144-Paper-Conference.pdf) |  2023 |NeurIPS |\n| [置换不变性在神经网络线性模式连通性中的作用](https:\u002F\u002Fopenreview.net\u002Fpdf?id=dNigytemkL) | 2022 | ICLR |\n| [神经网络损失景观的线性插值能告诉我们什么？](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2106.16004) |2022 | ICML |\n| [用于模式连接体积和快速集成的损失曲面单形体](https:\u002F\u002Fproceedings.mlr.press\u002Fv139\u002Fbenton21a\u002Fbenton21a.pdf) | 2021 | ICML |\n| [分析神经网络损失景观中的单调线性插值](https:\u002F\u002Fproceedings.mlr.press\u002Fv139\u002Flucas21a\u002Flucas21a.pdf) | 2021 | ICML |\n| [过参数化神经网络中损失景观的几何结构：对称性与不变性](https:\u002F\u002Fproceedings.mlr.press\u002Fv139\u002Fsimsek21a\u002Fsimsek21a.pdf)| 2021 | ICML |\n| [线性模式连通性与彩票假说](https:\u002F\u002Fproceedings.mlr.press\u002Fv119\u002Ffrankle20a\u002Ffrankle20a.pdf) | 2020 | ICML |\n| [通过神经元对齐优化模式连通性](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2009.02439) | 2020 | NeurIPS |\n| [基于最优传输的模型融合](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2020\u002Ffile\u002Ffb2697869f56484404c8ceee2985b01d-Paper.pdf) | 2020  | NeurIPS |\n| [均匀收敛可能无法解释深度学习中的泛化](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper_files\u002Fpaper\u002F2019\u002Ffile\u002F05e97c207235d63ceb1db43c60db7bbb-Paper.pdf) |  2019 | NeurIPS |\n| [解释多层网络低成本解的景观连通性](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper_files\u002Fpaper\u002F2019\u002Ffile\u002F46a4378f835dc8040c8057beb6a2da52-Paper.pdf)|  2019 | NeurIPS |\n| [神经网络能量景观中几乎不存在障碍](https:\u002F\u002Fproceedings.mlr.press\u002Fv80\u002Fdraxler18a\u002Fdraxler18a.pdf) | 2018 | ICML  |\n| [DNN的损失曲面、模式连通性和快速集成](https:\u002F\u002Fpapers.nips.cc\u002Fpaper_files\u002Fpaper\u002F2018\u002Ffile\u002Fbe3087e74e9100d4bc4c6268cdbe8456-Paper.pdf)|  2018 | NeurIPS |\n\n\n\n### 合并方法概述\n\n\n\u003Ccenter>\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEnnengYang_Awesome-Model-Merging-Methods-Theories-Applications_readme_5e853bb8e4c4.png\" alt=\"模型合并\" width=\"800\"\u002F>\n\u003C\u002Fcenter>\n\n\n\n#### 基本合并方法\n\n| **论文标题** | **年份** | **会议\u002F期刊** | **备注** |\n| --------------- | :----: | :----: | :----: |\n| [利用算术运算组合参数高效的模块](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.14870) | 2023 | NeurIPS |\n| [使用任务算术编辑模型](https:\u002F\u002Fopenreview.net\u002Fpdf?id=6t0Kwf8-jrj) | 2023 | ICLR |\n| [基于最优传输的模型融合](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2020\u002Ffile\u002Ffb2697869f56484404c8ceee2985b01d-Paper.pdf) |2020  | NeurIPS |\n| [神经网络的权重平均及局部重采样方案](https:\u002F\u002Fciteseerx.ist.psu.edu\u002Fdocument?repid=rep1&type=pdf&doi=a34e789c0f76b860b6e3bc1b7fa04054ccb75c3b) | 1996 | AAAI Workshop  |\n| [通过平均加速随机逼近](https:\u002F\u002Fepubs.siam.org\u002Fdoi\u002Fabs\u002F10.1137\u002F0330046?journalCode=sjcodc)| 1992 | IAM Journal on Control and Optimization\n| [用四元数曲线实现旋转动画（球面线性插值(SLERP)模型合并）](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F325165.325242) | 1985 | SIGGRAPH Computer Graphics |\n\n#### 基于加权的合并方法\n\n| **论文标题** | **年份** | **会议\u002F期刊** | **备注** |\n| --------------- | :----: | :----: | :----: |\n| [无标签跨任务LoRA合并与零空间压缩](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.26317)| 2026 | Arxiv | LLAMA-3 8B, LLAVA-1.5-7B\n| [均值是幻象：医学影像中异质领域迁移下的熵自适应模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.21372)| 2026 | Arxiv | \n| [LARV：用于模型合并的无数据逐层自适应缩放贴面](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.09413)| 2026 | Arxiv | \n| [Souper-Model：简单算术如何解锁最先进的LLM性能](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2511.13254v1)| 2025 | Arxiv | xLAM-2-70b、CoALM-70B、watt-tool-70B、functionary-medium-70B、xLAM-2-8b、ToolACE-2-8B、watt-tool-8B、BitAgent-8B、CoALM-8B | \n| [叠加任务特定特征进行模型合并](https:\u002F\u002Faclanthology.org\u002F2025.emnlp-main.210.pdf)| 2025 | EMNLP | Llama-2-7B\n| [T3：在VLM中进行测试时模型合并，用于零样本医学影像分析](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.27265)| 2025 | Arxiv |\n| [权重编织：用于无数据模型合并的参数池化](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.13921)| 2025 | Arxiv |\n| [专家合并：基于无监督专家对齐和重要性引导分层切块的模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.25712)| 2025 | Arxiv |Mistral-7B、InternVL、Qwen2-VL\n| [变分任务向量组合](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.18208)| 2025 |NeurIPS  |\n| [RegMean++：提升回归均值在模型合并中的有效性和泛化能力](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.03121)| 2025 |Arxiv  |\n| [StatsMerging：通过任务特定教师蒸馏实现统计指导的模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.04567)| 2025 |Arxiv  |\n| [SeMe：通过语义对齐实现无训练语言模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.20144)| 2025 |Arxiv  |\n| [NAN：一种无需训练的模型合并系数估计解决方案](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.16148)| 2025 |Arxiv  |LLaMA2-13B、WizardLM-13B、WizardMath-13B、LLaVA-v1.5-13B、LLaVA-1.6-13B、Math-LLaVA|\n| [利用子模块线性提高LLM中任务算术性能](https:\u002F\u002Fopenreview.net\u002Fpdf?id=irPcM6X5FV)| 2025 |ICLR  | Llama-2-7B和Llama-2-13B\n| [层感知的任务算术：解耦任务特定与指令遵循知识](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.20186)| 2025 |Arxiv  | Gemma-2-9B、Llama-3-8B |\n| [Sens-Merging：基于敏感性引导的参数平衡用于大型语言模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.12420v1)| 2025 |Arxiv  | LLaMA-2 7B系列、Mistral 7B系列、LLaMA-2 13B系列 |\n| [RankMean：用于微调后大型语言模型合并的模块级重要性评分](https:\u002F\u002Faclanthology.org\u002F2024.findings-acl.104.pdf)| 2024 | ACL  |\n| [非均匀逐参数模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.15467)| 2024 |Arxiv  |\n| [如何为多任务微调赋权？通过贝叶斯模型合并快速预览](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.08147)| 2024 |Arxiv  |\n| [LiNeS：训练后层缩放防止遗忘并增强模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.17146)| 2024 |Arxiv  |\n| [瓶中合并：可微分适应性合并（DAM）以及从平均到自动化的路径](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.08371)| 2024 |Arxiv  |shisa-gamma-7b、WizardMath-7B-V1.1、Abel-7B-002、Llama-3-SauerkrautLM-8b-Instruct、Llama-3-Open-Ko-8B、llama-3-sqlcoder-8b、Meta-Llama-3-8B |\n| [使用具有学习到各向异性缩放的任务向量进行知识组合](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.02880) | 2024 |Arxiv  |\n| [MetaGPT：利用模型专属任务算术合并大型语言模型](https:\u002F\u002Faclanthology.org\u002F2024.emnlp-main.102.pdf) | 2024 |EMNLP  | LLaMA-2-7B、Mistral-7B、LLaMA-2-13B |\n| [通过贝叶斯优化在LLM预训练中进行检查点合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.19390) |  2024 |Arxiv  | Baichuan2-220B、Baichuan2-440B、Baichuan2-660B、Baichuan2-1540B、Baichuan2-1760B、Baichuan2-1980B、Baichuan2-2200B、Baichuan2-2420B、DeepSeek-1400B、DeepSeek-1600B、DeepSeek-1800B、DeepSeek-2000B |\n| [Arcee’s MergeKit：大型语言模型合并工具包](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.13257) | 2024 |Arxiv  | Llama2-7B-Chat、Meditron-7B|\n| [模型合并配方的进化优化](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.13187) | 2024 |Arxiv  | shisa-gamma-7b-v1、WizardMath-7B-V1.1、Arithmo2-Mistral-7B、Abel-7B-002、Mistral-7B-v0.1、LLaVA-1.6-Mistral-7B|\n| [XFT：通过简单合并升级版混合专家模型释放代码指令微调的力量](https:\u002F\u002Faclanthology.org\u002F2024.acl-long.699.pdf)| 2024 | ACL |\n| [AdaMerging：面向多任务学习的适应性模型合并](https:\u002F\u002Fopenreview.net\u002Fpdf?id=nZP6NgD3QY) | 2024  | ICLR |\n| [基于不确定性梯度匹配的模型合并](https:\u002F\u002Fopenreview.net\u002Fpdf?id=D7KJmfEDQP) | 2024  | ICLR |\n| [通过在任务子空间中匹配模型进行合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.04339) | 2024  | TMLR |\n| [用于语言模型合并的费舍尔掩码节点](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.09891) | 2024 | LREC-COLING |\n| [通过费舍尔平均进行纠删码神经网络推理](https:\u002F\u002Fshiqiang.wang\u002Fpapers\u002FDJ_ISIT2024.pdf)| 2024 | ISIT |\n| [通过合并语言模型权重实现无数据知识融合](https:\u002F\u002Fopenreview.net\u002Fpdf?id=FCnohuR6AnM) | 2023  | ICLR |\n| [用费舍尔加权平均合并模型](https:\u002F\u002Fopenreview.net\u002Fpdf?id=LSKlp_aceOC) | 2022  | NeurIPS |\n\n#### 基于子空间的合并方法（稀疏或低秩子空间）\n\n| **论文标题** | **年份** | **会议\u002F期刊** | **备注** |\n| --------------- | :----: | :----: |:----: |\n| [Diet Your LLM: 通过合并任务特定重要性得分对大语言模型进行维度级全局剪枝](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.23985)| 2026 | Arxiv | Gemma-2 9B, Qwen2.5-7B, Phi-4-mini\n| [DC-Merge: 基于方向一致性的模型合并改进方法](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.06242)| 2026 | CVPR | LLaVA\n| [CoMoL: 基于动态核心空间合并的高效LoRA专家混合方法](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.00573)| 2026 | Arxiv |Qwen3-8B 和 Llama3.1-8B\n| [本质子空间中的模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.20208)| 2026 | Arxiv | \n| [超越参数算术：面向分布感知的稀疏互补融合用于模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.11717)| 2026 | Arxiv | Mistral-7B、Qwen2.5-14B 和 Qwen2.5-32B\n| [正交模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.05943)| 2026 | Arxiv |  Llama-3.1-8B、Qwen2.5-VL-7B-Instruct、Llama-3.2-3B\n| [当共享知识成为负担：模型合并中的谱过累积问题](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.05536)| 2026 | Arxiv | \n| [超越合并：基于激活引导旋转的流式大语言模型更新](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.03237)| 2026 | Arxiv |  Qwen2.5-7B、Qwen2.5-14B\n| [AdaRank: 用于增强模型合并的自适应秩剪枝方法](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.22178)| 2026 | ICLR | \n| [分解任务向量以实现精细化的模型编辑](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2512.22511)| 2025 | Arxiv | \n| [保持独特，保持高效：在多任务合并中保留模型个性](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2512.01461) | 2025 | Arxiv |  Qwen-14B\n| [面向低秩权重的可逆模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.14163) | 2025 | Arxiv | \n| [在知识感知子空间中净化任务向量以用于模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.14697)| 2025 | Arxiv | LLaMA-2-7B\n| [RobustMerge: 具有方向鲁棒性的参数高效多模态大语言模型合并方法](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.17159v3)| 2025 | NeurIPS | LLaVA\n| [核心空间中精确高效的低秩模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.17786)| 2025 | NeurIPS | \n| [通过模型合并实现高效的多源知识迁移](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.19353)| 2025 | Arxiv | \n| [一刀切并不适用：面向分布的稀疏化技术以实现更精准的模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.06163)| 2025 | Arxiv | \n| [NegMerge: 基于符号共识的权重合并以支持机器去学习](https:\u002F\u002Fopenreview.net\u002Fpdf?id=ZbWXovStjD)| 2025 | ICML | \n| [子空间增强型模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.16506)| 2025 | Arxiv | \n| [无需训练的大语言模型多任务学习合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.12379)| 2025 | Arxiv | \n| [更智能地合并，更好地泛化：提升OOD数据上的模型合并性能](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.09093)| 2025 | Arxiv | \n| [定位后合并：神经元级别的参数融合以缓解多模态大语言模型中的灾难性遗忘](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.16703)| 2025 | Arxiv | Mistral-7B、Llama3-8B |\n| [CALM: 面向多任务学习的一致性感知局部合并方法](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.13406)| 2025 |ICML  |\n| [面向多目标领域适应的合并友好型后训练量化](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.23651)| 2025 | ICML | \n| [结合参数剪枝的自适应LoRA合并以支持低资源生成](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.24174)| 2025 | ACL | Llama-3-8B-Instruct\n| [分解-归一化-合并：在正确空间上进行模型合并可提升多任务处理能力](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.23117)| 2025 | Arxiv | LLaMA3.1-8B\n| [CAT合并：一种无需训练的解决模型合并冲突的方法](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.06977)| 2025 | Arxiv | \n| [LoRI: 减少多任务低秩适配中的跨任务干扰](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.07448)| 2025 | Arxiv | Llama-3-8B 和 Mistral-7B |\n| [任务向量量化以实现内存高效的模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.06921)| 2025 | Arxiv | \n| [解耦神经元内的任务干扰：与神经机制对齐的模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.05320)| 2025 | Arxiv | Llama-2-7b |\n| [探索稀疏适配器以实现参数高效专家的可扩展合并](https:\u002F\u002Fopenreview.net\u002Fpdf?id=8wt2eKkVe6) | 2025 | ICLR 2025 Workshop | \n| [LEWIS（逐层稀疏）——一种无需训练的指导性模型合并方法](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.03874)| 2025 | ICLR 2025 Workshop |Gemma-9b、LLaMA 3.1 8b |\n| [CABS: 冲突感知且平衡的稀疏化技术以提升模型合并效果](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.01874)| 2025 | Arxiv |Mistral-7b-v0.1、WildMarcoroni-Variant1-7B 和 WestSeverus-7B-DPO-v2 |\n| [面向多语种语音识别与翻译的低秩稀疏模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.17380)| 2025 | Arxiv | \n| [LED-合并：通过位置选举分离来缓解模型合并中的安全与效用冲突](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.16770)| 2025 | Arxiv |Llama-3-8B、Mistral-7B 和 Llama2-13B |\n| [面向多模态大型语言模型的参数高效合并及互补参数适配](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.17159)| 2025 | Arxiv | \n| [最优脑迭代合并：缓解大语言模型合并中的干扰](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.12217)| 2025 | Arxiv | Llama-2-13b、WizardMath-13B-V1.0、WizardLM13B-V1.2、llama-2-13b-codealpaca\n| [叠加奇异特征以进行模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.10698)| 2025 | Arxiv | Llama-2-7B\n| [STAR: 谱截断与重缩放用于模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.10339)| 2025 | NAACL |  Mistral-7B-Instruct|\n| [不让任何任务掉队：结合通用与任务特定子空间的各向同性模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.04959)| 2025 | Arxiv |  \n| [无需再训练即可实时合并模型：一种用于可扩展持续模型合并的顺序方法](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2501.09522)| 2025  |NeurIPS  | |\n| [将多任务模型合并建模为自适应投影梯度下降](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2501.01230)| 2025 | Arxiv |  \n| [重新审视用于模型合并的权重平均法](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.12153)| 2024  |Arxiv  | |\n| [任务奇异向量：减少模型合并中的任务干扰](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.00081) | 2025  |CVPR  | |\n| [少即是多：采用二值任务切换实现高效模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.00054)|  2024 |Arxiv  |\n| [FREE-合并：利用傅里叶变换实现轻量级专家参与的模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.16815)|2024  |Arxiv  | Qwen-14B (LoRA)、  LLaMa2-13B、WizardLM-13B、WizardMath-13B、WizardCoderPython-13B |\n| [超越任务向量：基于重要性指标的选择性任务算术](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.16139)|2024  |Arxiv  | |\n| [用于模型合并的参数竞争平衡](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.02396v1)| 2024 | NeurIPS  | Llama-2-7b |\n| [语言模型就像超级马里奥：从同源模型中免费吸收能力](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.03099) | 2024 | ICML  | WizardLM-13B、WizardMath-13B、llama-2-13b-codealpaca、Mistral-7B|\n| [定位任务信息以改善模型合并与压缩](https:\u002F\u002Fopenreview.net\u002Fattachment?id=DWT9uiGjxT&name=pdf) | 2024 | ICML | |\n| [稀疏模型汤：通过模型平均实现更好剪枝的配方](https:\u002F\u002Fopenreview.net\u002Fpdf?id=xx0ITyHp3u) |2024  |ICLR  | |\n| [利用SVD进行模型合并以理清复杂关系](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.19735)|2024  |Arxiv  |Llama3-8B |\n| [NegMerge: 基于共识的权重否定以实现强大的机器去学习](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.05583)|2024  |Arxiv  | |\n| [定位并拼接：通过稀疏任务算术实现高效模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.13656)|2024  |Arxiv  | |\n| [通过因果干预定位激活参数以进行模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.09485)|2024  |Arxiv  | Llama-2-chat-7B|\n| [PAFT: 一种用于有效微调大语言模型的并行训练范式](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.17923)| 2024 | Arxiv  |Mistral-7B-v0.1、Llama-3-8B、Neurotic-7B、MoMo-70B|\n| [DELLA-合并：通过基于幅度的采样减少模型合并中的干扰](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.11617)|2024  |Arxiv  |Llama-2-13b-code-alpaca、WizardLM、Wizard-Math、WizardCoder-Python|\n| [EMR-合并：无需调优的高性能模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.17461) |2024  |NeurIPS  | |\n| [DPPA: 用于大语言模型到模型合并的剪枝方法](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.02799) |2024  |Arxiv  | LLaMa 2 |\n| [模型 breadcrumbs: 利用稀疏掩码实现多任务模型合并的规模化](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.06795) |2023  |Arxiv  | |\n| [基于具体子空间学习的干扰消除以实现多任务模型融合](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.06173) | 2023  |Arxiv  | |\n| [ComPEFT: 通过稀疏化和量化实现参数高效更新通信的压缩方法](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.13171) | 2023  |Arxiv  | LLaMA 7B、13B、33B 和 65B|\n| [有效且参数高效的复用微调模型](https:\u002F\u002Fopenreview.net\u002Fpdf?id=13D1zn0mpd) | 2023 | Openreview |\n| [解决模型合并时的干扰问题](https:\u002F\u002Fopenreview.net\u002Fpdf?id=xtaX3WyCj1) | 2023  |  NeurIPS | |\n| [微调语言模型中的任务特定技能定位](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2302.06600)|  2023| ICML | |\n\n#### 基于路由的合并方法（动态合并）\n\n| **论文标题** | **年份** | **会议\u002F期刊** | **备注** |\n| --------------- | :----: | :----: | :----: |\n| [TECS-L (Golden MoE): 密集到MoE专家拆分框架](https:\u002F\u002Fgithub.com\u002Fneed-singularity\u002FTECS-L)| 2026 | GitHub | Mistral-7B |\n| [通过模块化专家重组进行细粒度模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.06552)| 2026 | Arxiv |\n| [MIN-Merging: 为模型合并而合并重要神经元](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.17890) | 2025 | Arxiv |\n| [SE-Merging: 一种用于动态模型合并的自我增强方法](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.18135) | 2025 | Arxiv |\n| [大型语言模型的自适应任务向量](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.03426) | 2025 | Arxiv |LLaMA3-8B和Mistral-7B |\n| [基于贝叶斯优化的动态Fisher加权模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.18992)| 2025 | Arxiv |\n| [面向多任务模型融合的数据自适应权重集成](https:\u002F\u002Flink.springer.com\u002Farticle\u002F10.1007\u002Fs11263-025-02434-2)| 2025 | IJCV |\n| [MASS: 通过自适应子空间选择进行MoErging](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.05342)| 2025 | Arxiv |\n| [带有权重混合的动态模型合并](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F10900479\u002F)| 2025 | TCSVT |\n| [CAMEx: 曲率感知的专家合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.18821)| 2025 | ICLR |\n| [1bit-Merging: 大型语言模型的动态量化合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.10743)| 2025 | Arxiv | LLaMA-2 7B、Mistral 7B和LLaMA-2 13B |\n| [MergeME: 面向同质与异质MoE的模型合并技术](https:\u002F\u002Fpapers-pdfs.assets.alphaxiv.org\u002F2502.00997v3.pdf)| 2025 | Arxiv |\n| [Mediator: 基于路由的低参数冲突和不确定性内存高效LLM合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.04411)| 2025 |Arxiv  | Qwen-2.5-7B、LLaMA-3.2-8B |\n| [通过无训练动态权重插值调整基础模型](https:\u002F\u002Fopenreview.net\u002Fpdf?id=yyv54uPM0z) | 2024 | NeurIPS 2024研讨会 |\n| [面向多任务模型合并的高效且有效的专家混合权重集成](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.21804) | 2024 |Arxiv  |\n| [DaWin: 用于稳健适应的无训练动态权重插值](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.03782) | 2024 | NeurIPS 2024研讨会 |\n| [通过专家混合权重集成合并多任务模型](https:\u002F\u002Fopenreview.net\u002Fpdf\u002F2aee8072945cd0485e619dd88c35566610cd5042.pdf) | 2024 | ICML |\n| [学习在专业专家之间路由以实现零样本泛化](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.05859)|2024  | ICML  |\n| [先合并再压缩：从其路由策略中揭示高效SMoE的秘密](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.01334) |2024  | ICLR |\n| [具有自适应路由的专家软合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.03745) | 2024 | TMLR |\n| [SMILE: 从预训练基础模型构建零样本稀疏低秩专家混合](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.10174) |2024 |Arxiv  | Mistral-7B-v0.1、MetaMath-Mistral-7B、dolphin-2.1-mistral-7b、speechless-code-mistral-7b-v1.0|\n| [Twin-Merging: 模型合并中的模块化专业知识动态整合](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.15479) | 2024 | NeurIPS  |Qwen-14B|\n| [Self-MoE: 朝着具有自我专业化专家的组合式大型语言模型发展](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.12034) |2024 |Arxiv  |Gemma-7B、LLaMA-2 7B & 13B、Mistral 7B、LLaMA-3 8B|\n| [通过基于专家混合的模型融合实现高效的帕累托集近似](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.09770) | 2024 |Arxiv  |\n| [稀疏升级：从密集检查点训练专家混合](http:\u002F\u002Farxiv.org\u002Fabs\u002F2212.05055) | 2023 | ICLR |\n\n\u003C!-- | [Branch-Train-MiX: 将专家LLM混合成专家混合LLM](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.07816) | 2024 |Arxiv  | -->\n\n#### 校准后方法\n\n| **论文标题** | **年份** | **会议\u002F期刊** | **备注** |\n| --------------- | :----: | :----: | :----: |\n| [MAGIC: 通过幅度校准实现卓越的模型合并](https:\u002F\u002Fgithub.com\u002Flyymuwu\u002FMAGIC)| 2025 | Arxiv  |OLMo-3-7B\n| [迈向最小化模型合并中的特征漂移：用于自适应知识整合的逐层任务向量融合](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.23859)| 2025 |NeurIPS  |\n| [通过适应性合并进行多任务模型融合](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?tp=&arnumber=10887718)|2025  | ICASSP |\n| [在模型合并中使用概率建模进行表征手术](https:\u002F\u002Fopenreview.net\u002Fpdf?id=a02CH43z1G)|2025  | ICML |\n| [用于增强模型合并的参数高效干预](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.17023)|2024  | Arxiv |\n| [按任务为您的模型调色以改善多任务模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.13526)|2024  | Arxiv |\n| [SurgeryV2: 通过深度表征手术弥合模型合并与多任务学习之间的差距](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.14389)|2024  | Arxiv |\n| [用于多任务模型合并的表征手术](https:\u002F\u002Fopenreview.net\u002Fpdf\u002F602906ec02919eb95d78d634321fcba1b68a2f03.pdf) |2024  | ICML |\n\n### 其他合并方法\n\n| **论文标题** | **年份** | **会议\u002F期刊** | **备注** |\n| --------------- | :----: | :----: | :----: |\n| [任务对齐：计算机视觉中简单有效的模型合并代理](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2604.12935)| 2026 | Arxiv | \n| [基于无数据协方差估计的模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2604.01329)| 2026 | Arxiv | \n| [解决干扰（RI）：解耦模型以改进模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.13467)| 2026 | Arxiv | \n| [BD-Merging：基于证据引导的对比学习的偏见感知动态模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.03920)| 2026 | Arxiv | \n| [ACE-Merging：自适应协方差估计的无数据模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.02945)| 2026 | Arxiv | \n| [面向图神经网络的无训练跨架构合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.19332)| 2026 | Arxiv | \n| [用于跨预训练模型传输任务向量的梯度符号掩码](https:\u002F\u002Farxiv.org\u002Fabs\u002F2510.09658)| 2026 | ICLR | Flan-T5 |\n| [在不同架构之间无训练地传输任务向量](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.12952)| 2026 | Arxiv | \n| [MergePipe：面向可扩展LLM合并的预算感知参数管理系统](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.13273)| 2026 | Arxiv | Llama3.1-8B、Llama-3.2-3B、Qwen3-0.6B、Qwen3-1.7B和Qwen3-8B\n| [DisTaC：通过蒸馏调节任务向量以实现稳健模型合并](https:\u002F\u002Fopenreview.net\u002Fpdf?id=W70w5JCzdq)| 2026 | ICLR |\n| [面向模型合并的稀疏性感知进化](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.08218)| 2026 | Arxiv |\n| [AutoMerge：基于搜索的有效模型复用框架](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.22748)| 2026 | Arxiv  | Llama2-7B-Chat、Llama2-7B-Code\n| [通过多教师知识蒸馏进行模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2512.21288)| 2025 | Arxiv  |\n| [通过动量感知优化连接训练与合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2512.17109)| 2025 | Arxiv  |\n| [从系数到方向：通过方向对齐重新思考模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2512.00391)| 2025 | Arxiv  |\n| [摆脱优化停滞：通过差异向量迈出超越任务算术的步伐](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2511.17987)| 2025 | Arxiv  |\n| [具有功能双重锚点的模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.21223)| 2025 | Arxiv  |\n| [面向拥有海量模型库的语言模型即服务的黑盒模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.12951)| 2025 | Arxiv  |\n| [通过合并链重新思考逐层模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.21421v1)| 2025 | Arxiv  |Llama 3-8B\n| [竞争与吸引促进模型融合](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.16204)| 2025 | Arxiv  | WizardMath 7B v1.0、AgentEvol 7B\n| [PSO-Merging：基于粒子群优化的模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.19839)| 2025 | Arxiv  | Llama-3-8B、Llama-2-13B和Mistral-7B-v0.3 | \n| [DisTaC：通过蒸馏调节任务向量以实现稳健模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.01148)| 2025 | Arxiv  |\n| [通过灵活模型合并应对准确率与规模之间的权衡](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.23209)| 2025 | Arxiv  |\n| [高效多任务推理：基于Gromov-Wasserstein特征对齐的模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.09774?)| 2025 | Arxiv  |\n| [强化模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.21272)| 2025 | Arxiv  |\n| [FW-Merging：利用Frank-Wolfe优化扩展模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.12649)| 2025 | Arxiv  | LLaMA2-7B\n| [谁引发了干扰就该结束它：通过任务向量指导无数据模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.08099)| 2025 | Arxiv  | WizardLM-13B (语言模型)、WizardMath-13B (数学)和 llama-2-13b-codealpaca (代码) |\n| [GNNMERGE：无需访问训练数据即可合并GNN模型](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.03384)| 2025 | Arxiv  |\n| [MERGE3：在消费级GPU上进行高效的进化式合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.10436)| 2025 | ICML  | Mistral-7B\n| [大型语言模型的激活信息驱动合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.02421?)| 2025 | Arxiv | Llama-2-13B、WizardLM-13B、WizardMath-13B、llama-2-13b-code-alpaca\n| [通过渐进式逐层蒸馏实现可扩展模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.12706)| 2025 | Arxiv | WizardLM-13B、WizardMath-13B和llama-2-13b-code-alpaca\n| [好吧，我自己来合并：自动模型合并的多精度框架](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.04030)| 2025 | Arxiv | Llama-2-13B、WizardLM13B、WizardMath-13B、llama-2-13b-code-alpaca |\n| [信任区域内的任务算术：一种无训练的模型合并方法，用于应对知识冲突](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2501.15065)| 2025 | ICLR |  \n| [微调对齐分类器以合并输出：迈向更优的模型合并评估协议](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.13526)| 2024 | Arxiv |\n| [通过自适应权重解耦进行多任务模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.18729)| 2024 | Arxiv |\n| [重新思考加权平均模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.09263)| 2024 | Arxiv |\n| [ATM：通过交替调整与合并改进模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.03055)| 2024 | Arxiv |\n| [HM3：面向预训练模型的层次化多目标模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2409.18893) | 2024 | Arxiv | Llama-2-7B-Chat、WizardMath-7B、CodeLlama-7B|\n| [权重范围对齐：一种令人沮丧的简单模型合并方法](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.12237) | 2024 | Arxiv |\n| [变形时刻：通过多目标优化释放多个LLM的潜力](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.00487) | 2024 | Arxiv | Qwen1.5-7B-Chat、解放后的Qwen1.5-7B、firefly-qwen1.5-en-7B |\n| [朝着在不同数据集之间实现数据高效且不降低性能的模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.05641v2)| 2024 | JMLR |\n| [SOLAR 10.7B：通过简单而有效的深度扩展规模化大型语言模型](http:\u002F\u002Farxiv.org\u002Fabs\u002F2312.15166) | 2023 | Arxiv | SOLAR 10.7B、SOLAR 10.7B-Instruct|\n\n### 模型合并的理论或分析\n\n| **论文标题** | **年份** | **会议\u002F期刊** | **备注** |\n| --------------- | :----: | :----: | :----: |\n| [任务级模型合并崩溃的实证研究与理论解释](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.09463)| 2026 | Arxiv | Qwen2.5-3B、7B 和 14B，Llama3.1-8B\n| [参数高效专家之间的集成、合并与路由中的权衡](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.03535)| 2026 | Arxiv |\n| [适可而止：强化学习如何缓解大模型中的任务冲突的全面分析](https:\u002F\u002Fopenreview.net\u002Fpdf?id=N4l4Jp50R4)| 2026 | ICLR | Llama-3.2-3B、Llama-3.1-8B 和 Mistral-Small-3-24B\n| [M-Loss：利用有限无标签数据量化模型合并兼容性](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.08564)| 2026 | Arxiv |\n| [WSM：通过检查点合并实现的大模型预训练无衰减学习率调度](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.17634) | 2026 | ICLR | Ling-mini-16B\n| [揭秘可合并性：用于预测模型合并成功与否的可解释特性](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.22285)| 2026 | Arxiv |\n| [理解模型合并：异构专家的统一泛化框架](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.21690)| 2026 | Arxiv |\n| [能合并吗？关于模型可合并性的成因](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.06672)| 2026 | Arxiv | Llama-3.2-3B、Qwen-2.5-3B、Mistral-7B-Instruct-v0.2\n| [优化器如何隐式地偏置模型合并损失景观？](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.04686)| 2025 | Arxiv |\n| [关于任务向量和梯度](https:\u002F\u002Fwww.arxiv.org\u002Fpdf\u002F2508.16082)| 2025 | Arxiv |\n| [为什么更多的专家会失败？模型合并的理论分析](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.21226) | 2025 | Arxiv |\n| [任务向量在什么情况下对模型编辑具有可证明的有效性？非线性Transformer的泛化分析](https:\u002F\u002Fopenreview.net\u002Fpdf?id=iX7eHHE5Tx) | 2025 | ICLR |\n| [模型合并中的多层级协作](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.01268) | 2025 | Arxiv |\n| [神经网络中的低秩偏置、权重衰减与模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.17340)| 2025 | Arxiv |\n| [理解带有指数移动平均的SGD：以线性回归为例](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.14123)| 2025 | Arxiv |\n| [SeWA：基于概率掩码的选择性权重平均](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.10119)| 2025 | Arxiv |\n| [利用任务向量基进行高效的模型编辑：理论框架与可扩展方法](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.01015)| 2025 | Arxiv |\n| [单次联邦学习视角下的任务算术](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.18607)| 2024 | Arxiv | WizardLM-13B、WizardMath-13B、Llama-2-13B-Code-Alpaca、Llama2-13B|\n| [有限权重平均的统一分析](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.13169v1)| 2024 | Arxiv |\n| [WASH：使用通信高效的权重洗牌训练集成模型，然后取平均](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.17517) | 2024 | Arxiv |\n| [预训练-微调范式中跨任务线性的涌现](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.03660)| 2024 | ICML |\n| [一般采样下的随机权重平均的泛化分析](https:\u002F\u002Fproceedings.mlr.press\u002Fv235\u002Fwang24bl.html)| 2024 | ICML |\n| [针对分布外泛化的多样化权重平均](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper_files\u002Fpaper\u002F2022\u002Ffile\u002F46108d807b50ad4144eb353b5d0e8851-Paper-Conference.pdf) | 2022 | NeurIPS |\n| [平均值集成：改进模型选择并提升领域泛化性能](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2110.10832) | 2022 | NeurIPS |\n| [对抗训练的稳定性分析与泛化界](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2210.00960)| 2022 | NeurIPS |\n| [置换不变性在神经网络线性模式连通性中的作用](https:\u002F\u002Fopenreview.net\u002Fpdf?id=dNigytemkL) | 2022 | ICLR |\n| [Swad：通过寻找平坦极小值实现领域泛化](https:\u002F\u002Fopenreview.net\u002Fpdf?id=zkHlu_3sJYU) | 2021 | NeurIPS |\n| [线性模式连通性和彩票假说](https:\u002F\u002Fproceedings.mlr.press\u002Fv119\u002Ffrankle20a\u002Ffrankle20a.pdf) | 2020 | ICML |\n| [并行随机权重平均：能够泛化的大型批量训练](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2001.02312) | 2020 | ICLR |\n| [通过神经元对齐优化模式连通性](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2009.02439) | 2020 | NeurIPS |\n| [均匀收敛可能无法解释深度学习中的泛化现象](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper_files\u002Fpaper\u002F2019\u002Ffile\u002F05e97c207235d63ceb1db43c60db7bbb-Paper.pdf) | 2019 | NeurIPS |\n| [为最小二乘回归并行化随机梯度下降：小批量处理、平均化与模型误设定](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1610.03774) | 2018 | JMLR |\n| [迭代平均作为随机梯度下降的正则化](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1802.08009) | 2018 | Arxiv |\n| [神经网络能量景观中几乎没有障碍](https:\u002F\u002Fproceedings.mlr.press\u002Fv80\u002Fdraxler18a\u002Fdraxler18a.pdf) | 2018 | ICML |\n| [权重平均会导致更宽泛的最优解并改善泛化能力](https:\u002F\u002Fauai.org\u002Fuai2018\u002Fproceedings\u002Fpapers\u002F313.pdf) | 2018 | UAI |\n| [更快训练，更好泛化：随机梯度下降的稳定性](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1509.01240) | 2016 | ICML |\n\n----------\n\n## 基础模型中模型合并的应用\n\n\u003Ccenter>\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEnnengYang_Awesome-Model-Merging-Methods-Theories-Applications_readme_130cf2922167.png\" alt=\"模型合并\" width=\"800\"\u002F>\n\u003C\u002Fcenter>\n\n\n### 大语言模型中的模型合并\n\n\u003Ccenter>\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEnnengYang_Awesome-Model-Merging-Methods-Theories-Applications_readme_91bbbb3fbbac.png\" alt=\"模型合并\" width=\"800\"\u002F>\n\u003C\u002Fcenter>\n\n#### 针对大语言模型的人类偏好对齐\n\n| **论文标题** | **年份** | **会议\u002F期刊** | **备注** |\n| --------------- | :----: | :----: | :----: |\n| [导航对齐-校准权衡：通过模型合并实现帕累托最优边界](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.17426)| 2025 | Arxiv  | Gemma-3-12B、Gemma-3-27B、Qwen2.5-7B |\n| [BILLY：通过合并人格向量引导大型语言模型进行创意生成](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.10157)| 2025 | Arxiv  |Qwen-2.5-7B-Instruct、Llama-3.1-8B-Instruct |\n| [人格向量：通过模型合并调节大型语言模型的人格特质](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.19727)| 2025 | EMNLP  | Llama-3.1-8B-Instruct、Qwen2.5-7B-Instruct |\n| [SafeMERGE：通过选择性逐层模型合并保持微调后LLM的安全对齐](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.17239v1)| 2025 | Arxiv  |Llama-2-7B-Chat、Qwen-2-7B-Instruct |\n| [骨头汤：一种用于可控多目标生成的搜索与混合模型合并方法](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.10762)| 2025 | Arxiv  |LLaMA-2 7B\n| [更好的RLHF的模型汤：通过权重空间平均提升LLM的对齐效果](https:\u002F\u002Fopenreview.net\u002Fforum?id=QNW3Z3f5SD)| 2024 | NeurIPS 2024 Workshop  | Llama2-7B、Mistral-7B、Gemma-2B |\n| [通过预训练和后训练模型合并保护微调后的LLM](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.19512)| 2024 | Arxiv  | Llama-3-8B-Instruct\n| [SafetyDPO：文本到图像生成的可扩展安全对齐方法](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.10493)| 2024 | Arxiv  |\n| [H3Fusion：对齐LLM的有益、无害、诚实融合](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.17792)| 2024 | Arxiv  |LLaMA-2 7B\n| [百川对齐技术报告](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.14940)| 2024 | Arxiv  | Qwen2-Nova-72B、Llama3-PBM-Nova-70B |\n| [条件化语言策略：一种可引导的多目标微调通用框架](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.15762)| 2024 | Arxiv  |\n| [DogeRM：通过模型合并为奖励模型注入领域知识](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.01470)| 2024 | Arxiv  | MetaMath-7B、MAmmoTH-7B、LLaMA2-7B|\n| [PAFT：一种用于高效LLM微调的并行训练范式](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.17923)| 2024 | Arxiv  |Mistral-7B-v0.1、Llama-3-8B|\n| [模型合并与安全对齐：一坏毁全局](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.14563) |  2024 | Arxiv  | Mistral-0.2-7B-Instruct、LLaMA-3-8B-Instruct、OpenBioLLM-8B、MAmmoTH2-7B、WizardMath-1.1-7B|\n| [通过安全补丁实现大型语言模型全面的后期安全对齐](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.13820)|  2024 | Arxiv  |LLaMA-2-7B-Chat、LLaMA-3-8B-Instruct、Mistral7B-Instruct-v0.1和Gemma1.1-7B-it|\n| [分散后再合并：通过降低对齐税来突破指令微调的极限](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.13432)| 2024 | Arxiv  | Llama-2-7b |\n| [在线合并优化器：通过奖励提升和对齐中的税收缓解](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.17931) |  2024 | Arxiv  | Qwen1.5-7B、LLaMa3-8B |\n| [基于子空间导向的模型融合的大语言模型安全再对齐框架](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.09055) |  2024 | Arxiv  | WizardLM-7B |\n| [弱到强的外推加速对齐](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2404.16792) | 2024 | Arxiv  | zephyr-7b、starling-7b、snorkel-7b、llama3-8b、internlm2-7b、internlm2-20b、tulu-2-dpo-7b、tulu-2-dpo-13b、tulu-2-dpo-70b|\n| [语言模型就是荷马·辛普森！通过任务算术对微调后的语言模型进行安全再对齐](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.11746) | 2024 | Arxiv  | Llama-2-7BChat |\n| [奖励汤：通过插值在不同奖励上微调的权重实现帕累托最优对齐](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.04488) |2023  | NeurIPS |  LLaMA-7b|\n| [个性化汤：通过事后参数合并实现个性化大型语言模型对齐](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.11564) | 2023 | Arxiv  |Tulu-7B LM|\n\n\u003C!-- | [安全算术：通过引导参数和激活实现语言模型测试时安全对齐的框架](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.11801) | 2024 | Arxiv  | llama2-7b-chat-hf、mistral-7b-instruct-v0.2、WIZARDMATH-7B、Llama Math、Llama-2-7b-evolcodealpaca|-->\n\n#### LLM的去毒化\n\n| **论文标题** | **年份** | **会议\u002F期刊** | **备注** |\n| --------------- | :----: | :----: | :----: |\n| [手术式、廉价且灵活：通过单向量消融缓解语言模型的虚假拒绝](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.03415) | 2025 |  ICLR | GEMMA-7B-IT、LLAMA2-7B\u002F13B\u002F70B-CHAT、LLAMA3-8B-INST | \n| [3DM：蒸馏、动态剔除与合并，用于去偏见的多模态大型语言模型](https:\u002F\u002Faclanthology.org\u002F2025.findings-acl.722.pdf) | 2025 |  ACL | LLaVA-1.5-7b、InternVL-2.5-8b、LLaVA-1.5-7b和ChatGLM4-9b |\n| [扩展后再推理：通过提前层间插值得以增强大型语言模型的事实性](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.02973) | 2025 |  Arxiv | LLAMA3-8B-Instruct、Mistral-7B-Instruct-v0.2 |\n| [偏见向量：用任务算术方法缓解语言模型中的偏见](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.11679) | 2024 |  Arxiv |\n| [去芜存菁：通过参数高效的模块操作实现模型缺陷的遗忘](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2308.08090) | 2024 |  AAAI | LLaMA-7B  |\n| [通过遗忘机制缓解语言模型的社会偏见](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.13551) | 2024 |  Arxiv | LLaMA-2 7B |\n| [基于实例级前缀的细粒度去毒化大型语言模型](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.15202) | 2024 |  Arxiv | Llama-2-7B、Llama-2-chat-7B、Vicuna-7B、Llama-2-13B|\n| [用算术运算组合参数高效的模块](https:\u002F\u002Fopenreview.net\u002Fpdf?id=5r3e27I9Gy) | 2023 | NeurIPS  |\n| [用任务算术编辑模型](https:\u002F\u002Fopenreview.net\u002Fpdf?id=6t0Kwf8-jrj) | 2023 | ICLR |\n| [弹性权重移除用于忠实而抽象的对话生成](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2303.17574) | 2023 |  Arxiv |\n\n#### LLM的知识编辑\u002F遗忘\n\n| **论文标题** | **年份** | **会议\u002F期刊** | **备注** |\n| --------------- | :----: | :----: | :----: |\n| [用于大型语言模型去训练的逐参数任务算术](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.22030) | 2026  | Arxiv | Llama3.2 1B Instruct\n| [用于知识编辑的模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.12384)| 2025  | ACL | Qwen2.5-7B-Instruct\n| [通过大规模模型合并实现微调数据的精确去训练](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.04626) | 2025  | Arxiv |\n| [ZJUKLAB在SemEval-2025任务4中的表现：通过模型合并进行去训练](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.21088) | 2025  | Arxiv | OLMo-7B-0724-Instruct\n| [通过大规模模型合并实现微调数据的精确去训练](https:\u002F\u002Fopenreview.net\u002Fpdf?id=u89LDBIyDe)|2025  |ICLR 2025 Workshop MCDC  | |\n| [NegMerge：用于强大机器去训练的一致性权重否定](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.05583)|2024  |Arxiv  | |\n| [拆分、去训练、合并：利用数据属性提升LLM中去训练的有效性](https:\u002F\u002Farxiv.org\u002Fabs\u002F2406.11780)|2024  |Arxiv  | ZEPHYR-7B-BETA, LLAMA2-7B|\n| [通过机器去训练迈向更安全的大型语言模型](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.10058) | 2024 | ACL | LLAMA2-7B, LLAMA2-13B |\n| [使用任务算术编辑模型](https:\u002F\u002Fopenreview.net\u002Fpdf?id=6t0Kwf8-jrj) | 2023 | ICLR |\n| [先遗忘后学习：利用参数算术更新大型语言模型中的知识](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.08011) | 2023 | Arxiv | LLAMA2-7B, LLAMA-7B, BLOOM-7B|\n| [融合以遗忘：通过模型融合减少偏见并实现选择性记忆](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.07682) | 2023 | Arxiv |\n\n#### 加快LLM的训练速度\n\n | **论文标题** | **年份** | **会议\u002F期刊** | **备注** |\n | --------------- | :----: | :----: | :----: |\n | [混搭学习：通过重混过往检查点加速微调](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.10156)| 2026 |  Arxiv |\n | [GTR-Turbo：合并后的检查点实际上是代理式VLM训练的免费教师](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2512.13043)| 2025 |  Arxiv |Qwen2.5-VL-7B \n | [专家之汤：通过参数平均预训练专家模型](https:\u002F\u002Fopenreview.net\u002Fforum?id=MFNIka7nx0)| 2025 |  ICML |\n | [局部混合专家：通过模型合并实现几乎免费的测试时训练](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.14136)| 2025 |  Arxiv |\n | [合并以混合：通过模型合并混合数据集](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.16066)| 2025 |  Arxiv | Llama-3-8B-Instruct\n | [大型语言模型预训练中的模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.12082)| 2025 |  Arxiv |Seed-MoE-1.3B\u002F13B, SeedMoE-10B\u002F100B, Seed-MoE-15B\u002F150B |\n | [基于指标加权平均的参数高效检查点合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.18580) | 2025 |  Arxiv |\n | [DEM：用于混合数据分布训练的分布编辑模型](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.15570) | 2024 |  Arxiv |  OpenLLaMA  7B和13B|\n | [LLM预训练中基于贝叶斯优化的检查点合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.19390) | 2024 |  Arxiv | Baichuan2-220B, Baichuan2-440B, Baichuan2-660B, Baichuan2-1540B, Baichuan2-1760B, Baichuan2-1980B, Baichuan2-2200B, Baichuan2-2420B, DeepSeek-1400B, DeepSeek-1600B, DeepSeek-1800B, DeepSeek-2000B|\n | [ColD融合：分布式多任务微调的协作下降](https:\u002F\u002Faclanthology.org\u002F2023.acl-long.46.pdf) |2023  |  ACL|\n | [早期权重平均结合高学习率用于LLM预训练](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2306.03241) |  2023 |NeurIPS Workshop  |\n | [别再浪费我的时间了！用最新的权重平均节省Imagenet和BERT训练的数天时间](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2209.14981) | 2022 |NeurIPS Workshop  |\n | [融合微调过的模型以改善预训练](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2204.03044) | 2022 |Arxiv  |\n\n#### 提高LLM的推理速度\n | **论文标题** | **年份** | **会议\u002F期刊** | **备注** |\n | --------------- | :----: | :----: | :----: |\n | [多目标进化合并实现高效推理模型](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2604.06465) | 2026 | Arxiv | DeepSeek-R1-Distill-Qwen 1.5B、7B和14B\n | [基于费舍尔信息的无数据层适应性合并，适用于长短期推理的LLM](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.21705) | 2026 | Arxiv | Qwen2.5-Math-7B,DeepSeek-R1-Distill-Qwen-7B\n | [RAIN-合并：一种无需梯度的方法，可在保持思维格式的同时增强大型推理模型的指令遵循能力](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.22538) | 2026 | ICLR |  Qwen2.5-1.5B\u002F14B\u002F32B, 和 Llama-3.1-8B\n | [推理模式对齐合并以实现自适应推理](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.03506) | 2026 | Arxiv | (i) Qwen3-4B-Thinking (Long-CoT) 和 Qwen3-4B-Instruct (Short-CoT); (ii) DeepSeekR1-Distill-Qwen-1.5B (Long-CoT) 和 Qwen2.5- Math-1.5B (Short-CoT)\n | [重新审视模型插值以实现高效推理](https:\u002F\u002Farxiv.org\u002Fabs\u002F2510.10977) | 2025 | Arxiv | Qwen3-4B\n | [通过模型合并解锁高效长短期LLM推理](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.20641)| 2025 |Arxiv  | Qwen2.5-32B, DeepSeek-R1-32B |\n | [Kimi k1.5：利用LLM扩展强化学习](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2501.12599?)| 2025 |Arxiv  | Kimi k1.5\n  \n#### 提升基于MoE的LLM的计算效率\n\n| **论文标题** | **年份** | **会议\u002F期刊** | **备注** |\n | --------------- | :----: | :----: | :----: |\n | [REAM: 融合提升大模型专家剪枝效果](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2604.04356)| 2026 | Arxiv  |  Qwen3-30B-A3B-Instruct-2507,  Qwen3-Coder-Next, GLM-4.5-Air\n | [用于缓解奖励欺骗的再利用与融合MoE奖励模型](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2512.00724)| 2025 | Arxiv  | \n | [PuzzleMoE: 基于稀疏专家融合与位打包推理的大规模混合专家模型高效压缩](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2511.04805)| 2025 | Arxiv  | Mixtral-8x7B, Deepseek-MoE\n | [图基础模型中混合专家结构的增强型专家融合](https:\u002F\u002Fopenreview.net\u002Fpdf?id=fyqqd1lHDb) | 2025 | Arxiv  | LLaMA-3.1-8B\n | [基于纳什讨价还价的稀疏混合专家中的专家融合](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.16138)| 2025 |Arxiv  |Qwen1.5-MoE-14B, DeepSeek-MoE-16B |\n | [MergeMoE: 通过专家输出融合实现MoE模型高效压缩](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.14436)| 2025 |Arxiv  | DeepSeekMoE, Qwen1.5-MoE-A2.7B, 和 Qwen3-30B-A3B |\n | [更快、更小、更智能：面向在线MoE推理的任务感知专家融合](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.21966)| 2025 |Arxiv  | \n | [Sub-MoE: 基于子空间专家融合的高效混合专家LLM压缩](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.23266) | 2025 |Arxiv  | Mixtral 8x7B, Qwen3- 235B-A22B, Qwen1.5-MoE-A2.7B, 和 DeepSeekMoE-16B-Base\n | [关于混合专家架构的线性模式连通性](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.11348)| 2025 |NeurIPS  |  \n | [先融合，再压缩：从路由策略中揭示高效SMoe的秘密](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.01334)|2024|ICLR|fairseq-moe15b SMoE\n | [将专家合并为一：提升混合专家的计算效率](https:\u002F\u002Faclanthology.org\u002F2023.emnlp-main.907.pdf) | 2023 |EMNLP  | \n\n#### 通过模型融合混合数据集\n | **论文标题** | **年份** | **会议\u002F期刊** | **备注** |\n | --------------- | :----: | :----: | :----: |\n | [OPTIMER: 对于持续预训练而言，最优分布向量融合优于数据混合](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.28858)| 2026 |  Arxiv |Gemma 3 27B\n | [线性模型融合解锁简单且可扩展的多模态数据混合优化](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.04937)| 2026 |  Arxiv | Qwen2-VL-2B 和 Intern3.5-VL-2B\n | [将搜索与训练解耦：通过模型融合规模化大型语言模型预训练的数据混合](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.00747)| 2026 |  Arxiv | Qwen3-1.7B\n | [多任务代码LLM：数据混合还是模型融合？](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.21115)| 2026 | Arxiv  | Qwen Coder 2.5 7B, DeepSeek 7B\n | [MergeMix: 基于可学习模型融合优化训练中期数据混合](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.17858)| 2026 |  Arxiv | 8B 和 16B MoE\n | [合并以混合：通过模型融合混合数据集](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.16066)| 2025 |  Arxiv | Llama-3-8B-Instruct\n\n \n#### LLM代理融合\n\n  | **论文标题** | **年份** | **会议\u002F期刊** | **备注** |\n  | --------------- | :----: | :----: | :----: |\n  | [强化学习驱动的代理模型中的行为知识融合](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.13572)| 2026 | Arxiv | RL训练的代理模型\n  | [ARM: 基于角色条件的神经元移植，实现无需训练的一般化LLM代理融合](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.07309)| 2026 | Arxiv | Simia-Tau-SFT-Qwen3-8B, SimiaOfficeBench-SFT-Qwen3-8B, 和 Simia-AgentBench-SFT-Qwen3-8B\n  | [划分、优化、融合：面向LLM代理的可扩展细粒度生成式优化](https:\u002F\u002Faclanthology.org\u002F2025.findings-emnlp.1034\u002F)| 2025 | EMNLP | o3-mini\n  | [AgentMerge: 提升微调后LLM代理的泛化能力](https:\u002F\u002Fopenreview.net\u002Fpdf?id=nZmAwmi2gr))| 2024 | NeurIPS | Llama3.1-8B \n  | [通过CycleQD实现大型语言模型的代理技能获取](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.14735) |2024 |Arxiv | Llama3-8B-Instruct|\n\n#### 结合专家LLM的能力\n\n| **论文标题** | **年份** | **会议\u002F期刊** | **备注** |\n  | --------------- | :----: | :----: | :----: |\n  | [合并与征服：通过添加目标语言权重指导多语言模型](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.28263)| 2026 | Arxiv | Llama 3.1 8B、Qwen3 8B、Qwen3 14B\n  | [偏好对齐的LoRA合并：保持子空间覆盖并解决方向各向异性](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.26299)| 2026 | Arxiv | LLaMA-3-8B\n  | [无标签跨任务LoRA合并与零空间压缩](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.26317)| 2026 | Arxiv | LLAMA-3 8B、LLAVA-1.5-7B\n  | [AdaLTM：自适应逐层任务向量合并，用于结合ASR知识的分类语音情感识别](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.25041)| 2026 | Arxiv | \n  | [基于Fisher–Rao流形的功能导向LLM合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.04972)| 2026 | Arxiv | Qwen2.5-14B、Qwen2.5-14B-Instruct-1M、Qwen2.5-Coder-14B-Instruct、DeepSeek-R1-Distill-Qwen-14B、OpenReasoning-Nemotron-14B\n  | [自适应合并下LoRA复用的吸引力与现实](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.12323)| 2026 | Arxiv |   Llama3.1 8B-Instruct\n  | [LS-Merge：在隐空间中合并语言模型](https:\u002F\u002Fopenreview.net\u002Fpdf?id=VSDV0SWwOC)| 2026 | ICLR | Gemma-3-1B-it、Gemma-3-4B-it、Llama-3-1B-instruct、Llama-2-7b\n  | [基于Bagging的模型合并用于鲁棒的通用文本嵌入](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.05787)| 2026 | Arxiv |  Qwen3-4B\n  | [面向设备端大型语言模型的适配器数据驱动聚类与合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.17441)| 2026 | Arxiv |  Llama 3.2 3B、Qwen 2.5 1.5B和StableLM 2 1.6B\n  | [通过特定语言模型合并提高训练效率、降低维护成本](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.16127)| 2026 | Arxiv | Llama-3.1-8b-Instruct\n  | [SimMerge：从相似性信号中学习选择合并算子](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.09473)| 2026 | Arxiv | 7B至111B\n  | [多阶段进化式模型合并与元数据驱动课程学习，用于情感专用大型语言建模](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.06780)| 2026 | Arxiv |\n  | [ReasonAny：通过简单有效的模型合并将推理能力融入任何模型](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.05560)| 2026 | Arxiv | QwQ-32B-Preview、Meditron3-Qwen2.5-7B和MMed-Llama3-8B、WiroAIFinance-Qwen-7B和WiroAI-Finance-Llama8B\n  | [通过模型合并可靠地保存多语言LLM中的文化知识](https:\u002F\u002Fraw.githubusercontent.com\u002Fmlresearch\u002Fv310\u002Fmain\u002Fassets\u002Fnguyen25b\u002Fnguyen25b.pdf)| 2025 | Arxiv |Qwen-2.5-3B\n  | [AlignMerge——基于Fisher引导的几何约束实现对齐保留的大语言模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2512.16245)| 2025 | Arxiv | LLaMA-3 8B、Mistral 7B、Qwen 2、Phi-3.5、Gemma 2\n  | [成长与合并：高效语言适配的扩展策略](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2512.10772)| 2025 | Arxiv | \n  | [仅使用目标未标注语言数据调整聊天语言模型](https:\u002F\u002Fopenreview.net\u002Fpdf?id=6IdoIKowfe)| 2025 | TMLR | Qwen2.5 7B、Llama 3.1 8B、Qwen3 14B\n  | [RCP-Merging：以推理能力为先验，将长链式思维模型与领域特定模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.03140) | 2026 |  AAAI | Qwen2.5-7B、Llama3.1-8B\n  | [Souper-Model：简单算术如何解锁最先进的LLM性能](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2511.13254v1)| 2025 | Arxiv | xLAM-2-70b、CoALM-70B、watt-tool-70B、functionary-medium-70B、xLAM-2-8b、ToolACE-2-8B、watt-tool-8B、BitAgent-8B、CoALM-8B | \n  | [SPEAR-MM：通过模型合并进行参数选择性评估与恢复，以实现高效的金融LLM适配](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2511.08500)| 2025 | Arxiv |\n  | [为领域专用LLM合并持续预训练模型：以金融为例](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2511.02451)| 2025 | Arxiv | Llama-3-8B、Llama-2-7B \n  | [提取并组合能力，构建多语言增强型大型语言模型](https:\u002F\u002Faclanthology.org\u002F2025.emnlp-main.887.pdf)| 2025 | EMNLP | LLaMA-3 8B\n  | [通过模型合并弥合阿拉伯语医学LLM中的方言差距](https:\u002F\u002Faclanthology.org\u002F2025.arabicnlp-main.27.pdf)| 2025 | arabicnlp | \n  | [通过模型合并使多语言模型适应代码混合任务](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.19782)| 2025 | Arxiv |\n  | [协调多样模型：用于一致性生成的逐层合并策略](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.14915)| 2025 | Arxiv | Llama-3.1-8B-Instruct和Gemma-3-12B-Instruct\n  | [ABC：通过模型合并迈向通用代码样式器](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fpdf\u002F10.1145\u002F3763104)| 2025 | ACM关于编程语言的会议 | Qwen2.5-Coder、Deepseek-Coder |\n  | [家庭事务：语言迁移与合并，以使小型LLM适应法罗语](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.00810)| 2025 | Arxiv |\n  | [专家合并：无监督专家对齐与重要性引导的分层切块进行模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.25712)| 2025 | Arxiv |Mistral-7B、InternVL、Qwen2-VL\n  | [思考光谱：通过模型合并对LLM可调推理能力的实证研究](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.22034)| 2025 | Arxiv | Qwen3-30B-A3B-Thinking-2507、Qwen3-30B-A3B-Instruct-2507 |\n  | [MLM：多语言LoRA合并](https:\u002F\u002Fopenreview.net\u002Fattachment?id=VAnFWVbYxG&name=pdf) 2025 | NeurIPS WorkShop | LLaMA-3.2 (1B和3B)\n  | [大型语言模型中的模型合并缩放规律](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.24244)| 2025 | Arxiv | Qwen2.5 0.5、1.5、3、7、14、32、72B\n  | [利用优化动力学进行曲率感知的模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.11167)| 2025 |  Arxiv | Llama-3.1-8B\n  | [Kwai Keye-VL 1.5技术报告](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.01563)| 2025 |  Arxiv |Keye-VL-8B\n  | [推理向量：通过任务算术转移链式思维能力](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.01363)| 2025 |  Arxiv | QWEN2.5-7B|\n  | [用于模型合并优化的替代基准](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.02555)| 2025 |  Arxiv| EvoLLM-JP-v1-7B、shisa-gamma-7b-v1 |\n  | [张量化聚类LoRA合并用于多任务干扰](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.03999)| 2025 |  Arxiv| Mistral-7B\n  | [设备端大型语言模型的高效组合式多任务处理](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.16083)| 2025 |  Arxiv|  Llama 3.1 70B\n  | [HydraOpt：导航适配器合并的效率与性能权衡](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.17706)| 2025 |  Arxiv|\n  | [探索稀疏适配器以实现参数高效专家的可扩展合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.07140)| 2025 |  Arxiv|\n  | [为增强代码生成而合并大型语言模型：跨编程语言的模型合并技术比较研究](https:\u002F\u002Fwww.diva-portal.org\u002Fsmash\u002Fget\u002Fdiva2:1973270\u002FFULLTEXT01.pdf)| 2025 |  DiVA开放获取 |CodeQwen1.5-7B、DeepSeek-Coder-6.7b-Base、CodeLlama-34B |\n  | [关于任务算术公平性的探讨：任务向量的作用](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.24262)| 2025 |  Arxiv| LLaMA2-7B\n  | [模型合并对于LLM跨语言迁移的不可思议有效性](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.18356)| 2025 |  Arxiv|FALCON 3 7B、QWEN2.5 7B Instruct、LLAMA 3.1 8B Instruct、AYA Expanse 8B\n  | [模型合并竟然是可认证的：低样本学习的非空泛化界](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.15798)| 2025 |  Arxiv|MetaMath-Mistral-7B、Dolphin-2.1-Mistral-7B和Speechless-Code-Mistral-7Bv1.0\n  | [无需训练的LLM合并用于多任务学习](https:\u002F\u002Fopenreview.net\u002Fpdf?id=m6A6HoCKvt)| 2025 |ACL  | Echelon-AI\u002FMed-Qwen2-7B、shtdbb\u002Fqwen2-7b-med、Qwen2-Instruct |\n  | [ParamΔ用于直接权重混合：零成本的训后大型语言模型](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.21023)| 2025 |Arxiv  | Llama3-inst-70B、Llama3-base-70B、Llama3.1-base-70B |\n  | [超越“啊哈！”：迈向大型推理模型中系统性的元能力对齐](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.10554)| 2025 |Arxiv  |Qwen2.5-7B、Qwen2.5-32B|\n  | [统一的多任务学习与模型融合，用于高效的语言模型护栏](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.19333)| 2025 |Arxiv  |\n  | [通过模型合并，一天内将特定语言LLM适配为推理模型——一份公开配方](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.09056)| 2025 |Arxiv  | Typhoon2 R1 70B、Deepseek R1 70B |\n  | [通过微调迁移实现高效模型开发](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.20110v1)| 2025 |Arxiv  | Llama 3.1 8B\n  | [Command A：一款企业级大型语言模型](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.00698v1)| 2025 |Arxiv  | Command R7B\n  | [外推合并：借助外推与合并不断改进](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.04834)| 2025 |Arxiv  |Qwen2-7B、Meta-Llama-3-8B、Mistral-Nemo-Base-2407-12B、Qwen1.5-14B |\n  | [Light-R1：从头开始及更进一步的长期COT课程SFT、DPO和RL](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.10460)| 2025 |Arxiv  |Light-R1-32B|\n  | [FuseChat-3.0：偏好优化遇上异构模型融合](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.04222v1)| 2025 |Arxiv  |Gemma-2-27B-it、Mistral-Large-Instruct-2407、Qwen-2.5-72B-Instruct以及Llama-3.1-70B-Instruct |\n  | [表面自我提升的推理者受益于模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.02103)| 2025 |Arxiv  |Llama2-7B\n  | [受自然启发的大语言模型群体进化](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.01155)| 2025 |Arxiv  |\n  | [层次感知的任务算术：解耦任务特异性和指令遵循知识](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.20186)| 2025 |Arxiv  | Gemma-2-9B、Llama-3-8B |\n  | [Mixup模型合并：通过随机线性插值提升模型合并性能](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.15434)| 2025 | Arxiv | WizardLM-13B、WizardMath-13B、llama-2-13b-code-alpaca\n  | [LoRE-Merging：探索低秩估计用于大型语言模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.10749)| 2025 | Arxiv | NuminaMath-7B、DeepSeek-Math-7B-Base、LLaMA系列模型、WizardMath-13B\n  | [语言与领域特定模型的合并：对技术词汇习得的影响](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.12001)| 2025 | Arxiv | ContactDoctor-8B\n  | [通过模型合并将文本偏好转移到视觉-语言理解](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.13487)| 2025 | Arxiv | Llama-3.2-11B-Vision -Instruct、Llama-3.1-Tulu-2-8B-uf-mean-rm、Llama-3.1-Tulu-3-8B-RM\n  | [最佳脑迭代合并：缓解LLM合并中的干扰](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.12217)| 2025 | Arxiv | Llama-2-13b、WizardMath-13B-V1.0、WizardLM13B-V1.2、llama-2-13b-codealpaca\n  | [一份公开配方：通过模型合并一天内将特定语言LLM适配为推理模型](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.09056)| 2025 | Arxiv |Typhoon2 70B Instruct、DeepSeek R1 70B Distill、Llama 3.1 70B、Llama 3.3 70B |\n  | [好吧，我自己来合并：一个用于自动化模型合并的多保真度框架](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.04030)| 2025 | Arxiv | WizardLM-13B、WizardMath-13B以及llama-2-13b-code-alpaca |\n  | [参数空间中的技能扩展与组合](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.05932)| 2025 | Arxiv\n  | [InfiFusion：通过LLM融合实现增强跨模型推理的统一框架](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2501.02795)| 2025 | Arxiv | Qwen2.5-Coder-14B-Instruct、Qwen2.5-14B-Instruct以及Mistral-Small-24B-Instruct-2501 |\n  | [通道合并：为合并后的专家保留专长](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.15283)|2025 |AAAI | Dolphin-2.2.1-Mistral-7B、Speechless-Code-Mistral-7B、MetaMathMistral-7B、Chinese-Mistral-7BInstruct-v0.1 |\n  | [加权奖励偏好优化用于隐式模型融合](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.03187)| 2025 | ICLR  | LLaMA3-8B-Instruct |\n  | [通过免训练融合提升多模态LLM的感知能力](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.01289)|2024 |Arxiv | MiniGemini-8B和SLIME-8B |\n  | [AgentMerge：提升微调LLM代理的泛化能力](https:\u002F\u002Fopenreview.net\u002Fpdf?id=nZmAwmi2gr)|2024 |Arxiv | Llama3.1-8B |\n  | [JRadiEvo：一种通过模型合并进化优化增强的日本放射科报告生成模型](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.09933) |2024 |Arxiv |Bunny-v1_1-Llama-3-8B-V、MMed-Llama-3-8B-EnIns、OpenBioLLM-Llama3-8B、Llama-3-Swallow-8B-Instruct-v0.1|\n  | [如果不能使用它们，就回收利用：规模化合并优化可缓解性能权衡](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.04144)|2024 |Arxiv | Command R+ 104B |\n  | [通过CycleQD为大型语言模型获取代理技能](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.14735) |2024 |Arxiv | Llama3-8B-Instruct|\n  | [协作式向LLM添加新知识](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.14753)|2024 |Arxiv | Meta-Llama-3-8B|\n  | [不受约束的模型合并用于增强LLM的推理能力](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.13699)| 2024 |Arxiv |CodeLlama-7B-Ins、CodeLlama-70B-Ins、Deepseek-Coder-Ins-v1.5、Qwen2.5-Math-7B-Ins、WizardMath-7B-V1.1、OpenMath-Mistral 7B、MetaMath-7B、MetaMath-70B |\n  | [LoRA浓汤：为实际技能组合任务合并LoRA](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.13025)|2024 |Arxiv | Llama-7b、Llama2-7b-chat |\n  | [合并以学习：通过模型合并高效地为语言模型添加技能](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.12937)|2024 |Arxiv | Llama 2 7B |\n  | [探索模型亲缘关系以合并大型语言模型](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.12613) |2024 |Arxiv |Mistral-7B、Mistral-7b-instruct-v0.2、MetaMath-mistral-7b、Open-chat-3.5-1210 |\n  | [瓶中合并：可微分自适应合并（DAM）以及从平均到自动化的路径](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.08371)| 2024 |Arxiv  |shisa-gamma-7b、WizardMath-7B-V1.1、Abel-7B-002、Llama-3-SauerkrautLM-8b-Instruct、Llama-3-Open-Ko-8B、llama-3-sqlcoder-8b、Meta-Llama-3-8B |\n  | [层交换用于大型语言模型的零样本跨语言迁移](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.01335) |2024 |Arxiv | LLAMA 3.1 8B |\n  | [规模化模型合并的关键是什么？](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.03617)| 2024 | Arxiv | PaLM-2（1B、8B、24B、64B）、PaLM-2-IT（1B、8B、24B、64B）|\n  | [HM3：针对预训练模型的层次化多目标模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2409.18893) | 2024 | Arxiv | Llama-2-7B-Chat、WizardMath-7B、CodeLlama-7B|\n  | [FUSECHAT：聊天模型的知识融合](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.07990) | 2024 |  Arxiv| OpenChat-3.5-7B、Starling-LM-7B-alpha、NH2-SOLAR-10.7B、InternLM2-Chat-20B、Mixtral-8x7B-Instruct以及Qwen-1.5-Chat-72B|\n  | [SQL-GEN：通过合成数据和模型合并弥合文本转SQL的方言鸿沟](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.12733) | 2024 |  Arxiv|CodeLlama 7B|\n  | [变形时刻：通过多目标优化释放多个LLM的潜力](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.00487) | 2024 | Arxiv | Qwen1.5-7B-Chat、解放的Qwen1.5-7B、firefly-qwen1.5-en-7B |\n  | [通过演化语言模型权重进行知识融合](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.12208) | 2024 | ACL |\n  | [LLM合并：通过合并高效构建LLM](https:\u002F\u002Fopenreview.net\u002Fpdf?id=TiRQ4Gl4Ir)| 2024 |  NeurIPS 2024竞赛赛道 | LLaMA-7B、Mistral-7B、Gemma-7B |\n  | [通过权重解耦将模型合并从微调扩展到预训练大型语言模型](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.03092)| 2024 |  Arxiv | Qwen1.5-7B、Qwen1.5-Chat-7B、Sailor-7B、Qwen1.5-14B、Qwen1.5-Chat-14B、Sailor-14B、WizardLM-13B、WizardMath-13B、llama-2-13b-code-alpaca |\n  | [变形时刻：通过多目标优化释放多个LLM的潜力](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.00487)| 2024 |  Arxiv | Qwen1.5-7B-Chat、解放的Qwen1.5-7B、firefly-qwen1.5-en-7B |\n  | [MetaGPT：利用模型专属任务算术合并大型语言模型](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.11385) | 2024 | Arxiv | LLaMA-2-7B、Mistral-7B、LLaMA-2-13B |\n  | [PROMETHEUS 2：一款开源语言模型，专门用于评估其他语言模型](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.01535)| 2024 | Arxiv | Mistral-Instruct-7B、Mixtral-Instruct-8x7B|\n  | [大型语言模型的知识融合](https:\u002F\u002Fopenreview.net\u002Fpdf?id=jiDsk12qcz) | 2024 |  ICLR | Llama-2 7B、OpenLLaMA 7B、MPT 7B |\n  | [语言模型就像超级马里奥：如同免费午餐般吸收同源模型的能力](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.03099) | 2024 | ICML | WizardLM-13B、WizardMath-13B以及llama-2-13b-code-alpaca、Mistral-7B|\n  | [通过语言模型算术控制文本生成](https:\u002F\u002Fopenreview.net\u002Fpdf?id=SLw9fp4yI6) | 2024 | ICML | MPT-7B、Pythia-12B、Llama-2-Chat-13B |\n  | [MeteoRA：嵌入式多任务LoRA用于大型语言模型](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.13053v2)|  2024 |Arxiv  | LlaMA2-13B和LlaMA3-8B（LoRA）|\n  | [模型合并配方的进化优化](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.13187) | 2024 | Arxiv | shisa-gamma-7b-v1、WizardMath-7B-V1.1、Arithmo2-Mistral-7B、Abel-7B-002、Mistral-7B-v0.1、LLaVA-1.6-Mistral-7B|\n  | [Branch-Train-MiX：将专家LLM混合进混合专家LLM](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.07816) |  2024 |Arxiv  | Llama-2-7B |\n  | [聊天LLM的知识融合：初步技术报告](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.16107) | 2024 |Arxiv  | NH2-Mixtral-8x7B、NH2-Solar-10.7B、OpenChat-3.5-7B|\n\n**注：以下论文均来自：[NeurIPS 2024 LLM 融合竞赛](https:\u002F\u002Fllm-merging.github.io\u002F)**\n\n| **论文标题** | **年份** | **会议\u002F期刊** | **模型** |\n| --------------- | :----: | :----: | :----: |\n| [LLM 融合：通过融合高效构建 LLM](https:\u002F\u002Fopenreview.net\u002Fforum?id=TiRQ4Gl4Ir)| 2024 | NeurIPS LLM 融合竞赛 | - |\n| [结合知识图谱与提示工程的大语言模型融合方法探索](https:\u002F\u002Fopenreview.net\u002Fattachment?id=0I0yYOxHxV&name=pdf)| 2024 | NeurIPS LLM 融合竞赛 | meta-llama\u002FLlama-2-7b；microsoft_phi1\u002F2\u002F3 |\n| [基于任务向量几何中位数的模型融合](https:\u002F\u002Fopenreview.net\u002Fpdf?id=4VD2jMqJbN)| 2024 | NeurIPS LLM 融合竞赛 | flan_t5_xl |\n| [用于 NeurIPS 2024 LLM 融合竞赛的插值式逐层融合](https:\u002F\u002Fopenreview.net\u002Fattachment?id=taHV1M0KlB&name=pdf)| 2024 | NeurIPS LLM 融合竞赛 | suzume-llama-3-8B-multilingual-orpo-borda-top75、Barcenas-Llama3-8bORPO、Llama-3-8B-Ultra-Instruct-SaltSprinkle、MAmmoTH2-8B-Plus、Daredevil-8B |\n| [一种模型融合方法](https:\u002F\u002Fopenreview.net\u002Fpdf?id=zcnDi0i23y)| 2024 | NeurIPS LLM 融合竞赛 | - |\n| [适用于 NeurIPS 2024 LLM 融合竞赛的可微分 DARE-TIES 方法](https:\u002F\u002Fopenreview.net\u002Fattachment?id=4jqff9QeUD&name=pdf)| 2024 | NeurIPS LLM 融合竞赛 | suzume-llama-3-8B-multilingualorpo-borda-top75、MAmmoTH2-8B-Plus 和 Llama-3-Refueled |\n| [LLM 融合竞赛技术报告：通过策略性模型选择、融合及超参数优化实现高效模型融合](https:\u002F\u002Fopenreview.net\u002Fattachment?id=Xl8uuaNj1X&name=pdf)| 2024 | NeurIPS LLM 融合竞赛 | MaziyarPanahi\u002FLlama3-8B-Instruct-v0.8、MaziyarPanahi\u002FLlama-3-8B-Instruct-v0.9、shenzhiwang\u002FLlama3-8B-Chinese-Chat、lightblue\u002Fsuzume-llama-3-8B-multilingual |\n| [简单的 Llama 融合：我们需要什么样的 LLM？](https:\u002F\u002Fopenreview.net\u002Fattachment?id=VndTgXbAgz&name=pdf)| 2024 | NeurIPS LLM 融合竞赛 | Hermes-2-Pro-Llama-3-8B 和 Daredevil-8B |\n| [NeurIPS 2024 LLM 融合竞赛技术报告：通过融合高效构建大语言模型](https:\u002F\u002Fopenreview.net\u002Fattachment?id=rJ1miae6PJ&name=pdf) | 2024 | NeurIPS LLM 融合竞赛 | Mistral-7B-Instruct94 v2、Llama3-8B-Instruct、Flan-T5-large、Gemma-7B-Instruct 和 WizardLM-2-7B |\n| [MoD：一种基于分布的大语言模型融合方法](https:\u002F\u002Fopenreview.net\u002Fattachment?id=v2tZ9bNcS5&name=pdf) | 2024 | NeurIPS LLM 融合竞赛 | Qwen2.5-1.5B 和 Qwen2.5-7B |\n\n### 多模态大语言模型中的模型融合\n\n\u003Ccenter>\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEnnengYang_Awesome-Model-Merging-Methods-Theories-Applications_readme_022a63f4c846.png\" alt=\"Model Merging\" width=\"800\"\u002F>\n\u003C\u002Fcenter>\n\n#### 用于多模态融合的模型融合\n\n  | **论文标题** | **年份** | **会议\u002F期刊** | **备注** |\n  | --------------- | :----: | :----: | :----: |\n  | [联合训练大型自回归多模态模型](https:\u002F\u002Fopenreview.net\u002Fpdf?id=5jcav5RcKw) | 2024 | ICLR |\n  | [多模态大语言模型的模型组合](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.12750) | 2024 | ACL | Vicuna-7B-v1.5|\n  | [π-Tuning：通过最优多任务插值迁移多模态基础模型](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2304.14381) | 2023 | ICML |\n  | [多模态模型融合的实证研究](https:\u002F\u002Faclanthology.org\u002F2023.findings-emnlp.105.pdf) | 2023  | EMNLP |\n  | [UnIVAL：面向图像、视频、音频和语言任务的统一模型](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2307.16184) | 2023 |  TMLR |\n\n#### 用于跨模态知识迁移的模型融合\n\n  | **论文标题** | **年份** | **会议\u002F期刊** | **备注** |\n  | --------------- | :----: | :----: | :----: |\n  | [改进语音识别和音频事件分类的多模态注意力融合](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.14378) |  2024 | ICASSP Workshop  |\n\n#### 结合专家级多模态大语言模型的能力\n\n  | **论文标题** | **年份** | **会议\u002F期刊** | **备注** |\n  | --------------- | :----: | :----: | :----: |\n  | [推理存在于层中：通过层选择性融合恢复视频-语言模型中的时间推理能力](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2604.11399)| 2026 | Arxiv |  LongVA-7B, InternVL3-8B, Qwen3-VL-4B\n  | [一个模型就能搞定所有？通往魔多山的多语言模型融合之旅](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2604.02881)| 2026 | Arxiv | Qwen-2.5-3B-Instruct\n  | [不再拔河：通过稳定性感知的任务向量融合，协调视觉-语言模型的准确性和鲁棒性](https:\u002F\u002Fopenreview.net\u002Fforum?id=KOO1cDm2bt)| 2026 | ICLR | LLaVA-1.5-7B, OpenFlamingo-9B\n  | [SSAM：用于多模态大语言模型融合的奇异子空间对齐](https:\u002F\u002Farxiv.org\u002Fabs\u002F2603.21584)| 2026 | Arxiv |\n  | [ES-Merging：基于嵌入空间信号的生物启发式多模态大语言模型融合](https:\u002F\u002Farxiv.org\u002Fabs\u002F2603.14405)| 2026 | Arxiv |\n  | [VisCodex：通过融合视觉与编码模型实现统一的多模态代码生成](https:\u002F\u002Fopenreview.net\u002Fpdf?id=RU76KTF1Da)| 2026 | ICLR | VisCodex-8B, VisCodex-33B\n  | [FRISM：通过子空间级别的模型融合向视觉-语言模型注入细粒度推理能力](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.21187)| 2026 | Arxiv | Qwen2.5-VL-7B-Instruct, DeepSeekR1-Distill-Qwen-7B, Qwen2.5-VL-32B-Instruct, QwQ-32B\n  | [PlaM：无需训练的高原引导型模型融合，提升多模态大语言模型的视觉接地能力](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.07645)| 2026 | Arxiv |LLaVA-v1.5-7B, Qwen2.5-VL-7B-Instruct, Qwen3-VL-8B-Instruct\n  | [哪里重要、什么重要：面向多模态少样本上下文学习的敏感性感知任务向量](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2511.08246)| 2026 | AAAI | Qwen-VL-7B, Idefics2-8B\n  | [MergeVLA：迈向通用视觉-语言-行动智能体的跨技能模型融合](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2511.19434)| 2025 | Arxiv | Qwen2.5-0.5B\n  | [Tiny-R1V：通过模型融合构建轻量级多模态统一推理模型](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.08987)| 2025 | Arxiv |\n  | [在发展上合理的多模态模型中，通过模型融合保持纯语言性能](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.01845)| 2025 | Arxiv |\n  | [专家融合：基于无监督专家对齐和重要性指导的层块划分进行模型融合](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.25712)| 2025 | Arxiv |Mistral-7B, InternVL, Qwen2-VL\n  | [UQ-Merge：不确定性引导的多模态大语言模型融合](https:\u002F\u002Faclanthology.org\u002F2025.findings-acl.73.pdf)| 2025 | ACL |  LLaVA-v1.5-7B |\n  | [Graft：通过高效的参数协同为多模态大语言模型整合领域知识](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.23940)| 2025 | Arxiv | Qwen2-VL-2B |\n  | [通过模型融合统一多模态大语言模型的能力和模态](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.19892)| 2025 | Arxiv | Qwen2-VL-7B-Base, Vicuna-7B-v1.5 |\n  | [让推理走进视觉：通过模型融合理解感知与推理](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.05464)| 2025 |ICML  | LLaVA-NeXT-8B, Idefics2-8B, InternVL2-76B |\n  | [REMEDY：大型视觉-语言模型中的配方融合动态](https:\u002F\u002Fopenreview.net\u002Fpdf?id=iX7eHHE5Tx)| 2025 | ICLR | LLaVA-1.5（Vicuna-7B）\n  | [RobustMerge：具有方向鲁棒性的参数高效多模态大语言模型融合](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.17159)| 2025 | NeurIPS | LLaVA-v1.5-7B\n  | [针对多模态大语言模型的参数高效融合，结合互补的参数适配](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.17159)| 2025 | Arxiv | LLaVA\n  | [AdaMMS：面向异构多模态大语言模型的模型融合，采用无监督系数优化](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.23733)| 2025 | Arxiv | LLaVA-OneVision-7B, Qwen2-VL-7B, LLaVA-v1.5-7B, CogVLM-chat-7B|\n  | [通过模型融合将文本偏好迁移到视觉-语言理解中](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.13487v1)| 2025 | Arxiv | Llama-3.2-11B-Vision-Instruct, Llama-3.1-Tulu-2-8B-uf-meanrm, Llama-3.1-Tulu-3-8B-RM, Llama-3.1-8B|\n\n### 图像生成模型中的模型合并\n\n\u003Ccenter>\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEnnengYang_Awesome-Model-Merging-Methods-Theories-Applications_readme_022a63f4c846.png\" alt=\"Model Merging\" width=\"800\"\u002F>\n\u003C\u002Fcenter>\n\n#### 生成模型中的风格混合\n\n| **论文标题** | **年份** | **会议\u002F期刊** | **备注** |\n| --------------- | :----: | :----: | :----: |\n| [DiffGraph: 一种自动化代理驱动的模型合并框架，用于野外文本到图像生成](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.20470)| 2026 | Arxiv |Stable Diffusion v1.5, FLUX.1 Dev\n| [GimmBO: 基于贝叶斯优化的交互式生成图像模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.18585)| 2026 | Arxiv |\n| [重新思考适配器合并中的LoRA正交性：来自正交蒙特卡洛丢弃的见解](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.03262)| 2025 | Arxiv |\n| [BlockLoRA: 基于分块参数化低秩适应的扩散模型模块化定制](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.08575v1)| 2025 | Arxiv |\n| [LoRA.rar: 通过超网络学习合并LoRA以实现主题-风格条件下的图像生成](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.05148)| 2024 | Arxiv | LLaVA-Critic 7b |\n| [IterIS: 用于LoRA合并的迭代推理求解对齐方法](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.15231) | 2024 | Arxiv |\n| [扩散汤：文本到图像扩散模型的模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.08431) | 2024 | ECCV |\n| [MaxFusion: 文本到图像扩散模型中的即插即用多模态生成](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2404.09977) |  2024 | Arxiv |\n| [MoLE: LoRA专家混合体](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2404.13628) |  2024| ICLR |\n| [LoRA作曲家：利用低秩适应在无需训练的扩散模型中实现多概念定制](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.11627) |  2024 | Arxiv |\n| [用于图像生成的多LoRA组合](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.16843) |  2024 | Arxiv |\n| [秀之混合：用于扩散模型多概念定制的去中心化低秩适应](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper_files\u002Fpaper\u002F2023\u002Ffile\u002F3340ee1e4a8bad8d32c35721712b4d0a-Paper-Conference.pdf) |  2023 | NeurIPS |\n| [合并LoRA](https:\u002F\u002Fgithub.com\u002Fcloneofsimo\u002Flora) | 2023  | (github) |\n| [ZipLoRA: 通过有效合并LoRA实现任意主题、任意风格的生成](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.13600) | 2023 | Arxiv |\n| [GAN鸡尾酒：无需数据集即可混合GAN](https:\u002F\u002Fwww.ecva.net\u002Fpapers\u002Feccv_2022\u002Fpapers_ECCV\u002Fpapers\u002F136830207.pdf) | 2022 | ECCV |\n\n\u003C!-- | [合并提升自我批判能力以抵御越狱攻击]() |  2024 | Arxiv | -->\n\n#### 降低生成模型的训练成本\n\n| **论文标题** | **年份** | **会议\u002F期刊** | **备注** |\n| --------------- | :----: | :----: | :----: |\n| [保存检查点的线性组合使一致性与扩散模型更好](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2404.02241) | 2024 | Arxiv |\n| [加速STABLE-DIFFUSION的统一模块：LCM-LORA](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.16024)| 2024 | Arxiv |\n\n#### 提升扩散模型的忠实度（或生成质量）\n\n| **论文标题** | **年份** | **会议\u002F期刊** | **备注** |\n| --------------- | :----: | :----: | :----: |\n| [解耦后再合并：迈向更好的扩散模型训练](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.06664)|  2024 | Arxiv |\n| [SELMA: 利用自动生成的数据学习并合并技能特定的文本到图像专家](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.06952) |  2024 | Arxiv |\n\n#### 深度伪造检测\n\n| **论文标题** | **年份** | **会议\u002F期刊** | **备注** |\n| --------------- | :----: | :----: | :----: |\n|[面向深度伪造检测的实时感知残差模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.24367)| 2025 | Arxiv |\n\n### 视频生成模型中的模型合并\n\n#### 提升运动建模能力\n\n| **论文标题** | **年份** | **会议\u002F期刊** | **备注** |\n| --------------- | :----: | :----: | :----: |\n| [外推并解耦图像到视频生成模型：运动建模比你想象的更容易](https:\u002F\u002Farxiv.org\u002Fabs\u002F2503.00948)|  2025 | CVPR | Dynamicrafter，SVD |\n\n----------\n\n## 模型合并 在不同机器学习子领域的应用\n\n\u003Ccenter>\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEnnengYang_Awesome-Model-Merging-Methods-Theories-Applications_readme_cbfafcf2092b.png\" alt=\"Model Merging\" width=\"800\"\u002F>\n\u003C\u002Fcenter>\n\n### 持续学习中的模型合并\n\n#### 通过模型合并缓解灾难性遗忘\n\n| **论文标题** | **年份** | **会议\u002F期刊** | **备注** |\n  | --------------- | :----: | :----: | :----: |\n  | [MAny: 多模态持续指令微调中的“合并任何内容”](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2604.14016)| 2026  |Arxiv  | LLaVA-1.5-7B 和 InternVL-Chat7B\n  | [BidirLM: 通过适配和组合因果语言模型构建从文本到全模态的双向编码器](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2604.02045)| 2026  |Arxiv  | Qwen3-1.7B 和 Qwen3-0.6B\n  | [通过权重空间模型合并对抗大语言模型的灾难性遗忘以提升指令遵循能力](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2604.01538)| 2026  |Arxiv  | Llama-3.1-8B-Base\n  | [大规模语言模型中训练后遗忘现象的映射研究](https:\u002F\u002Fopenreview.net\u002Fpdf?id=qCIg2WGudx)| 2026  |ICLR  |\n  | [LCA: 面向持续学习的局部分类器对齐方法](https:\u002F\u002Fopenreview.net\u002Fpdf?id=3uINmRldVW)| 2026  |ICLR  |\n  | [MERGETUNE: 视觉-语言模型的持续微调](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.10497)| 2026  |Arxiv  |\n  | [先合并再遗忘：基于持续合并的单LoRA持续学习](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2512.23017)| 2025  |Arxiv  |Llama-2-7B-chat、Llama-2-13B-chat、Qwen2.5-7B\n  | [通过参数合并实现视觉-语言-动作机器人策略的稳健微调](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2512.08333)| 2025  |Arxiv  |\n  | [无遗忘的合并：基于最优传输的任务特定模型持续融合](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2511.19561)| 2025  |Arxiv  |\n  | [MergeSlide: 针对全幻灯片图像终身学习的持续模型合并与任务到类别提示对齐推理](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2511.13099)| 2025  |Arxiv  |\n  | [RECALL: 基于层次化模型合并的表征对齐型灾难性遗忘缓解方法](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.20479)| 2025  |Arxiv  |Qwen2-7B-Instruct、Llama-2-7B-chat |\n  | [DitHub: 用于增量式开放词汇目标检测的模块化框架](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.09271)| 2025 | NeurIPS |\n  | [K-Merge: 面向设备端大语言模型的适配器在线持续合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.13537)| 2025  |Arxiv  |\n  | [迈向模型持续合并的整体性方法](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.23592)| 2025  |Arxiv  |\n  | [无数据持续模型合并中的零空间滤波：保持稳定性，促进可塑性](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.21413)| 2026  |ICLR  |\n  | [AIMMerging: 基于训练轨迹的自适应迭代模型合并用于语言模型持续学习](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.17348)| 2025 | EMNLP | LLaMA2-7B、LLaMA2-13B |\n  | [HAM: 用于可扩展持续学习的层次化适配器合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.13211)| 2025 | Arxiv |\n  | [在多模态大语言模型微调中借鉴下游并保持自我](https:\u002F\u002Fopenreview.net\u002Fpdf?id=FKqmIAnkrb)| 2025 | ICML |LLaVA-1.5-7B \n  | [DuET: 基于示例无关任务算术的双重增量目标检测](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.21260)| 2025 | Arxiv |\n  | [将任务特定与通用适配器集成用于预训练模型的类增量学习](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.08165)| 2025 | ICCV |\n  | [基于模型合并的持续学习中任务特定知识的遗忘问题](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.23311)| 2025 | Arxiv |\n  | [具有正交约束的模块化Delta合并：一种可扩展的持续且可逆模型组合框架](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.20997) | 2025 | Arxiv |\n  | [RegCL: 基于模型合并的Segment Anything Model持续适应](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.12297) | 2025 | Arxiv |\n  | [通过对齐的模型合并实现视觉-语言模型的持续学习](http:\u002F\u002Farxiv.org\u002Fabs\u002F2506.03189) | 2025 | Arxiv |\n  | [扰动训练，合并后推理：一种两阶段持续学习框架](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.22389)| 2025  |Arxiv  |\n  | [MINGLE: 测试时持续模型合并用的零空间门控低秩专家混合体](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.11883)| 2025  |NeurIPS  |\n  | [分布式数据环境下基础模型持续更新的模型合并方法分析](https:\u002F\u002Fwww.mdpi.com\u002F2076-3417\u002F15\u002F9\u002F5196)| 2025  |Arxiv  | 应用科学\n  | [BECAME: 基于自适应模型合并的贝叶斯持续学习](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.02666v1)| 2025  |Arxiv  |\n  | [先合并再对齐：面向多模态LLM的简单有效的模态增量持续学习](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.07663)| 2025  |Arxiv  | Llama-3-8B-Instruct |\n  | [在拥有充足示例记忆的情况下进行成本效益高的持续学习](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.07274)| 2025  |Arxiv  | |\n  | [无需数据的持续模型合并：双投影平衡稳定性和可塑性](https:\u002F\u002Fopenreview.net\u002Fpdf?id=zD5cUX67b9)| 2025  |NeurIPS  | |\n  | [无需重训即可实时合并模型：一种可扩展的持续模型合并顺序方法](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2501.09522)| 2025  |NeurIPS  | |\n  | [汤剂疗法：通过模型平均缓解持续学习过程中的遗忘](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2501.05559) | 2025 |Arxiv  | Llama 2 (7B) |\n  | [基于质心原型映射的适配器合并用于可扩展的类增量学习](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.18219)| 2024 |Arxiv  |\n  | [防止遗忘只需参数平均](https:\u002F\u002Fpoonehmousavi.github.io\u002Fassets\u002Fpublications\u002F2010_machine_readable_dictionaries\u002FPARAMETER_AVERAGING_IS_ALL_YOU_NEED_TO_PREVENT_FORGETTING.pdf)| 2024 | SLT Workshop |\n  | [DESIRE: 用于无排练持续学习的动态知识整合](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.19154)| 2024 |Arxiv  |\n  | [用于高效领域增量学习的自适应LoRA合并](https:\u002F\u002Fopenreview.net\u002Fpdf?id=tlB5eonGEk)| 2024 | NeurIPS Workshop  |\n  | [LiNeS: 训练后层缩放可防止遗忘并增强模型合并效果](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.17146)| 2024 |Arxiv  |\n  | [Model Tailor: 缓解多模态大语言模型中的灾难性遗忘](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.12048) | 2024 |ICML  | InstructBLIP (Vicuna-7B)、LLaVA-1.5 (Vicuna7B) |\n  | [面向增量新类发现的自适应发现与合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.03382) | 2024 |AAAI  |\n  | [MagMax: 利用模型合并实现无缝持续学习](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.06322) | 2024 |  ECCV |\n  | [Lm-cocktail: 通过模型合并实现语言模型的弹性调优](https:\u002F\u002Faclanthology.org\u002F2024.findings-acl.145.pdf) |  2024 | ACL Findings | Llama-2-chat-7b |\n  | [通过权重插值实现数据更新期间的向后兼容性](https:\u002F\u002Faclanthology.org\u002F2024.eacl-long.174.pdf)|  2024 | EACL |\n  | [面向语言模型持续学习的动态适配器组合路由学习](https:\u002F\u002Faclanthology.org\u002F2024.findings-emnlp.38.pdf) |  2024 |  EMNLP Findings |\n  | [通过模型合并缓解语言迁移中的灾难性遗忘](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.08699) |  2024 |  Arxiv | MISTRAL-7B、LLAMA-3-8B|\n  | [通过持续预训练和模型合并对Llama3-70B-Instruct进行领域适应：一项综合评估](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.14971) |  2024 |  Arxiv | Llama3-70B|\n  | [彩票券适应：缓解LLM中的破坏性干扰](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.16797) |  2024 |  Arxiv | Mistral-7B、Llama-3-8B |\n  | [WARP: 关于加权平均奖励策略的好处](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.16768) | 2024 | Arxiv | Gemma-7B|\n  | [关于组合性和增量学习的二阶视角](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.16350) | 2024 |  Arxiv |\n  | [DynaMMo: 用于医学影像高效类增量学习的动态模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2404.14099) |  2024|  Arxiv |\n  | [DAM: 用于持续视频问答学习的动态适配器合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.08755) |  2024 | Arxiv |\n  | [微调后的语言模型中任务特定技能的定位](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2302.06600)|  2023| ICML |\n  | [切线模型组合用于集成与持续微调](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FLiu_Tangent_Model_Composition_for_Ensembling_and_Continual_Fine-tuning_ICCV_2023_paper.pdf) |  2023| ICCV |\n  | [具有通用参数高效调优的统一持续学习框架](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2023\u002Fpapers\u002FGao_A_Unified_Continual_Learning_Framework_with_General_Parameter-Efficient_Tuning_ICCV_2023_paper.pdf) |  2023| ICCV |\n  | [利用LoRA进行任务算术以实现持续学习](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.02428) |  2023 | NeurIPS Workshop |\n  | [缓解RLHF的对齐税](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2309.06256)|  2023 | Arxiv | Mistral-7B|\n  | [PAINT: 通过权重插值修补开放词汇模型](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2208.05592) |2022  |NeurIPS   |\n  | [零样本模型的稳健微调](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FWortsman_Robust_Fine-Tuning_of_Zero-Shot_Models_CVPR_2022_paper.pdf) |2022  |CVPR  |\n\n### 多任务\u002F多目标\u002F多领域\u002F辅助学习中的模型合并\n\n#### 多任务学习中用于知识迁移的模型合并\n\n  | **论文标题** | **年份** | **会议\u002F期刊** | **备注** |\n  | --------------- | :----: | :----: | :----: |\n  | [G-Merging: 基于图模型的参数高效多任务知识整合](https:\u002F\u002Fopenreview.net\u002Fpdf?id=FoTtvLkkfU)| 2026 | ICLR  |\n  | [多任务代码大模型：数据混合还是模型合并？](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.21115)| 2026 | Arxiv  | Qwen Coder 2.5 7B, DeepSeek 7B\n  | [DivMerge: 一种基于差异性的多任务模型合并方法](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.02108)| 2025 | Arxiv  |\n  | [单输入多输出模型合并：利用基础模型进行密集型多任务学习](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.11268)| 2025 | Arxiv  |\n  | [改进通用文本嵌入模型：通过模型合并解决任务冲突与数据不平衡问题](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.15035)| 2024 |Arxiv  |\n  | [LiNeS: 训练后层缩放防止遗忘并增强模型合并效果](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.17146)| 2024 |Arxiv  |\n  | [混合数据还是合并模型？面向多样化多任务学习的优化策略](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.10801)| 2024 |Arxiv  |Aya 23 8B|\n  | [可折叠超网：不同初始化和任务的Transformer模型的可扩展合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.01483v1) | 2024 |Arxiv  |\n  | [任务提示向量：通过多任务软提示迁移实现有效初始化](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.01119) | 2024 |Arxiv  |\n  | [模型合并方案的进化优化](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.13187) | 2024 |Arxiv  | shisa-gamma-7b-v1, WizardMath-7B-V1.1, Arithmo2-Mistral-7B, Abel-7B-002, Mistral-7B-v0.1, LLaVA-1.6-Mistral-7B|\n  | [语言模型就是超级马里奥：免费吸收同源模型的能力](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.03099) | 2024 | ICML  | WizardLM-13B、WizardMath-13B以及llama-2-13b-codealpaca、Mistral-7B|\n  | [多任务模型合并中的表征手术](https:\u002F\u002Fopenreview.net\u002Fpdf\u002F602906ec02919eb95d78d634321fcba1b68a2f03.pdf) |2024  | ICML |\n  | [通过加权集成专家混合进行多任务模型合并](https:\u002F\u002Fopenreview.net\u002Fpdf\u002F2aee8072945cd0485e619dd88c35566610cd5042.pdf) |  2024| ICML |\n  | [ZipIt! 在无需训练的情况下合并来自不同任务的模型](https:\u002F\u002Fopenreview.net\u002Fpdf?id=LEYUkvdUhq) | 2024 |ICLR  |\n  | [AdaMerging: 面向多任务学习的自适应模型合并](https:\u002F\u002Fopenreview.net\u002Fpdf?id=nZP6NgD3QY) | 2024  | ICLR |\n  | [决策Transformer的合并：通过权重平均形成多任务策略](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2303.07551)| 2023 |Arxiv  |\n  | [解决模型合并时的干扰问题](https:\u002F\u002Fopenreview.net\u002Fpdf?id=xtaX3WyCj1) | 2023  |  NeurIPS |\n  | [使用任务算术编辑模型](https:\u002F\u002Fopenreview.net\u002Fpdf?id=6t0Kwf8-jrj) | 2023 | ICLR |\n\n#### 多目标优化中用于知识迁移的模型合并\n\n  | **论文标题** | **年份** | **会议\u002F期刊** | **备注** |\n  | --------------- | :----: | :----: | :----: |\n  | [从参数到表征：可控模型合并的闭式解法](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2511.10943) | 2026 | AAAI  |\n  | [合并与引导：统一模型合并与引导解码以实现可控的多目标生成](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.03782) | 2025 | Arxiv  | LLaMA-2-7B\n  | [帕累托合并：面向偏好感知的多目标优化模型合并](https:\u002F\u002Fopenreview.net\u002Fpdf?id=D7qRwx6BOS)| 2025 | ICML  |\n  | [骨汤：一种寻找与融合的模型合并方法，用于可控的多目标生成](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.10762)| 2025 | Arxiv  |LLaMA-2 7B\n  | [只合并一次：学习偏好感知模型合并的帕累托前沿](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.12105) |  2024 |Arxiv  |\n  | [通过基于专家混合的模型融合实现高效的帕累托集近似](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.09770) |  2024 |Arxiv  |\n  | [MAP：基于二次近似的摊销帕累托前沿低计算量模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.07529) | 2024 |Arxiv  | Llama3-8B|\n\n#### 多领域学习中用于知识迁移的模型合并\n\n  | **论文标题** | **年份** | **会议\u002F期刊** | **备注** |\n  | --------------- | :----: | :----: | :----: |\n  | [跨不连通模态的领域自适应模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.05957) | 2026 | Arxiv  | \n  | [通过子空间感知的模型合并弥合领域差距](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.05768) | 2026 | Arxiv  | \n  | [探索模型合并在ASR多领域适应中的潜力与局限性](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.05354) | 2026 | Arxiv  | \n  | [混合还是合并：迈向大型语言模型的多领域强化学习](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.12566) | 2026 | Arxiv  | Qwen3-4B-Base\n  | [MMGRid：通过模型合并实现时间感知与跨领域的生成式推荐](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.15930) | 2026 | Arxiv  | Qwen3-0.6B \n  | [MergeRec：面向数据隔离的跨领域序列推荐的模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.01753)| 2026 | KDD |\n  | [DEM：用于混合数据分布训练的分布编辑模型](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.15570) | 2024 |  Arxiv |  OpenLLaMA-7B、OpenLLaMA-13B |\n  | [来自不同任务和领域的视觉Transformer的合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2312.16240) | 2023 |Arxiv  |\n\n#### 辅助学习中用于知识迁移的模型合并\n\n  | **论文标题** | **年份** | **会议\u002F期刊** | **备注** |\n  | --------------- | :----: | :----: | :----: |\n  | [ForkMerge：缓解辅助任务学习中的负迁移](https:\u002F\u002Fopenreview.net\u002Fpdf?id=vZHk1QlBQW) | 2023 | NeurIPS |\n\n### 分布外\u002F领域泛化中的模型融合\n\n#### 用于更好分布外泛化的模型融合\n\n| **论文标题** | **年份** | **会议\u002F期刊** | **备注** |\n| --------------- | :----: | :----: | :----: |\n| [探索模型融合在自动语音识别多领域适应中的潜力与局限](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.05354) | 2026 | Arxiv  | \n| [模型汤只需要一种成分](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.09689)| 2026 | Arxiv |\n| [CCL25-评估任务10系统报告：基于提示的大语言模型融合用于细粒度中文仇恨言论检测](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2512.09563)| 2025 | Arxiv |Qwen2.5-7B-Instruct\n| [更智能地融合，更好地泛化：提升OOD数据上的模型融合效果](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.09093)| 2025 | Arxiv |\n| [分布外图模型融合](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.03674)| 2025 | Arxiv |\n| [SeWA：通过概率掩码进行选择性权重平均](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.10119)| 2025 | Arxiv |\n| [何时、何地以及为何要对权重进行平均？](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.06761)| 2025 | Arxiv |\n| [DaWin：无需训练的动态权重插值以实现稳健适应](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.03782) | 2024 | NeurIPS 2024 Workshop |\n| [通过选择性参数融合缓解LLM微调中的训练不平衡](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.03743) | 2024 | Arxiv | Llama-2-7b|\n| [ReVLA：逆转机器人基础模型的视觉域限制](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2409.15250)| 2024 | Arxiv |\n| [稀疏模型汤：通过模型平均改进剪枝的方法](https:\u002F\u002Fopenreview.net\u002Fpdf?id=xx0ITyHp3u) |2024  |ICLR  |\n| [Warm：关于权重平均奖励模型的好处](https:\u002F\u002Fopenreview.net\u002Fpdf?id=s7RDnNUJy6) |2024  | ICML  |\n| [单GPU上的可扩展学习型模型汤：一种高效的子空间训练策略](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.03641) |2024  | ECCV |\n| [自适应随机权重平均](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.19092) | 2024 | JMLR\n| [群体参数平均（papa）](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2304.03094) | 2024 | TMLR |\n| [WARP：关于权重平均奖励策略的好处](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.16768) | 2024 | Arxiv | Mistral 7B, Mixtral 8x7B|\n| [WASH：用通信高效的权重洗牌训练你的集成模型，然后取平均](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.17517) | 2024 | Arxiv |\n| [模型储备：我们只需要几个微调过的模型](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.19522) |2024  | Arxiv |\n| [环顾优化器：走k步，平均1步](https:\u002F\u002Fopenreview.net\u002Fpdf?id=k1Xy5zCNOJ) | 2023 | NeurIPS |\n| [模型拉塔图伊：回收利用多种模型以实现分布外泛化](https:\u002F\u002Fproceedings.mlr.press\u002Fv202\u002Frame23a\u002Frame23a.pdf) | 2023 | ICML |\n| [可训练的权重平均：通过优化历史解来高效训练](https:\u002F\u002Fopenreview.net\u002Fpdf?id=8wbnpOJY-f) | 2023 | ICLR |\n| [环顾优化器：走k步，平均1步](https:\u002F\u002Fopenreview.net\u002Fpdf?id=k1Xy5zCNOJ) |2023  |  NeurIPS|\n| [AdapterSoup：通过权重平均提高预训练语言模型的泛化能力](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2302.07027) |  2023 |EACL\n| [Dart：多样化聚合-重复训练可提高神经网络的泛化能力](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fpapers\u002FJain_DART_Diversify-Aggregate-Repeat_Training_Improves_Generalization_of_Neural_Networks_CVPR_2023_paper.pdf) |2023  | CVPR |\n| [平坦极小值优化器何时有效？](https:\u002F\u002Fopenreview.net\u002Fpdf?id=vDeh2yxTvuh) |  2022| NeurIPS |\n| [模型汤：对多个微调模型的权重进行平均可在不增加推理时间的情况下提高准确率](https:\u002F\u002Fproceedings.mlr.press\u002Fv162\u002Fwortsman22a\u002Fwortsman22a.pdf) | 2022 | ICML |\n| [用于分布外泛化的多样化权重平均](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper_files\u002Fpaper\u002F2022\u002Ffile\u002F46108d807b50ad4144eb353b5d0e8851-Paper-Conference.pdf) |  2022|NeurIPS  |\n| [零样本模型的稳健微调](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2022\u002Fpapers\u002FWortsman_Robust_Fine-Tuning_of_Zero-Shot_Models_CVPR_2022_paper.pdf) |2022  |CVPR  |\n| [具有后期阶段权重的神经网络](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2007.12927) |  2021 | ICLR |\n| [并行随机权重平均：泛化良好的大批次训练](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2001.02312) | 2020 | ICLR |\n| [SWALP：低精度训练中的随机权重平均](https:\u002F\u002Fproceedings.mlr.press\u002Fv97\u002Fyang19d\u002Fyang19d.pdf) |2019  | ICML |\n| [权重平均会导致更宽的最优解和更好的泛化](https:\u002F\u002Fauai.org\u002Fuai2018\u002Fproceedings\u002Fpapers\u002F313.pdf) | 2018 | UAI |\n| [平均教师是更好的榜样：权重平均的一致性目标可改善半监督深度学习结果](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper_files\u002Fpaper\u002F2017\u002Ffile\u002F68053af2923e00204c3ca7c6a3150cf7-Paper.pdf) |2017  | NeurIPS |\n\n#### 用于更好领域泛化或领域适应的模型融合\n\n| **论文标题** | **年份** | **会议\u002F期刊** | **备注** |\n| --------------- | :----: | :----: | :----: |\n| [选择与融合：迈向使用大语言模型的可适应且可扩展的命名实体识别](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.22813)| 2025 | Arxiv | Qwen2.5-7B, Llama3.1-8B |\n| [为基于CLIP的领域泛化协调并合并源模型](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.09446)| 2025 | Arxiv |\n| [模型融合在组合泛化方面的现实评估](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2409.18314)| 2024 | Arxiv |\n| [用于分割任务中无监督领域适应的逐层模型融合](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2409.15813)| 2024 | Arxiv |\n| [用于多目标领域适应的无训练模型融合](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.13771)| 2024 | Arxiv |\n| [通过持续预训练和模型融合实现Llama3-70B-Instruct的领域适应：一项综合评估](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.14971) |  2024 |  Arxiv | Llama3-70B|\n| [平均集成：改进模型选择并提升领域泛化性能](https:\u002F\u002Fopenreview.net\u002Fpdf?id=peZSbfNnBp4) | 2022 | NeurIPS |\n| [Swad：通过寻找平坦极小值实现领域泛化](https:\u002F\u002Fopenreview.net\u002Fpdf?id=zkHlu_3sJYU) |  2021| NeurIPS |\n\n### 联邦学习中的模型合并\n\n#### 用于本地知识聚合的模型合并\n\n| **论文标题** | **年份** | **会议\u002F期刊** | **备注** |\n| --------------- | :----: | :----: | :----: |\n| [FedMerge：用于个性化的联邦模型合并](https:\u002F\u002Fscholar.google.com\u002Fscholar_url?url=https:\u002F\u002Fojs.aaai.org\u002Findex.php\u002FAAAI\u002Farticle\u002Fview\u002F39113\u002F43075&hl=en&sa=X&d=1003636543056490924&ei=dcC-afi-LoKq6rQPo6Gk2As&scisig=AFtJQiw8mqG2DDhpKAJPVgWDpcKq&oi=scholaralrt&hist=vWBd1VsAAAAJ:1360192736361724487:AFtJQix2MBKNfqG8ZHqg__7tkl0l&html=&pos=0&folt=art)| 2026 | AAAI | \n| [通过联邦-本地模型合并实现通信高效的个性化适配](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.18658)| 2026 | Arxiv | LLaMA-3.2-3B-Instruct\n| [论去中心化学习中单一全局合并的惊人有效性](https:\u002F\u002Fopenreview.net\u002Fpdf?id=zrFnwRHuQo)| 2026 | ICLR |\n| [面向联邦基础模型的双层个性化：一种任务向量聚合方法](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.12697)| 2025 | Arxiv | LLaMA-7B\n| [用于联邦学习聚合的内在训练信号](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.06813)| 2025 | ICIAP |\n| [打破联邦推荐中的聚合瓶颈：一种个性化模型合并方法](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.12386)| 2025 | Arxiv |\n| [一次合并就足够了：在去中心化学习中恢复基于服务器的学习性能](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.06542)| 2025 | Arxiv |\n| [面向联邦持续学习的参数高效模块的闭式合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.17961) | 2025 | ICLR |\n| [永不从零开始：通过可解释的模型选择加速设备端LLM个性化](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.13938)| 2025 | Arxiv |\n| [FedMerge：通过模型合并实现联邦个性化](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2504.06768)| 2025 | Arxiv |\n| [通过隐私保护的进化式模型合并构建个性化语言模型](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.18008v1)| 2025 | Arxiv | Llama-2-7b, Mistral-7B-Instruct v0.2 |\n| [FedAWA：利用客户端向量对联邦学习中的聚合权重进行适应性优化](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.15842)| 2025 | Arxiv |\n| [通过统一任务向量进行多任务联邦微调](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.06376)| 2025 | Arxiv |\n| [PrivFusion：基于去中心化联邦图匹配的隐私保护模型融合](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F10643309\u002F)| 2024 | TKDE |\n| [模型之旅：跨多联邦的模型融合中提升隐私与公平性，助力可信全球医疗](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F10597838\u002F)| 2024 | ICDE |\n| [DapperFL：面向边缘设备的带模型融合剪枝的领域自适应联邦学习](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.05823)| 2024 | NeurIPS |\n| [FuseFL：以因果视角看一次性联邦学习与渐进式模型融合](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.20380)| 2024 | Arxiv |\n| [本地优势汤：跨silos联邦学习中模型合并的催化剂](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.23660)| 2024 | Arxiv |\n| [DIMAT：深度学习模型的去中心化迭代合并与训练](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2024\u002Fpapers\u002FSaadati_DIMAT_Decentralized_Iterative_Merging-And-Training_for_Deep_Learning_Models_CVPR_2024_paper.pdf) | 2024 | CVPR |\n| [FedFisher：利用费舍尔信息实现一次性联邦学习](https:\u002F\u002Fproceedings.mlr.press\u002Fv238\u002Fjhunjhunwala24a\u002Fjhunjhunwala24a.pdf) | 2024 | AISTATS |\n| [lo-fi：无需通信的分布式微调](https:\u002F\u002Fopenreview.net\u002Fpdf?id=1U0aPkBVz0)| 2023 | TMLR |\n| [重新审视神经网络在联邦学习中的加权聚合](https:\u002F\u002Fproceedings.mlr.press\u002Fv202\u002Fli23s\u002Fli23s.pdf)| 2023|  ICML |\n| [通过图匹配进行深度神经网络融合及其在模型集成和联邦学习中的应用](https:\u002F\u002Fproceedings.mlr.press\u002Fv162\u002Fliu22k\u002Fliu22k.pdf) | 2022 |  ICML |\n| [采用匹配平均法的联邦学习](https:\u002F\u002Fopenreview.net\u002Fpdf?id=BkluqlSFDS) |  2020 |  ICLR|\n| [解决异构联邦优化中的目标不一致性问题](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2007.07481) |2020  | NeurIPS |\n| [通过最优传输进行模型融合](https:\u002F\u002Fproceedings.neurips.cc\u002Fpaper\u002F2020\u002Ffile\u002Ffb2697869f56484404c8ceee2985b01d-Paper.pdf) |2020  |  NeurIPS|\n| [神经网络的贝叶斯非参数联邦学习](https:\u002F\u002Fproceedings.mlr.press\u002Fv97\u002Fyurochkin19a\u002Fyurochkin19a.pdf) |  2019 | ICML |\n| [利用注意力聚合学习私有神经语言建模](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1812.07108) |2019  | IJCNN |\n| [从去中心化数据中高效通信地学习深度网络](https:\u002F\u002Fproceedings.mlr.press\u002Fv54\u002Fmcmahan17a\u002Fmcmahan17a.pdf) |  2017 |  AISTATS |\n\n### 零样本\u002F少样本学习中的模型合并\n\n#### 零样本学习中用于跨任务泛化的模型合并\n\n| **论文标题** | **年份** | **会议\u002F期刊** | **备注** |\n| --------------- | :----: | :----: | :----: |\n| [TTS中的任务向量：迈向情感丰富的方言语音合成](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2512.18699) |   2026 |Arxiv | \n| [模型合并提升生物声学基础模型的零样本泛化能力](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2511.05171) |2025 |NeurIPS Workshop | LLAMA-3.1-8B-INSTRUCT\n| [探索用于零样本信息检索的任务算术](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.00649) |2025 |SIGIR |  LLama-2-7b\n| [通过层次聚类实现稀疏混合专家模型的无重新训练合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.08589) |2024 |Arxiv | Qwen 60x2.7B, Qwen 45x2.7B, Qwen 30x2.7B, Mixtral 8x7B, Mixtral 6x7B, Mixtral 4x7B|\n| [大型语言模型中用于零样本跨语言迁移的层交换](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.01335) |2024 |Arxiv | LLAMA 3.1 8B |\n| [学习在专业专家之间路由以实现零样本泛化](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.05859)|2024  | ICML  |\n| [通过构建和复用LoRA库迈向模块化LLM](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.11157) |2024  | ICML  | Mistral-7B |\n| [聊天向量：一种为LLM赋予新语言聊天能力的简单方法](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2310.04799) |2024  | ACL  | LLaMA-2 13B, Chinese-LLaMA-13B, Chinese-Alpaca-13B, Mistral-7B, llama-2-ko-7b|\n| [释放模型合并对低资源语言的潜力](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.03994)|   2024 |Arxiv | Llama-2-7B|\n| [扩散汤：用于文生图扩散模型的模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.08431) |  2024 | Arxiv |\n| [无需训练却有收获：用于无训练语言适配器增强的语言算术](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2404.15737) |  2024 |Arxiv |\n| [MaxFusion：文生图扩散模型中的即插即用多模态生成](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2404.09977) |  2024 |Arxiv |\n| [AdaMergeX：通过自适应适配器合并实现大型语言模型的跨语言迁移](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.18913) |  2024 |Arxiv | Llama2-7b|\n| [用于多模态大型语言模型的模型组合](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.12750) |  2024 | Arxiv | Vicuna-7B-v1.5|\n| [探索相比指令微调训练专家语言模型的优势](https:\u002F\u002Fopenreview.net\u002Fpdf?id=VAA1itvsNQ)|  2023 | ICML |\n| [针对下游任务泛化的LoRA适配器的令牌级适应](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.10847)|  2023 | Arxiv | Llama-2-7b|\n| [利用参数高效的层进行语言和任务算术，实现零样本摘要生成](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2311.09344) |  2023 | Arxiv | PaLM 2-S|\n\n#### 少样本学习中用于跨任务泛化的模型合并\n\n| **论文标题** | **年份** | **会议\u002F期刊** | **备注** |\n| --------------- | :----: | :----: | :----: |\n| [支持语言下的任务算术用于低资源ASR](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.07038) |   2026 |Arxiv | \n| [通过复用预微调的LoRA解锁视觉基础模型的免微调少样本适应性](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.02220) | 2025 | CVPR |\n| [LoRA-Flow：用于生成任务中大型语言模型的动态LoRA融合](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.11455) | 2024 | ACL | Llama-2- 7B|\n| [LoraHub：通过动态LoRA组合实现高效的跨任务泛化](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2307.13269) | 2024 |  COLM | Llama-2-7B, Llama-2-13B |\n| [LoraRetriever：面向野外混合任务的输入感知LoRA检索与组合](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.09997)  | 2024 | ACL |\n| [结合参数高效模块是否能提升少样本迁移准确率？](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.15414) |   2024 |Arxiv |\n| [MerA：用于少样本学习的预训练适配器合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2308.15982) |2023  | Arxiv |\n| [用于跨任务泛化的多头适配器路由](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2211.03831)|2023  | NeurIPS |\n\n### 对抗学习中的模型融合\n\n#### 模型融合作为一种攻击手段\n\n| **论文标题** | **年份** | **会议\u002F期刊** | **备注** |\n| --------------- | :----: | :----: | :----: |\n| [当安全模型融合为危险：利用大语言模型融合中的潜在漏洞](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2604.00627)| 2026  | Arxiv | Tulu-2-7b, Llama-3.1-Tulu-3-8B-DPO, OpenChat-3.5-0106\n| [后门向量：从任务算术视角看后门攻击与防御](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.08016)| 2025  | Arxiv |\n| [现在合并，日后后悔：模型融合的隐性代价是对抗可迁移性](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.23689)| 2025  | Arxiv |\n| [谨慎合并陌生的大语言模型：一种可窃取隐私的网络钓鱼模型](https:\u002F\u002Faclanthology.org\u002F2025.findings-acl.713.pdf)| 2025  | ACL | Llama-3.2-3b-it, Gemma-2-2b-it, Qwen-2.5-3b-it, 和 Phi-3.5-mini-it |\n| [合并劫持：针对大型语言模型模型融合的后门攻击](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.23561)| 2025  | Arxiv | LLaMA3.1-8B\n| [从纯净到危险：从“无害”的良性组件中植入后门到融合模型](https:\u002F\u002Fwww.usenix.org\u002Fsystem\u002Ffiles\u002Fconference\u002Fusenixsecurity25\u002Fsec25cycle1-prepub-702-wang-lijin.pdf)| 2025  | Arxiv | LLaMA2-7B-chat, Mistral-7B-v0.1\n| [合并即窃取：通过模型融合从对齐的大语言模型中窃取目标PII](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.16094)| 2025  | Arxiv |\n| [谨慎合并陌生的大语言模型：一种可窃取隐私的网络钓鱼模型](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.11533) | 2025  | Arxiv |\n| [LoBAM：基于LoRA的模型融合后门攻击](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.16746) | 2024  | Arxiv |\n| [BadMerging：针对模型融合的后门攻击](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.07362) | 2024  | CCS |\n| [LoRA即攻击！在共享与协作场景下刺穿大语言模型的安全性](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.00108) | 2024  | ACL | Llama-2-7B|\n\n#### 模型融合作为一种防御或知识产权保护手段\n\n| **论文标题** | **年份** | **会议\u002F期刊** | **备注** |\n| --------------- | :----: | :----: | :----: |\n| [通过模块切换防御后门攻击](https:\u002F\u002Fopenreview.net\u002Fpdf?id=ieCOL2YAqv)| 2026 |  ICLR | \n| [通过尺度敏感的损失景观使模型不可融合](https:\u002F\u002Farxiv.org\u002Fabs\u002F2601.21898)| 2026 |  Arxiv | \n| [融合触发器，破解后门：面向指令微调语言模型的防御性投毒](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.04448)| 2026 |  Arxiv | Llama2-7B 和 Qwen3-8B \n| [不要合并我的模型！保护开源大语言模型免受未经授权的模型融合侵害](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2511.10712)| 2026 |  AAAI|  LLaMA-2-13B, WizardLM-13B, WizardMath-13B, LLaMA-2-13B-Code Alpaca |\n| [通过双阶段权重保护防御未经授权的模型融合](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2511.11851)| 2025 |  Arxiv|\n| [模型反融合：让你的模型无法被融合以实现安全的模型共享](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.01548)| 2025 |  Arxiv|\n| [海报：研究模型融合中对抗样本的可迁移性](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002Fabs\u002F10.1145\u002F3708821.3735347)| 2025 |  ASIA CCS |\n| [RouteMark：基于路由的模型融合中用于知识产权归属的指纹](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.01784)| 2025 |  Arxiv|\n| [MergeGuard：高效阻止机器学习模型中的木马攻击](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.04015)| 2025 |  Arxiv|\n| [BadJudge：作为裁判的大语言模型的后门漏洞](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.00596v1)| 2025 |  Arxiv| Mistral-7B-Instruct-v0.2, Meta-Llama3-8B |\n| [扰乱模型融合：一种不牺牲准确性的参数级防御](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.07661)| 2025 |  ICCV |\n| [大型语言模型融合以增强对图神经网络的链接窃取攻击](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.05830)| 2024 |  Arxiv| Vicuna-7B, Vicuna-13B|\n| [通过自适应模型融合为语言模型提供强有力的版权保护](https:\u002F\u002Fopenreview.net\u002Fpdf?id=vAG7GrZZUF) | 2024 |  ICML| LLaMa2 7B, StarCoder 7B |\n| [针对预训练大型视觉模型的对抗鲁棒性提升的超对抗调优](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.05951)| 2024 |  Arxiv|\n| [REEF：大型语言模型的表征编码指纹](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.14273)| 2024 |  Arxiv| Evollm-jp-7b, Shisa-gamma-7b-v1, Wizardmath-7b-1.1, Abel-7b-002, Llama-2-7b, Openllama-2-7b, Mpt-7b,  Internlm2-chat-20b, Mixtral-8x7b-instruct, Qwen-1.5-chat-72b |\n| [通过安全感知子空间缓解多任务模型融合的后门效应](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.13910)| 2024 |  Arxiv|\n| [MergePrint：针对大型语言模型融合的稳健指纹识别](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.08604)| 2024 |  Arxiv| LLaMA-2-7B, WizardMath-7B-V1.0, LLaMA-2-7B-CHAT |\n| [通过机器遗忘避免版权侵权](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.10952v1) | 2024 |  Arxiv | Llama3-8B |\n| [融合提升自我批判能力以抵御越狱攻击](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.07188) | 2024 |  Arxiv| Mistral-7B, Mixtral-8x7B|\n| [你是否合并了我的模型？关于大型语言模型知识产权保护方法对抗模型融合的鲁棒性](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2404.05188) |  2024 |  Arxiv| LLaMA-2-7B, LLaMA-2-7B-CHAT, WizardMath-7B-V1.0|\n| [免费午餐来了：用模型融合净化被植入后门的模型](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2402.19334) | 2024  | ACL |\n| [重新审视适配器与对抗训练](https:\u002F\u002Fopenreview.net\u002Fpdf?id=HPdxC1THU8T) |2023 |ICLR |\n| [为模型汤调味以增强其对抗性和自然分布偏移下的鲁棒性](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2023\u002Fpapers\u002FCroce_Seasoning_Model_Soups_for_Robustness_to_Adversarial_and_Natural_Distribution_CVPR_2023_paper.pdf) |2023 | CVPR |\n\n## 其他应用\n\n| **论文标题** | **年份** | **会议\u002F期刊** | **备注** |\n| --------------- | :----: | :----: | :----: |\n| [守住底线，抬高上限：基于合并的多模态搜索代理范式](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.01416v1)| 2026 | Arxiv |\n| [ACE-Brain-0：空间智能作为通用具身系统的共享支架](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.03198v1)| 2026 | Arxiv |\n| [基于稀疏任务向量混合与超网络的高效知识迁移方法在全切片图像预后中的应用](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2603.10526)| 2026 | Arxiv |\n| [减少微调，提升检索效果：通过合成数据和模型合并重新思考生物医学检索器的LLM适配](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2602.04731)| 2026 | Arxiv | 通义千问3-0.6B、Gemma-2B、Phi4-3.8B |\n| [当领域预训练干扰指令对齐时：医学LLM中适配器合并的实证研究](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.18350)| 2026 | Arxiv | 140亿参数LLM |\n| [MergeRec：面向数据隔离的跨领域序列推荐的模型合并方法](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2601.01753)| 2026 | KDD |\n| [无需微调的任务向量驱动语音模型实现罕见词识别与翻译](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2512.21894)| 2025 | Arxiv |\n| [CCL25-Eval任务10系统报告：基于提示的大语言模型合并用于细粒度中文仇恨言论检测](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2512.09563)| 2025 | Arxiv | 通义千问2.5-7B-Instruct |\n| [面向儿童自动语音识别的群体感知部分模型合并方法](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2511.23098)| 2025 | Arxiv |\n| [消除污染：利用任务算术实现无训练数据的纠正性机器遗忘](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2511.18660)| 2025 | Arxiv |\n| [RecCocktail：一种可泛化且高效的基于LLM的推荐框架](https:\u002F\u002Fle-wu.com\u002Ffiles\u002FPublications\u002FCONFERENCES\u002FAAAI26-RecCocktail.pdf)| 2025 | AAAI | Llama-3.1-8B |\n| [医学LLM中高效模型合并的新型层次化集成方法](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2511.13373)| 2025 | Arxiv | Mistral-7B |\n| [WeaveRec：基于LLM的跨领域序列推荐框架，结合模型合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2510.26546)| 2025 | Arxiv | 通义千问2-7B |\n| [模型合并在领域特定即席检索中的效果](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2509.21966)| 2025 | Arxiv |\n| [换个角度看问题：通过任务算术利用负样本设计“正向”分子](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.17876)| 2025 | Arxiv |\n| [通过任务算术转移自解释模型的视觉可解释性](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.04380)| 2025 | Arxiv |\n| [利用任务算术蒸馏语音和音乐编码器](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.13270)| 2025 | Arxiv |\n| [MedSAMix：用于医学图像分割的免训练模型合并方法](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2508.11032)| 2025 | Arxiv |\n| [针对视觉Transformer的振荡抑制MXFP4训练](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.20853v2)| 2025 | ICML |\n| [通过任务算术转移自解释模型的视觉可解释性](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.04380)| 2025 | Arxiv |\n| [基于时间指定符模型合并的时间信息检索](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2507.06782)| 2025 | Arxiv |\n| [用于推荐的基础模型生成式表征学习](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.11999)| 2025 | Arxiv |\n| [迈向面向表格型电信数据的模型合并](https:\u002F\u002Fwww.diva-portal.org\u002Fsmash\u002Fget\u002Fdiva2:1968615\u002FFULLTEXT01.pdf)| 2025 | Arxiv |\n| [CultureMERT：面向跨文化音乐表征学习的持续预训练](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2506.17818)| 2025 | Arxiv |\n| [U-Net移植：预训练在3D医学分割模型合并中的作用](https:\u002F\u002Firis.unimore.it\u002Fhandle\u002F11380\u002F1380716)| 2025 | 国际医学影像计算与计算机辅助干预会议 |\n| [CodeMerge：代码本引导的模型合并用于自动驾驶中的鲁棒测试时适应](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2505.16524)| 2025 | Arxiv |\n| [使用张量积的潜在专家混合模型](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2405.16671)| 2024 | TMLR |\n| [用于增强医学影像分类模型鲁棒性的模型内合并方法](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.20516v1)| 2025 | Arxiv |\n| [自监督正常性学习与发散向量引导的模型合并用于胎儿超声视频中的零样本先天性心脏病检测](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.07799v1)| 2025 | Arxiv |\n| [用于LLM深度剪枝的滑动层合并方法](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.19159v1)| 2025 | Arxiv | LLaMA-2-7B |\n| [自监督正常性学习与发散向量引导的模型合并用于胎儿超声视频中的零样本先天性心脏病检测](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2503.07799)| 2025 | Arxiv |\n| [MedForge：像开源软件开发一样构建医学基础模型](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2502.16055)| 2025 | Arxiv |\n| [文化调色板：通过多智能体调色板实现文化对齐的多元化](https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.11167)| 2024 | Arxiv |\n| [通过选择性参数合并缓解LLM微调中的训练不平衡](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.03743)| 2024 | EMNLP | Llama-2-7b |\n| [多目标跟踪是专业化的问题吗？](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2411.00553)| 2024 | NeurIPS |\n| [通过微调和模型合并追踪通用特征](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2410.12391)| 2024 | Arxiv |\n| [HM3：异构多类别模型合并](https:\u002F\u002Fwww.arxiv.org\u002Fpdf\u002F2409.19173) | 2024 | Arxiv |\n| [情感算术：基于权重空间插值的情感语音合成](https:\u002F\u002Fwww.ee.iitb.ac.in\u002Fcourse\u002F~daplab\u002Fpublications\u002F2024\u002Fkalyan24_interspeech.pdf) | 2024 | Interspeech |\n| [基于Fisher平均的纠删码神经网络推理](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2409.01420) | 2024 | Arxiv |\n| [MergeRepair：探索在代码LLM中合并特定任务适配器以实现程序自动修复的研究](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2408.09568) | 2024 | Arxiv |\n| [模型告诉你该在哪里合并：面向长上下文任务的LLM自适应KV缓存合并](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.08454v2) | 2024 | Arxiv | Llama2-7B、Llama2-13B-chat、Mistral-7B-instruct |\n| [通过任务向量定制扩大个性化图像美学评估规模](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.07176)| 2024 | Arxiv |\n| [通过模型合并实现语音合成中的属性插值方法](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.00766) | 2024 | Arxiv |\n| [任务算术可以缓解自动语音识别中合成数据与真实数据之间的差距](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2406.02925) | 2024 | Arxiv |\n| [MedMerge：为医学影像任务进行有效迁移学习的模型合并方法](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2403.11646) | 2024 | Arxiv |\n| [专家权重平均：一种新的视觉Transformer通用训练方案](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2308.06093)| 2023 | Arxiv |\n| [一个学生知道所有专家都知道：从稀疏到稠密](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2201.10890)| 2022 | Arxiv |\n| [模型平均中的元学习PAC-Bayes先验](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1912.11252)| 2019 | AAAI |\n\n----------\n\n**星星历史**\n\n[![星星历史图表](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEnnengYang_Awesome-Model-Merging-Methods-Theories-Applications_readme_d83ade9f22e4.png)](https:\u002F\u002Fstar-history.com\u002F#EnnengYang\u002FAwesome-Model-Merging-Methods-Theories-Applications&Date)\n\n----------\n\n\n\n## 联系方式\n\u003C!-- **联系方式** -->\n\n我们欢迎所有研究人员为本仓库（“基础模型或机器学习中的模型合并”）贡献力量。\n\n如果您有一篇尚未添加到库中的相关论文，请与我们联系。\n\n邮箱：ennengyang@qq.com \u002F ennengyang@gmail.com","# Awesome-Model-Merging-Methods-Theories-Applications 快速上手指南\n\n本项目并非一个可直接运行的软件库或工具包，而是一个**全面的学术论文与资源索引列表**，旨在系统梳理大语言模型（LLM）、多模态大模型（MLLM）及其他领域的**模型合并（Model Merging）**技术。它主要服务于研究人员和开发者，用于查找相关理论、方法、基准测试及应用案例。\n\n因此，本指南将指导你如何获取、浏览及利用该资源列表，并介绍其中提及的关键合并工具库的使用方式。\n\n## 环境准备\n\n由于本项目本质是文献综述与资源导航，无需特定的深度学习环境即可浏览内容。但若要复现列表中提到的模型合并实验，建议准备以下环境：\n\n*   **操作系统**：Linux (推荐 Ubuntu 20.04+) 或 macOS。\n*   **Python 版本**：3.8 或更高版本。\n*   **核心依赖**：\n    *   `git`：用于克隆仓库。\n    *   `PyTorch` \u002F `TensorFlow`：根据具体论文代码要求安装。\n    *   `Transformers` (Hugging Face)：大多数现代模型合并方法基于此库。\n*   **硬件要求**：若需运行合并实验，建议配备 NVIDIA GPU。列表中特别标记了使用 **$\\geq$ 7B** 参数量模型的论文，此类实验通常需要 24GB+ 显存或多卡环境。\n\n## 安装步骤\n\n### 1. 克隆资源仓库\n获取最新的论文列表和分类索引：\n\n```bash\ngit clone https:\u002F\u002Fgithub.com\u002FEnnengYang\u002FAwesome-Model-Merging-Methods-Theories-Applications.git\ncd Awesome-Model-Merging-Methods-Theories-Applications\n```\n\n### 2. 安装通用模型合并工具库（可选）\n虽然本仓库是列表，但其中提到了多个实用的合并工具库。若你想立即尝试模型合并，推荐安装社区通用的合并库（如 `mergekit` 或列表中提到的 `mergenetic`）：\n\n**使用 pip 安装 mergekit (社区常用):**\n```bash\npip install mergekit\n```\n\n**或使用国内镜像源加速安装:**\n```bash\npip install mergekit -i https:\u002F\u002Fpypi.tuna.tsinghua.edu.cn\u002Fsimple\n```\n\n> **注意**：具体的论文代码请参照仓库中对应论文的官方 GitHub 链接进行单独安装。\n\n## 基本使用\n\n### 1. 浏览与检索资源\n进入克隆后的目录，直接阅读 `README.md` 文件。该文件已按以下逻辑分类，你可按需查找：\n\n*   **Survey (综述)**：查找关于模型合并的最新综述论文。\n*   **Benchmark\u002FEvaluation (基准测试)**：查找评估合并效果的测试集和工具（如 `FusionBench`, `MergeBench`）。\n*   **Advanced Methods (高级方法)**：\n    *   *Pre-Merging*：合并前的微调策略（如线性化微调、子空间微调）。\n    *   *During Merging*：核心合并算法（如加权平均、稀疏子空间、动态路由）。\n    *   *Theories*：合并背后的理论分析。\n*   **Applications (应用场景)**：查找在 LLM 对齐、去毒、知识编辑、多任务学习等特定场景下的应用论文。\n\n**示例：查找关于“加权合并”的论文**\n在本地或在线仓库中搜索 `Weighted-based Merging Methods` 章节，即可找到相关论文列表及其发表年份和会议信息。\n\n### 2. 执行简单的模型合并操作\n基于该资源列表中广泛讨论的技术，你可以使用 `mergekit` 快速体验两个模型的合并（例如合并两个不同领域的 LoRA 适配器或全量模型）。\n\n**步骤 A: 准备模型**\n确保你有两个 Hugging Face 格式的模型路径（本地路径或 Model ID），例如：\n*   模型 1: `meta-llama\u002FLlama-2-7b-hf`\n*   模型 2: `path\u002Fto\u002Fyour\u002Ffine-tuned-model`\n\n**步骤 B: 创建合并配置文件 (`config.yaml`)**\n创建一个 YAML 文件定义合并策略（此处以简单的线性插值为例）：\n\n```yaml\nmodels:\n  - model:\n      path: meta-llama\u002FLlama-2-7b-hf\n    parameters:\n      weight: 0.5\n  - model:\n      path: path\u002Fto\u002Fyour\u002Ffine-tuned-model\n    parameters:\n      weight: 0.5\nmerge_method: linear\ndtype: float16\n```\n\n**步骤 C: 运行合并命令**\n在终端执行以下命令生成新模型：\n\n```bash\nmergekit-yaml config.yaml .\u002Fmerged_model_output --copy-tokenizer --allow-crimes\n```\n\n*   `--copy-tokenizer`: 复制第一个模型的分词器。\n*   `--allow-crimes`: 允许某些潜在不安全的操作（视具体合并方法而定，通常用于绕过某些检查，生产环境请谨慎）。\n\n### 3. 引用资源\n如果你在研究中使用了该列表或其对应的综述论文，请在你的工作中引用：\n\n```bibtex\n@article{yang2026ModelMergingSurvey,\n  author = {Yang, Enneng and Shen, Li and Guo, Guibing and Wang, Xingwei and Cao, Xiaochun and Zhang, Jie and Tao, Dacheng},\n  title = {Model Merging in LLMs, MLLMs, and Beyond: Methods, Theories, Applications, and Opportunities},\n  journal = {ACM Comput. Surv.},\n  year = {2026},\n  volume = {58},\n  number = {8},\n  doi = {10.1145\u002F3787849}\n}\n```","某 AI 初创团队急需构建一个既能精通医疗问诊又能处理法律条款的多功能大模型，但受限于算力预算无法从头训练。\n\n### 没有 Awesome-Model-Merging-Methods-Theories-Applications 时\n- **文献检索如大海捞针**：团队需在 arXiv 上手动筛选数百篇论文，难以区分哪些方法适用于 7B 以上的大参数模型，极易遗漏关键前沿技术。\n- **理论盲区导致试错成本高**：缺乏对“权重对齐”或“子空间合并”等理论的系統梳理，工程师盲目尝试简单平均法，导致模型出现灾难性遗忘，能力相互抵消。\n- **应用场景匹配困难**：不清楚如何将合并技术具体落地到持续学习或多任务学习场景中，只能凭经验硬凑，开发周期被无限拉长。\n- **复现基准缺失**：找不到权威的评估基准和已验证的实验配置，每次调整超参数都像在“开盲盒”，资源浪费严重。\n\n### 使用 Awesome-Model-Merging-Methods-Theories-Applications 后\n- **精准锁定高价值方案**：直接利用库中标记的\"≥7B 模型”实验论文，快速定位到适合大模型的线性化微调或动态路由合并等高级方法。\n- **理论指导规避陷阱**：参考综述中关于锐度感知微调（Sharpness-aware Fine-tuning）的理论分析，预先优化单模型权重，成功避免了合并后的性能崩塌。\n- **场景化落地路径清晰**：依据库中整理的“多任务学习”与“少样本学习”应用案例，迅速设计出医疗与法律知识无损融合的技术路线。\n- **复用成熟评估体系**：直接采用推荐的 Benchmark 和评估指标，将原本数周的调优过程压缩至几天，显著提升了迭代效率。\n\nAwesome-Model-Merging-Methods-Theories-Applications 通过提供系统化的方法论地图与实战指引，让团队在零数据重训的前提下，高效实现了多领域专家模型的低成本融合。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FEnnengYang_Awesome-Model-Merging-Methods-Theories-Applications_33947b62.png","EnnengYang",null,"https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002FEnnengYang_31984025.png","https:\u002F\u002Fennengyang.github.io\u002F","https:\u002F\u002Fgithub.com\u002FEnnengYang",713,42,"2026-04-18T02:22:51",1,"","未说明",{"notes":85,"python":83,"dependencies":86},"该仓库是一个关于模型合并（Model Merging）的论文综述列表和资源索引，并非一个可直接运行的软件工具或代码库。因此，README 中未包含具体的操作系统、GPU、内存、Python 版本或依赖库等运行环境需求。用户若需复现列表中提及的论文实验，需参考各篇具体论文的官方代码仓库及其对应的环境配置要求。",[],[15,36],[89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108],"attack-defense","continual-learning","ensemble-learning","federated-learning","generalization","generative-model","large-language-models","model-fusion","model-merging","multi-domain-learning","multi-task-learning","robustness","diffusion-models","few-shot-learning","foundation-models","knowledge-fusion","meta-learning","transfer-learning","zero-shot-learning","llms","2026-03-27T02:49:30.150509","2026-04-20T16:46:44.155564",[],[]]