[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-InternLM--InternLM-XComposer":3,"tool-InternLM--InternLM-XComposer":61},[4,18,26,36,44,53],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":17},4358,"openclaw","openclaw\u002Fopenclaw","OpenClaw 是一款专为个人打造的本地化 AI 助手，旨在让你在自己的设备上拥有完全可控的智能伙伴。它打破了传统 AI 助手局限于特定网页或应用的束缚，能够直接接入你日常使用的各类通讯渠道，包括微信、WhatsApp、Telegram、Discord、iMessage 等数十种平台。无论你在哪个聊天软件中发送消息，OpenClaw 都能即时响应，甚至支持在 macOS、iOS 和 Android 设备上进行语音交互，并提供实时的画布渲染功能供你操控。\n\n这款工具主要解决了用户对数据隐私、响应速度以及“始终在线”体验的需求。通过将 AI 部署在本地，用户无需依赖云端服务即可享受快速、私密的智能辅助，真正实现了“你的数据，你做主”。其独特的技术亮点在于强大的网关架构，将控制平面与核心助手分离，确保跨平台通信的流畅性与扩展性。\n\nOpenClaw 非常适合希望构建个性化工作流的技术爱好者、开发者，以及注重隐私保护且不愿被单一生态绑定的普通用户。只要具备基础的终端操作能力（支持 macOS、Linux 及 Windows WSL2），即可通过简单的命令行引导完成部署。如果你渴望拥有一个懂你",349277,3,"2026-04-06T06:32:30",[13,14,15,16],"Agent","开发框架","图像","数据工具","ready",{"id":19,"name":20,"github_repo":21,"description_zh":22,"stars":23,"difficulty_score":10,"last_commit_at":24,"category_tags":25,"status":17},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,"2026-04-05T11:01:52",[14,15,13],{"id":27,"name":28,"github_repo":29,"description_zh":30,"stars":31,"difficulty_score":32,"last_commit_at":33,"category_tags":34,"status":17},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",157379,2,"2026-04-15T23:32:42",[14,13,35],"语言模型",{"id":37,"name":38,"github_repo":39,"description_zh":40,"stars":41,"difficulty_score":32,"last_commit_at":42,"category_tags":43,"status":17},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",108322,"2026-04-10T11:39:34",[14,15,13],{"id":45,"name":46,"github_repo":47,"description_zh":48,"stars":49,"difficulty_score":32,"last_commit_at":50,"category_tags":51,"status":17},6121,"gemini-cli","google-gemini\u002Fgemini-cli","gemini-cli 是一款由谷歌推出的开源 AI 命令行工具，它将强大的 Gemini 大模型能力直接集成到用户的终端环境中。对于习惯在命令行工作的开发者而言，它提供了一条从输入提示词到获取模型响应的最短路径，无需切换窗口即可享受智能辅助。\n\n这款工具主要解决了开发过程中频繁上下文切换的痛点，让用户能在熟悉的终端界面内直接完成代码理解、生成、调试以及自动化运维任务。无论是查询大型代码库、根据草图生成应用，还是执行复杂的 Git 操作，gemini-cli 都能通过自然语言指令高效处理。\n\n它特别适合广大软件工程师、DevOps 
engineers, and technical researchers. Its core highlights include a context window of up to 1 million tokens with excellent logical reasoning; built-in practical tools such as Google Search, file operations, and shell command execution; and, most distinctively, support for MCP (Model Context Protocol), which lets users flexibly extend custom integrations and connect external capabilities such as image generation. In addition, a personal Google account comes with a free usage quota, and the project is fully open source under Apache 2.0, making it an ideal assistant for boosting terminal productivity.",100752,"2026-04-10T01:20:03",[52,13,15,14],"Plugin",{"id":54,"name":55,"github_repo":56,"description_zh":57,"stars":58,"difficulty_score":32,"last_commit_at":59,"category_tags":60,"status":17},4721,"markitdown","microsoft\u002Fmarkitdown","MarkItDown is a lightweight Python utility from Microsoft's AutoGen team, designed to convert a wide range of files to Markdown efficiently. It can parse PDF, Word, Excel, PowerPoint, images (with OCR), audio (with speech transcription), HTML, and even YouTube links, accurately extracting key structural information such as headings, lists, tables, and links.\n\nAs AI applications become ubiquitous, large language models (LLMs) excel at text but cannot directly read complex binary office documents. MarkItDown solves exactly this pain point: it converts unstructured or semi-structured files into Markdown, a format the models understand natively and that is highly token-efficient, making it an ideal bridge between local files and AI analysis pipelines. It also provides an MCP (Model Context Protocol) server that integrates seamlessly with LLM applications such as Claude Desktop.\n\nThe tool is especially suitable for developers, data scientists, and AI researchers, particularly those building retrieval-augmented generation (RAG) systems, running batch text analysis, or wanting an AI assistant to read local files directly. The output is reasonably human-readable as well, but its core strength lies in serving machines",93400,"2026-04-06T19:52:38",[52,14],{"id":62,"github_repo":63,"name":64,"description_en":65,"description_zh":66,"ai_summary_zh":66,"readme_en":67,"readme_zh":68,"quickstart_zh":69,"use_case_zh":70,"hero_image_url":71,"owner_login":72,"owner_name":72,"owner_avatar_url":73,"owner_bio":74,"owner_company":75,"owner_location":75,"owner_email":76,"owner_twitter":77,"owner_website":78,"owner_url":79,"languages":80,"stars":111,"forks":112,"last_commit_at":113,"license":114,"difficulty_score":10,"env_os":115,"env_gpu":116,"env_ram":115,"env_deps":117,"category_tags":124,"github_topics":126,"view_count":32,"oss_zip_url":75,"oss_zip_packed_at":75,"status":17,"created_at":143,"updated_at":144,"faqs":145,"releases":181},7941,"InternLM\u002FInternLM-XComposer","InternLM-XComposer","InternLM-XComposer2.5-OmniLive: A Comprehensive Multimodal System for Long-term Streaming Video and Audio Interactions","InternLM-XComposer2.5-OmniLive is a powerful multimodal interaction system designed for long-term streaming video and audio interactions. It breaks through the limitation of traditional models that can only handle static images or short video clips: like a human, it can watch and listen to a continuous real-time audio-video stream, understanding the dynamics, contextual relations, and complex events within it to enable fluent natural-language interaction.\n\nFor developers and researchers building intelligent surveillance analytics, real-time meeting assistants, live-stream content understanding, or long-video chatbots, this tool is highly valuable. It supports ultra-long-context input and output and offers excellent fused audio-visual understanding, effectively solving information loss and context fragmentation in long temporal data. As the latest achievement in Shanghai AI Laboratory's InternLM series, its technical highlight is the deep integration of LLM reasoning with high-precision audio-visual perception, handling complex real-time scenarios directly without tedious segment-by-segment processing. Whether you are a researcher exploring frontier multimodal techniques or a team building next-generation real-time interactive applications, InternLM-XComposer2.5-OmniLive makes high-quality audio-video understanding and generation easy to achieve.","\u003Cp align=\"center\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FInternLM_InternLM-XComposer_readme_5c015f1d7225.png\" width=\"650\"\u002F>\n\u003C\u002Fp>\n\u003Cp align=\"center\">\n    \u003Cb>\u003Cfont size=\"6\">InternLM-XComposer-2.5\u003C\u002Ffont>\u003C\u002Fb>\n\u003C\u002Fp>\n\n\n\u003Cdiv align=\"center\">\n        InternLM-XComposer2.5 \u003Ca href=\"https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer2d5-7b\">🤗\u003C\u002Fa> \u003Ca href=\"https:\u002F\u002Fmodelscope.cn\u002Fmodels\u002FShanghai_AI_Laboratory\u002Finternlm-xcomposer2d5-7b\">\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FInternLM_InternLM-XComposer_readme_375e99f5914e.png\" width=\"20px\">\u003C\u002Fa> &nbsp｜ XComposer2.5 Technical Report \u003Ca href=\"https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.03320\">  📄 \u003C\u002Fa>  \n \n\n[English](.\u002FREADME.md) | [简体中文](.\u002FREADME_CN.md)\n\n\u003C\u002Fdiv>\n\n\u003Cp align=\"center\">\n    Thanks to the community for the \u003Ca href=\"https:\u002F\u002Fhuggingface.co\u002Fspaces\u002FWillow123\u002FInternLM-XComposer\">HuggingFace Demo \u003C\u002Fa>  | \u003Ca 
href=\"https:\u002F\u002Fopenxlab.org.cn\u002Fapps\u002Fdetail\u002FWillowBreeze\u002FInternLM-XComposer\">OpenXLab Demo\u003C\u002Fa> of InternLM-XComposer-2.5.\n\u003C\u002Fp>\n\n\u003Cp align=\"center\">\n    👋 join us on \u003Ca href=\"https:\u002F\u002Fdiscord.gg\u002Fxa29JuW87d\" target=\"_blank\">Discord\u003C\u002Fa> and \u003Ca href=\"https:\u002F\u002Fr.vansin.top\u002F?r=internwx\" target=\"_blank\">WeChat\u003C\u002Fa>\n\u003C\u002Fp>\n\n\u003Cp align=\"center\">\n\u003Ca href=\"https:\u002F\u002Ftrendshift.io\u002Frepositories\u002F5245\" target=\"_blank\">\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FInternLM_InternLM-XComposer_readme_0dcd13139b79.png\" alt=\"InternLM%2FInternLM-XComposer | Trendshift\" style=\"width: 250px; height: 55px;\" width=\"250\" height=\"55\"\u002F>\u003C\u002Fa>\n\u003C\u002Fp>\n\n\u003Cbr>\n\n## 🔥🔥🔥 **InternLM-XComposer2.5-Reward**\n\nWe release **InternLM-XComposer2.5-Reward** \u003Ca href=\"https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer2d5-7b-reward\">🤗\u003C\u002Fa> (IXC-2.5-Reward, ACL 2025 Findings), a simple yet effective multi-modal reward model, including training code, evaluation scripts, and parts of the traininig data. Please refer to the [project page](InternLM-XComposer-2.5-Reward) for details.\n\n## 🔥🔥🔥 **InternLM-XComposer2.5-OmniLive**\n\nWe release **InternLM-XComposer2.5-OmniLive**, a comprehensive multimodal system for long-term streaming video and audio interactions. Please refer to the [project page](InternLM-XComposer-2.5-OmniLive) for details.\n\n\u003Cbr>\n\n## Multimodal Projects of Our Team\n> [**InternLM-XComposer-2.5-Reward**](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2501.12368): **A Simple Yet Effective Multi-Modal Reward Model**\n\n> [**InternLM-XComposer-2.5-OmniLive**](): **A Specialized Generalist Multimodal System for Streaming Video and Audio Interactions**\n\n> [**InternLM-XComposer-2.5**](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.03320): **A Versatile Large Vision Language Model Supporting Long-Contextual Input and Output**\n\n> [**InternLM-XComposer2-\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FInternLM_InternLM-XComposer_readme_e128b455e64f.png\" width=\"25px\">**](https:\u002F\u002Fgithub.com\u002FInternLM\u002FInternLM-XComposer): **A Pioneering Large Vision-Language Model Handling Resolutions from 336 Pixels to 4K HD**\n\n> [**InternLM-XComposer2**](https:\u002F\u002Fgithub.com\u002FInternLM\u002FInternLM-XComposer): **Mastering Free-form Text-Image Composition and Comprehension in Vision-Language Large Models**\n\n> [**InternLM-XComposer**](https:\u002F\u002Fgithub.com\u002FInternLM\u002FInternLM-XComposer\u002Ftree\u002Fmain\u002FInternLM-XComposer-1.0): **A Vision-Language Large Model for Advanced Text-image Comprehension and Composition**\n\n> \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FInternLM_InternLM-XComposer_readme_491799f6cbfa.png\" style=\"vertical-align: -20px;\" :height=\"25px\" width=\"25px\">[**ShareGPT4Video:**](https:\u002F\u002Fgithub.com\u002FInternLM\u002FInternLM-XComposer\u002Ftree\u002Fmain\u002Fprojects\u002FShareGPT4Video) **Improving Video Understanding and Generation with Better Captions**\n\n> \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FInternLM_InternLM-XComposer_readme_ea638acad230.png\" style=\"vertical-align: -20px;\" :height=\"25px\" 
width=\"25px\">[**ShareGPT4V:**](https:\u002F\u002Fgithub.com\u002FInternLM\u002FInternLM-XComposer\u002Ftree\u002Fmain\u002Fprojects\u002FShareGPT4V) **Improving Large Multi-modal Models with Better Captions**\n\n> \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FInternLM_InternLM-XComposer_readme_65755931cecb.png\" style=\"vertical-align: -20px;\" :height=\"25px\" width=\"25px\">[**MMDU:**](https:\u002F\u002Fliuziyu77.github.io\u002FMMDU\u002F) **A Multi-Turn Multi-Image Dialog Understanding Benchmark and Instruction-Tuning Dataset for LVLMs**\n\n> [**DualFocus**](https:\u002F\u002Fgithub.com\u002FInternLM\u002FInternLM-XComposer\u002Ftree\u002Fmain\u002Fprojects\u002FDualFocus): **Integrating Macro and Micro Perspectives in Multi-modal Large Language Models**\n\n\u003C\u002Fbr>\n\n**InternLM-XComposer-2.5** excels in various text-image comprehension and composition applications, achieving GPT-4V level capabilities with merely 7B LLM backend. IXC-2.5 is trained with 24K interleaved image-text contexts, it can seamlessly extend to 96K long contexts via RoPE extrapolation. This long-context capability allows IXC-2.5 to perform exceptionally well in tasks requiring extensive input and output contexts. \n\n- **Ultra-High Resolution Understanding**: IXC-2.5 enhances the dynamic resolution solution proposed in IXC2-4KHD with a native 560 × 560 ViT vision encoder, supporting high-resolution images with any aspect ratio.\n\n- **Fine-Grained Video Understanding**: IXC-2.5 treats videos as a ultra-high-resolution composite picture consisting of tens to hundreds of frames, allowing it to capture fine details through dense sampling and higher resolution for each frame.\n\n- **Multi-Turn Multi-Image Dialogue**: IXC-2.5 supports free-form multi-turn multi-image dialogue, allowing it to naturally interact with humans in multi-round conversations. \n\n- **Webpage Crafting**: IXC-2.5 can be readily applied to create webpages by composing source code (HTML, CSS, and JavaScript) following text-image instructions.\n\n- **Composing High-Quality Text-Image Articles**: IXC-2.5 leverages specially designed Chain-of-Thought (CoT) and Direct Preference Optimization (DPO) techniques to significantly enhance the quality of its written content. \n\n- **Awesome performance**: IXC-2.5 has been evaluated on 28 benchmarks, outperforming existing open-source state-of-the-art models on 16 benchmarks. It also surpasses or competes closely with GPT-4V and Gemini Pro on 16 key tasks. 
\n\n\u003Cp align=\"center\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FInternLM_InternLM-XComposer_readme_e6dfe94c6213.png\" width=\"1000\"\u002F>\n\u003C\u002Fp>\n  \n\nPlease refer to [Technical Report](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.03320)  for more details.\n\u003Cbr>\n \n\n## Demo Video\n🔥 For the best experience, please keep the audio on while enjoying the video.\n\n[https:\u002F\u002Fgithub.com\u002FInternLM\u002FInternLM-XComposer\u002Fassets\u002F147793160\u002F8206f07f-3166-461e-a631-9cbcdec6ae75](https:\u002F\u002Fgithub.com\u002FInternLM\u002FInternLM-XComposer\u002Fassets\u002F147793160\u002F8206f07f-3166-461e-a631-9cbcdec6ae75)\n\n[Youtube Video](https:\u002F\u002Fyoutu.be\u002F8tYpiQNOJww)\n\n\nPlease refer to [Chinese Demo](.\u002FREADME_CN.md#demo) for the demo of the Chinese version.\n\n## News and Updates\n- `2024.12.12` 🎉🎉🎉 [InternLM-XComposer2.5-7B-Reward](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer2d5-7b-reward) is publicly available.\n- `2024.12.12` 🎉🎉🎉 [InternLM-XComposer2.5-OmniLive-7B](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer2d5-ol-7b) is publicly available.\n- `2024.07.15` 🎉🎉🎉 [ModelScope Swift](https:\u002F\u002Fgithub.com\u002FInternLM\u002Flmdeploy\u002Fblob\u002Fmain\u002Fdocs\u002Fen\u002Fmulti_modal\u002Fxcomposer2d5.md) supports InternLM-XComposer2.5-7B for finetuning and inference.\n- `2024.07.15` 🎉🎉🎉 [LMDepoly](https:\u002F\u002Fgithub.com\u002FInternLM\u002Flmdeploy\u002Fblob\u002Fmain\u002Fdocs\u002Fen\u002Fmulti_modal\u002Fxcomposer2d5.md) supports InternLM-XComposer2.5-7B for 4 bit quantization and inference.\n- `2024.07.15` 🎉🎉🎉 [InternLM-XComposer2.5-7B-4bit](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer2d5-7b-4bit) is publicly available.\n- `2024.07.03` 🎉🎉🎉 [InternLM-XComposer2.5-7B](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer2d5-7b) is publicly available.\n- `2024.07.01` 🎉🎉🎉 [ShareGPT4V](https:\u002F\u002Fgithub.com\u002FInternLM\u002FInternLM-XComposer\u002Ftree\u002Fmain\u002Fprojects\u002FShareGPT4V) is accepted by ECCV2024. 
\n- `2024.04.22` 🎉🎉🎉 The [finetune code](.\u002Ffinetune\u002F) of **InternLM-XComposer2-VL-7B-4KHD-7B** is publicly available.\n- `2024.04.09` 🎉🎉🎉 [InternLM-XComposer2-4KHD-7B](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer2-4khd-7b) and the [evaluation code](.\u002Fevaluation\u002FREADME.md) are publicly available.\n- `2024.04.09` 🎉🎉🎉 [InternLM-XComposer2-VL-1.8B](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer2-vl-1_8b) is publicly available.\n- `2024.02.22` 🎉🎉🎉 We release [DualFocus](https:\u002F\u002Fgithub.com\u002FInternLM\u002FInternLM-XComposer\u002Ftree\u002Fmain\u002Fprojects\u002FDualFocus), a framework for integrating macro and micro perspectives within MLLMs to enhance vision-language task performance.\n- `2024.02.06` 🎉🎉🎉 [InternLM-XComposer2-7B-4bit](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer2-7b-4bit) and [InternLM-XComposer-VL2-7B-4bit](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer2-vl-7b-4bit) are publicly available on **Hugging Face** and **ModelScope**.\n- `2024.02.02` 🎉🎉🎉 The [finetune code](.\u002Ffinetune\u002F) of **InternLM-XComposer2-VL-7B** is publicly available.\n- `2024.01.26` 🎉🎉🎉 The [evaluation code](.\u002Fevaluation\u002FREADME.md) of **InternLM-XComposer2-VL-7B** is publicly available.\n- `2024.01.26` 🎉🎉🎉 [InternLM-XComposer2-7B](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer2-7b) and [InternLM-XComposer-VL2-7B](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer2-vl-7b) are publicly available on **Hugging Face** and **ModelScope**.\n- `2024.01.26` 🎉🎉🎉 We release a [technical report](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.16420) with more details of the InternLM-XComposer2 series.\n- `2023.11.22` 🎉🎉🎉 We release [ShareGPT4V](https:\u002F\u002Fgithub.com\u002FInternLM\u002FInternLM-XComposer\u002Ftree\u002Fmain\u002Fprojects\u002FShareGPT4V), a large-scale, highly descriptive image-text dataset generated by GPT4-Vision, and a superior large multimodal model, ShareGPT4V-7B.\n- `2023.10.30` 🎉🎉🎉 InternLM-XComposer-VL achieved the top 1 ranking in both [Q-Bench](https:\u002F\u002Fgithub.com\u002FQ-Future\u002FQ-Bench\u002Ftree\u002Fmaster\u002Fleaderboards#overall-leaderboards) and [Tiny LVLM](https:\u002F\u002Fgithub.com\u002FOpenGVLab\u002FMulti-Modality-Arena\u002Ftree\u002Fmain\u002Ftiny_lvlm_evaluation).\n- `2023.10.19` 🎉🎉🎉 Support for inference on multiple GPUs. 
Two 4090 GPUs are sufficient for deploying our demo.\n- `2023.10.12` 🎉🎉🎉 The 4-bit demo is supported; model files are available on [Hugging Face](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer-7b-4bit) and [ModelScope](https:\u002F\u002Fmodelscope.cn\u002Fmodels\u002FShanghai_AI_Laboratory\u002Finternlm-xcomposer-7b-4bit).\n- `2023.10.8` 🎉🎉🎉 [InternLM-XComposer-7B](https:\u002F\u002Fmodelscope.cn\u002Fmodels\u002FShanghai_AI_Laboratory\u002Finternlm-xcomposer-7b) and [InternLM-XComposer-VL-7B](https:\u002F\u002Fmodelscope.cn\u002Fmodels\u002FShanghai_AI_Laboratory\u002Finternlm-xcomposer-vl-7b) are publicly available on **ModelScope**.\n- `2023.9.27` 🎉🎉🎉 The [evaluation code](.\u002FInternLM-XComposer-1.0\u002Fevaluation\u002F) of **InternLM-XComposer-VL-7B** is publicly available.\n- `2023.9.27` 🎉🎉🎉 [InternLM-XComposer-7B](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer-7b) and [InternLM-XComposer-VL-7B](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer-vl-7b) are publicly available on **Hugging Face**.\n- `2023.9.27` 🎉🎉🎉 We release a [technical report](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2309.15112.pdf) with more details of our model series.\n  \u003C\u002Fbr>\n\n## Model Zoo\n\n| Model | Usage | Transformers(HF) | ModelScope | Release Date |\n| --- | --- | --- | --- | --- |\n| **InternLM-XComposer-2.5** | Video Understanding, Multi-image Multi-turn Dialogue, 4K Resolution Understanding, Webpage Crafting, Article Creation, Benchmark | [🤗internlm-xcomposer2.5](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer2d5-7b) | [\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FInternLM_InternLM-XComposer_readme_375e99f5914e.png\" width=\"20px\" \u002F> internlm-xcomposer2.5](https:\u002F\u002Fmodelscope.cn\u002Fmodels\u002FShanghai_AI_Laboratory\u002Finternlm-xcomposer2d5-7b\u002Fsummary) | 2024-07-03 |\n| **InternLM-XComposer2-4KHD** | 4K Resolution Understanding, Benchmark, VL-Chat | [🤗internlm-xcomposer2-4khd-7b](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer2-4khd-7b) | [\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FInternLM_InternLM-XComposer_readme_375e99f5914e.png\" width=\"20px\" \u002F> internlm-xcomposer2-4khd-7b](https:\u002F\u002Fmodelscope.cn\u002Fmodels\u002FShanghai_AI_Laboratory\u002Finternlm-xcomposer2-4khd-7b\u002Fsummary) | 2024-04-09 |\n| **InternLM-XComposer2-VL-1.8B** | Benchmark, VL-Chat | [🤗internlm-xcomposer2-vl-1_8b](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer2-vl-1_8b) | [\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FInternLM_InternLM-XComposer_readme_375e99f5914e.png\" width=\"20px\" \u002F> 
internlm-xcomposer2-vl-1_8b](https:\u002F\u002Fmodelscope.cn\u002Fmodels\u002FShanghai_AI_Laboratory\u002Finternlm-xcomposer2-vl-1_8b\u002Fsummary)       | 2024-04-09   |\n| **InternLM-XComposer2**         | Text-Image Composition                          | [🤗internlm-xcomposer2-7b](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer2-7b)                 | [\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FInternLM_InternLM-XComposer_readme_375e99f5914e.png\" width=\"20px\" \u002F> internlm-xcomposer2-7b](https:\u002F\u002Fmodelscope.cn\u002Fmodels\u002FShanghai_AI_Laboratory\u002Finternlm-xcomposer2-7b\u002Fsummary)                 | 2024-01-26   |\n| **InternLM-XComposer2-VL**      | Benchmark, VL-Chat                              | [🤗internlm-xcomposer2-vl-7b](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer2-vl-7b)           | [\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FInternLM_InternLM-XComposer_readme_375e99f5914e.png\" width=\"20px\" \u002F> internlm-xcomposer2-vl-7b](https:\u002F\u002Fmodelscope.cn\u002Fmodels\u002FShanghai_AI_Laboratory\u002Finternlm-xcomposer2-vl-7b\u002Fsummary)           | 2024-01-26   |\n| **InternLM-XComposer2-4bit**    | Text-Image Composition                          | [🤗internlm-xcomposer2-7b-4bit](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer2-7b-4bit)       | [\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FInternLM_InternLM-XComposer_readme_375e99f5914e.png\" width=\"20px\" \u002F> internlm-xcomposer2-7b-4bit](https:\u002F\u002Fmodelscope.cn\u002Fmodels\u002FShanghai_AI_Laboratory\u002Finternlm-xcomposer2-7b-4bit\u002Fsummary)       | 2024-02-06   |\n| **InternLM-XComposer2-VL-4bit** | Benchmark, VL-Chat                              | [🤗internlm-xcomposer2-vl-7b-4bit](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer2-vl-7b-4bit) | [\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FInternLM_InternLM-XComposer_readme_375e99f5914e.png\" width=\"20px\" \u002F> internlm-xcomposer2-vl-7b-4bit](https:\u002F\u002Fmodelscope.cn\u002Fmodels\u002FShanghai_AI_Laboratory\u002Finternlm-xcomposer2-vl-7b-4bit\u002Fsummary) | 2024-02-06   |\n| **InternLM-XComposer**          | Text-Image Composition, VL-Chat                 | [🤗internlm-xcomposer-7b](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer-7b)                   | [\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FInternLM_InternLM-XComposer_readme_375e99f5914e.png\" width=\"20px\" \u002F> internlm-xcomposer-7b](https:\u002F\u002Fmodelscope.cn\u002Fmodels\u002FShanghai_AI_Laboratory\u002Finternlm-xcomposer-7b\u002Fsummary)                   | 2023-09-26   |\n| **InternLM-XComposer-4bit**     | Text-Image Composition, VL-Chat                 | [🤗internlm-xcomposer-7b-4bit](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer-7b-4bit)         | [\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FInternLM_InternLM-XComposer_readme_375e99f5914e.png\" width=\"20px\" \u002F> internlm-xcomposer-7b-4bit](https:\u002F\u002Fmodelscope.cn\u002Fmodels\u002FShanghai_AI_Laboratory\u002Finternlm-xcomposer-7b-4bit\u002Fsummary)         | 2023-09-26   |\n| **InternLM-XComposer-VL**       | Benchmark                                       | [🤗internlm-xcomposer-vl-7b](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer-vl-7b)             | [\u003Cimg 
src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FInternLM_InternLM-XComposer_readme_375e99f5914e.png\" width=\"20px\" \u002F> internlm-xcomposer-vl-7b](https:\u002F\u002Fmodelscope.cn\u002Fmodels\u002FShanghai_AI_Laboratory\u002Finternlm-xcomposer-vl-7b\u002Fsummary)             | 2023-09-26   |\n\n## Evaluation\n\nWe evaluate InternLM-XComposer-2.5 on 28 multimodal benchmarks, including image benchmarks [MMDU](https:\u002F\u002Fgithub.com\u002FLiuziyu77\u002FMMDU), [MMStar](https:\u002F\u002Fgithub.com\u002FMMStar-Benchmark\u002FMMStar), [RealWorldQA](https:\u002F\u002Fx.ai\u002Fblog\u002Fgrok-1.5v),  [Design2Code](https:\u002F\u002Fsalt-nlp.github.io\u002FDesign2Code\u002F), [DocVQA](https:\u002F\u002Frrc.cvc.uab.es\u002F?ch=17), [Infographics VQA](https:\u002F\u002Frrc.cvc.uab.es\u002F?ch=17), [TextVQA](https:\u002F\u002Ftextvqa.org\u002F), [ChartQA](https:\u002F\u002Fgithub.com\u002Fvis-nlp\u002FChartQA), [OCRBench](https:\u002F\u002Fgithub.com\u002FYuliang-Liu\u002FMultimodalOCR), [DeepFrom](https:\u002F\u002Fwandb.ai\u002Fstacey\u002Fdeepform_v1\u002Freports\u002FDeepForm-Understand-Structured-Documents-at-Scale--VmlldzoyODQ3Njg), [WTQ](https:\u002F\u002Farxiv.org\u002Fabs\u002F1508.00305), [VisualMRC](https:\u002F\u002Fgithub.com\u002Fnttmdlab-nlp\u002FVisualMRC), [TabFact](https:\u002F\u002Ftabfact.github.io\u002F), [MathVista](https:\u002F\u002Fmathvista.github.io\u002F), [MMMU](https:\u002F\u002Fmmmu-benchmark.github.io\u002F), [AI2D](https:\u002F\u002Fprior.allenai.org\u002Fprojects\u002Fdiagram-understanding), [MME](https:\u002F\u002Fgithub.com\u002FBradyFU\u002FAwesome-Multimodal-Large-Language-Models\u002Ftree\u002FEvaluation), [MMBench](https:\u002F\u002Fopencompass.org.cn\u002Fleaderboard-multimodal), [MMBench-CN](https:\u002F\u002Fopencompass.org.cn\u002Fleaderboard-multimodal), [SEED-Bench](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002FAILab-CVC\u002FSEED-Bench_Leaderboard), [HallusionBench](https:\u002F\u002Fgithub.com\u002Ftianyi-lab\u002FHallusionBench), [MM-Vet](https:\u002F\u002Fgithub.com\u002Fyuweihao\u002FMM-Vet), and video benchmarks [MVBench](https:\u002F\u002Fgithub.com\u002FOpenGVLab\u002FAsk-Anything), [MLVU](https:\u002F\u002Fgithub.com\u002FFlagOpen\u002FFlagEmbedding\u002Ftree\u002Fmaster\u002FMLVU\u002Fevaluation), [Video-MME](https:\u002F\u002Fgithub.com\u002FBradyFU\u002FVideo-MME), [MMBench-Video](https:\u002F\u002Fgithub.com\u002Fopen-compass\u002FVLMEvalKit), [TempCompass](https:\u002F\u002Fgithub.com\u002Fllyx97\u002FTempCompass)\n\nSee [Evaluation Details](.\u002Fevaluation\u002FREADME.md) here.\n\n### Compared with closed-source APIs and previous SOTAs on Video and Structural High-resolution images.\n|            | MVBench    | MLVU        | MME-Video | MMBench-Video | TempCompass | DocVQA      | ChartVQA    | InfoVQA     | TextVQA     | OCRBench | DeepForm   | WTQ        | VisualMRC  | TabFact     |\n|------------|------------|-------------|-----------|---------------|-------------|-------------|-------------|-------------|-------------|----------|------------|------------|------------|-------------|\n|            | VideoChat2 | InternVL1.5 | LIVA      | InternVL1.5   | Qwen-VL     | InternVL1.5 | InternVL1.5 | InternVL1.5 | InternVL1.5 | GLM-4v   | DocOwl 1.5 | DocOwl 1.5 | DocOwl 1.5 | DocOwl 1.5  |\n|            | 7B         | 26B         | 34B       | 26B           | 7B          | 26B         | 26B         | 26B         | 26B         | 9B       | 8B         | 8B         | 8B         | 8B          |\n|        | 60.4       | 50.4 
       | 59.0      | 42.0          | 52.9        | 90.9        | 83.8        | 72.5        | 80.6        | 77.6     | 68.8       | 40.6       | 246.4      | 80.2        |\n|            |            |             |           |               |             |             |             |             |             |          |            |            |            |             |\n| GPT-4V     | 43.5       | 49.2        | 59.9      | 56.0          | ---         | 88.4        | 78.5        | 75.1        | 78.0        | 51.6     | ---        | ---        | ---        | ---         |\n| Gemini-Pro | ---        | ---         | 75.0      | 49.3          | 67.1        | 88.1        | 74.1        | 75.2        | 74.6        | 68.0     | ---        | ---        | ---        | ---         |\n| Ours       | 69.1       | 58.8        | 55.8      | 46.9          |             | 90.9        | 82.2        | 69.9        | 78.2        | 69.0     | 71.2       | 53.6       | 307.5      | 85.2        |\n\n\n\n\n\n\n### Compared with closed-source APIs and previous SOTAs on Multi-Image dialog and General Visual QA Benchmarks.\n\n|            | MVBench    | MLVU        | MME-Video | MMBench-Video | TempCompass | DocVQA      | ChartVQA    | InfoVQA     | TextVQA     | OCRBench | DeepForm   | WTQ        | VisualMRC  | TabFact     |\n|------------|------------|-------------|-----------|---------------|-------------|-------------|-------------|-------------|-------------|----------|------------|------------|------------|-------------|\n|            | VideoChat2 | InternVL1.5 | LIVA      | InternVL1.5   | Qwen-VL     | InternVL1.5 | InternVL1.5 | InternVL1.5 | InternVL1.5 | GLM-4v   | DocOwl 1.5 | DocOwl 1.5 | DocOwl 1.5 | DocOwl 1.5  |\n|            | 7B         | 26B         | 34B       | 26B           | 7B          | 26B         | 26B         | 26B         | 26B         | 9B       | 8B         | 8B         | 8B         | 8B          |\n|            | 60.4       | 50.4        | 59.0      | 42.0          | 58.4        | 90.9        | 83.8        | 72.5        | 80.6        | 77.6     | 68.8       | 40.6       | 246.4      | 80.2        |\n|            |            |             |           |               |             |             |             |             |             |          |            |            |            |             |\n| GPT-4V     | 43.5       | 49.2        | 59.9      | 56.0          | ---         | 88.4        | 78.5        | 75.1        | 78.0        | 51.6     | ---        | ---        | ---        | ---         |\n| Gemini-Pro | ---        | ---         | 75.0      | 49.3          | 70.6        | 88.1        | 74.1        | 75.2        | 74.6        | 68.0     | ---        | ---        | ---        | ---         |\n| Ours       | 69.1       | 58.8        | 55.8      | 46.9          | 67.1        | 90.9        | 82.2        | 69.9        | 78.2        | 69.0     | 71.2       | 53.6       | 307.5      | 85.2        |\n\n\n## Requirements\n\n- python 3.8 and above\n- pytorch 1.12 and above, 2.0 and above are recommended\n- CUDA 11.4 and above are recommended (this is for GPU users)\n- [flash-attention2](https:\u002F\u002Fgithub.com\u002FDao-AILab\u002Fflash-attention) is required for high-resolution usage of InternLM-XComposer2.5.\n  \u003Cbr>\n\n## Installation\n\nBefore running the code, make sure you have setup the environment and installed the required packages. 
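
A quick sanity check of the requirements above can look like the following; this snippet is illustrative and not part of the repository:

```python
# Minimal sketch: verify the environment meets the stated requirements.
import sys

import torch
from packaging import version  # available in most Python environments

assert sys.version_info >= (3, 8), 'Python 3.8 and above is required'
assert version.parse(torch.__version__).release >= (1, 12), \
    'PyTorch 1.12 and above is required (2.0 and above recommended)'

if torch.cuda.is_available():
    print('CUDA version:', torch.version.cuda)  # 11.4 and above recommended
else:
    print('No CUDA GPU detected.')

try:
    import flash_attn  # flash-attention2, needed for high-resolution usage
    print('flash-attention:', flash_attn.__version__)
except ImportError:
    print('flash-attention2 is not installed.')
```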
## Quickstart\n\nWe provide a simple example to show how to use InternLM-XComposer-2.5 with 🤗 Transformers.\n \n\n\u003Cdetails>\n  \u003Csummary>\n    \u003Cb>Video Understanding\u003C\u002Fb>\n  \u003C\u002Fsummary>\n\n```python\nimport torch\nfrom transformers import AutoModel, AutoTokenizer\n\ntorch.set_grad_enabled(False)\n\n# init model and tokenizer\nmodel = AutoModel.from_pretrained('internlm\u002Finternlm-xcomposer2d5-7b', torch_dtype=torch.bfloat16, trust_remote_code=True).cuda().eval().half()\ntokenizer = AutoTokenizer.from_pretrained('internlm\u002Finternlm-xcomposer2d5-7b', trust_remote_code=True)\nmodel.tokenizer = tokenizer\n\nquery = 'Here are some frames of a video. Describe this video in detail'\nimage = ['.\u002Fexamples\u002Fliuxiang.mp4',]\nwith torch.autocast(device_type='cuda', dtype=torch.float16):\n    response, his = model.chat(tokenizer, query, image, do_sample=False, num_beams=3, use_meta=True)\nprint(response)\n#The video opens with a shot of an athlete, dressed in a red and yellow uniform with the word \"CHINA\" emblazoned across the front, preparing for a race. \n#The athlete, Liu Xiang, is seen in a crouched position, focused and ready, with the Olympic rings visible in the background, indicating the prestigious setting of the Olympic Games. As the race commences, the athletes are seen sprinting towards the hurdles, their determination evident in their powerful strides. \n#The camera captures the intensity of the competition, with the athletes' numbers and times displayed on the screen, providing a real-time update on their performance. The race reaches a climax as Liu Xiang, still in his red and yellow uniform, triumphantly crosses the finish line, his arms raised in victory. \n#The crowd in the stands erupts into cheers, their excitement palpable as they witness the athlete's success. 
The video concludes with a close-up shot of Liu Xiang, still basking in the glory of his victory, as the Olympic rings continue to symbolize the significance of the event.\n\nquery = 'tell me the athlete code of Liu Xiang'\nimage = ['.\u002Fexamples\u002Fliuxiang.mp4',]\nwith torch.autocast(device_type='cuda', dtype=torch.float16):\n    response, _ = model.chat(tokenizer, query, image, history=his, do_sample=False, num_beams=3, use_meta=True)\nprint(response)\n#The athlete code of Liu Xiang, as displayed on his uniform in the video, is \"1363\".\n```\n\n\u003C\u002Fdetails>\n\n\u003Cdetails>\n  \u003Csummary>\n    \u003Cb>Multi-Image Multi-Turn Dialogue\u003C\u002Fb>\n  \u003C\u002Fsummary>\n\n```python\nimport torch\nfrom transformers import AutoModel, AutoTokenizer\n\ntorch.set_grad_enabled(False)\n\n# init model and tokenizer\nmodel = AutoModel.from_pretrained('internlm\u002Finternlm-xcomposer2d5-7b', torch_dtype=torch.bfloat16, trust_remote_code=True).cuda().eval().half()\ntokenizer = AutoTokenizer.from_pretrained('internlm\u002Finternlm-xcomposer2d5-7b', trust_remote_code=True)\nmodel.tokenizer = tokenizer\n\nquery = 'Image1 \u003CImageHere>; Image2 \u003CImageHere>; Image3 \u003CImageHere>; I want to buy a car from the three given cars, analyze their advantages and weaknesses one by one'\nimage = ['.\u002Fexamples\u002Fcars1.jpg',\n        '.\u002Fexamples\u002Fcars2.jpg',\n        '.\u002Fexamples\u002Fcars3.jpg',]\nwith torch.autocast(device_type='cuda', dtype=torch.float16):\n    response, his = model.chat(tokenizer, query, image, do_sample=False, num_beams=3, use_meta=True)\nprint(response)\n#To analyze the advantages and disadvantages of each car, we need to consider factors such as brand reputation, performance, design, cost, and maintenance. \n#1. Mercedes-Benz: - Advantages: Known for its luxury and reliability, the Mercedes-Benz brand offers a high level of comfort, advanced technology, and superior craftsmanship. The vehicle in the image appears to be an SUV, which is versatile for both city driving and off-road conditions. - Disadvantages: Typically, Mercedes-Benz vehicles are more expensive compared to other brands, and they may require more frequent maintenance due to their luxury status. \n#2. Bugatti: - Advantages: Bugatti is renowned for producing some of the fastest and most powerful cars in the world. The vehicle in the image is a sports car, likely offering an exhilarating driving experience with its high-performance engine and advanced aerodynamics. - Disadvantages: Bugatti cars are extremely expensive, making them less accessible to the average consumer. They also require specialized knowledge for maintenance and may not be suitable for everyday driving due to their high-performance nature. \n#3. BMW: - Advantages: BMW is a well-respected manufacturer known for its innovative engineering and design. The car in the image appears to be an electric vehicle (EV), which offers advantages such as lower emissions, reduced noise pollution, and potentially lower operating costs. - Disadvantages: EVs like the one in the image may have a shorter range compared to traditional gasoline-powered vehicles, limiting their suitability for long-distance travel. \n#Additionally, the initial cost of purchasing an EV can be higher than that of a conventional car. In conclusion, each car has its own set of advantages and disadvantages. 
The Mercedes-Benz offers luxury and versatility, the Bugatti provides high performance and exclusivity, and the BMW represents innovation in electric vehicle technology. The choice ultimately depends on the buyer's priorities, such as comfort, performance, environmental impact, and budget.\n\nquery = 'Image4 \u003CImageHere>; How about the car in Image4'\nimage.append('.\u002Fexamples\u002Fcars4.jpg')        \nwith torch.autocast(device_type='cuda', dtype=torch.float16):\n    response, _ = model.chat(tokenizer, query, image, do_sample=False, num_beams=3, history= his, use_meta=True)\nprint(response)\n#The car in Image4 is a red sports car, which appears to be a Ferrari. Ferrari is a renowned Italian manufacturer known for producing some of the most iconic and high-performance sports cars in the world. - Advantages: Ferrari vehicles are synonymous with speed, luxury, and engineering excellence. \n#The car in the image likely offers an exhilarating driving experience with its powerful engine, advanced aerodynamics, and high-quality craftsmanship. The red color adds to the car's aesthetic appeal, making it stand out on the road. - Disadvantages: Ferrari cars are extremely expensive, making them less accessible to the average consumer. \n#They also require specialized knowledge for maintenance and may not be suitable for everyday driving due to their high-performance nature. In conclusion, the Ferrari in Image4 represents a pinnacle of automotive engineering and design, offering unmatched performance and luxury. \n#However, its high cost and specialized maintenance requirements make it less practical for everyday use compared to the other vehicles in the images.\n```\n\n\n\u003C\u002Fdetails>\n\n\u003Cdetails>\n  \u003Csummary>\n    \u003Cb>High Resolution Image Understanding\u003C\u002Fb>\n  \u003C\u002Fsummary>\n\n```python\nimport torch\nfrom transformers import AutoModel, AutoTokenizer\n\ntorch.set_grad_enabled(False)\n\n# init model and tokenizer\nmodel = AutoModel.from_pretrained('internlm\u002Finternlm-xcomposer2d5-7b', torch_dtype=torch.bfloat16, trust_remote_code=True).cuda().eval().half()\ntokenizer = AutoTokenizer.from_pretrained('internlm\u002Finternlm-xcomposer2d5-7b', trust_remote_code=True)\nmodel.tokenizer = tokenizer\n\nquery = 'Analyze the given image in a detail manner'\nimage = ['.\u002Fexamples\u002Fdubai.png']\nwith torch.autocast(device_type='cuda', dtype=torch.float16):\n    response, _ = model.chat(tokenizer, query, image, do_sample=False, num_beams=3, use_meta=True)\nprint(response)\n#The infographic is a visual representation of various facts about Dubai. It begins with a statement about Palm Jumeirah, highlighting it as the largest artificial island visible from space. It then provides a historical context, noting that in 1968, there were only a few cars in Dubai, contrasting this with the current figure of more than 1.5 million vehicles. \n#The infographic also points out that Dubai has the world's largest Gold Chain, with 7 of the top 10 tallest hotels located there. Additionally, it mentions that the crime rate is near 0%, and the income tax rate is also 0%, with 20% of the world's total cranes operating in Dubai. Furthermore, it states that 17% of the population is Emirati, and 83% are immigrants.\n#The Dubai Mall is highlighted as the largest shopping mall in the world, with 1200 stores. The infographic also notes that Dubai has no standard address system, with no zip codes, area codes, or postal services. 
It mentions that the Burj Khalifa is so tall that its residents on top floors need to wait longer to break fast during Ramadan. \n#The infographic also includes information about Dubai's climate-controlled City, with the Royal Suite at Burj Al Arab costing $24,000 per night. Lastly, it notes that the net worth of the four listed billionaires is roughly equal to the GDP of Honduras.\n\n```\n\n\u003C\u002Fdetails>\n\n\n\u003Cdetails>\n  \u003Csummary>\n    \u003Cb>Instruction to Webpage\u003C\u002Fb>\n  \u003C\u002Fsummary>\n\n```python\nimport torch\nfrom transformers import AutoModel, AutoTokenizer\n\ntorch.set_grad_enabled(False)\n\n# init model and tokenizer\nmodel = AutoModel.from_pretrained('internlm\u002Finternlm-xcomposer2d5-7b', torch_dtype=torch.bfloat16, trust_remote_code=True).cuda().eval().half()\ntokenizer = AutoTokenizer.from_pretrained('internlm\u002Finternlm-xcomposer2d5-7b', trust_remote_code=True)\nmodel.tokenizer = tokenizer\n\nquery = 'A website for Research institutions. The name is Shanghai AI lab. Top Navigation Bar is blue.Below left, an image shows the logo of the lab. In the right, there is a passage of text below that describes the mission of the laboratory.There are several images to show the research projects of Shanghai AI lab.'\nwith torch.autocast(device_type='cuda', dtype=torch.float16):\n    response = model.write_webpage(query, seed=202, task='Instruction-aware Webpage Generation', repetition_penalty=3.0)\nprint(response)\n# see the Instruction-aware Webpage Generation.html \n```\n \nSee the [Instruction to Webpage](.\u002Fexamples\u002FInstruction-aware_Webpage_Generation.html) results here.\n\u003C\u002Fdetails>\n\n\u003Cdetails>\n  \u003Csummary>\n    \u003Cb>Resume to Webpage\u003C\u002Fb>\n  \u003C\u002Fsummary>\n\n```python\nimport torch\nfrom transformers import AutoModel, AutoTokenizer\n\ntorch.set_grad_enabled(False)\n\n# init model and tokenizer\nmodel = AutoModel.from_pretrained('internlm\u002Finternlm-xcomposer2d5-7b', torch_dtype=torch.bfloat16, trust_remote_code=True).cuda().eval().half()\ntokenizer = AutoTokenizer.from_pretrained('internlm\u002Finternlm-xcomposer2d5-7b', trust_remote_code=True)\nmodel.tokenizer = tokenizer\n\n## the input should be a resume in markdown format\nquery = '.\u002Fexamples\u002Fresume.md'\nwith torch.autocast(device_type='cuda', dtype=torch.float16):\n    response = model.resume_2_webpage(query, seed=202, repetition_penalty=3.0)\nprint(response)\n```\nSee the [Resume to Webpage](.\u002Fexamples\u002FResume-to-Personal_Page.html) results here.\n\n\n\u003C\u002Fdetails>\n\n\n\u003Cdetails>\n  \u003Csummary>\n    \u003Cb>Screenshot to Webpage\u003C\u002Fb>\n  \u003C\u002Fsummary>\n\n```python\nimport torch\nfrom transformers import AutoModel, AutoTokenizer\n\ntorch.set_grad_enabled(False)\n\n# init model and tokenizer\nmodel = AutoModel.from_pretrained('internlm\u002Finternlm-xcomposer2d5-7b', torch_dtype=torch.bfloat16, trust_remote_code=True).cuda().eval().half()\ntokenizer = AutoTokenizer.from_pretrained('internlm\u002Finternlm-xcomposer2d5-7b', trust_remote_code=True)\nmodel.tokenizer = tokenizer\n\nquery = 'Generate the HTML code of this web image with Tailwind CSS.'\nimage = ['.\u002Fexamples\u002Fscreenshot.jpg']\nwith torch.autocast(device_type='cuda', dtype=torch.float16):\n    response = model.screen_2_webpage(query, image, seed=202, repetition_penalty=3.0)\nprint(response)\n```\nSee the [Screenshot to Webpage](.\u002Fexamples\u002FScreenshot-to-Webpage.html) results 
here.\n\n\u003C\u002Fdetails>\n\n\n\n\u003Cdetails>\n  \u003Csummary>\n    \u003Cb>Write Article\u003C\u002Fb>\n  \u003C\u002Fsummary>\n\n```python\nimport torch\nfrom transformers import AutoModel, AutoTokenizer\n\ntorch.set_grad_enabled(False)\n\n# init model and tokenizer\nmodel = AutoModel.from_pretrained('internlm\u002Finternlm-xcomposer2d5-7b', torch_dtype=torch.bfloat16, trust_remote_code=True).cuda().eval().half()\ntokenizer = AutoTokenizer.from_pretrained('internlm\u002Finternlm-xcomposer2d5-7b', trust_remote_code=True)\nmodel.tokenizer = tokenizer\n\nquery = '阅读下面的材料，根据要求写作。 电影《长安三万里》的出现让人感慨，影片并未将重点全落在大唐风华上，也展现了恢弘气象的阴暗面，即旧门阀的资源垄断、朝政的日益衰败与青年才俊的壮志难酬。高适仕进无门，只能回乡>沉潜修行。李白虽得玉真公主举荐，擢入翰林，但他只是成为唐玄宗的御用文人，不能真正实现有益于朝政的志意。然而，片中高潮部分《将进酒》一节，人至中年、挂着肚腩的李白引众人乘仙鹤上天，一路从水面、瀑布飞升至银河进入仙>宫，李白狂奔着与仙人们碰杯，最后大家纵身飞向漩涡般的九重天。肉身的微贱、世路的“天生我材必有用，坎坷，拘不住精神的高蹈。“天生我材必有用，千金散尽还复来。” 古往今来，身处闲顿、遭受挫折、被病痛折磨，很多人都曾经历>了人生的“失意”，却反而成就了他们“诗意”的人生。对正在追求人生价值的当代青年来说，如何对待人生中的缺憾和困顿?诗意人生中又有怎样的自我坚守和自我认同?请结合“失意”与“诗意”这两个关键词写一篇文章。 要求:选准角度，确定>立意，明确文体，自拟标题;不要套作，不得抄袭;不得泄露个人信息;不少于 800 字。'\nwith torch.autocast(device_type='cuda', dtype=torch.float16):\n    response = model.write_artical(query, seed=8192)\nprint(response)\n#诗意人生，贵在坚守\n#《菜根谭》有云:“闲时要有吃紧的心思,忙里要留吃闲工夫。”人生在世,总有失意之时,当面对缺憾和困顿,诗意地生活着才能为人生增添一抹亮色。何谓诗意地生活? 所谓诗意地生活，便是在于坚守本心、直面遗憾、超越自我,在失意中寻找人生价值。\n#诗意地生活,需坚守本心,淡然处之。\n#陶渊明曾执意辞去彭泽县令,归隐田园,“采菊东篱下,悠然见南山”,在山水间寄情自娱；王维面对仕途失意,终日沉醉于诗酒之中,“兴来每独往,胜事空自知”,在诗酒中闲逸自如;李白仕途不顺,被赐金放还,但他依旧豪气干云,“天生我才必有用,千金散尽还复来”,在失意中坦然豁达。坚守本心，便能在遭遇失意之时守住自己的精神家园,让生活充满诗意。反之,若不能坚守本心,而只是一味迎合世俗以求得升迁,那纵使身居高位,亦会丧失生活的乐趣。\n#诗意地生活,需直面遗憾,超越自我。\n#“西塞山前白鹭飞,桃花流水鳜鱼肥。青箬笠,绿柳枝,半斤酒,一纶丝。五湖四海皆如此,何妨到此处归。”白居易的《渔歌子》写出了多少人的愿望:没有权势纷扰,没有贫困凄凉,只有青山绿水、白鹭鸥鸟作伴,如此自由自在的生活令人神往。然而,白居易却并没有因此真的归隐山林,而是直面人生,超越自我,写下了一首首诗意而富有现实关怀的作品。如果白居易只顾逃避人生,那又怎会拥有“大弦嘈嘈如急雨,小弦切切如私语”的绝美比喻呢?如果白居易只顾归隐山林,那又怎会写出“此曲只应天上有,人间哪得配白居易”这样的诗句呢?\n#诗意地生活,需直面遗憾,坚守本心。\n#李文波患有渐冻症,医生说他活不过五年,但他没有因此放弃对音乐的热爱,而是与病魔作斗争,演奏出美妙的乐曲;孙家林自幼患有脑瘫,但他不甘于命运的捉弄,终成全国最美教师;史铁生饱受疾病折磨,但他仍能发出“我常常在我的心头清点,我有什么?”的叩问,并由此走上文学道路,为后世留下丰厚的文化遗产。这些人没有逃避,而是选择直面人生的缺憾,在坚守本心的同时超越自我,最终实现了自己的价值。\n#诗意地生活,是于失意中坚守本心,于缺憾中超越自我。当面对人生的缺憾与挫折,坚守本心、超越自我的同时,也必将书写属于自己的辉煌篇章。\n#愿你我都能诗意地生活着!\n\nquery = 'Please write a blog based on the title: French Pastries: A Sweet Indulgence'\nwith torch.autocast(device_type='cuda', dtype=torch.float16):\n    response = model.write_artical(query, seed=8192)\nprint(response)\n#French Pastries: A Sweet Indulgence\n#The French are well known for their love of pastries, and it’s a love that is passed down through generations. When one visits France, they are treated to an assortment of baked goods that can range from the delicate macaron to the rich and decadent chocolate mousse. While there are many delicious types of pastries found in France, five stand out as being the most iconic. Each of these pastries has its own unique qualities that make it special.\n#1. Croissant\n#One of the most famous pastries from France is the croissant. It is a buttery, flaky pastry that is best enjoyed fresh from the bakery. The dough is laminated with butter, giving it its signature layers. Croissants are typically eaten for breakfast or brunch, often accompanied by coffee or hot chocolate.\n#2. Macaron\n#The macaron is a small, delicate French confection made from almond flour, powdered sugar, and egg whites. The macaron itself is sandwiched with a ganache or jam filling. They come in a variety of colors and flavors, making them a popular choice for both casual snacking and upscale desserts.\n#3. Madeleine\n#The madeleine is a small shell-shaped cake that is light and sponge-like. 
It is often flavored with lemon or orange zest and sometimes dipped in chocolate. Madeleines are perfect for an afternoon snack with tea or coffee.\n#4. Éclair\n#The éclair is a long, thin pastry filled with cream and topped with chocolate glaze. It is a classic French treat that is both sweet and satisfying. Éclairs can be found in bakeries all over France and are often enjoyed with a cup of hot chocolate.\n#5. Tarte Tatin\n#The tarte Tatin is an apple tart that is known for its caramelized apples and puff pastry crust. It is named after the Tatin sisters who created the recipe in the late 19th century. Tarte Tatin is best served warm with a scoop of vanilla ice cream.\n#These pastries are just a few of the many delicious treats that France has to offer. Whether you are a seasoned traveler or a first-time visitor, indulging in French pastries is a must-do activity. So go ahead, treat yourself—you deserve it!\n```\n\n\u003C\u002Fdetails>\n\n\n## Inference on Multiple GPUs\n\nIf you have multiple GPUs, but the memory size of each GPU is not enough to accommodate the entire model, you can split the model across multiple GPUs. First, install `accelerate` using the command `pip install accelerate`. Then, execute the following script for chat:\n\n```\n# chat with 2 GPUs\npython example_code\u002Fexample_chat.py --num_gpus 2\n```
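
Under the hood this relies on 🤗 `accelerate` sharding the weights across the visible GPUs. A rough sketch of the same idea using `device_map` is below; it is an assumption for illustration (the model's `trust_remote_code` path may have its own sharding logic), and `example_chat.py` remains the supported entry point:

```python
# Illustrative sketch: let accelerate place model shards across all GPUs
# via device_map='auto' instead of calling .cuda() on a single device.
import torch
from transformers import AutoModel, AutoTokenizer

model = AutoModel.from_pretrained(
    'internlm/internlm-xcomposer2d5-7b',
    torch_dtype=torch.bfloat16,
    device_map='auto',  # requires `pip install accelerate`
    trust_remote_code=True,
).eval()
tokenizer = AutoTokenizer.from_pretrained(
    'internlm/internlm-xcomposer2d5-7b', trust_remote_code=True)
model.tokenizer = tokenizer
```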
## Inference Acceleration by LMDeploy\n\nIf inference optimization is required for the InternLM-XComposer2d5 model, we recommend using [LMDeploy](https:\u002F\u002Fgithub.com\u002FInternLM\u002Flmdeploy\u002Fblob\u002Fmain\u002Fdocs\u002Fen\u002Fmulti_modal\u002Fxcomposer2d5.md).\n\nIn the following subsections, we will introduce the usage of LMDeploy with the [internlm-xcomposer2d5-7b](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer2d5-7b) model as an example. \n\nFirst, install the PyPI package with `pip install lmdeploy`. By default, it depends on CUDA 12.x. For a CUDA 11.x environment, please refer to the [installation guide](https:\u002F\u002Flmdeploy.readthedocs.io\u002Fen\u002Flatest\u002Fget_started.html#installation).\n\n### Offline Inference Pipeline\n\n```python\nfrom lmdeploy import pipeline\nfrom lmdeploy.vl import load_image\npipe = pipeline('internlm\u002Finternlm-xcomposer2d5-7b')\nimage = load_image('examples\u002Fdubai.png')\nresponse = pipe(('describe this image', image))\nprint(response.text)\n```\n\nFor more on using the VLM pipeline, including multi-image inference and multi-turn chat, please see [this](https:\u002F\u002Fgithub.com\u002FInternLM\u002Flmdeploy\u002Fblob\u002Fmain\u002Fdocs\u002Fen\u002Fmulti_modal\u002Fxcomposer2d5.md) guide.\n\n## 4-Bit Model\nWe offer 4-bit quantized models via LMDeploy to reduce memory requirements. For a memory usage comparison, please refer to [here](example_code\u002F4bit\u002FREADME.md).\n\n```python\nfrom lmdeploy import TurbomindEngineConfig, pipeline\nfrom lmdeploy.vl import load_image\nengine_config = TurbomindEngineConfig(model_format='awq')\npipe = pipeline('internlm\u002Finternlm-xcomposer2d5-7b-4bit', backend_config=engine_config)\nimage = load_image('examples\u002Fdubai.png')\nresponse = pipe(('describe this image', image))\nprint(response.text)\n```\n\n## Finetune\n\n1. Please refer to our [finetune scripts](finetune\u002FREADME.md).\n2. Inference and finetuning are also supported by [ModelScope Swift](https:\u002F\u002Fgithub.com\u002Fmodelscope\u002Fswift\u002Fblob\u002Fmain\u002Fdocs\u002Fsource_en\u002FMulti-Modal\u002Finternlm-xcomposer2-best-practice.md).\n\n## Gradio Deploy\n\nWe provide code for users to build a web UI demo. Please use ```gradio==4.13.0```.\n\nPlease run the command below for Chat \u002F Composition:\n\n```\n# For Multimodal Chat\npython gradio_demo\u002Fgradio_demo_chat.py\n\n# For Free-form Text-Image Composition\npython gradio_demo\u002Fgradio_demo_composition.py\n```\n\nUser guidance for the UI demo is given [HERE](demo_asset\u002Fdemo.md). If you wish to change the default folder of the model, please use the `--code_path=new_folder` option.\n\u003Cbr>\n\n## Citation\n\nIf you find our models \u002F code \u002F papers useful in your research, please consider giving ⭐ and citations 📝, thx :)\n\n```BibTeX\n@inproceedings{internlmxcomposer2_5_reward,\n      title={InternLM-XComposer2.5-Reward: A Simple Yet Effective Multi-Modal Reward Model}, \n      author={Yuhang Zang and Xiaoyi Dong and Pan Zhang and Yuhang Cao and Ziyu Liu and Shengyuan Ding and Shenxi Wu and Yubo Ma and Haodong Duan and Wenwei Zhang and Kai Chen and Dahua Lin and Jiaqi Wang},\n      booktitle={Findings of ACL},\n      year={2025}\n}\n```\n\n```BibTeX\n@article{internlmxcomposer2_5_OL,\n      title={InternLM-XComposer2.5-OmniLive: A Comprehensive Multimodal System for Long-term Streaming Video and Audio Interactions}, \n      author={Pan Zhang and Xiaoyi Dong and Yuhang Cao and Yuhang Zang and Rui Qian and Xilin Wei and Lin Chen and Yifei Li and Junbo Niu and Shuangrui Ding and Qipeng Guo and Haodong Duan and Xin Chen and Han Lv and Zheng Nie and Min Zhang and Bin Wang and Wenwei Zhang and Xinyue Zhang and Jiaye Ge and Wei Li and Jingwen Li and Zhongying Tu and Conghui He and Xingcheng Zhang and Kai Chen and Yu Qiao and Dahua Lin and Jiaqi Wang},\n      journal={arXiv preprint arXiv:2412.09596},\n      year={2024}\n}\n```\n\n```BibTeX\n@article{internlmxcomposer2_5,\n      title={InternLM-XComposer-2.5: A Versatile Large Vision Language Model Supporting Long-Contextual Input and Output}, \n      author={Pan Zhang and Xiaoyi Dong and Yuhang Zang and Yuhang Cao and Rui Qian and Lin Chen and Qipeng Guo and Haodong Duan and Bin Wang and Linke Ouyang and Songyang Zhang and Wenwei Zhang and Yining Li and Yang Gao and Peng Sun and Xinyue Zhang and Wei Li and Jingwen Li and Wenhai Wang and Hang Yan and Conghui He and Xingcheng Zhang and Kai Chen and Jifeng Dai and Yu Qiao and Dahua Lin and Jiaqi Wang},\n      journal={arXiv preprint arXiv:2407.03320},\n      year={2024}\n}\n```\n\n```BibTeX\n@article{internlmxcomposer2_4khd,\n      title={InternLM-XComposer2-4KHD: A Pioneering Large Vision-Language Model Handling Resolutions from 336 Pixels to 4K HD},\n      author={Xiaoyi Dong and Pan Zhang and Yuhang Zang and Yuhang Cao and Bin Wang and Linke Ouyang and Songyang Zhang and Haodong Duan and Wenwei Zhang and Yining Li and Hang Yan and Yang Gao and Zhe Chen and Xinyue Zhang and Wei Li and Jingwen Li and Wenhai Wang and Kai Chen and Conghui He and Xingcheng Zhang and Jifeng Dai and Yu Qiao and Dahua Lin and Jiaqi Wang},\n      journal={arXiv preprint arXiv:2404.06512},\n      year={2024}\n}\n```\n\n```BibTeX\n@article{internlmxcomposer2,\n      title={InternLM-XComposer2: Mastering Free-form Text-Image Composition and Comprehension in Vision-Language Large Model},\n      author={Xiaoyi Dong and Pan Zhang and Yuhang Zang and Yuhang Cao and Bin Wang 
and Linke Ouyang and Xilin Wei and Songyang Zhang and Haodong Duan and Maosong Cao and Wenwei Zhang and Yining Li and Hang Yan and Yang Gao and Xinyue Zhang and Wei Li and Jingwen Li and Kai Chen and Conghui He and Xingcheng Zhang and Yu Qiao and Dahua Lin and Jiaqi Wang},\n      journal={arXiv preprint arXiv:2401.16420},\n      year={2024}\n}\n```\n\n```BibTeX\n@article{internlmxcomposer,\n      title={InternLM-XComposer: A Vision-Language Large Model for Advanced Text-image Comprehension and Composition},\n      author={Pan Zhang and Xiaoyi Dong and Bin Wang and Yuhang Cao and Chao Xu and Linke Ouyang and Zhiyuan Zhao and Shuangrui Ding and Songyang Zhang and Haodong Duan and Wenwei Zhang and Hang Yan and Xinyue Zhang and Wei Li and Jingwen Li and Kai Chen and Conghui He and Xingcheng Zhang and Yu Qiao and Dahua Lin and Jiaqi Wang},\n      journal={arXiv preprint arXiv:2309.15112},\n      year={2023}\n}\n```\n\n\u003Cbr>\n\n## License & Contact Us\n\nThe code is licensed under Apache-2.0, while model weights are fully open for academic research and also allow **free** commercial usage. To apply for a commercial license, please fill in the [application form (English)](https:\u002F\u002Fwj.qq.com\u002Fs2\u002F12727483\u002F5dba\u002F)\u002F[申请表（中文）](https:\u002F\u002Fwj.qq.com\u002Fs2\u002F12725412\u002Ff7c1\u002F). For other questions or collaborations, please contact \u003Cinternlm@pjlab.org.cn>.\n",
href=\"https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer2d5-7b-reward\">🤗\u003C\u002Fa>（IXC-2.5-Reward，ACL 2025 Findings），这是一个简单而高效的多模态奖励模型，包含训练代码、评估脚本以及部分训练数据。详情请参阅 [项目页面](InternLM-XComposer-2.5-Reward)。\n\n## 🔥🔥🔥 **InternLM-XComposer2.5-OmniLive**\n\n我们发布了 **InternLM-XComposer2.5-OmniLive**，一个用于长期流式视频和音频交互的综合性多模态系统。详情请参阅 [项目页面](InternLM-XComposer-2.5-OmniLive)。\n\n\u003Cbr>\n\n## 我们团队的多模态项目\n> [**InternLM-XComposer-2.5-Reward**](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2501.12368): **一个简单而高效的多模态奖励模型**\n\n> [**InternLM-XComposer-2.5-OmniLive**](): **一个专门针对流式视频和音频交互的通用型多模态系统**\n\n> [**InternLM-XComposer-2.5**](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2407.03320): **一款支持长上下文输入输出的多功能大型视觉语言模型**\n\n> [**InternLM-XComposer2-\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FInternLM_InternLM-XComposer_readme_e128b455e64f.png\" width=\"25px\">**](https:\u002F\u002Fgithub.com\u002FInternLM\u002FInternLM-XComposer): **一款开创性的大型视觉语言模型，可处理从336像素到4K高清分辨率的图像**\n\n> [**InternLM-XComposer2**](https:\u002F\u002Fgithub.com\u002FInternLM\u002FInternLM-XComposer): **在视觉语言大型模型中掌握自由格式的文本-图像创作与理解**\n\n> [**InternLM-XComposer**](https:\u002F\u002Fgithub.com\u002FInternLM\u002FInternLM-XComposer\u002Ftree\u002Fmain\u002FInternLM-XComposer-1.0): **一种用于高级文本-图像理解和创作的视觉语言大型模型**\n\n> \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FInternLM_InternLM-XComposer_readme_491799f6cbfa.png\" style=\"vertical-align: -20px;\" :height=\"25px\" width=\"25px\">[**ShareGPT4Video:**](https:\u002F\u002Fgithub.com\u002FInternLM\u002FInternLM-XComposer\u002Ftree\u002Fmain\u002Fprojects\u002FShareGPT4Video) **通过更优质的字幕提升视频理解和生成能力**\n\n> \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FInternLM_InternLM-XComposer_readme_ea638acad230.png\" style=\"vertical-align: -20px;\" :height=\"25px\" width=\"25px\">[**ShareGPT4V:**](https:\u002F\u002Fgithub.com\u002FInternLM\u002FInternLM-XComposer\u002Ftree\u002Fmain\u002Fprojects\u002FShareGPT4V) **通过更优质的字幕改进大型多模态模型**\n\n> \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FInternLM_InternLM-XComposer_readme_65755931cecb.png\" style=\"vertical-align: -20px;\" :height=\"25px\" width=\"25px\">[**MMDU:**](https:\u002F\u002Fliuziyu77.github.io\u002FMMDU\u002F) **一个用于LVLMs的多轮多图对话理解基准及指令微调数据集**\n\n> [**DualFocus**](https:\u002F\u002Fgithub.com\u002FInternLM\u002FInternLM-XComposer\u002Ftree\u002Fmain\u002Fprojects\u002FDualFocus): **在多模态大型语言模型中融合宏观与微观视角**\n\n\u003C\u002Fbr>\n\n**InternLM-XComposer-2.5** 在各类文本-图像理解和创作应用中表现出色，仅凭借7B规模的语言模型后端便达到了GPT-4V级别的能力。IXC-2.5 使用2.4万条交错排列的图文上下文进行训练，可通过RoPE外推技术无缝扩展至9.6万条长上下文。这种长上下文能力使IXC-2.5 在需要大量输入和输出内容的任务中表现尤为突出。\n\n- **超高分辨率理解**：IXC-2.5 在IXC2-4KHD提出的动态分辨率方案基础上进行了优化，采用原生560×560的ViT视觉编码器，能够支持任意宽高比的高分辨率图像。\n  \n- **细粒度视频理解**：IXC-2.5 将视频视为由数十至数百帧组成的超高分辨率复合图像，通过密集采样和更高的单帧分辨率来捕捉细微细节。\n\n- **多轮多图对话**：IXC-2.5 支持自由形式的多轮多图对话，使其能够在多轮交流中自然地与人类互动。\n\n- **网页制作**：IXC-2.5 可以根据文本-图像指令，轻松地组合源代码（HTML、CSS和JavaScript）来创建网页。\n\n- **高质量文本-图像文章创作**：IXC-2.5 利用专门设计的思维链（CoT）和直接偏好优化（DPO）技术，显著提升了其书面内容的质量。\n\n- **卓越性能**：IXC-2.5 已在28项基准测试中接受评估，在16项基准测试中超越了现有的开源最先进模型；同时，在16项关键任务上也优于或与GPT-4V、Gemini Pro不相上下。\n\n\u003Cp align=\"center\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FInternLM_InternLM-XComposer_readme_e6dfe94c6213.png\" width=\"1000\"\u002F>\n\u003C\u002Fp>\n  \n\n更多详情请参阅 [技术报告](https:\u002F\u002Farxiv.org\u002Fabs\u002F2407.03320)。\n\u003Cbr>\n \n\n## 演示视频\n🔥 
为获得最佳体验，请在观看视频时保持音频开启。\n\n[https:\u002F\u002Fgithub.com\u002FInternLM\u002FInternLM-XComposer\u002Fassets\u002F147793160\u002F8206f07f-3166-461e-a631-9cbcdec6ae75](https:\u002F\u002Fgithub.com\u002FInternLM\u002FInternLM-XComposer\u002Fassets\u002F147793160\u002F8206f07f-3166-461e-a631-9cbcdec6ae75)\n\n[Youtube 视频](https:\u002F\u002Fyoutu.be\u002F8tYpiQNOJww)\n\n\n中文版演示请参阅 [中文演示](.\u002FREADME_CN.md#demo)。\n\n## 新闻与更新\n- `2024.12.12` 🎉🎉🎉 [InternLM-XComposer2.5-7B-Reward](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer2d5-7b-reward) 已公开发布。\n- `2024.12.12` 🎉🎉🎉 [InternLM-XComposer2.5-OmniLive-7B](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer2d5-ol-7b) 已公开发布。\n- `2024.07.15` 🎉🎉🎉 [ModelScope Swift](https:\u002F\u002Fgithub.com\u002FInternLM\u002Flmdeploy\u002Fblob\u002Fmain\u002Fdocs\u002Fen\u002Fmulti_modal\u002Fxcomposer2d5.md) 支持 InternLM-XComposer2.5-7B 的微调和推理。\n- `2024.07.15` 🎉🎉🎉 [LMDeploy](https:\u002F\u002Fgithub.com\u002FInternLM\u002Flmdeploy\u002Fblob\u002Fmain\u002Fdocs\u002Fen\u002Fmulti_modal\u002Fxcomposer2d5.md) 支持 InternLM-XComposer2.5-7B 进行 4 位量化和推理。\n- `2024.07.15` 🎉🎉🎉 [InternLM-XComposer2.5-7B-4bit](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer2d5-7b-4bit) 已公开发布。\n- `2024.07.03` 🎉🎉🎉 [InternLM-XComposer2.5-7B](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer2d5-7b) 已公开发布。\n- `2024.07.01` 🎉🎉🎉 [ShareGPT4V](https:\u002F\u002Fgithub.com\u002FInternLM\u002FInternLM-XComposer\u002Ftree\u002Fmain\u002Fprojects\u002FShareGPT4V) 被 ECCV2024 接收。\n- `2024.04.22` 🎉🎉🎉 **InternLM-XComposer2-VL-7B-4KHD-7B** 的[微调代码](.\u002Ffinetune\u002F)已公开发布。\n- `2024.04.09` 🎉🎉🎉 [InternLM-XComposer2-4KHD-7B](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer2-4khd-7b) 和[评估代码](.\u002Fevaluation\u002FREADME.md)已公开发布。\n- `2024.04.09` 🎉🎉🎉 [InternLM-XComposer2-VL-1.8B](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer2-vl-1_8b) 已公开发布。\n- `2024.02.22` 🎉🎉🎉 我们发布了[DualFocus](https:\u002F\u002Fgithub.com\u002FInternLM\u002FInternLM-XComposer\u002Ftree\u002Fmain\u002Fprojects\u002FDualFocus)，这是一个用于在多模态大语言模型中整合宏观与微观视角的框架，以提升视觉-语言任务的表现。\n\n* `2024.02.06` 🎉🎉🎉 [InternLM-XComposer2-7B-4bit](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer2-7b-4bit) 和 [InternLM-XComposer-VL2-7B-4bit](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer2-vl-7b-4bit) 已在 **Hugging Face** 和 **ModelScope** 上公开发布。\n\n- `2024.02.02` 🎉🎉🎉 **InternLM-XComposer2-VL-7B** 的[微调代码](.\u002Ffinetune\u002F)已公开发布。\n- `2024.01.26` 🎉🎉🎉 **InternLM-XComposer2-VL-7B** 的[评估代码](.\u002Fevaluation\u002FREADME.md)已公开发布。\n- `2024.01.26` 🎉🎉🎉 [InternLM-XComposer2-7B](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer2-7b) 和 [InternLM-XComposer-VL2-7B](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer2-vl-7b) 已在 **Hugging Face** 和 **ModelScope** 上公开发布。\n- `2024.01.26` 🎉🎉🎉 我们发布了一份[技术报告](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.16420)，详细介绍了 InternLM-XComposer2 系列。\n- `2023.11.22` 🎉🎉🎉 我们发布了[ShareGPT4V](https:\u002F\u002Fgithub.com\u002FInternLM\u002FInternLM-XComposer\u002Ftree\u002Fmain\u002Fprojects\u002FShareGPT4V)，这是一份由 GPT4-Vision 生成的大规模、高描述性的图文数据集，以及一款优秀的大型多模态模型 ShareGPT4V-7B。\n- `2023.10.30` 🎉🎉🎉 InternLM-XComposer-VL 在 [Q-Bench](https:\u002F\u002Fgithub.com\u002FQ-Future\u002FQ-Bench\u002Ftree\u002Fmaster\u002Fleaderboards#overall-leaderboards) 和 [Tiny 
LVLM](https:\u002F\u002Fgithub.com\u002FOpenGVLab\u002FMulti-Modality-Arena\u002Ftree\u002Fmain\u002Ftiny_lvlm_evaluation) 中均获得第一名。\n- `2023.10.19` 🎉🎉🎉 支持多 GPU 推理。两块 4090 显卡足以部署我们的演示。\n- `2023.10.12` 🎉🎉🎉 支持 4 位演示，模型文件已在 [Hugging Face](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer-7b-4bit) 和 [ModelScope](https:\u002F\u002Fmodelscope.cn\u002Fmodels\u002FShanghai_AI_Laboratory\u002Finternlm-xcomposer-7b-4bit) 上提供。\n- `2023.10.8` 🎉🎉🎉 [InternLM-XComposer-7B](https:\u002F\u002Fmodelscope.cn\u002Fmodels\u002FShanghai_AI_Laboratory\u002Finternlm-xcomposer-7b) 和 [InternLM-XComposer-VL-7B](https:\u002F\u002Fmodelscope.cn\u002Fmodels\u002FShanghai_AI_Laboratory\u002Finternlm-xcomposer-vl-7b) 已在 **ModelScope** 上公开发布。\n- `2023.9.27` 🎉🎉🎉 **InternLM-XComposer-VL-7B** 的[评估代码](.\u002FInternLM-XComposer-1.0\u002Fevaluation\u002F)已公开发布。\n- `2023.9.27` 🎉🎉🎉 [InternLM-XComposer-7B](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer-7b) 和 [InternLM-XComposer-VL-7B](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer-vl-7b) 已在 **Hugging Face** 上公开发布。\n- `2023.9.27` 🎉🎉🎉 我们发布了一份[技术报告](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2309.15112.pdf)，详细介绍了我们的模型系列。\n  \u003C\u002Fbr>\n\n## 模型动物园\n\n| 模型                           | 用途                                           | Transformers(HF)                                                                                   | ModelScope(HF)                                                                                                                                                                       | 发布日期 |\n| ------------------------------- | ----------------------------------------------- | -------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ---------- |\n| **InternLM-XComposer-2.5**    | 视频理解、多图像多轮对话、4K分辨率理解、网页制作、文章创作、基准测试 | [🤗internlm-xcomposer2.5](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer2d5-7b)       | [\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FInternLM_InternLM-XComposer_readme_375e99f5914e.png\" width=\"20px\" \u002F> internlm-xcomposer2.5](https:\u002F\u002Fmodelscope.cn\u002Fmodels\u002FShanghai_AI_Laboratory\u002Finternlm-xcomposer2d5-7b\u002Fsummary)       | 2024-07-03   |\n| **InternLM-XComposer2-4KHD**    | 4K分辨率理解、基准测试、VL-聊天 | [🤗internlm-xcomposer2-4khd-7b](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer2-4khd-7b)       | [\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FInternLM_InternLM-XComposer_readme_375e99f5914e.png\" width=\"20px\" \u002F> internlm-xcomposer2-4khd-7b](https:\u002F\u002Fmodelscope.cn\u002Fmodels\u002FShanghai_AI_Laboratory\u002Finternlm-xcomposer2-4khd-7b\u002Fsummary)       | 2024-04-09   |\n| **InternLM-XComposer2-VL-1.8B** | 基准测试、VL-聊天                              | [🤗internlm-xcomposer2-vl-1_8b](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer2-vl-1_8b)       | [\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FInternLM_InternLM-XComposer_readme_375e99f5914e.png\" width=\"20px\" \u002F> internlm-xcomposer2-vl-1_8b](https:\u002F\u002Fmodelscope.cn\u002Fmodels\u002FShanghai_AI_Laboratory\u002Finternlm-xcomposer2-vl-1_8b\u002Fsummary)       | 2024-04-09   |\n| **InternLM-XComposer2**         | 文本-图像组合 
                         | [🤗internlm-xcomposer2-7b](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer2-7b)                 | [\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FInternLM_InternLM-XComposer_readme_375e99f5914e.png\" width=\"20px\" \u002F> internlm-xcomposer2-7b](https:\u002F\u002Fmodelscope.cn\u002Fmodels\u002FShanghai_AI_Laboratory\u002Finternlm-xcomposer2-7b\u002Fsummary)                 | 2024-01-26   |\n| **InternLM-XComposer2-VL**      | 基准测试、VL-聊天                              | [🤗internlm-xcomposer2-vl-7b](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer2-vl-7b)           | [\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FInternLM_InternLM-XComposer_readme_375e99f5914e.png\" width=\"20px\" \u002F> internlm-xcomposer2-vl-7b](https:\u002F\u002Fmodelscope.cn\u002Fmodels\u002FShanghai_AI_Laboratory\u002Finternlm-xcomposer2-vl-7b\u002Fsummary)           | 2024-01-26   |\n| **InternLM-XComposer2-4bit**    | 文本-图像组合                          | [🤗internlm-xcomposer2-7b-4bit](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer2-7b-4bit)       | [\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FInternLM_InternLM-XComposer_readme_375e99f5914e.png\" width=\"20px\" \u002F> internlm-xcomposer2-7b-4bit](https:\u002F\u002Fmodelscope.cn\u002Fmodels\u002FShanghai_AI_Laboratory\u002Finternlm-xcomposer2-7b-4bit\u002Fsummary)       | 2024-02-06   |\n| **InternLM-XComposer2-VL-4bit** | 基准测试、VL-聊天                              | [🤗internlm-xcomposer2-vl-7b-4bit](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer2-vl-7b-4bit) | [\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FInternLM_InternLM-XComposer_readme_375e99f5914e.png\" width=\"20px\" \u002F> internlm-xcomposer2-vl-7b-4bit](https:\u002F\u002Fmodelscope.cn\u002Fmodels\u002FShanghai_AI_Laboratory\u002Finternlm-xcomposer2-vl-7b-4bit\u002Fsummary) | 2024-02-06   |\n| **InternLM-XComposer**          | 文本-图像组合、VL-聊天                 | [🤗internlm-xcomposer-7b](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer-7b)                   | [\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FInternLM_InternLM-XComposer_readme_375e99f5914e.png\" width=\"20px\" \u002F> internlm-xcomposer-7b](https:\u002F\u002Fmodelscope.cn\u002Fmodels\u002FShanghai_AI_Laboratory\u002Finternlm-xcomposer-7b\u002Fsummary)                   | 2023-09-26   |\n| **InternLM-XComposer-4bit**     | 文本-图像组合、VL-聊天                 | [🤗internlm-xcomposer-7b-4bit](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer-7b-4bit)         | [\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FInternLM_InternLM-XComposer_readme_375e99f5914e.png\" width=\"20px\" \u002F> internlm-xcomposer-7b-4bit](https:\u002F\u002Fmodelscope.cn\u002Fmodels\u002FShanghai_AI_Laboratory\u002Finternlm-xcomposer-7b-4bit\u002Fsummary)         | 2023-09-26   |\n| **InternLM-XComposer-VL**       | 基准测试                                       | [🤗internlm-xcomposer-vl-7b](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer-vl-7b)             | [\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FInternLM_InternLM-XComposer_readme_375e99f5914e.png\" width=\"20px\" \u002F> internlm-xcomposer-vl-7b](https:\u002F\u002Fmodelscope.cn\u002Fmodels\u002FShanghai_AI_Laboratory\u002Finternlm-xcomposer-vl-7b\u002Fsummary)             | 2023-09-26   
|\n\n## 评估\n\n我们对InternLM-XComposer-2.5进行了28项多模态基准测试，包括图像基准测试[MMDU](https:\u002F\u002Fgithub.com\u002FLiuziyu77\u002FMMDU)、[MMStar](https:\u002F\u002Fgithub.com\u002FMMStar-Benchmark\u002FMMStar)、[RealWorldQA](https:\u002F\u002Fx.ai\u002Fblog\u002Fgrok-1.5v)、[Design2Code](https:\u002F\u002Fsalt-nlp.github.io\u002FDesign2Code\u002F)、[DocVQA](https:\u002F\u002Frrc.cvc.uab.es\u002F?ch=17)、[Infographics VQA](https:\u002F\u002Frrc.cvc.uab.es\u002F?ch=17)、[TextVQA](https:\u002F\u002Ftextvqa.org\u002F)、[ChartQA](https:\u002F\u002Fgithub.com\u002Fvis-nlp\u002FChartQA)、[OCRBench](https:\u002F\u002Fgithub.com\u002FYuliang-Liu\u002FMultimodalOCR)、[DeepFrom](https:\u002F\u002Fwandb.ai\u002Fstacey\u002Fdeepform_v1\u002Freports\u002FDeepForm-Understand-Structured-Documents-at-Scale--VmlldzoyODQ3Njg)、[WTQ](https:\u002F\u002Farxiv.org\u002Fabs\u002F1508.00305)、[VisualMRC](https:\u002F\u002Fgithub.com\u002Fnttmdlab-nlp\u002FVisualMRC)、[TabFact](https:\u002F\u002Ftabfact.github.io\u002F)、[MathVista](https:\u002F\u002Fmathvista.github.io\u002F)、[MMMU](https:\u002F\u002Fmmmu-benchmark.github.io\u002F)、[AI2D](https:\u002F\u002Fprior.allenai.org\u002Fprojects\u002Fdiagram-understanding)、[MME](https:\u002F\u002Fgithub.com\u002FBradyFU\u002FAwesome-Multimodal-Large-Language-Models\u002Ftree\u002FEvaluation)、[MMBench](https:\u002F\u002Fopencompass.org.cn\u002Fleaderboard-multimodal)、[MMBench-CN](https:\u002F\u002Fopencompass.org.cn\u002Fleaderboard-multimodal)、[SEED-Bench](https:\u002F\u002Fhuggingface.co\u002Fspaces\u002FAILab-CVC\u002FSEED-Bench_Leaderboard)、[HallusionBench](https:\u002F\u002Fgithub.com\u002Ftianyi-lab\u002FHallusionBench)、[MM-Vet](https:\u002F\u002Fgithub.com\u002Fyuweihao\u002FMM-Vet)，以及视频基准测试[MVBench](https:\u002F\u002Fgithub.com\u002FOpenGVLab\u002FAsk-Anything)、[MLVU](https:\u002F\u002Fgithub.com\u002FFlagOpen\u002FFlagEmbedding\u002Ftree\u002Fmaster\u002FMLVU\u002Fevaluation)、[Video-MME](https:\u002F\u002Fgithub.com\u002FBradyFU\u002FVideo-MME)、[MMBench-Video](https:\u002F\u002Fgithub.com\u002Fopen-compass\u002FVLMEvalKit)、[TempCompass](https:\u002F\u002Fgithub.com\u002Fllyx97\u002FTempCompass)。\n\n详细评估内容请参阅此处的[评估详情](.\u002Fevaluation\u002FREADME.md)。\n\n### 与闭源API及视频和结构化高分辨率图像领域的先前SOTA方法的对比。\n|            | MVBench    | MLVU        | MME-Video | MMBench-Video | TempCompass | DocVQA      | ChartVQA    | InfoVQA     | TextVQA     | OCRBench | DeepForm   | WTQ        | VisualMRC  | TabFact     |\n|------------|------------|-------------|-----------|---------------|-------------|-------------|-------------|-------------|-------------|----------|------------|------------|------------|-------------|\n|            | VideoChat2 | InternVL1.5 | LIVA      | InternVL1.5   | Qwen-VL     | InternVL1.5 | InternVL1.5 | InternVL1.5 | InternVL1.5 | GLM-4v   | DocOwl 1.5 | DocOwl 1.5 | DocOwl 1.5 | DocOwl 1.5  |\n|            | 7B         | 26B         | 34B       | 26B           | 7B          | 26B         | 26B         | 26B         | 26B         | 9B       | 8B         | 8B         | 8B         | 8B          |\n|        | 60.4       | 50.4        | 59.0      | 42.0          | 52.9        | 90.9        | 83.8        | 72.5        | 80.6        | 77.6     | 68.8       | 40.6       | 246.4      | 80.2        |\n|            |            |             |           |               |             |             |             |             |             |          |            |            |            |             |\n| GPT-4V     | 43.5       | 49.2        | 59.9      | 56.0          | ---         | 88.4        
| 78.5        | 75.1        | 78.0        | 51.6     | ---        | ---        | ---        | ---         |\n| Gemini-Pro | ---        | ---         | 75.0      | 49.3          | 67.1        | 88.1        | 74.1        | 75.2        | 74.6        | 68.0     | ---        | ---        | ---        | ---         |\n| Ours       | 69.1       | 58.8        | 55.8      | 46.9          |             | 90.9        | 82.2        | 69.9        | 78.2        | 69.0     | 71.2       | 53.6       | 307.5      | 85.2        |\n\n\n\n\n\n\n### 与闭源API及多图像对话和通用视觉问答基准测试中的先前SOTA方法的对比。\n\n|            | MVBench    | MLVU        | MME-Video | MMBench-Video | TempCompass | DocVQA      | ChartVQA    | InfoVQA     | TextVQA     | OCRBench | DeepForm   | WTQ        | VisualMRC  | TabFact     |\n|------------|------------|-------------|-----------|---------------|-------------|-------------|-------------|-------------|-------------|----------|------------|------------|------------|-------------|\n|            | VideoChat2 | InternVL1.5 | LIVA      | InternVL1.5   | Qwen-VL     | InternVL1.5 | InternVL1.5 | InternVL1.5 | InternVL1.5 | GLM-4v   | DocOwl 1.5 | DocOwl 1.5 | DocOwl 1.5 | DocOwl 1.5  |\n|            | 7B         | 26B         | 34B       | 26B           | 7B          | 26B         | 26B         | 26B         | 26B         | 9B       | 8B         | 8B         | 8B         | 8B          |\n|            | 60.4       | 50.4        | 59.0      | 42.0          | 58.4        | 90.9        | 83.8        | 72.5        | 80.6        | 77.6     | 68.8       | 40.6       | 246.4      | 80.2        |\n|            |            |             |           |               |             |             |             |             |             |          |            |            |            |             |\n| GPT-4V     | 43.5       | 49.2        | 59.9      | 56.0          | ---         | 88.4        | 78.5        | 75.1        | 78.0        | 51.6     | ---        | ---        | ---        | ---         |\n| Gemini-Pro | ---        | ---         | 75.0      | 49.3          | 70.6        | 88.1        | 74.1        | 75.2        | 74.6        | 68.0     | ---        | ---        | ---        | ---         |\n| Ours       | 69.1       | 58.8        | 55.8      | 46.9          | 67.1        | 90.9        | 82.2        | 69.9        | 78.2        | 69.0     | 71.2       | 53.6       | 307.5      | 85.2        |\n\n\n## 需求\n\n- Python 3.8及以上版本\n- PyTorch 1.12及以上版本，推荐使用2.0及以上版本\n- 推荐使用CUDA 11.4及以上版本（适用于GPU用户）\n- 使用InternLM-XComposer2.5处理高分辨率图像时，需要安装[flash-attention2](https:\u002F\u002Fgithub.com\u002FDao-AILab\u002Fflash-attention)。\n  \u003Cbr>\n\n## 安装\n\n在运行代码之前，请确保已搭建好环境并安装了所需的软件包。请确认满足上述要求，然后安装依赖库。\n请参阅[安装说明](docs\u002Finstall.md)\n\n## 快速入门\n\n我们提供了一个简单的示例，展示如何使用InternLM-XComposer-2.5结合🤗 Transformers。\n\n\u003Cdetails>\n  \u003Csummary>\n    \u003Cb>视频理解\u003C\u002Fb>\n  \u003C\u002Fsummary>\n\n```python\nimport torch\nfrom transformers import AutoModel, AutoTokenizer\n\ntorch.set_grad_enabled(False)\n\n# 初始化模型和分词器\nmodel = AutoModel.from_pretrained('internlm\u002Finternlm-xcomposer2d5-7b', torch_dtype=torch.bfloat16, trust_remote_code=True).cuda().eval().half()\ntokenizer = AutoTokenizer.from_pretrained('internlm\u002Finternlm-xcomposer2d5-7b', trust_remote_code=True)\nmodel.tokenizer = tokenizer\n\nquery = '这里有一些视频的画面。请详细描述这段视频'\nimage = ['.\u002Fexamples\u002Fliuxiang.mp4',]\nwith torch.autocast(device_type='cuda', dtype=torch.float16):\n    response, his = model.chat(tokenizer, query, image, do_sample=False, 
num_beams=3, use_meta=True)\nprint(response)\n#视频一开始出现一位身穿红黄相间、胸前印有“CHINA”字样的运动员，正在为比赛做准备。\n#这位运动员就是刘翔，他蹲伏着，神情专注而蓄势待发，背景中可以看到奥林匹克五环标志，表明这是一场奥运会的赛事。随着比赛开始，运动员们飞奔向障碍栏，步伐矫健有力，展现出他们的决心。\n#镜头捕捉到了比赛的激烈场面，屏幕上实时显示着每位运动员的号码和时间，让观众随时了解他们的表现。比赛进入高潮阶段，刘翔依然身着红黄色制服，最终冲过终点线，双臂高举庆祝胜利。\n#看台上观众爆发出热烈的欢呼声，他们为刘翔的成功而激动不已。视频最后以刘翔的特写镜头结束，他仍沉浸在胜利的喜悦中，而奥林匹克五环则继续象征着这场赛事的重要意义。\n\nquery = '告诉我刘翔的运动员编号'\nimage = ['.\u002Fexamples\u002Fliuxiang.mp4',]\nwith torch.autocast(device_type='cuda', dtype=torch.float16):\n    response, _ = model.chat(tokenizer, query，image，历史=his，do_sample=False，num_beams=3，use_meta=True)\nprint(response)\n#根据视频中刘翔制服上的标识，他的运动员编号是“1363”。\n```\n\n\u003C\u002Fdetails>\n\n\u003Cdetails>\n  \u003Csummary>\n    \u003Cb>多图像多轮对话\u003C\u002Fb>\n  \u003C\u002Fsummary>\n\n```python\nimport torch\nfrom transformers import AutoModel, AutoTokenizer\n\ntorch.set_grad_enabled(False)\n\n# 初始化模型和分词器\nmodel = AutoModel.from_pretrained('internlm\u002Finternlm-xcomposer2d5-7b', torch_dtype=torch.bfloat16, trust_remote_code=True).cuda().eval().half()\ntokenizer = AutoTokenizer.from_pretrained('internlm\u002Finternlm-xcomposer2d5-7b', trust_remote_code=True)\nmodel.tokenizer = tokenizer\n\nquery = 'Image1 \u003CImageHere>; Image2 \u003CImageHere>; Image3 \u003CImageHere>; 我想从这三辆车中选购一辆，请逐一分析它们的优缺点'\nimage = ['.\u002Fexamples\u002Fcars1.jpg',\n        '.\u002Fexamples\u002Fcars2.jpg',\n        '.\u002Fexamples\u002Fcars3.jpg',]\nwith torch.autocast(device_type='cuda', dtype=torch.float16):\n    response, his = model.chat(tokenizer, query, image, do_sample=False, num_beams=3, use_meta=True)\nprint(response)\n#要分析每辆车的优缺点，我们需要考虑品牌声誉、性能、设计、成本和维护等因素。\n#1. 奔驰：-优点：奔驰以豪华和可靠性著称，提供高水平的舒适性、先进技术和卓越的工艺。图片中的车辆似乎是一辆SUV，既适合城市驾驶，也适合越野路况。-缺点：通常来说，奔驰车型相比其他品牌更为昂贵，并且由于其豪华定位，可能需要更频繁的维护。\n#2. 布加迪：-优点：布加迪以生产世界上速度最快、动力最强的汽车而闻名。图片中的车辆是一辆跑车，很可能凭借其高性能发动机和先进的空气动力学，带来令人振奋的驾驶体验。-缺点：布加迪汽车价格极其高昂，普通消费者难以负担。此外，这类车辆需要专业的知识进行维护，而且由于其高性能特性，可能并不适合日常驾驶。\n#3. 
宝马：-优点：宝马是一家备受尊敬的制造商，以其创新的工程技术和设计而闻名。图片中的车辆似乎是一辆电动汽车（EV），具有低排放、低噪音污染以及潜在的较低运营成本等优势。-缺点：像图片中这样的电动汽车，其续航里程可能比传统燃油车短，限制了长途旅行的适用性。\n#另外，购买电动汽车的初始成本也可能高于传统汽车。综上所述，每一辆车都有其独特的优缺点。奔驰提供豪华与多功能性，布加迪则以高性能和独特性见长，而宝马代表了电动车技术的创新。最终的选择取决于买家的优先级，比如舒适性、性能、环境影响以及预算。\n\nquery = 'Image4 \u003CImageHere>; 那么图4中的这辆车怎么样呢'\nimage.append('.\u002Fexamples\u002Fcars4.jpg')        \nwith torch.autocast(device_type='cuda', dtype=torch.float16):\n    response, _ = model.chat(tokenizer, query, image, do_sample=False, num_beams=3, history= his, use_meta=True)\nprint(response)\n#图4中的这辆车是一辆红色跑车，看起来像是法拉利。法拉利是著名的意大利制造商，以生产世界上最标志性和高性能的跑车而闻名。-优点：法拉利汽车象征着速度、豪华和卓越的工程技术。\n#图片中的车辆很可能凭借其强大的引擎、先进的空气动力学和高品质的工艺，带来令人兴奋的驾驶体验。红色外观也为车辆增添了美感，在道路上格外引人注目。-缺点：法拉利汽车价格极其昂贵，普通消费者难以承受。\n#此外，这类车辆需要专业的知识进行维护，而且由于其高性能特性，可能并不适合日常驾驶。总之，图4中的法拉利代表了汽车工程和设计的巅峰，提供了无与伦比的性能和豪华感。\n#然而，其高昂的成本和特殊的维护要求使其在日常使用方面不如图片中的其他车辆实用。\n```\n\n\n\u003C\u002Fdetails>\n\n\u003Cdetails>\n  \u003Csummary>\n    \u003Cb>高分辨率图像理解\u003C\u002Fb>\n  \u003C\u002Fsummary>\n\n```python\nimport torch\nfrom transformers import AutoModel, AutoTokenizer\n\ntorch.set_grad_enabled(False)\n\n# 初始化模型和分词器\nmodel = AutoModel.from_pretrained('internlm\u002Finternlm-xcomposer2d5-7b', torch_dtype=torch.bfloat16, trust_remote_code=True).cuda().eval().half()\ntokenizer = AutoTokenizer.from_pretrained('internlm\u002Finternlm-xcomposer2d5-7b', trust_remote_code=True)\nmodel.tokenizer = tokenizer\n\nquery = '请详细分析给定的图片'\nimage = ['.\u002Fexamples\u002Fdubai.png']\nwith torch.autocast(device_type='cuda', dtype=torch.float16):\n    response, _ = model.chat(tokenizer, query, image, do_sample=False, num_beams=3, use_meta=True)\nprint(response)\n#这张信息图以视觉方式展示了关于迪拜的各种事实。它首先提到朱美拉棕榈岛，指出它是从太空可见的最大人工岛。随后介绍了历史背景，提到1968年时迪拜只有少数几辆汽车，而现在这一数字已超过150万辆。\n#信息图还指出，迪拜拥有世界上最大的黄金链，全球十大最高酒店中有7家位于迪拜。此外，犯罪率接近于零，所得税率为零，全球20%的起重机都在迪拜运行。信息图还提到，迪拜人口中17%为阿联酋本地人，83%为移民。\n#迪拜购物中心被强调为世界上最大的购物中心，拥有1200家商店。信息图还指出，迪拜没有标准的地址系统，没有邮政编码、区号或邮政服务。此外，它提到哈利法塔非常高，以至于顶层住户在斋月期间需要等待更长时间才能开斋。\n#信息图还包括关于迪拜气候控制城市的介绍，其中阿拉伯塔酒店的皇家套房每晚收费24,000美元。最后，信息图提到，四位上榜亿万富翁的净资产大致等于洪都拉斯的GDP。\n\n```\n\n\u003C\u002Fdetails>\n\n\n\u003Cdetails>\n  \u003Csummary>\n    \u003Cb>指令转网页\u003C\u002Fb>\n  \u003C\u002Fsummary>\n\n```python\nimport torch\nfrom transformers import AutoModel, AutoTokenizer\n\ntorch.set_grad_enabled(False)\n\n# 初始化模型和分词器\nmodel = AutoModel.from_pretrained('internlm\u002Finternlm-xcomposer2d5-7b', torch_dtype=torch.bfloat16, trust_remote_code=True).cuda().eval().half()\ntokenizer = AutoTokenizer.from_pretrained('internlm\u002Finternlm-xcomposer2d5-7b', trust_remote_code=True)\nmodel.tokenizer = tokenizer\n\nquery = '一个科研机构的网站。名称是上海人工智能实验室。顶部导航栏为蓝色。左下角有一张图片，显示实验室的标志。右侧下方有一段文字，描述了实验室的使命。还有几张图片展示了上海人工智能实验室的研究项目。'\nwith torch.autocast(device_type='cuda', dtype=torch.float16):\n    response = model.write_webpage(query, seed=202, task='指令感知型网页生成', repetition_penalty=3.0)\nprint(response)\n# 查看 Instruction-aware Webpage Generation.html \n```\n请在此处查看 [指令感知型网页生成](.\u002Fexamples\u002FInstruction-aware_Webpage_Generation.html) 的结果。\n\u003C\u002Fdetails>\n\n\u003Cdetails>\n  \u003Csummary>\n    \u003Cb>简历转网页\u003C\u002Fb>\n  \u003C\u002Fsummary>\n\n```python\nimport torch\nfrom transformers import AutoModel, AutoTokenizer\n\ntorch.set_grad_enabled(False)\n\n# 初始化模型和分词器\nmodel = AutoModel.from_pretrained('internlm\u002Finternlm-xcomposer2d5-7b', torch_dtype=torch.bfloat16, trust_remote_code=True).cuda().eval().half()\ntokenizer = AutoTokenizer.from_pretrained('internlm\u002Finternlm-xcomposer2d5-7b', trust_remote_code=True)\nmodel.tokenizer = tokenizer\n\n## 输入应为 
Markdown 格式的简历\nquery = '.\u002Fexamples\u002Fresume.md'\nwith torch.autocast(device_type='cuda', dtype=torch.float16):\n    response = model.resume_2_webpage(query, seed=202, repetition_penalty=3.0)\nprint(response)\n```\n请在此处查看 [简历转网页](.\u002Fexamples\u002FResume-to-Personal_Page.html) 的结果。\n\n\n\u003C\u002Fdetails>\n\n\n\u003Cdetails>\n  \u003Csummary>\n    \u003Cb>截图转网页\u003C\u002Fb>\n  \u003C\u002Fsummary>\n\n```python\nimport torch\nfrom transformers import AutoModel, AutoTokenizer\n\ntorch.set_grad_enabled(False)\n\n# 初始化模型和分词器\nmodel = AutoModel.from_pretrained('internlm\u002Finternlm-xcomposer2d5-7b', torch_dtype=torch.bfloat16, trust_remote_code=True).cuda().eval().half()\ntokenizer = AutoTokenizer.from_pretrained('internlm\u002Finternlm-xcomposer2d5-7b', trust_remote_code=True)\nmodel.tokenizer = tokenizer\n\nquery = '使用 Tailwind CSS 生成这张网页图片的 HTML 代码。'\nimage = ['.\u002Fexamples\u002Fscreenshot.jpg']\nwith torch.autocast(device_type='cuda', dtype=torch.float16):\n    response = model.screen_2_webpage(query, image, seed=202, repetition_penalty=3.0)\nprint(response)\n```\n请在此处查看 [截图转网页](.\u002Fexamples\u002FScreenshot-to-Webpage.html) 的结果。\n\n\u003C\u002Fdetails>\n\n\n\n\u003Cdetails>\n  \u003Csummary>\n    \u003Cb>撰写文章\u003C\u002Fb>\n  \u003C\u002Fsummary>\n\n```python\nimport torch\nfrom transformers import AutoModel, AutoTokenizer\n\ntorch.set_grad_enabled(False)\n\n# 初始化模型和分词器\nmodel = AutoModel.from_pretrained('internlm\u002Finternlm-xcomposer2d5-7b', torch_dtype=torch.bfloat16, trust_remote_code=True).cuda().eval().half()\ntokenizer = AutoTokenizer.from_pretrained('internlm\u002Finternlm-xcomposer2d5-7b', trust_remote_code=True)\nmodel.tokenizer = tokenizer\n\nquery = '阅读下面的材料，根据要求写作。 电影《长安三万里》的出现让人感慨，影片并未将重点全落在大唐风华上，也展现了恢弘气象的阴暗面，即旧门阀的资源垄断、朝政的日益衰败与青年才俊的壮志难酬。高适仕进无门，只能回乡>沉潜修行。李白虽得玉真公主举荐，擢入翰林，但他只是成为唐玄宗的御用文人，不能真正实现有益于朝政的志意。然而，片中高潮部分《将进酒》一节，人至中年、挂着肚腩的李白引众人乘仙鹤上天，一路从水面、瀑布飞升至银河进入仙>宫，李白狂奔着与仙人们碰杯，最后大家纵身飞向漩涡般的九重天。肉身的微贱、世路的“天生我材必有用，坎坷，拘不住精神的高蹈。“天生我材必有用，千金散尽还复来。” 古往今来，身处闲顿、遭受挫折、被病痛折磨，很多人都曾经历>了人生的“失意”，却反而成就了他们“诗意”的人生。对正在追求人生价值的当代青年来说，如何对待人生中的缺憾和困顿?诗意人生中又有怎样的自我坚守和自我认同?请结合“失意”与“诗意”这两个关键词写一篇文章。 要求:选准角度，确定>立意，明确文体，自拟标题;不要套作，不得抄袭;不得泄露个人信息;不少于 800 字。'\nwith torch.autocast(device_type='cuda', dtype=torch.float16):\n    response = model.write_artical(query, seed=8192)\nprint(response)\n#诗意人生，贵在坚守\n#《菜根谭》有云:“闲时要有吃紧的心思,忙里要留吃闲工夫.”人生在世,总有失意之时,当面对缺憾和困顿,诗意地生活着才能为人生增添一抹亮色.何谓诗意地生活? 所谓诗意地生活，便是在于坚守本心、直面遗憾、超越自我,在失意中寻找人生价值.\n#诗意地生活,需坚守本心,淡然处之.\n#陶渊明曾执意辞去彭泽县令,归隐田园,“采菊东篱下,悠然见南山”,在山水间寄情自娱；王维面对仕途失意,终日沉醉于诗酒之中,“兴来每独往,胜事空自知”,在诗酒中闲逸自如；李白仕途不顺,被赐金放还,但他依旧豪气干云,“天生我才必有用,千金散尽还复来”,在失意中坦然豁达.坚守本心，便能在遭遇失意之时守住自己的精神家园，让生活充满诗意.反之，若不能坚守本心，而只是一味迎合世俗以求得升迁，那纵使身居高位，亦会丧失生活的乐趣.\n#诗意地生活,需直面遗憾,超越自我.\n#“西塞山前白鹭飞,桃花流水鳜鱼肥.青箬笠,绿柳枝,半斤酒,一纶丝.五湖四海皆如此,何妨到此处归.”白居易的《渔歌子》写出了多少人的愿望:没有权势纷扰,没有贫困凄凉,只有青山绿水、白鹭鸥鸟作伴,如此自由自在的生活令人神往.然而，白居易却并没有因此真的归隐山林，而是直面人生，超越自我，写下了一首首诗意而富有现实关怀的作品.如果白居易只顾逃避人生，那又怎会拥有“大弦嘈嘈如急雨，小弦切切如私语”的绝美比喻呢？如果白居易只顾归隐山林，那又怎会写出“此曲只应天上有，人间哪得配白居易”这样的诗句呢？\n#诗意地生活,需直面遗憾，坚守本心.\n#李文波患有渐冻症，医生说他活不过五年，但他没有因此放弃对音乐的热爱，而是与病魔作斗争，演奏出美妙的乐曲；孙家林自幼患有脑瘫，但他不甘于命运的捉弄，终成全国最美教师；史铁生饱受疾病折磨，但他仍能发出“我常常在我的心头清点，我有什么？”的叩问，并由此走上文学道路，为后世留下丰厚的文化遗产.这些人没有逃避，而是选择直面人生的缺憾，在坚守本心的同时超越自我，最终实现了自己的价值.\n#诗意地生活，是于失意中坚守本心，于缺憾中超越自我.当面对人生的缺憾与挫折，坚守本心、超越自我的同时，也必将书写属于自己的辉煌篇章.\n#愿你我都能诗意地生活着！\n\nquery = '请根据标题撰写一篇博客：法式糕点：甜蜜的享受'\nwith torch.autocast(device_type='cuda', dtype=torch.float16):\n    response = model.write_artical(query, seed=8192)\nprint(response)\n#法式糕点：甜蜜的享受\n#法国人以其对糕点的热爱而闻名，这种热爱代代相传。当人们访问法国时，总能品尝到各种各样的烘焙食品，从精致的马卡龙到浓郁奢华的巧克力慕斯。尽管法国有许多美味的糕点，但有五种特别突出，堪称最具代表性的。每一种糕点都有其独特的风味和魅力。\n#1. 
牛角包\n#法国最著名的糕点之一就是牛角包。它是一种黄油味十足、层次分明的酥皮点心，最好趁新鲜时享用。制作过程中，面团会反复折叠并涂抹黄油，从而形成标志性的层层酥皮。牛角包通常作为早餐或早午餐食用，搭配咖啡或热巧克力更是绝佳。\n#2. 马卡龙\n#马卡龙是一种小巧精致的法式甜点，由杏仁粉、糖粉和蛋白制成。两片马卡龙之间夹着甘纳许或果酱馅料。它们颜色缤纷、口味多样，无论是休闲零食还是高档甜点，都是不错的选择。\n#3. 玛德琳蛋糕\n#玛德琳蛋糕是一种小巧的贝壳形蛋糕，质地轻盈柔软，类似海绵蛋糕。它常带有柠檬或橙子的清香，有时还会蘸上巧克力。玛德琳蛋糕非常适合下午茶或咖啡时光享用。\n#4. 法式闪电泡芙\n#法式闪电泡芙是一种细长的奶油泡芙，外层淋上巧克力酱。它既甜美又满足，是法国的经典美食之一。无论在哪家面包店，你都能找到这种美味的点心，尤其适合搭配一杯热巧克力。\n#5. 塔腾苹果派\n#塔腾苹果派以其焦糖化的苹果和酥脆的派皮而闻名。这款甜点的名字来源于19世纪末发明它的塔腾姐妹。塔腾苹果派最适合温热时享用，搭配一勺香草冰淇淋更添风味。\n#这些糕点只是法国众多美味佳肴中的一部分。无论你是经验丰富的旅行者还是初次到访的游客，品尝法式糕点都是一项不容错过的体验。所以，不妨犒劳一下自己吧——你值得拥有这份甜蜜的享受！\n```\n\n\u003C\u002Fdetails>\n\n\n## 多GPU推理\n\n如果你有多块GPU，但每块GPU的显存不足以容纳整个模型，可以将模型拆分到多块GPU上进行推理。首先，使用命令 `pip install accelerate` 安装 `accelerate` 库，然后执行以下脚本进行对话：\n\n```\n# 使用2块GPU进行对话\npython example_code\u002Fexample_chat.py --num_gpus 2\n```\n\n## LMDeploy加速推理\n\n如果需要优化 InternLM-XComposer2d5 模型的推理性能，推荐使用 [LMDeploy](https:\u002F\u002Fgithub.com\u002FInternLM\u002Flmdeploy\u002Fblob\u002Fmain\u002Fdocs\u002Fen\u002Fmulti_modal\u002Fxcomposer2d5.md)。\n\n在接下来的小节中，我们将以 [internlm-xcomposer2d5-7b](https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer2d5-7b) 模型为例，介绍 LMDeploy 的使用方法。\n\n首先，请通过 `pip install lmdeploy` 安装 LMDeploy 的 PyPI 包。默认情况下，该库依赖于 CUDA 12.x。如果你使用的是 CUDA 11.x 环境，请参考 [安装指南](https:\u002F\u002Flmdeploy.readthedocs.io\u002Fen\u002Flatest\u002Fget_started.html#installation)。\n\n### 离线推理流程\n\n```python\nfrom lmdeploy import pipeline\nfrom lmdeploy.vl import load_image\npipe = pipeline('internlm\u002Finternlm-xcomposer2d5-7b')\nimage = load_image('examples\u002Fdubai.png')\nresponse = pipe(('描述这张图片', image))\nprint(response.text)\n```\n\n关于 VLM 推理流程的更多内容，包括多图像推理和多轮对话等，请参阅 [这篇文档](https:\u002F\u002Fgithub.com\u002FInternLM\u002Flmdeploy\u002Fblob\u002Fmain\u002Fdocs\u002Fen\u002Fmulti_modal\u002Fxcomposer2d5.md)。\n\n## 4位量化模型\n我们通过 LMDeploy 提供 4位量化模型，以降低显存占用。有关显存占用的对比信息，请参阅 [这里](example_code\u002F4bit\u002FREADME.md)。\n\n```python\nfrom lmdeploy import TurbomindEngineConfig, pipeline\nfrom lmdeploy.vl import load_image\nengine_config = TurbomindEngineConfig(model_format='awq')\npipe = pipeline('internlm\u002Finternlm-xcomposer2d5-7b-4bit', backend_config=engine_config)\nimage = load_image('examples\u002Fdubai.png')\nresponse = pipe(('描述这张图片', image))\nprint(response.text)\n```\n\n## 微调\n1. 请参考我们的 [微调脚本](finetune\u002FREADME.md)。\n2. 
推理和微调支持来自 [ModelScope Swift](https:\u002F\u002Fgithub.com\u002Fmodelscope\u002Fswift\u002Fblob\u002Fmain\u002Fdocs\u002Fsource_en\u002FMulti-Modal\u002Finternlm-xcomposer2-best-practice.md)。\n\n## Gradio 部署\n我们提供了代码，供用户构建 Web UI 示例。请确保使用 `gradio==4.13.0`。\n\n运行以下命令可启动聊天或创作功能：\n\n```\n# 多模态聊天\npython gradio_demo\u002Fgradio_demo_chat.py\n\n# 自由文本-图像创作\npython gradio_demo\u002Fgradio_demo_composition.py\n```\n\nUI 示例的使用说明请参见 [这里](demo_asset\u002Fdemo.md)。如果你想更改模型的默认路径，可以使用 `--code_path=new_folder` 参数。\n\u003Cbr>\n\n## 引用\n\n如果您在研究中使用了我们的模型、代码或论文，请考虑给个 ⭐ 和引用 📝，谢谢 :)\n\n```BibTeX\n@inproceedings{internlmxcomposer2_5_reward,\n      title={InternLM-XComposer2.5-Reward: 一个简单而高效的多模态奖励模型}, \n      author={Zang Yuhang, Dong Xiaoyi, Zhang Pan, Cao Yuhang, Liu Ziyu, Ding Shengyuan, Wu Shenxi, Ma Yubo, Duan Haodong, Zhang Wenwei, Chen Kai, Lin Dahua, Wang Jiaqi},\n      booktitle={Findings of ACL},\n      year={2025}\n}\n```\n\n```BibTeX\n@article{internlmxcomposer2_5_OL,\n      title={InternLM-XComposer2.5-OmniLive: 一个全面的多模态系统，用于长期流式视频和音频交互}, \n      author={Zhang Pan, Dong Xiaoyi, Cao Yuhang, Zang Yuhang, Qian Rui, Wei Xilin, Chen Lin, Li Yifei, Niu Junbo, Ding Shuangrui, Guo Qipeng, Duan Haodong, Chen Xin, Lv Han, Nie Zheng, Zhang Min, Wang Bin, Zhang Wenwei, Zhang Xinyue, Ge Jiaye, Li Wei, Li Jingwen, Tu Zhongying, He Conghui, Zhang Xingcheng, Chen Kai, Qiao Yu, Lin Dahua, Wang Jiaqi},\n      journal={arXiv预印本 arXiv:2412.09596},\n      year={2024}\n}\n```\n\n```BibTeX\n@article{internlmxcomposer2_5,\n      title={InternLM-XComposer-2.5: 一个多功能大型视觉语言模型，支持长上下文输入和输出}, \n      author={Zhang Pan, Dong Xiaoyi, Zang Yuhang, Cao Yuhang, Qian Rui, Chen Lin, Guo Qipeng, Duan Haodong, Wang Bin, Ouyang Linke, Zhang Songyang, Zhang Wenwei, Li Yining, Gao Yang, Sun Peng, Zhang Xinyue, Li Wei, Li Jingwen, Wang Wenhai, Yan Hang, He Conghui, Zhang Xingcheng, Chen Kai, Dai Jifeng, Qiao Yu, Lin Dahua, Wang Jiaqi},\n      journal={arXiv预印本 arXiv:2407.03320},\n      year={2024}\n}\n```\n\n```BibTeX\n@article{internlmxcomposer2_4khd,\n      title={InternLM-XComposer2-4KHD: 一个开创性的大型视觉语言模型，可处理从336像素到4K高清分辨率的图像}, \n      author={Dong Xiaoyi, Zhang Pan, Zang Yuhang, Cao Yuhang, Wang Bin, Ouyang Linke, Zhang Songyang, Duan Haodong, Zhang Wenwei, Li Yining, Yan Hang, Gao Yang, Chen Zhe, Zhang Xinyue, Li Wei, Li Jingwen, Wang Wenhai, Chen Kai, He Conghui, Zhang Xingcheng, Dai Jifeng, Qiao Yu, Lin Dahua, Wang Jiaqi},\n      journal={arXiv预印本 arXiv:2404.06512},\n      year={2024}\n}\n```\n\n```BibTeX\n@article{internlmxcomposer2,\n      title={InternLM-XComposer2: 在视觉语言大模型中掌握自由格式的文本-图像组合与理解}, \n      author={Dong Xiaoyi, Zhang Pan, Zang Yuhang, Cao Yuhang, Wang Bin, Ouyang Linke, Wei Xilin, Zhang Songyang, Duan Haodong, Cao Maosong, Zhang Wenwei, Li Yining, Yan Hang, Gao Yang, Zhang Xinyue, Li Wei, Li Jingwen, Chen Kai, He Conghui, Zhang Xingcheng, Qiao Yu, Lin Dahua, Wang Jiaqi},\n      journal={arXiv预印本 arXiv:2401.16420},\n      year={2024}\n}\n```\n\n```BibTeX\n@article{internlmxcomposer,\n      title={InternLM-XComposer: 一个用于高级文本-图像理解和组合的视觉语言大模型}, \n      author={Zhang Pan, Dong Xiaoyi, Wang Bin, Cao Yuhang, Xu Chao, Ouyang Linke, Zhao Zhiyuan, Ding Shuangrui, Zhang Songyang, Duan Haodong, Zhang Wenwei, Yan Hang, Zhang Xinyue, Li Wei, Li Jingwen, Chen Kai, He Conghui, Zhang Xingcheng, Qiao Yu, Lin Dahua, Wang Jiaqi},\n      journal={arXiv预印本 arXiv:2309.15112},\n      year={2023}\n}\n```\n\n\u003Cbr>\n\n## 许可与联系我们\n\n代码采用 Apache-2.0 许可证，而模型权重对学术研究完全开放，并且也允许**免费**的商业用途。如需申请商业许可，请填写 
[申请表（英文）](https:\u002F\u002Fwj.qq.com\u002Fs2\u002F12727483\u002F5dba\u002F)\u002F[申请表（中文）](https:\u002F\u002Fwj.qq.com\u002Fs2\u002F12725412\u002Ff7c1\u002F)。如有其他问题或合作意向，请联系 \u003Cinternlm@pjlab.org.cn>。","# InternLM-XComposer-2.5 快速上手指南\n\nInternLM-XComposer-2.5 是一款支持长上下文输入输出的多功能大型视觉语言模型（LVLM）。它仅需 7B 参数量即可达到 GPT-4V 级别的能力，支持超高分辨率图像理解、细粒度视频理解、多轮多图对话以及网页\u002F文章创作。\n\n## 1. 环境准备\n\n在开始之前，请确保您的系统满足以下要求：\n\n*   **操作系统**: Linux (推荐 Ubuntu 20.04+)\n*   **Python**: 3.8 或更高版本\n*   **GPU**: 推荐 NVIDIA GPU，显存至少 16GB (运行 7B 全精度模型)；若使用 4-bit 量化版本，8GB 显存即可。\n*   **CUDA**: 11.8 或更高版本\n\n### 前置依赖安装\n\n建议创建独立的虚拟环境：\n\n```bash\nconda create -n xcomposer python=3.10 -y\nconda activate xcomposer\n```\n\n安装 PyTorch 及其他基础依赖（请以官方推荐的 torch 版本为准）：\n\n```bash\npip install torch torchvision torchaudio --index-url https:\u002F\u002Fdownload.pytorch.org\u002Fwhl\u002Fcu118\npip install transformers>=4.37.0 accelerate sentencepiece protobuf tiktoken einops\n```\n\n> **提示**：国内用户推荐使用清华或阿里镜像源加速安装：\n> `pip install -i https:\u002F\u002Fpypi.tuna.tsinghua.edu.cn\u002Fsimple \u003Cpackage_name>`\n\n## 2. 安装步骤\n\n您可以选择通过 Hugging Face 或 ModelScope（魔搭社区）获取模型。国内开发者强烈推荐使用 **ModelScope** 以获得更快的下载速度。\n\n### 方案 A：使用 ModelScope (推荐国内用户)\n\n首先安装 modelscope 库：\n\n```bash\npip install modelscope\n```\n\n### 方案 B：使用 Hugging Face\n\n确保已安装 `huggingface_hub`：\n\n```bash\npip install huggingface_hub\n```\n\n## 3. 基本使用\n\n以下示例展示如何使用 Python 加载模型并进行简单的图文对话。\n\n### 示例代码：单图问答\n\n```python\nimport torch\nfrom transformers import AutoModelForCausalLM, AutoTokenizer\nfrom PIL import Image\n\n# 1. 加载模型和分词器\n# 国内用户可使用 ModelScope 快照下载，或直接指定本地路径\n# model_path = \"Shanghai_AI_Laboratory\u002Finternlm-xcomposer2d5-7b\" # ModelScope ID\nmodel_path = \"internlm\u002Finternlm-xcomposer2d5-7b\" # HuggingFace ID\n\ntokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)\nmodel = AutoModelForCausalLM.from_pretrained(\n    model_path, \n    device_map=\"auto\", \n    trust_remote_code=True,\n    torch_dtype=torch.float16 # 根据显存情况可改为 torch.bfloat16\n)\nmodel.eval()\n\n# 2. 准备输入\nimage_path = \".\u002Fassets\u002Fdemo.jpg\" # 替换为您的图片路径\nimage = Image.open(image_path).convert('RGB')\n\n# 构造 Prompt，InternLM-XComposer 系列通常使用特定的格式\n# 格式：\u003C|im_start|>\u003C|system|>You are an AI assistant.\u003C|im_end|>\\n\u003C|user|>\u003Cimage>\\nQuestion\u003C|im_end|>\\n\u003C|assistant|>\nprompt = \"请详细描述这张图片中的内容。\"\n\n# 3. 
生成回复\nwith torch.no_grad():\n    response, history = model.chat(\n        tokenizer, \n        query=prompt, \n        image=image, \n        history=None,\n        do_sample=False,\n        max_new_tokens=1024\n    )\n\nprint(response)\n```\n\n### 进阶：使用 4-bit 量化版本 (低显存方案)\n\n如果您的显存有限，可以使用 4-bit 量化版本：\n\n```python\nfrom transformers import AutoModelForCausalLM, AutoTokenizer\n\nmodel_path = \"internlm\u002Finternlm-xcomposer2d5-7b-4bit\"\n\ntokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)\nmodel = AutoModelForCausalLM.from_pretrained(\n    model_path, \n    device_map=\"auto\", \n    trust_remote_code=True,\n    load_in_4bit=True,\n    bnb_4bit_compute_dtype=torch.float16\n)\nmodel.eval()\n\n# 后续调用方式与上述示例相同\n```\n\n### 推理加速 (可选)\n\n项目支持使用 **LMDeploy** 进行高性能推理和量化部署：\n\n```bash\npip install lmdeploy\n```\n\n使用命令行快速启动交互服务：\n\n```bash\nlmdeploy chat internlm\u002Finternlm-xcomposer2d5-7b --tp 1\n```\n\n> **注意**：具体 API 调用细节可能随版本更新微调，请参考官方仓库 `demo` 目录下的最新脚本以获取最完整的功能支持（如多轮对话、视频理解等）。","某远程医疗团队需要实时分析患者上传的长时间康复训练视频，并同步听取患者的口头描述以评估恢复进度。\n\n### 没有 InternLM-XComposer 时\n- **长视频理解断裂**：传统模型无法处理长达数分钟的连续视频流，只能截取片段分析，导致医生错过关键的动作连贯性细节。\n- **音画信息割裂**：系统无法将患者的语音自述（如“这里有点疼”）与视频中的特定动作帧精准对齐，需人工反复拖拽进度条核对。\n- **响应延迟高**：每次分析需先下载完整视频再离线处理，无法在直播或推流过程中提供即时反馈，延误干预时机。\n- **上下文记忆丢失**：模型难以记住视频早期的异常姿态，无法在视频结尾处结合全程表现给出综合评估报告。\n\n### 使用 InternLM-XComposer 后\n- **长程流式交互**：InternLM-XComposer2.5-OmniLive 支持长时流式视频输入，能完整理解整个康复过程的动作演变，不遗漏任何细节。\n- **音视频深度融合**：工具实时同步解析音频指令与视频画面，自动标记出患者喊疼时的具体动作帧，生成带时间戳的多模态病历。\n- **低延迟实时反馈**：在视频推流过程中即可进行增量计算，几乎零延迟地提示患者当前动作是否标准，实现“边练边改”。\n- **全周期记忆保持**：凭借强大的长上下文能力，InternLM-XComposer 能关联视频开头与结尾的状态变化，自动生成包含趋势分析的深度评估报告。\n\nInternLM-XComposer 通过突破长时流式音视频交互的瓶颈，将原本割裂、滞后的离线分析转变为实时、连贯的智能辅助诊疗体验。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FInternLM_InternLM-XComposer_0417f691.png","InternLM","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002FInternLM_bc4eb14c.png","",null,"internlm@pjlab.org.cn","intern_lm","https:\u002F\u002Fchat.intern-ai.org.cn\u002F","https:\u002F\u002Fgithub.com\u002FInternLM",[81,85,89,93,97,100,104,108],{"name":82,"color":83,"percentage":84},"Python","#3572A5",92.4,{"name":86,"color":87,"percentage":88},"Jupyter Notebook","#DA5B0B",2.7,{"name":90,"color":91,"percentage":92},"Shell","#89e051",2.2,{"name":94,"color":95,"percentage":96},"JavaScript","#f1e05a",1.2,{"name":98,"color":99,"percentage":96},"TypeScript","#3178c6",{"name":101,"color":102,"percentage":103},"Less","#1d365d",0.3,{"name":105,"color":106,"percentage":107},"Dockerfile","#384d54",0,{"name":109,"color":110,"percentage":107},"HTML","#e34c26",2922,176,"2026-04-14T16:25:59","Apache-2.0","未说明","需要 NVIDIA GPU。根据新闻更新，部署演示版本至少需要两张 RTX 4090（共约 48GB 显存）以支持多卡推理；提供 4-bit 量化版本以降低显存需求。具体 CUDA 版本未说明。",{"notes":118,"python":115,"dependencies":119},"该工具支持多 GPU 推理（新闻提及两张 4090 可运行演示）。官方提供了 4-bit 量化模型版本以减少显存占用。支持通过 LMDeploy 进行推理和量化，通过 ModelScope Swift 进行微调和推理。模型参数量为 7B，支持长达 96K 的上下文窗口。",[120,121,122,123],"transformers","torch","LMDeploy (支持 4-bit 量化)","ModelScope Swift (支持微调)",[14,13,125,35],"其他",[127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142],"chatgpt","visual-language-learning","multi-modality","foundation","gpt-4","instruction-tuning","mllm","multimodal","vision-language-model","language-model","large-language-model","large-vision-language-model","llm","vision-transformer","gpt","supervised-finetuning","2026-03-27T02:49:30.150509","2026-04-16T08:17:11.019327",[146,151,156,161,166,171,176],{"id":147,"question_zh":148,"answer_zh":149,"source_url":150},35562,"运行 
internlm-xcomposer-7b-4bit 量化模型失败，报错缺少 tokenizer 文件或版本不兼容怎么办？","请检查以下两点：1. 确认是否下载了完整的 tokenizer 相关文件（如 tokenizer_config.json、special_tokens_map.json）；2. 可能是 transformers 版本问题，官方测试通过的版本为 transformers==4.33.1。此外，若使用 auto_gptq 加载，需手动注册模型支持：\n```python\nimport auto_gptq\nfrom auto_gptq.modeling import BaseGPTQForCausalLM\nauto_gptq.modeling._base.SUPPORTED_MODELS = [\"InternLMXComposer\"]\n```","https:\u002F\u002Fgithub.com\u002FInternLM\u002FInternLM-XComposer\u002Fissues\u002F36",{"id":152,"question_zh":153,"answer_zh":154,"source_url":155},35563,"为什么 example_chat 接口在多次请求后速度变慢，而 WebUI 响应很快？","WebUI 采用流式（streaming）输出，模型生成一点就返回一点，感知延迟低；而 example_chat 是等模型完整生成后才一次性返回，因此等待时间更长。若需优化体验，可参考流式输出的实现方式（如查阅相关博客或源码）。","https:\u002F\u002Fgithub.com\u002FInternLM\u002FInternLM-XComposer\u002Fissues\u002F42",{"id":157,"question_zh":158,"answer_zh":159,"source_url":160},35564,"InternLM-XComposer2 是否支持多轮图文对话（每轮都包含图片和文字）？","原生代码对多轮图文交错输入支持有限，容易报\"Invalid \u003CImageHere> prompt format\"错误。建议使用最新发布的 IXC 2.5 模型（internlm-xcomposer2d5-7b），其官方示例已明确支持多轮图文对话。另外，使用 LMDeploy 框架可实现 interactive 对话，避免重复 prefill 历史输入，从而高效复用 KV Cache。","https:\u002F\u002Fgithub.com\u002FInternLM\u002FInternLM-XComposer\u002Fissues\u002F300",{"id":162,"question_zh":163,"answer_zh":164,"source_url":165},35565,"微调时如何支持单卡内 batch_size > 1 的训练？","早期版本因序列长度不一致导致无法拼接而报错。目前 IXC 2.5 已在微调代码中正式支持 batch_size > 1。请参考其实现方式：https:\u002F\u002Fhuggingface.co\u002Finternlm\u002Finternlm-xcomposer2d5-7b\u002Fblob\u002Fmain\u002Fmodeling_internlm_xcomposer2.py#L290。注意：此处的 batch_size 更接近“序列打包”（sequence packing）概念，而非传统意义上的样本批处理。","https:\u002F\u002Fgithub.com\u002FInternLM\u002FInternLM-XComposer\u002Fissues\u002F176",{"id":167,"question_zh":168,"answer_zh":169,"source_url":170},35566,"运行 finetune_lora.sh 时报错'Linear.forward() takes 2 positional arguments but 3 were given'如何解决？","该问题通常由 peft 库版本不兼容引起。请将 peft 升级或降级至 0.8.2 版本：\n```bash\npip install peft==0.8.2\n```\n更换版本后重新运行脚本即可解决。","https:\u002F\u002Fgithub.com\u002FInternLM\u002FInternLM-XComposer\u002Fissues\u002F175",{"id":172,"question_zh":173,"answer_zh":174,"source_url":175},35567,"模型推理速度特别慢，与 WebUI 表现不一致，可能是什么原因？","请确保安装环境和依赖版本正确。官方推荐安装文档见：https:\u002F\u002Fgithub.com\u002FInternLM\u002FInternLM-XComposer\u002Fblob\u002Fmain\u002Fdocs\u002Finstall_CN.md。特别注意 transformers 版本应为 4.33.1，其他版本可能导致性能下降或兼容性问题。","https:\u002F\u002Fgithub.com\u002FInternLM\u002FInternLM-XComposer\u002Fissues\u002F40",{"id":177,"question_zh":178,"answer_zh":179,"source_url":180},35568,"无法连接 Hugging Face，使用离线 CLIP-ViT 模型路径却被自动覆盖怎么办？","直接修改缓存目录下的 build_mlp.py 文件会被程序重新下载覆盖。建议通过环境变量或代码中显式指定本地模型路径，避免依赖默认下载机制。例如，在初始化 vision_tower 时传入本地绝对路径，并确保该路径下包含完整的模型文件结构。若问题持续，可考虑完全离线部署整个项目依赖。","https:\u002F\u002Fgithub.com\u002FInternLM\u002FInternLM-XComposer\u002Fissues\u002F295",[]]