[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-deepseek-ai--DeepSeek-V3":3,"tool-deepseek-ai--DeepSeek-V3":64},[4,19,28,36,44,52],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":18},10095,"AutoGPT","Significant-Gravitas\u002FAutoGPT","AutoGPT 是一个旨在让每个人都能轻松使用和构建 AI 的强大平台，核心功能是帮助用户创建、部署和管理能够自动执行复杂任务的连续型 AI 智能体。它解决了传统 AI 应用中需要频繁人工干预、难以自动化长流程工作的痛点，让用户只需设定目标，AI 即可自主规划步骤、调用工具并持续运行直至完成任务。\n\n无论是开发者、研究人员，还是希望提升工作效率的普通用户，都能从 AutoGPT 中受益。开发者可利用其低代码界面快速定制专属智能体；研究人员能基于开源架构探索多智能体协作机制；而非技术背景用户也可直接选用预置的智能体模板，立即投入实际工作场景。\n\nAutoGPT 的技术亮点在于其模块化“积木式”工作流设计——用户通过连接功能块即可构建复杂逻辑，每个块负责单一动作，灵活且易于调试。同时，平台支持本地自托管与云端部署两种模式，兼顾数据隐私与使用便捷性。配合完善的文档和一键安装脚本，即使是初次接触的用户也能在几分钟内启动自己的第一个 AI 智能体。AutoGPT 正致力于降低 AI 应用门槛，让人人都能成为 AI 的创造者与受益者。",183572,3,"2026-04-20T04:47:55",[13,14,15,16,17],"Agent","语言模型","插件","开发框架","图像","ready",{"id":20,"name":21,"github_repo":22,"description_zh":23,"stars":24,"difficulty_score":25,"last_commit_at":26,"category_tags":27,"status":18},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",161147,2,"2026-04-19T23:31:47",[16,13,14],{"id":29,"name":30,"github_repo":31,"description_zh":32,"stars":33,"difficulty_score":10,"last_commit_at":34,"category_tags":35,"status":18},4487,"LLMs-from-scratch","rasbt\u002FLLMs-from-scratch","LLMs-from-scratch 是一个基于 PyTorch 的开源教育项目，旨在引导用户从零开始一步步构建一个类似 ChatGPT 的大型语言模型（LLM）。它不仅是同名技术著作的官方代码库，更提供了一套完整的实践方案，涵盖模型开发、预训练及微调的全过程。\n\n该项目主要解决了大模型领域“黑盒化”的学习痛点。许多开发者虽能调用现成模型，却难以深入理解其内部架构与训练机制。通过亲手编写每一行核心代码，用户能够透彻掌握 Transformer 架构、注意力机制等关键原理，从而真正理解大模型是如何“思考”的。此外，项目还包含了加载大型预训练权重进行微调的代码，帮助用户将理论知识延伸至实际应用。\n\nLLMs-from-scratch 特别适合希望深入底层原理的 AI 开发者、研究人员以及计算机专业的学生。对于不满足于仅使用 API，而是渴望探究模型构建细节的技术人员而言，这是极佳的学习资源。其独特的技术亮点在于“循序渐进”的教学设计：将复杂的系统工程拆解为清晰的步骤，配合详细的图表与示例，让构建一个虽小但功能完备的大模型变得触手可及。无论你是想夯实理论基础，还是为未来研发更大规模的模型做准备",90106,"2026-04-06T11:19:32",[14,17,13,16],{"id":37,"name":38,"github_repo":39,"description_zh":40,"stars":41,"difficulty_score":25,"last_commit_at":42,"category_tags":43,"status":18},8553,"spec-kit","github\u002Fspec-kit","Spec Kit 是一款专为提升软件开发效率而设计的开源工具包，旨在帮助团队快速落地“规格驱动开发”（Spec-Driven Development）模式。传统开发中，需求文档往往与代码实现脱节，导致沟通成本高且结果不可控；而 Spec Kit 通过将规格说明书转化为可执行的指令，让 AI 直接依据明确的业务场景生成高质量代码，从而减少从零开始的随意编码，确保产出结果的可预测性。\n\n该工具特别适合希望利用 AI 辅助编程的开发者、技术负责人及初创团队。无论是启动全新项目还是在现有工程中引入规范化流程，用户只需通过简单的命令行操作，即可初始化项目并集成主流的 AI 编程助手。其核心技术亮点在于“规格即代码”的理念，支持社区扩展与预设模板，允许用户根据特定技术栈定制开发流程。此外，Spec Kit 强调官方维护的安全性，提供稳定的版本管理，帮助开发者在享受 AI 红利的同时，依然牢牢掌握架构设计的主动权，真正实现从“凭感觉写代码”到“按规格建系统”的转变。",88749,"2026-04-17T09:48:14",[14,17,13,16],{"id":45,"name":46,"github_repo":47,"description_zh":48,"stars":49,"difficulty_score":25,"last_commit_at":50,"category_tags":51,"status":18},3704,"NextChat","ChatGPTNextWeb\u002FNextChat","NextChat 是一款轻量且极速的 AI 助手，旨在为用户提供流畅、跨平台的大模型交互体验。它完美解决了用户在多设备间切换时难以保持对话连续性，以及面对众多 AI 模型不知如何统一管理的痛点。无论是日常办公、学习辅助还是创意激发，NextChat 都能让用户随时随地通过网页、iOS、Android、Windows、MacOS 或 Linux 端无缝接入智能服务。\n\n这款工具非常适合普通用户、学生、职场人士以及需要私有化部署的企业团队使用。对于开发者而言，它也提供了便捷的自托管方案，支持一键部署到 Vercel 或 Zeabur 等平台。\n\nNextChat 
的核心亮点在于其广泛的模型兼容性，原生支持 Claude、DeepSeek、GPT-4 及 Gemini Pro 等主流大模型，让用户在一个界面即可自由切换不同 AI 能力。此外，它还率先支持 MCP（Model Context Protocol）协议，增强了上下文处理能力。针对企业用户，NextChat 提供专业版解决方案，具备品牌定制、细粒度权限控制、内部知识库整合及安全审计等功能，满足公司对数据隐私和个性化管理的高标准要求。",87618,"2026-04-05T07:20:52",[16,14],{"id":53,"name":54,"github_repo":55,"description_zh":56,"stars":57,"difficulty_score":25,"last_commit_at":58,"category_tags":59,"status":18},2268,"ML-For-Beginners","microsoft\u002FML-For-Beginners","ML-For-Beginners 是由微软推出的一套系统化机器学习入门课程，旨在帮助零基础用户轻松掌握经典机器学习知识。这套课程将学习路径规划为 12 周，包含 26 节精炼课程和 52 道配套测验，内容涵盖从基础概念到实际应用的完整流程，有效解决了初学者面对庞大知识体系时无从下手、缺乏结构化指导的痛点。\n\n无论是希望转型的开发者、需要补充算法背景的研究人员，还是对人工智能充满好奇的普通爱好者，都能从中受益。课程不仅提供了清晰的理论讲解，还强调动手实践，让用户在循序渐进中建立扎实的技能基础。其独特的亮点在于强大的多语言支持，通过自动化机制提供了包括简体中文在内的 50 多种语言版本，极大地降低了全球不同背景用户的学习门槛。此外，项目采用开源协作模式，社区活跃且内容持续更新，确保学习者能获取前沿且准确的技术资讯。如果你正寻找一条清晰、友好且专业的机器学习入门之路，ML-For-Beginners 将是理想的起点。",85267,"2026-04-18T11:00:28",[17,60,61,15,13,62,14,16,63],"数据工具","视频","其他","音频",{"id":65,"github_repo":66,"name":67,"description_en":68,"description_zh":69,"ai_summary_zh":69,"readme_en":70,"readme_zh":71,"quickstart_zh":72,"use_case_zh":73,"hero_image_url":74,"owner_login":75,"owner_name":76,"owner_avatar_url":77,"owner_bio":78,"owner_company":68,"owner_location":68,"owner_email":79,"owner_twitter":68,"owner_website":80,"owner_url":81,"languages":82,"stars":87,"forks":88,"last_commit_at":89,"license":90,"difficulty_score":91,"env_os":92,"env_gpu":93,"env_ram":92,"env_deps":94,"category_tags":97,"github_topics":68,"view_count":98,"oss_zip_url":68,"oss_zip_packed_at":68,"status":18,"created_at":99,"updated_at":100,"faqs":101,"releases":102},10072,"deepseek-ai\u002FDeepSeek-V3","DeepSeek-V3",null,"DeepSeek-V3 是一款由深度求索推出的开源混合专家（MoE）大语言模型，旨在以极高的效率提供媲美顶尖闭源模型的智能服务。它拥有 6710 亿总参数，但在处理每个 token 时仅激活 370 亿参数，这种设计巧妙解决了大规模模型推理成本高、速度慢的难题，让高性能 AI 更易于部署和应用。\n\n这款模型特别适合开发者、研究人员以及需要构建复杂 AI 应用的企业团队使用。无论是进行代码生成、逻辑推理还是多轮对话开发，DeepSeek-V3 都能提供强大的支持。其独特之处在于采用了无辅助损失的负载均衡策略和多令牌预测训练目标，前者在提升计算效率的同时避免了性能损耗，后者则显著增强了模型表现并加速了推理过程。此外，模型在 14.8 万亿高质量令牌上完成预训练，且整个训练过程异常稳定，未出现不可恢复的损失尖峰。凭借仅需 278.8 万 H800 GPU 小时即可完成训练的高效特性，DeepSeek-V3 为开源社区树立了一个兼顾性能与成本效益的新标杆。","\u003C!-- markdownlint-disable first-line-h1 -->\n\u003C!-- markdownlint-disable html -->\n\u003C!-- markdownlint-disable no-duplicate-header -->\n\n\u003Cdiv align=\"center\">\n  \u003Cimg src=\"https:\u002F\u002Fgithub.com\u002Fdeepseek-ai\u002FDeepSeek-V2\u002Fblob\u002Fmain\u002Ffigures\u002Flogo.svg?raw=true\" width=\"60%\" alt=\"DeepSeek-V3\" \u002F>\n\u003C\u002Fdiv>\n\u003Chr>\n\u003Cdiv align=\"center\" style=\"line-height: 1;\">\n  \u003Ca href=\"https:\u002F\u002Fwww.deepseek.com\u002F\">\u003Cimg alt=\"Homepage\"\n    src=\"https:\u002F\u002Fgithub.com\u002Fdeepseek-ai\u002FDeepSeek-V2\u002Fblob\u002Fmain\u002Ffigures\u002Fbadge.svg?raw=true\"\u002F>\u003C\u002Fa>\n  \u003Ca href=\"https:\u002F\u002Fchat.deepseek.com\u002F\">\u003Cimg alt=\"Chat\"\n    src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F🤖%20Chat-DeepSeek%20V3-536af5?color=536af5&logoColor=white\"\u002F>\u003C\u002Fa>\n  \u003Ca href=\"https:\u002F\u002Fhuggingface.co\u002Fdeepseek-ai\">\u003Cimg alt=\"Hugging Face\"\n    src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F%F0%9F%A4%97%20Hugging%20Face-DeepSeek%20AI-ffc107?color=ffc107&logoColor=white\"\u002F>\u003C\u002Fa>\n  \u003Cbr>\n  \u003Ca href=\"https:\u002F\u002Fdiscord.gg\u002FTc7c45Zzu5\">\u003Cimg alt=\"Discord\"\n    src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FDiscord-DeepSeek%20AI-7289da?logo=discord&logoColor=white&color=7289da\"\u002F>\u003C\u002Fa>\n  
\u003Ca href=\"https:\u002F\u002Fgithub.com\u002Fdeepseek-ai\u002FDeepSeek-V2\u002Fblob\u002Fmain\u002Ffigures\u002Fqr.jpeg?raw=true\">\u003Cimg alt=\"Wechat\"\n    src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FWeChat-DeepSeek%20AI-brightgreen?logo=wechat&logoColor=white\"\u002F>\u003C\u002Fa>\n  \u003Ca href=\"https:\u002F\u002Ftwitter.com\u002Fdeepseek_ai\">\u003Cimg alt=\"Twitter Follow\"\n    src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FTwitter-deepseek_ai-white?logo=x&logoColor=white\"\u002F>\u003C\u002Fa>\n  \u003Cbr>\n  \u003Ca href=\"https:\u002F\u002Fgithub.com\u002Fdeepseek-ai\u002FDeepSeek-V3\u002Fblob\u002Fmain\u002FLICENSE-CODE\">\u003Cimg alt=\"Code License\"\n    src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCode_License-MIT-f5de53?&color=f5de53\"\u002F>\u003C\u002Fa>\n  \u003Ca href=\"https:\u002F\u002Fgithub.com\u002Fdeepseek-ai\u002FDeepSeek-V3\u002Fblob\u002Fmain\u002FLICENSE-MODEL\">\u003Cimg alt=\"Model License\"\n    src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FModel_License-Model_Agreement-f5de53?&color=f5de53\"\u002F>\u003C\u002Fa>\n  \u003Cbr>\n  \u003Ca href=\"https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.19437\">\u003Cb>Paper Link\u003C\u002Fb>👁️\u003C\u002Fa>\n\u003C\u002Fdiv>\n\n## Table of Contents\n\n1. [Introduction](#1-introduction)\n2. [Model Summary](#2-model-summary)\n3. [Model Downloads](#3-model-downloads)\n4. [Evaluation Results](#4-evaluation-results)\n5. [Chat Website & API Platform](#5-chat-website--api-platform)\n6. [How to Run Locally](#6-how-to-run-locally)\n7. [License](#7-license)\n8. [Citation](#8-citation)\n9. [Contact](#9-contact)\n\n\n## 1. Introduction\n\nWe present DeepSeek-V3, a strong Mixture-of-Experts (MoE) language model with 671B total parameters with 37B activated for each token. \nTo achieve efficient inference and cost-effective training, DeepSeek-V3 adopts Multi-head Latent Attention (MLA) and DeepSeekMoE architectures, which were thoroughly validated in DeepSeek-V2. \nFurthermore, DeepSeek-V3 pioneers an auxiliary-loss-free strategy for load balancing and sets a multi-token prediction training objective for stronger performance. \nWe pre-train DeepSeek-V3 on 14.8 trillion diverse and high-quality tokens, followed by Supervised Fine-Tuning and Reinforcement Learning stages to fully harness its capabilities. \nComprehensive evaluations reveal that DeepSeek-V3 outperforms other open-source models and achieves performance comparable to leading closed-source models.\nDespite its excellent performance, DeepSeek-V3 requires only 2.788M H800 GPU hours for its full training.\nIn addition, its training process is remarkably stable. \nThroughout the entire training process, we did not experience any irrecoverable loss spikes or perform any rollbacks. \n\u003Cp align=\"center\">\n  \u003Cimg width=\"80%\" src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fdeepseek-ai_DeepSeek-V3_readme_c567defa1525.png\">\n\u003C\u002Fp>\n\n## 2. Model Summary\n\n---\n\n**Architecture: Innovative Load Balancing Strategy and Training Objective**\n\n- On top of the efficient architecture of DeepSeek-V2, we pioneer an auxiliary-loss-free strategy for load balancing, which minimizes the performance degradation that arises from encouraging load balancing.\n-  We investigate a Multi-Token Prediction (MTP) objective and prove it beneficial to model performance. \n    It can also be used for speculative decoding for inference acceleration. 
\n\n---\n\n**Pre-Training: Towards Ultimate Training Efficiency**\n\n- We design an FP8 mixed precision training framework and, for the first time, validate the feasibility and effectiveness of FP8 training on an extremely large-scale model.  \n- Through co-design of algorithms, frameworks, and hardware, we overcome the communication bottleneck in cross-node MoE training, nearly achieving full computation-communication overlap.  \n  This significantly enhances our training efficiency and reduces the training costs, enabling us to further scale up the model size without additional overhead.  \n- At an economical cost of only 2.664M H800 GPU hours, we complete the pre-training of DeepSeek-V3 on 14.8T tokens, producing the currently strongest open-source base model. The subsequent training stages after pre-training require only 0.1M GPU hours.\n\n---\n\n**Post-Training: Knowledge Distillation from DeepSeek-R1**\n\n- We introduce an innovative methodology to distill reasoning capabilities from the long-Chain-of-Thought (CoT) model, specifically from one of the DeepSeek R1 series models, into standard LLMs, particularly DeepSeek-V3. Our pipeline elegantly incorporates the verification and reflection patterns of R1 into DeepSeek-V3 and notably improves its reasoning performance. Meanwhile, we also maintain control over the output style and length of DeepSeek-V3.\n\n---\n\n\n## 3. Model Downloads\n\n\u003Cdiv align=\"center\">\n\n| **Model** | **#Total Params** | **#Activated Params** | **Context Length** | **Download** |\n| :------------: | :------------: | :------------: | :------------: | :------------: |\n| DeepSeek-V3-Base | 671B | 37B | 128K   | [🤗 Hugging Face](https:\u002F\u002Fhuggingface.co\u002Fdeepseek-ai\u002FDeepSeek-V3-Base)   |\n| DeepSeek-V3   | 671B | 37B |  128K   | [🤗 Hugging Face](https:\u002F\u002Fhuggingface.co\u002Fdeepseek-ai\u002FDeepSeek-V3)   |\n\n\u003C\u002Fdiv>\n\n> [!NOTE]\n> The total size of DeepSeek-V3 models on Hugging Face is 685B, which includes 671B of the Main Model weights and 14B of the Multi-Token Prediction (MTP) Module weights.\n\nTo ensure optimal performance and flexibility, we have partnered with open-source communities and hardware vendors to provide multiple ways to run the model locally. For step-by-step guidance, check out Section 6: [How to Run Locally](#6-how-to-run-locally).\n\nFor developers looking to dive deeper, we recommend exploring [README_WEIGHTS.md](.\u002FREADME_WEIGHTS.md) for details on the Main Model weights and the Multi-Token Prediction (MTP) Modules. Please note that MTP support is currently under active development within the community, and we welcome your contributions and feedback.\n\n## 4. Evaluation Results\n### Base Model\n#### Standard Benchmarks\n\n\u003Cdiv align=\"center\">\n\n\n|  | Benchmark (Metric) | # Shots | DeepSeek-V2 | Qwen2.5 72B | LLaMA3.1 405B | DeepSeek-V3 |\n|---|-------------------|----------|--------|-------------|---------------|---------|\n| | Architecture | - | MoE | Dense | Dense | MoE |\n| | # Activated Params | - | 21B | 72B | 405B | 37B |\n| | # Total Params | - | 236B | 72B | 405B | 671B |\n| English | Pile-test (BPB) | - | 0.606 | 0.638 | **0.542** | 0.548 |\n| | BBH (EM) | 3-shot | 78.8 | 79.8 | 82.9 | **87.5** |\n| | MMLU (Acc.) | 5-shot | 78.4 | 85.0 | 84.4 | **87.1** |\n| | MMLU-Redux (Acc.) | 5-shot | 75.6 | 83.2 | 81.3 | **86.2** |\n| | MMLU-Pro (Acc.) | 5-shot | 51.4 | 58.3 | 52.8 | **64.4** |\n| | DROP (F1) | 3-shot | 80.4 | 80.6 | 86.0 | **89.0** |\n| | ARC-Easy (Acc.) 
| 25-shot | 97.6 | 98.4 | 98.4 | **98.9** |\n| | ARC-Challenge (Acc.) | 25-shot | 92.2 | 94.5 | **95.3** | **95.3** |\n| | HellaSwag (Acc.) | 10-shot | 87.1 | 84.8 | **89.2** | 88.9 |\n| | PIQA (Acc.) | 0-shot | 83.9 | 82.6 | **85.9** | 84.7 |\n| | WinoGrande (Acc.) | 5-shot | **86.3** | 82.3 | 85.2 | 84.9 |\n| | RACE-Middle (Acc.) | 5-shot | 73.1 | 68.1 | **74.2** | 67.1 |\n| | RACE-High (Acc.) | 5-shot | 52.6 | 50.3 | **56.8** | 51.3 |\n| | TriviaQA (EM) | 5-shot | 80.0 | 71.9 | 82.7 | **82.9** |\n| | NaturalQuestions (EM) | 5-shot | 38.6 | 33.2 | **41.5** | 40.0 |\n| | AGIEval (Acc.) | 0-shot | 57.5 | 75.8 | 60.6 | **79.6** |\n| Code | HumanEval (Pass@1) | 0-shot | 43.3 | 53.0 | 54.9 | **65.2** |\n| | MBPP (Pass@1) | 3-shot | 65.0 | 72.6 | 68.4 | **75.4** |\n| | LiveCodeBench-Base (Pass@1) | 3-shot | 11.6 | 12.9 | 15.5 | **19.4** |\n| | CRUXEval-I (Acc.) | 2-shot | 52.5 | 59.1 | 58.5 | **67.3** |\n| | CRUXEval-O (Acc.) | 2-shot | 49.8 | 59.9 | 59.9 | **69.8** |\n| Math | GSM8K (EM) | 8-shot | 81.6 | 88.3 | 83.5 | **89.3** |\n| | MATH (EM) | 4-shot | 43.4 | 54.4 | 49.0 | **61.6** |\n| | MGSM (EM) | 8-shot | 63.6 | 76.2 | 69.9 | **79.8** |\n| | CMath (EM) | 3-shot | 78.7 | 84.5 | 77.3 | **90.7** |\n| Chinese | CLUEWSC (EM) | 5-shot | 82.0 | 82.5 | **83.0** | 82.7 |\n| | C-Eval (Acc.) | 5-shot | 81.4 | 89.2 | 72.5 | **90.1** |\n| | CMMLU (Acc.) | 5-shot | 84.0 | **89.5** | 73.7 | 88.8 |\n| | CMRC (EM) | 1-shot | **77.4** | 75.8 | 76.0 | 76.3 |\n| | C3 (Acc.) | 0-shot | 77.4 | 76.7 | **79.7** | 78.6 |\n| | CCPM (Acc.) | 0-shot | **93.0** | 88.5 | 78.6 | 92.0 |\n| Multilingual | MMMLU-non-English (Acc.) | 5-shot | 64.0 | 74.8 | 73.8 | **79.4** |\n\n\u003C\u002Fdiv>\n\n> [!NOTE]\n> Best results are shown in bold. Scores with a gap not exceeding 0.3 are considered to be at the same level. DeepSeek-V3 achieves the best performance on most benchmarks, especially on math and code tasks.\n> For more evaluation details, please check our paper. \n\n#### Context Window\n\u003Cp align=\"center\">\n  \u003Cimg width=\"80%\" src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fdeepseek-ai_DeepSeek-V3_readme_ad39275136c2.png\">\n\u003C\u002Fp>\n\nEvaluation results on the ``Needle In A Haystack`` (NIAH) tests.  DeepSeek-V3 performs well across all context window lengths up to **128K**. 
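\n\nFor intuition, a NIAH probe buries one \"needle\" fact at a controlled depth inside long filler text and asks the model to retrieve it; sweeping the depth and the context length produces results like those plotted above. A minimal, hypothetical sketch (the real harness differs in details such as tokenization and scoring):\n\n```python\ndef build_niah_prompt(filler: str, needle: str, depth: float, n_words: int) -> str:\n    base = filler.split()\n    # Repeat the filler until the word budget is met, then truncate.\n    words = (base * (n_words \u002F\u002F len(base) + 1))[:n_words]\n    # Burial point as a fraction of the context: 0.0 = start, 1.0 = end.\n    cut = int(len(words) * depth)\n    doc = \" \".join(words[:cut] + [needle] + words[cut:])\n    return doc + \" Question: what is the magic number mentioned in the text above?\"\n```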
\n\n### Chat Model\n#### Standard Benchmarks (Models larger than 67B)\n\u003Cdiv align=\"center\">\n\n| | **Benchmark (Metric)** | **DeepSeek V2-0506** | **DeepSeek V2.5-0905** | **Qwen2.5 72B-Inst.** | **Llama3.1 405B-Inst.** | **Claude-3.5-Sonnet-1022** | **GPT-4o 0513** | **DeepSeek V3** |\n|---|---------------------|---------------------|----------------------|---------------------|----------------------|---------------------------|----------------|----------------|\n| | Architecture | MoE | MoE | Dense | Dense | - | - | MoE |\n| | # Activated Params | 21B | 21B | 72B | 405B | - | - | 37B |\n| | # Total Params | 236B | 236B | 72B | 405B | - | - | 671B |\n| English | MMLU (EM) | 78.2 | 80.6 | 85.3 | **88.6** | **88.3** | 87.2 | **88.5** |\n| | MMLU-Redux (EM) | 77.9 | 80.3 | 85.6 | 86.2 | **88.9** | 88.0 | **89.1** |\n| | MMLU-Pro (EM) | 58.5 | 66.2 | 71.6 | 73.3 | **78.0** | 72.6 | 75.9 |\n| | DROP (3-shot F1) | 83.0 | 87.8 | 76.7 | 88.7 | 88.3 | 83.7 | **91.6** |\n| | IF-Eval (Prompt Strict) | 57.7 | 80.6 | 84.1 | 86.0 | **86.5** | 84.3 | 86.1 |\n| | GPQA-Diamond (Pass@1) | 35.3 | 41.3 | 49.0 | 51.1 | **65.0** | 49.9 | 59.1 |\n| | SimpleQA (Correct) | 9.0 | 10.2 | 9.1 | 17.1 | 28.4 | **38.2** | 24.9 |\n| | FRAMES (Acc.) | 66.9 | 65.4 | 69.8 | 70.0 | 72.5 | **80.5** | 73.3 |\n| | LongBench v2 (Acc.) | 31.6 | 35.4 | 39.4 | 36.1 | 41.0 | 48.1 | **48.7** |\n| Code | HumanEval-Mul (Pass@1) | 69.3 | 77.4 | 77.3 | 77.2 | 81.7 | 80.5 | **82.6** |\n| | LiveCodeBench (Pass@1-COT) | 18.8 | 29.2 | 31.1 | 28.4 | 36.3 | 33.4 | **40.5** |\n| | LiveCodeBench (Pass@1) | 20.3 | 28.4 | 28.7 | 30.1 | 32.8 | 34.2 | **37.6** |\n| | Codeforces (Percentile) | 17.5 | 35.6 | 24.8 | 25.3 | 20.3 | 23.6 | **51.6** |\n| | SWE Verified (Resolved) | - | 22.6 | 23.8 | 24.5 | **50.8** | 38.8 | 42.0 |\n| | Aider-Edit (Acc.) | 60.3 | 71.6 | 65.4 | 63.9 | **84.2** | 72.9 | 79.7 |\n| | Aider-Polyglot (Acc.) | - | 18.2 | 7.6 | 5.8 | 45.3 | 16.0 | **49.6** |\n| Math | AIME 2024 (Pass@1) | 4.6 | 16.7 | 23.3 | 23.3 | 16.0 | 9.3 | **39.2** |\n| | MATH-500 (EM) | 56.3 | 74.7 | 80.0 | 73.8 | 78.3 | 74.6 | **90.2** |\n| | CNMO 2024 (Pass@1) | 2.8 | 10.8 | 15.9 | 6.8 | 13.1 | 10.8 | **43.2** |\n| Chinese | CLUEWSC (EM) | 89.9 | 90.4 | **91.4** | 84.7 | 85.4 | 87.9 | 90.9 |\n| | C-Eval (EM) | 78.6 | 79.5 | 86.1 | 61.5 | 76.7 | 76.0 | **86.5** |\n| | C-SimpleQA (Correct) | 48.5 | 54.1 | 48.4 | 50.4 | 51.3 | 59.3 | **64.8** |\n\n\u003C\u002Fdiv>\n\n> [!NOTE]\n> All models are evaluated in a configuration that limits the output length to 8K. Benchmarks containing fewer than 1000 samples are tested multiple times using varying temperature settings to derive robust final results. DeepSeek-V3 stands as the best-performing open-source model, and also exhibits competitive performance against frontier closed-source models.\n\n\n####  Open Ended Generation Evaluation\n\n\u003Cdiv align=\"center\">\n\n\n\n| Model | Arena-Hard | AlpacaEval 2.0 |\n|-------|------------|----------------|\n| DeepSeek-V2.5-0905 | 76.2 | 50.5 |\n| Qwen2.5-72B-Instruct | 81.2 | 49.1 |\n| LLaMA-3.1 405B | 69.3 | 40.5 |\n| GPT-4o-0513 | 80.4 | 51.1 |\n| Claude-Sonnet-3.5-1022 | 85.2 | 52.0 |\n| DeepSeek-V3 | **85.5** | **70.0** |\n\n\u003C\u002Fdiv>\n\n> [!NOTE]\n> English open-ended conversation evaluations. For AlpacaEval 2.0, we use the length-controlled win rate as the metric.\n\n\n## 5. 
Chat Website & API Platform\nYou can chat with DeepSeek-V3 on DeepSeek's official website: [chat.deepseek.com](https:\u002F\u002Fchat.deepseek.com\u002Fsign_in)\n\nWe also provide an OpenAI-Compatible API at DeepSeek Platform: [platform.deepseek.com](https:\u002F\u002Fplatform.deepseek.com\u002F)\n\n## 6. How to Run Locally\n\nDeepSeek-V3 can be deployed locally using the following hardware and open-source community software:\n\n1. **DeepSeek-Infer Demo**: We provide a simple and lightweight demo for FP8 and BF16 inference.\n2. **SGLang**: Fully supports the DeepSeek-V3 model in both BF16 and FP8 inference modes, with Multi-Token Prediction [coming soon](https:\u002F\u002Fgithub.com\u002Fsgl-project\u002Fsglang\u002Fissues\u002F2591).\n3. **LMDeploy**: Enables efficient FP8 and BF16 inference for local and cloud deployment.\n4. **TensorRT-LLM**: Currently supports BF16 inference and INT4\u002F8 quantization, with FP8 support coming soon.\n5. **vLLM**: Supports the DeepSeek-V3 model with FP8 and BF16 modes for tensor parallelism and pipeline parallelism.\n6. **LightLLM**: Supports efficient single-node or multi-node deployment for FP8 and BF16.\n7. **AMD GPU**: Enables running the DeepSeek-V3 model on AMD GPUs via SGLang in both BF16 and FP8 modes.\n8. **Huawei Ascend NPU**: Supports running DeepSeek-V3 on Huawei Ascend devices in both INT8 and BF16.\n\nSince FP8 training is natively adopted in our framework, we only provide FP8 weights. If you require BF16 weights for experimentation, you can use the provided conversion script to perform the transformation.\n\nHere is an example of converting FP8 weights to BF16:\n\n```shell\ncd inference\npython fp8_cast_bf16.py --input-fp8-hf-path \u002Fpath\u002Fto\u002Ffp8_weights --output-bf16-hf-path \u002Fpath\u002Fto\u002Fbf16_weights\n```\n\n> [!NOTE]\n> Hugging Face Transformers does not yet directly support this model.\n\n### 6.1 Inference with DeepSeek-Infer Demo (example only)\n\n#### System Requirements\n\n> [!NOTE] \n> Linux with Python 3.10 only. Mac and Windows are not supported.\n\nDependencies:\n```pip-requirements\ntorch==2.4.1\ntriton==3.0.0\ntransformers==4.46.3\nsafetensors==0.4.5\n```\n#### Model Weights & Demo Code Preparation\n\nFirst, clone our DeepSeek-V3 GitHub repository:\n\n```shell\ngit clone https:\u002F\u002Fgithub.com\u002Fdeepseek-ai\u002FDeepSeek-V3.git\n```\n\nNavigate to the `inference` folder and install dependencies listed in `requirements.txt`. 
The easiest way is to use a package manager like `conda` or `uv` to create a new virtual environment and install the dependencies.\n\n```shell\ncd DeepSeek-V3\u002Finference\npip install -r requirements.txt\n```\n\nDownload the model weights from Hugging Face, and put them into the `\u002Fpath\u002Fto\u002FDeepSeek-V3` folder.\n\n#### Model Weights Conversion\n\nConvert Hugging Face model weights to a specific format:\n\n```shell\npython convert.py --hf-ckpt-path \u002Fpath\u002Fto\u002FDeepSeek-V3 --save-path \u002Fpath\u002Fto\u002FDeepSeek-V3-Demo --n-experts 256 --model-parallel 16\n```\n\n#### Run\n\nThen you can chat with DeepSeek-V3:\n\n```shell\ntorchrun --nnodes 2 --nproc-per-node 8 --node-rank $RANK --master-addr $ADDR generate.py --ckpt-path \u002Fpath\u002Fto\u002FDeepSeek-V3-Demo --config configs\u002Fconfig_671B.json --interactive --temperature 0.7 --max-new-tokens 200\n```\n\nOr batch inference on a given file:\n\n```shell\ntorchrun --nnodes 2 --nproc-per-node 8 --node-rank $RANK --master-addr $ADDR generate.py --ckpt-path \u002Fpath\u002Fto\u002FDeepSeek-V3-Demo --config configs\u002Fconfig_671B.json --input-file $FILE\n```\n\n### 6.2 Inference with SGLang (recommended)\n\n[SGLang](https:\u002F\u002Fgithub.com\u002Fsgl-project\u002Fsglang) currently supports [MLA optimizations](https:\u002F\u002Flmsys.org\u002Fblog\u002F2024-09-04-sglang-v0-3\u002F#deepseek-multi-head-latent-attention-mla-throughput-optimizations), [DP Attention](https:\u002F\u002Flmsys.org\u002Fblog\u002F2024-12-04-sglang-v0-4\u002F#data-parallelism-attention-for-deepseek-models), FP8 (W8A8), FP8 KV Cache, and Torch Compile, delivering state-of-the-art latency and throughput performance among open-source frameworks.\n\nNotably, [SGLang v0.4.1](https:\u002F\u002Fgithub.com\u002Fsgl-project\u002Fsglang\u002Freleases\u002Ftag\u002Fv0.4.1) fully supports running DeepSeek-V3 on both **NVIDIA and AMD GPUs**, making it a highly versatile and robust solution.\n\nSGLang also supports [multi-node tensor parallelism](https:\u002F\u002Fgithub.com\u002Fsgl-project\u002Fsglang\u002Ftree\u002Fmain\u002Fbenchmark\u002Fdeepseek_v3#example-serving-with-2-h208), enabling you to run this model on multiple network-connected machines.\n\nMulti-Token Prediction (MTP) is in development, and progress can be tracked in the [optimization plan](https:\u002F\u002Fgithub.com\u002Fsgl-project\u002Fsglang\u002Fissues\u002F2591).\n\nHere are the launch instructions from the SGLang team: https:\u002F\u002Fgithub.com\u002Fsgl-project\u002Fsglang\u002Ftree\u002Fmain\u002Fbenchmark\u002Fdeepseek_v3\n\n### 6.3 Inference with LMDeploy (recommended)\n[LMDeploy](https:\u002F\u002Fgithub.com\u002FInternLM\u002Flmdeploy), a flexible and high-performance inference and serving framework tailored for large language models, now supports DeepSeek-V3. It offers both offline pipeline processing and online deployment capabilities, seamlessly integrating with PyTorch-based workflows.\n\nFor comprehensive step-by-step instructions on running DeepSeek-V3 with LMDeploy, please refer here: https:\u002F\u002Fgithub.com\u002FInternLM\u002Flmdeploy\u002Fissues\u002F2960\n\n\n### 6.4 Inference with TRT-LLM (recommended)\n\n[TensorRT-LLM](https:\u002F\u002Fgithub.com\u002FNVIDIA\u002FTensorRT-LLM) now supports the DeepSeek-V3 model, offering precision options such as BF16 and INT4\u002FINT8 weight-only. Support for FP8 is currently in progress and will be released soon. 
You can access the custom branch of TRTLLM specifically for DeepSeek-V3 support through the following link to experience the new features directly: https:\u002F\u002Fgithub.com\u002FNVIDIA\u002FTensorRT-LLM\u002Ftree\u002Fmain\u002Fexamples\u002Fdeepseek_v3. \n\n\n### 6.5 Inference with vLLM (recommended)\n\n[vLLM](https:\u002F\u002Fgithub.com\u002Fvllm-project\u002Fvllm) v0.6.6 supports DeepSeek-V3 inference for FP8 and BF16 modes on both NVIDIA and AMD GPUs. Aside from standard techniques, vLLM offers _pipeline parallelism_, allowing you to run this model on multiple machines connected by networks. For detailed guidance, please refer to the [vLLM instructions](https:\u002F\u002Fdocs.vllm.ai\u002Fen\u002Flatest\u002Fserving\u002Fdistributed_serving.html). Please feel free to follow [the enhancement plan](https:\u002F\u002Fgithub.com\u002Fvllm-project\u002Fvllm\u002Fissues\u002F11539) as well.\n\n### 6.6 Inference with LightLLM (recommended)\n\n[LightLLM](https:\u002F\u002Fgithub.com\u002FModelTC\u002Flightllm\u002Ftree\u002Fmain) v1.0.1 supports single-machine and multi-machine tensor parallel deployment for DeepSeek-R1 (FP8\u002FBF16) and provides mixed-precision deployment, with more quantization modes continuously integrated. For more details, please refer to [LightLLM instructions](https:\u002F\u002Flightllm-en.readthedocs.io\u002Fen\u002Flatest\u002Fgetting_started\u002Fquickstart.html). Additionally, LightLLM offers PD-disaggregation deployment for DeepSeek-V2, and the implementation of PD-disaggregation for DeepSeek-V3 is in development.\n\n### 6.7 Recommended Inference Functionality with AMD GPUs\n\nIn collaboration with the AMD team, we have achieved Day-One support for AMD GPUs using SGLang, with full compatibility for both FP8 and BF16 precision. For detailed guidance, please refer to the [SGLang instructions](#62-inference-with-sglang-recommended).\n\n### 6.8 Recommended Inference Functionality with Huawei Ascend NPUs\nThe [MindIE](https:\u002F\u002Fwww.hiascend.com\u002Fen\u002Fsoftware\u002Fmindie) framework from the Huawei Ascend community has successfully adapted the BF16 version of DeepSeek-V3. For step-by-step guidance on Ascend NPUs, please follow the [instructions here](https:\u002F\u002Fmodelers.cn\u002Fmodels\u002FMindIE\u002Fdeepseekv3).\n\n\n## 7. License\nThis code repository is licensed under [the MIT License](LICENSE-CODE). The use of DeepSeek-V3 Base\u002FChat models is subject to [the Model License](LICENSE-MODEL). DeepSeek-V3 series (including Base and Chat) supports commercial use.\n\n## 8. Citation\n```\n@misc{deepseekai2024deepseekv3technicalreport,\n      title={DeepSeek-V3 Technical Report}, \n      author={DeepSeek-AI},\n      year={2024},\n      eprint={2412.19437},\n      archivePrefix={arXiv},\n      primaryClass={cs.CL},\n      url={https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.19437}, \n}\n```\n\n## 9. 
Contact\nIf you have any questions, please raise an issue or contact us at [service@deepseek.com](mailto:service@deepseek.com).\n","\u003C!-- markdownlint-disable first-line-h1 -->\n\u003C!-- markdownlint-disable html -->\n\u003C!-- markdownlint-disable no-duplicate-header -->\n\n\u003Cdiv align=\"center\">\n  \u003Cimg src=\"https:\u002F\u002Fgithub.com\u002Fdeepseek-ai\u002FDeepSeek-V2\u002Fblob\u002Fmain\u002Ffigures\u002Flogo.svg?raw=true\" width=\"60%\" alt=\"DeepSeek-V3\" \u002F>\n\u003C\u002Fdiv>\n\u003Chr>\n\u003Cdiv align=\"center\" style=\"line-height: 1;\">\n  \u003Ca href=\"https:\u002F\u002Fwww.deepseek.com\u002F\">\u003Cimg alt=\"主页\"\n    src=\"https:\u002F\u002Fgithub.com\u002Fdeepseek-ai\u002FDeepSeek-V2\u002Fblob\u002Fmain\u002Ffigures\u002Fbadge.svg?raw=true\"\u002F>\u003C\u002Fa>\n  \u003Ca href=\"https:\u002F\u002Fchat.deepseek.com\u002F\">\u003Cimg alt=\"聊天\"\n    src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F🤖%20Chat-DeepSeek%20V3-536af5?color=536af5&logoColor=white\"\u002F>\u003C\u002Fa>\n  \u003Ca href=\"https:\u002F\u002Fhuggingface.co\u002Fdeepseek-ai\">\u003Cimg alt=\"Hugging Face\"\n    src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F%F0%9F%A4%97%20Hugging%20Face-DeepSeek%20AI-ffc107?color=ffc107&logoColor=white\"\u002F>\u003C\u002Fa>\n  \u003Cbr>\n  \u003Ca href=\"https:\u002F\u002Fdiscord.gg\u002FTc7c45Zzu5\">\u003Cimg alt=\"Discord\"\n    src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FDiscord-DeepSeek%20AI-7289da?logo=discord&logoColor=white&color=7289da\"\u002F>\u003C\u002Fa>\n  \u003Ca href=\"https:\u002F\u002Fgithub.com\u002Fdeepseek-ai\u002FDeepSeek-V2\u002Fblob\u002Fmain\u002Ffigures\u002Fqr.jpeg?raw=true\">\u003Cimg alt=\"微信\"\n    src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FWeChat-DeepSeek%20AI-brightgreen?logo=wechat&logoColor=white\"\u002F>\u003C\u002Fa>\n  \u003Ca href=\"https:\u002F\u002Ftwitter.com\u002Fdeepseek_ai\">\u003Cimg alt=\"Twitter关注\"\n    src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FTwitter-deepseek_ai-white?logo=x&logoColor=white\"\u002F>\u003C\u002Fa>\n  \u003Cbr>\n  \u003Ca href=\"https:\u002F\u002Fgithub.com\u002Fdeepseek-ai\u002FDeepSeek-V3\u002Fblob\u002Fmain\u002FLICENSE-CODE\">\u003Cimg alt=\"代码许可证\"\n    src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FCode_License-MIT-f5de53?&color=f5de53\"\u002F>\u003C\u002Fa>\n  \u003Ca href=\"https:\u002F\u002Fgithub.com\u002Fdeepseek-ai\u002FDeepSeek-V3\u002Fblob\u002Fmain\u002FLICENSE-MODEL\">\u003Cimg alt=\"模型许可证\"\n    src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FModel_License-Model_Agreement-f5de53?&color=f5de53\"\u002F>\u003C\u002Fa>\n  \u003Cbr>\n  \u003Ca href=\"https:\u002F\u002Farxiv.org\u002Fpdf\u002F2412.19437\">\u003Cb>论文链接\u003C\u002Fb>👁️\u003C\u002Fa>\n\u003C\u002Fdiv>\n\n## 目录\n\n1. [简介](#1-introduction)\n2. [模型概览](#2-model-summary)\n3. [模型下载](#3-model-downloads)\n4. [评估结果](#4-evaluation-results)\n5. [聊天网站与API平台](#5-chat-website--api-platform)\n6. [如何本地运行](#6-how-to-run-locally)\n7. [许可证](#7-license)\n8. [引用](#8-citation)\n9. [联系方式](#9-contact)\n\n\n## 1. 
简介\n\n我们推出了DeepSeek-V3，这是一款强大的专家混合（MoE）语言模型，总参数量达6710亿，每处理一个token时有370亿参数被激活。  \n为了实现高效的推理和经济实惠的训练，DeepSeek-V3采用了多头潜在注意力（MLA）和DeepSeekMoE架构，这些架构已在DeepSeek-V2中得到充分验证。  \n此外，DeepSeek-V3还开创了一种无辅助损失的负载均衡策略，并设定了多token预测训练目标，以提升模型性能。  \n我们在14.8万亿个多样且高质量的token上对DeepSeek-V3进行了预训练，随后通过监督微调和强化学习阶段，充分发挥其潜力。  \n综合评估表明，DeepSeek-V3的表现优于其他开源模型，并达到了与领先的闭源模型相当的水平。\n尽管性能卓越，DeepSeek-V3的完整训练仅需278.8万小时的H800 GPU算力。\n此外，其训练过程极为稳定。  \n在整个训练过程中，我们未遇到任何不可恢复的损失激增现象，也未进行过任何回滚操作。  \n\u003Cp align=\"center\">\n  \u003Cimg width=\"80%\" src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fdeepseek-ai_DeepSeek-V3_readme_c567defa1525.png\">\n\u003C\u002Fp>\n\n## 2. 模型概览\n\n---\n\n**架构：创新的负载均衡策略与训练目标**\n\n- 在DeepSeek-V2高效架构的基础上，我们开创了无辅助损失的负载均衡策略，从而最大限度地减少了因鼓励负载均衡而带来的性能下降。\n- 我们研究了多token预测（MTP）目标，并证明其有助于提升模型性能。  \n  此外，它还可用于推测性解码，以加速推理过程。\n\n---\n\n**预训练：迈向极致的训练效率**\n\n- 我们设计了一个FP8混合精度训练框架，并首次验证了在超大规模模型上使用FP8训练的可行性和有效性。  \n- 通过算法、框架和硬件的协同设计，我们克服了跨节点MoE训练中的通信瓶颈，几乎实现了计算与通信的完全重叠。  \n  这显著提升了我们的训练效率，降低了训练成本，使我们能够在不增加额外开销的情况下进一步扩大模型规模。  \n- 仅花费266.4万小时的H800 GPU算力，我们就完成了DeepSeek-V3在14.8万亿个token上的预训练，打造出了目前最强的开源基础模型。预训练之后的后续训练阶段仅需10万小时的GPU算力。\n\n---\n\n**后训练：从DeepSeek-R1中知识蒸馏**\n\n- 我们提出了一种创新的方法，将长链式思维（CoT）模型——特别是DeepSeek R1系列中的某款模型——的推理能力蒸馏到标准LLM中，尤其是DeepSeek-V3。我们的流程巧妙地将R1的验证和反思模式融入DeepSeek-V3，显著提升了其推理性能。同时，我们还能有效控制DeepSeek-V3的输出风格和长度。\n\n---\n\n\n## 3. 模型下载\n\n\u003Cdiv align=\"center\">\n\n| **模型** | **总参数量** | **激活参数量** | **上下文长度** | **下载** |\n| :------------: | :------------: | :------------: | :------------: | :------------: |\n| DeepSeek-V3-Base | 6710亿 | 370亿 | 128K   | [🤗 Hugging Face](https:\u002F\u002Fhuggingface.co\u002Fdeepseek-ai\u002FDeepSeek-V3-Base)   |\n| DeepSeek-V3   | 6710亿 | 370亿 |  128K   | [🤗 Hugging Face](https:\u002F\u002Fhuggingface.co\u002Fdeepseek-ai\u002FDeepSeek-V3)   |\n\n\u003C\u002Fdiv>\n\n> [!NOTE]\n> DeepSeek-V3在Hugging Face上的模型总参数量为6850亿，其中包括6710亿的主模型权重和140亿的多token预测（MTP）模块权重。\n\n为确保最佳性能和灵活性，我们已与开源社区和硬件厂商合作，提供了多种在本地运行模型的方式。有关分步指导，请参阅第6节：[如何本地运行](#6-how-to-run-locally)。\n\n对于希望深入研究的开发者，我们建议查阅[README_WEIGHTS.md](.\u002FREADME_WEIGHTS.md)，了解主模型权重和多token预测（MTP）模块的详细信息。请注意，MTP支持目前仍在社区中积极开发中，我们欢迎您的贡献和反馈。\n\n## 4. 
评估结果\n\n### 基础模型\n#### 标准基准测试\n\n\u003Cdiv align=\"center\">\n\n\n|  | 基准测试（指标） | # Shot | DeepSeek-V2 | Qwen2.5 72B | LLaMA3.1 405B | DeepSeek-V3 |\n|---|-------------------|----------|--------|-------------|---------------|---------|\n| | 架构 | - | MoE | 稠密 | 稠密 | MoE |\n| | 激活参数量 | - | 21B | 72B | 405B | 37B |\n| | 总参数量 | - | 236B | 72B | 405B | 671B |\n| 英语 | Pile-test (BPB) | - | 0.606 | 0.638 | **0.542** | 0.548 |\n| | BBH (EM) | 3-shot | 78.8 | 79.8 | 82.9 | **87.5** |\n| | MMLU (准确率) | 5-shot | 78.4 | 85.0 | 84.4 | **87.1** |\n| | MMLU-Redux (准确率) | 5-shot | 75.6 | 83.2 | 81.3 | **86.2** |\n| | MMLU-Pro (准确率) | 5-shot | 51.4 | 58.3 | 52.8 | **64.4** |\n| | DROP (F1) | 3-shot | 80.4 | 80.6 | 86.0 | **89.0** |\n| | ARC-Easy (准确率) | 25-shot | 97.6 | 98.4 | 98.4 | **98.9** |\n| | ARC-Challenge (准确率) | 25-shot | 92.2 | 94.5 | **95.3** | **95.3** |\n| | HellaSwag (准确率) | 10-shot | 87.1 | 84.8 | **89.2** | 88.9 |\n| | PIQA (准确率) | 0-shot | 83.9 | 82.6 | **85.9** | 84.7 |\n| | WinoGrande (准确率) | 5-shot | **86.3** | 82.3 | 85.2 | 84.9 |\n| | RACE-Middle (准确率) | 5-shot | 73.1 | 68.1 | **74.2** | 67.1 |\n| | RACE-High (准确率) | 5-shot | 52.6 | 50.3 | **56.8** | 51.3 |\n| | TriviaQA (EM) | 5-shot | 80.0 | 71.9 | 82.7 | **82.9** |\n| | NaturalQuestions (EM) | 5-shot | 38.6 | 33.2 | **41.5** | 40.0 |\n| | AGIEval (准确率) | 0-shot | 57.5 | 75.8 | 60.6 | **79.6** |\n| 代码 | HumanEval (Pass@1) | 0-shot | 43.3 | 53.0 | 54.9 | **65.2** |\n| | MBPP (Pass@1) | 3-shot | 65.0 | 72.6 | 68.4 | **75.4** |\n| | LiveCodeBench-Base (Pass@1) | 3-shot | 11.6 | 12.9 | 15.5 | **19.4** |\n| | CRUXEval-I (准确率) | 2-shot | 52.5 | 59.1 | 58.5 | **67.3** |\n| | CRUXEval-O (准确率) | 2-shot | 49.8 | 59.9 | 59.9 | **69.8** |\n| 数学 | GSM8K (EM) | 8-shot | 81.6 | 88.3 | 83.5 | **89.3** |\n| | MATH (EM) | 4-shot | 43.4 | 54.4 | 49.0 | **61.6** |\n| | MGSM (EM) | 8-shot | 63.6 | 76.2 | 69.9 | **79.8** |\n| | CMath (EM) | 3-shot | 78.7 | 84.5 | 77.3 | **90.7** |\n| 中文 | CLUEWSC (EM) | 5-shot | 82.0 | 82.5 | **83.0** | 82.7 |\n| | C-Eval (准确率) | 5-shot | 81.4 | 89.2 | 72.5 | **90.1** |\n| | CMMLU (准确率) | 5-shot | 84.0 | **89.5** | 73.7 | 88.8 |\n| | CMRC (EM) | 1-shot | **77.4** | 75.8 | 76.0 | 76.3 |\n| | C3 (准确率) | 0-shot | 77.4 | 76.7 | **79.7** | 78.6 |\n| | CCPM (准确率) | 0-shot | **93.0** | 88.5 | 78.6 | 92.0 |\n| 多语言 | MMMLU-非英语（准确率） | 5-shot | 64.0 | 74.8 | 73.8 | **79.4** |\n\n\u003C\u002Fdiv>\n\n> [!NOTE]\n> 最佳结果以粗体显示。分数差距不超过0.3的视为同一水平。DeepSeek-V3在大多数基准测试中表现最佳，尤其是在数学和代码任务上。\n> 更多评估细节，请参阅我们的论文。\n\n#### 上下文窗口\n\u003Cp align=\"center\">\n  \u003Cimg width=\"80%\" src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fdeepseek-ai_DeepSeek-V3_readme_ad39275136c2.png\">\n\u003C\u002Fp>\n\n“大海捞针”（Needle In A Haystack，NIAH）测试的评估结果。DeepSeek-V3 在所有上下文窗口长度上均表现出色，最高可达 **128K**。\n\n### 对话模型\n#### 标准基准测试（大于67B参数的模型）\n\u003Cdiv align=\"center\">\n\n| | **基准测试（指标）** | **DeepSeek V2-0506** | **DeepSeek V2.5-0905** | **Qwen2.5 72B-Inst.** | **Llama3.1 405B-Inst.** | **Claude-3.5-Sonnet-1022** | **GPT-4o 0513** | **DeepSeek V3** |\n|---|---------------------|---------------------|----------------------|---------------------|----------------------|---------------------------|----------------|----------------|\n| | 架构 | MoE | MoE | 稠密 | 稠密 | - | - | MoE |\n| | 激活参数量 | 21B | 21B | 72B | 405B | - | - | 37B |\n| | 总参数量 | 236B | 236B | 72B | 405B | - | - | 671B |\n| 英语 | MMLU (EM) | 78.2 | 80.6 | 85.3 | **88.6** | **88.3** | 87.2 | **88.5** |\n| | MMLU-Redux (EM) | 77.9 | 80.3 | 85.6 | 86.2 | **88.9** | 88.0 | **89.1** |\n| | MMLU-Pro (EM) | 58.5 | 66.2 | 71.6 
| 73.3 | **78.0** | 72.6 | 75.9 |\n| | DROP (3-shot F1) | 83.0 | 87.8 | 76.7 | 88.7 | 88.3 | 83.7 | **91.6** |\n| | IF-Eval (Prompt Strict) | 57.7 | 80.6 | 84.1 | 86.0 | **86.5** | 84.3 | 86.1 |\n| | GPQA-Diamond (Pass@1) | 35.3 | 41.3 | 49.0 | 51.1 | **65.0** | 49.9 | 59.1 |\n| | SimpleQA (Correct) | 9.0 | 10.2 | 9.1 | 17.1 | 28.4 | **38.2** | 24.9 |\n| | FRAMES (Acc.) | 66.9 | 65.4 | 69.8 | 70.0 | 72.5 | **80.5** | 73.3 |\n| | LongBench v2 (Acc.) | 31.6 | 35.4 | 39.4 | 36.1 | 41.0 | 48.1 | **48.7** |\n| 代码 | HumanEval-Mul (Pass@1) | 69.3 | 77.4 | 77.3 | 77.2 | 81.7 | 80.5 | **82.6** |\n| | LiveCodeBench (Pass@1-COT) | 18.8 | 29.2 | 31.1 | 28.4 | 36.3 | 33.4 | **40.5** |\n| | LiveCodeBench (Pass@1) | 20.3 | 28.4 | 28.7 | 30.1 | 32.8 | 34.2 | **37.6** |\n| | Codeforces (Percentile) | 17.5 | 35.6 | 24.8 | 25.3 | 20.3 | 23.6 | **51.6** |\n| | SWE Verified (Resolved) | - | 22.6 | 23.8 | 24.5 | **50.8** | 38.8 | 42.0 |\n| | Aider-Edit (Acc.) | 60.3 | 71.6 | 65.4 | 63.9 | **84.2** | 72.9 | 79.7 |\n| | Aider-Polyglot (Acc.) | - | 18.2 | 7.6 | 5.8 | 45.3 | 16.0 | **49.6** |\n| 数学 | AIME 2024 (Pass@1) | 4.6 | 16.7 | 23.3 | 23.3 | 16.0 | 9.3 | **39.2** |\n| | MATH-500 (EM) | 56.3 | 74.7 | 80.0 | 73.8 | 78.3 | 74.6 | **90.2** |\n| | CNMO 2024 (Pass@1) | 2.8 | 10.8 | 15.9 | 6.8 | 13.1 | 10.8 | **43.2** |\n| 中文 | CLUEWSC (EM) | 89.9 | 90.4 | **91.4** | 84.7 | 85.4 | 87.9 | 90.9 |\n| | C-Eval (EM) | 78.6 | 79.5 | 86.1 | 61.5 | 76.7 | 76.0 | **86.5** |\n| | C-SimpleQA (Correct) | 48.5 | 54.1 | 48.4 | 50.4 | 51.3 | 59.3 | **64.8** |\n\n\u003C\u002Fdiv>\n\n> [!NOTE]\n> 所有模型均在输出长度限制为8K的配置下进行评估。样本数少于1000的基准测试会使用不同的温度设置多次测试，以得出稳健的最终结果。DeepSeek-V3是性能最佳的开源模型，同时在与前沿闭源模型的竞争中也表现出色。\n\n\n#### 开放式生成评估\n\n\u003Cdiv align=\"center\">\n\n\n\n| 模型 | Arena-Hard | AlpacaEval 2.0 |\n|-------|------------|----------------|\n| DeepSeek-V2.5-0905 | 76.2 | 50.5 |\n| Qwen2.5-72B-Instruct | 81.2 | 49.1 |\n| LLaMA-3.1 405B | 69.3 | 40.5 |\n| GPT-4o-0513 | 80.4 | 51.1 |\n| Claude-Sonnet-3.5-1022 | 85.2 | 52.0 |\n| DeepSeek-V3 | **85.5** | **70.0** |\n\n\u003C\u002Fdiv>\n\n> [!NOTE]\n> 英语开放式对话评估。对于AlpacaEval 2.0，我们使用控制长度后的胜率作为指标。\n\n\n## 5. 聊天网站与API平台\n您可以在DeepSeek的官方网站上与DeepSeek-V3进行对话：[chat.deepseek.com](https:\u002F\u002Fchat.deepseek.com\u002Fsign_in)\n\n我们还在DeepSeek平台提供与OpenAI兼容的API：[platform.deepseek.com](https:\u002F\u002Fplatform.deepseek.com\u002F)\n\n## 6. 如何在本地运行\n\nDeepSeek-V3 可以使用以下硬件和开源社区软件在本地部署：\n\n1. **DeepSeek-Infer 演示**：我们提供了一个简单轻量的 FP8 和 BF16 推理演示。\n2. **SGLang**：完全支持 DeepSeek-V3 模型的 BF16 和 FP8 推理模式，并即将推出多标记预测功能 [敬请期待](https:\u002F\u002Fgithub.com\u002Fsgl-project\u002Fsglang\u002Fissues\u002F2591)。\n3. **LMDeploy**：实现高效的 FP8 和 BF16 推理，适用于本地和云端部署。\n4. **TensorRT-LLM**：目前支持 BF16 推理及 INT4\u002F8 量化，FP8 支持也将很快推出。\n5. **vLLM**：支持 DeepSeek-V3 模型的 FP8 和 BF16 模式，适用于张量并行和流水线并行。\n6. **LightLLM**：支持 FP8 和 BF16 的高效单节点或多节点部署。\n7. **AMD GPU**：通过 SGLang，可在 AMD GPU 上以 BF16 和 FP8 模式运行 DeepSeek-V3 模型。\n8. 
**华为 Ascend NPU**：支持在华为 Ascend 设备上以 INT8 和 BF16 模式运行 DeepSeek-V3。\n\n由于我们的框架原生采用 FP8 训练，因此我们仅提供 FP8 权重。如果您需要 BF16 权重进行实验，可以使用提供的转换脚本进行转换。\n\n以下是将 FP8 权重转换为 BF16 的示例：\n\n```shell\ncd inference\npython fp8_cast_bf16.py --input-fp8-hf-path \u002Fpath\u002Fto\u002Ffp8_weights --output-bf16-hf-path \u002Fpath\u002Fto\u002Fbf16_weights\n```\n\n> [!NOTE]\n> Hugging Face 的 Transformers 尚未直接支持。\n\n### 6.1 使用 DeepSeek-Infer 演示进行推理（仅作示例）\n\n#### 系统要求\n\n> [!NOTE] \n> 仅支持 Python 3.10 的 Linux 系统。不支持 Mac 和 Windows。\n\n依赖项：\n```pip-requirements\ntorch==2.4.1\ntriton==3.0.0\ntransformers==4.46.3\nsafetensors==0.4.5\n```\n#### 模型权重与演示代码准备\n\n首先，克隆我们的 DeepSeek-V3 GitHub 仓库：\n\n```shell\ngit clone https:\u002F\u002Fgithub.com\u002Fdeepseek-ai\u002FDeepSeek-V3.git\n```\n\n进入 `inference` 文件夹，并安装 `requirements.txt` 中列出的依赖项。最简单的方法是使用包管理器如 `conda` 或 `uv` 创建一个新的虚拟环境并安装依赖项。\n\n```shell\ncd DeepSeek-V3\u002Finference\npip install -r requirements.txt\n```\n\n从 Hugging Face 下载模型权重，并将其放入 `\u002Fpath\u002Fto\u002FDeepSeek-V3` 文件夹中。\n\n#### 模型权重转换\n\n将 Hugging Face 模型权重转换为特定格式：\n\n```shell\npython convert.py --hf-ckpt-path \u002Fpath\u002Fto\u002FDeepSeek-V3 --save-path \u002Fpath\u002Fto\u002FDeepSeek-V3-Demo --n-experts 256 --model-parallel 16\n```\n\n#### 运行\n\n然后您就可以与 DeepSeek-V3 对话了：\n\n```shell\ntorchrun --nnodes 2 --nproc-per-node 8 --node-rank $RANK --master-addr $ADDR generate.py --ckpt-path \u002Fpath\u002Fto\u002FDeepSeek-V3-Demo --config configs\u002Fconfig_671B.json --interactive --temperature 0.7 --max-new-tokens 200\n```\n\n或者对给定文件进行批量推理：\n\n```shell\ntorchrun --nnodes 2 --nproc-per-node 8 --node-rank $RANK --master-addr $ADDR generate.py --ckpt-path \u002Fpath\u002Fto\u002FDeepSeek-V3-Demo --config configs\u002Fconfig_671B.json --input-file $FILE\n```\n\n### 6.2 使用 SGLang 进行推理（推荐）\n\n[SGLang](https:\u002F\u002Fgithub.com\u002Fsgl-project\u002Fsglang) 目前支持 [MLA 优化](https:\u002F\u002Flmsys.org\u002Fblog\u002F2024-09-04-sglang-v0-3\u002F#deepseek-multi-head-latent-attention-mla-throughput-optimizations)、[DP 注意力](https:\u002F\u002Flmsys.org\u002Fblog\u002F2024-12-04-sglang-v0-4\u002F#data-parallelism-attention-for-deepseek-models)、FP8 (W8A8)、FP8 KV 缓存以及 Torch Compile，从而在开源框架中实现了最先进的延迟和吞吐量性能。\n\n值得注意的是，[SGLang v0.4.1](https:\u002F\u002Fgithub.com\u002Fsgl-project\u002Fsglang\u002Freleases\u002Ftag\u002Fv0.4.1) 完全支持在 **NVIDIA 和 AMD GPU** 上运行 DeepSeek-V3，使其成为一个高度通用且稳健的解决方案。\n\nSGLang 还支持 [多节点张量并行](https:\u002F\u002Fgithub.com\u002Fsgl-project\u002Fsglang\u002Ftree\u002Fmain\u002Fbenchmark\u002Fdeepseek_v3#example-serving-with-2-h208)，使您能够在多台联网的机器上运行该模型。\n\n多标记预测 (MTP) 正在开发中，进展可以在 [优化计划](https:\u002F\u002Fgithub.com\u002Fsgl-project\u002Fsglang\u002Fissues\u002F2591) 中跟踪。\n\n以下是 SGLang 团队提供的启动说明：https:\u002F\u002Fgithub.com\u002Fsgl-project\u002Fsglang\u002Ftree\u002Fmain\u002Fbenchmark\u002Fdeepseek_v3\n\n### 6.3 使用 LMDeploy 进行推理（推荐）\n\n[LMDeploy](https:\u002F\u002Fgithub.com\u002FInternLM\u002Flmdeploy)，一个专为大型语言模型设计的灵活高性能推理和服务框架，现已支持 DeepSeek-V3。它提供离线流水线处理和在线部署功能，可无缝集成到基于 PyTorch 的工作流中。\n\n有关使用 LMDeploy 运行 DeepSeek-V3 的完整分步指南，请参阅此处：https:\u002F\u002Fgithub.com\u002FInternLM\u002Flmdeploy\u002Fissues\u002F2960\n\n\n### 6.4 使用 TRT-LLM 进行推理（推荐）\n\n[TensorRT-LLM](https:\u002F\u002Fgithub.com\u002FNVIDIA\u002FTensorRT-LLM) 现已支持 DeepSeek-V3 模型，提供 BF16 和 INT4\u002FINT8 仅权重等精度选项。FP8 支持目前正在开发中，不久后将发布。您可以通过以下链接访问专门用于支持 DeepSeek-V3 的 TRTLLM 自定义分支，直接体验新功能：https:\u002F\u002Fgithub.com\u002FNVIDIA\u002FTensorRT-LLM\u002Ftree\u002Fmain\u002Fexamples\u002Fdeepseek_v3。\n\n\n### 6.5 使用 vLLM 
进行推理（推荐）\n\n[vLLM](https:\u002F\u002Fgithub.com\u002Fvllm-project\u002Fvllm) v0.6.6 支持 DeepSeek-V3 在 NVIDIA 和 AMD GPU 上的 FP8 和 BF16 推理模式。除了标准技术外，vLLM 还提供 _流水线并行_ 功能，允许您在多台联网的机器上运行该模型。有关详细指导，请参阅 [vLLM 使用说明](https:\u002F\u002Fdocs.vllm.ai\u002Fen\u002Flatest\u002Fserving\u002Fdistributed_serving.html)。同时，欢迎关注 [改进计划](https:\u002F\u002Fgithub.com\u002Fvllm-project\u002Fvllm\u002Fissues\u002F11539)。\n\n### 6.6 使用 LightLLM 进行推理（推荐）\n\n[LightLLM](https:\u002F\u002Fgithub.com\u002FModelTC\u002Flightllm\u002Ftree\u002Fmain) v1.0.1 支持 DeepSeek-R1（FP8\u002FBF16）的单机和多机张量并行部署，并提供混合精度部署，未来还将持续集成更多量化模式。有关详细信息，请参阅 [LightLLM 使用说明](https:\u002F\u002Flightllm-en.readthedocs.io\u002Fen\u002Flatest\u002Fgetting_started\u002Fquickstart.html)。此外，LightLLM 还为 DeepSeek-V2 提供 PD 分离部署，而 DeepSeek-V3 的 PD 分离部署也正在开发中。\n\n### 6.7 推荐使用 AMD GPU 进行推理\n\n与 AMD 团队合作，我们借助 SGLang 实现了对 AMD GPU 的首日（Day-One）支持，完全兼容 FP8 和 BF16 精度。有关详细指导，请参阅 [SGLang 使用说明](#62-inference-with-sglang-recommended)。\n\n### 6.8 华为昇腾NPU推荐的推理功能\n华为昇腾社区的[MindIE](https:\u002F\u002Fwww.hiascend.com\u002Fen\u002Fsoftware\u002Fmindie)框架已成功适配DeepSeek-V3的BF16版本。如需昇腾NPU的分步指导，请参阅[此处说明](https:\u002F\u002Fmodelers.cn\u002Fmodels\u002FMindIE\u002Fdeepseekv3)。\n\n\n## 7. 许可证\n本代码仓库采用[MIT许可证](LICENSE-CODE)授权。DeepSeek-V3 Base\u002FChat模型的使用受[模型许可证](LICENSE-MODEL)约束。DeepSeek-V3系列（包括Base和Chat）支持商业用途。\n\n## 8. 引用\n```\n@misc{deepseekai2024deepseekv3technicalreport,\n      title={DeepSeek-V3 Technical Report}, \n      author={DeepSeek-AI},\n      year={2024},\n      eprint={2412.19437},\n      archivePrefix={arXiv},\n      primaryClass={cs.CL},\n      url={https:\u002F\u002Farxiv.org\u002Fabs\u002F2412.19437}, \n}\n```\n\n## 9. 联系方式\n如有任何问题，请提交Issue或通过[service@deepseek.com](mailto:service@deepseek.com)与我们联系。","# DeepSeek-V3 快速上手指南\n\nDeepSeek-V3 是一款拥有 6710 亿总参数（每次激活 370 亿参数）的混合专家（MoE）开源大语言模型。它支持 128K 上下文窗口，在数学、代码及通用任务上表现卓越，且采用了高效的 FP8 训练架构。\n\n## 1. 环境准备\n\n由于模型参数量巨大，本地运行对硬件要求较高。请确保满足以下条件：\n\n*   **操作系统**: Linux (推荐 Ubuntu 20.04\u002F22.04) 或 macOS (仅限量化版本)。\n*   **GPU**: \n    *   **全精度\u002F半精度推理**: 需要多卡高性能 GPU (如 NVIDIA H800\u002FA800\u002FH100)，显存需求极大。\n    *   **量化推理 (推荐)**: 使用 `llama.cpp` 或 `vLLM` 配合量化权重，可在消费级显卡或多卡环境中运行。\n*   **软件依赖**:\n    *   Python >= 3.8\n    *   CUDA >= 11.8 (NVIDIA 用户)\n    *   Git, CMake, Build-essential\n\n**前置依赖安装：**\n\n```bash\n# 安装基础工具\nsudo apt-get update && sudo apt-get install -y git cmake build-essential\n\n# 创建虚拟环境 (推荐)\npython3 -m venv deepseek-env\nsource deepseek-env\u002Fbin\u002Factivate\n\n# 安装 PyTorch (根据CUDA版本选择，此处以CUDA 11.8为例)\npip3 install torch torchvision torchaudio --index-url https:\u002F\u002Fdownload.pytorch.org\u002Fwhl\u002Fcu118\n\n# 安装 Transformers 库 (建议使用最新开发版以支持最新架构)\npip3 install --upgrade transformers accelerate sentencepiece protobuf\n```\n\n> **国内加速提示**：如遇网络问题，可使用清华源或阿里源加速 Python 包安装：\n> `pip3 install -i https:\u002F\u002Fpypi.tuna.tsinghua.edu.cn\u002Fsimple \u003Cpackage_name>`\n\n## 2. 
安装与模型下载\n\n### 方式一：通过 Hugging Face 下载 (需网络环境)\n\n```bash\n# 安装 huggingface-cli\npip3 install huggingface_hub\n\n# 下载 DeepSeek-V3 对话模型 (Base 版本位于单独的 deepseek-ai\u002FDeepSeek-V3-Base 仓库)\n# 注意：模型较大，请确保磁盘空间充足 (约 1.5TB+ 原始权重)\nhuggingface-cli download deepseek-ai\u002FDeepSeek-V3 --local-dir .\u002FDeepSeek-V3\n```\n\n### 方式二：使用 ModelScope (魔搭社区 - 国内推荐)\n\n对于国内开发者，推荐使用阿里云 ModelScope 进行高速下载：\n\n```bash\n# 安装 modelscope\npip3 install modelscope -i https:\u002F\u002Fpypi.tuna.tsinghua.edu.cn\u002Fsimple\n\n# 下载模型\npython3 -c \"from modelscope import snapshot_download; snapshot_download('deepseek-ai\u002FDeepSeek-V3', cache_dir='.\u002FDeepSeek-V3')\"\n```\n\n### 方式三：运行量化版本 (低显存方案)\n\n如果显存有限，建议下载 GGUF 量化版本并使用 `llama.cpp` 运行：\n\n```bash\n# 克隆并编译 llama.cpp (新版 Makefile 使用 GGML_CUDA 开关，编译产物为 llama-cli)\ngit clone https:\u002F\u002Fgithub.com\u002Fggerganov\u002Fllama.cpp.git\ncd llama.cpp\nmake GGML_CUDA=1 -j$(nproc)\n\n# 下载量化模型 (示例：从 HuggingFace 下载 Q4_K_M 版本，或使用 ModelScope 对应仓库)\n# 此处以手动下载 GGUF 文件到 .\u002Fmodels 目录为例\n```\n\n## 3. 基本使用\n\n### 场景 A：使用 Python Transformers 加载 (适合多卡服务器)\n\n```python\nimport torch\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\nmodel_path = \".\u002FDeepSeek-V3\"  # 模型本地路径\n\n# 加载分词器\ntokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)\n\n# 加载模型\n# 注意：根据显存情况调整 device_map 和 torch_dtype\n# 若显存不足，建议使用 bitsandbytes 进行 4bit\u002F8bit 量化加载\nmodel = AutoModelForCausalLM.from_pretrained(\n    model_path,\n    torch_dtype=torch.bfloat16,\n    device_map=\"auto\",\n    trust_remote_code=True\n)\n\n# 构建对话输入\nmessages = [\n    {\"role\": \"user\", \"content\": \"你好，请介绍一下 DeepSeek-V3 的特点。\"}\n]\n\n# 应用聊天模板\ninput_text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\ninputs = tokenizer(input_text, return_tensors=\"pt\").to(model.device)\n\n# 生成回复\noutputs = model.generate(\n    **inputs,\n    max_new_tokens=512,\n    do_sample=True,\n    temperature=0.7,\n    top_p=0.9\n)\n\n# 仅解码新生成的部分（按输入的 token 长度切片，跳过提示词）\nresponse = tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[1]:], skip_special_tokens=True)\nprint(response)\n```\n\n### 场景 B：使用 llama.cpp 运行量化模型 (适合单卡\u002F消费级显卡)\n\n假设已下载 `deepseek-v3-q4_k_m.gguf` 文件至 `.\u002Fmodels` 目录：\n\n```bash\ncd llama.cpp\n\n# 运行交互式命令行 (旧版本中可执行文件名为 main)\n.\u002Fllama-cli -m ..\u002Fmodels\u002Fdeepseek-v3-q4_k_m.gguf \\\n       -n 512 \\\n       --color \\\n       -i \\\n       -cnv \\\n       -p \"你好，请介绍一下你自己。\"\n```\n\n### 场景 C：使用 vLLM 部署高并发服务 (适合生产环境)\n\n```bash\n# 安装 vLLM\npip3 install vllm\n\n# 启动 OpenAI 兼容 API 服务（openai 入口才提供下文的 \u002Fv1\u002Fcompletions 接口）\n# --trust-remote-code 必须添加，因为 DeepSeek-V3 使用了自定义注意力机制\npython3 -m vllm.entrypoints.openai.api_server \\\n    --model .\u002FDeepSeek-V3 \\\n    --port 8000 \\\n    --trust-remote-code \\\n    --dtype bfloat16 \\\n    --max-model-len 128000\n```\n\n启动后，可通过 HTTP POST 请求调用：\n\n```bash\ncurl http:\u002F\u002Flocalhost:8000\u002Fv1\u002Fcompletions \\\n    -H \"Content-Type: application\u002Fjson\" \\\n    -d '{\n        \"model\": \".\u002FDeepSeek-V3\",\n        \"prompt\": \"Hello, how are you?\",\n        \"max_tokens\": 100\n    }'\n```
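\n\n除了 curl，也可以用 OpenAI 官方 Python SDK 调用上面启动的本地服务。以下是一个简单示例（假设服务仍运行在 8000 端口，并使用 openai>=1.0 版本的 SDK）：\n\n```python\n# 通过 OpenAI 兼容接口调用本地 vLLM 服务（示例：端口与模型名需与启动参数一致）\nfrom openai import OpenAI\n\nclient = OpenAI(base_url=\"http:\u002F\u002Flocalhost:8000\u002Fv1\", api_key=\"EMPTY\")  # vLLM 默认不校验 key\nresp = client.completions.create(\n    model=\".\u002FDeepSeek-V3\",  # 与 --model 启动参数保持一致\n    prompt=\"你好，请用一句话介绍 DeepSeek-V3。\",\n    max_tokens=100,\n)\nprint(resp.choices[0].text)\n```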
\n\n---\n**注意**：\n1. DeepSeek-V3 包含 Multi-Token Prediction (MTP) 模块，目前部分推理框架可能尚未完全支持该加速特性，请以官方更新的推理后端为准。\n2. 模型总参数量为 671B，但每次前向传播仅激活 37B 参数，推理速度主要取决于激活参数量及显存带宽。","某大型跨境电商技术团队正面临黑五促销前的紧急任务，需要在 48 小时内将数万条英文商品详情页本地化为高质量的中文、日文和西班牙文版本，同时确保专业术语准确且风格符合当地文化。\n\n### 没有 DeepSeek-V3 时\n- **成本高昂且速度慢**：调用多家闭源大模型 API 处理海量文本，不仅费用超出预算，且高并发下响应延迟严重，难以按时交付。\n- **专业术语翻译生硬**：通用模型缺乏垂直领域知识，常将特定的面料成分或电子参数翻译错误，需要人工逐条复核修正。\n- **长文档上下文丢失**：面对包含详细规格表和用户评价的长篇幅商品页，旧模型经常“遗忘”前文信息，导致译文前后逻辑矛盾。\n- **部署维护复杂**：尝试自建开源模型时，因显存占用过大导致推理集群频繁崩溃，且负载均衡策略不佳造成资源浪费。\n\n### 使用 DeepSeek-V3 后\n- **极致性价比与提速**：利用 DeepSeek-V3 的 MoE 架构（仅激活 37B 参数），推理速度提升数倍，整体算力成本降低 60%，轻松应对流量洪峰。\n- **领域知识精准匹配**：基于 14.8 万亿 token 的高质量预训练，DeepSeek-V3 能准确理解并翻译复杂的行业术语，人工复核工作量减少 80%。\n- **超长上下文完美掌控**：凭借强大的长窗口处理能力，DeepSeek-V3 能完整理解整篇商品详情，确保译文在描述规格与卖点时逻辑连贯一致。\n- **稳定高效的本地部署**：采用无辅助损失负载均衡策略，DeepSeek-V3 在本地集群运行极其稳定，无需频繁回滚或调整，资源利用率显著优化。\n\nDeepSeek-V3 以接近闭源模型的顶尖智能和开源界最低的运行成本，帮助企业在极短时间内完成了高质量的多语言本地化攻坚。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fdeepseek-ai_DeepSeek-V3_c567defa.png","deepseek-ai","DeepSeek","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002Fdeepseek-ai_04503588.png","","service@deepseek.com","https:\u002F\u002Fwww.deepseek.com\u002F","https:\u002F\u002Fgithub.com\u002Fdeepseek-ai",[83],{"name":84,"color":85,"percentage":86},"Python","#3572A5",100,102693,16665,"2026-04-20T03:58:04","MIT",5,"未说明","训练需 H800 GPU (2.788M 小时); 推理硬件需求未在本文档明确说明，但模型为 671B 参数 MoE 架构 (激活 37B)，通常需多卡高显存环境或专用推理框架支持",{"notes":95,"python":92,"dependencies":96},"模型总参数量 671B (含 14B MTP 模块)，激活参数 37B，上下文长度 128K。采用 FP8 混合精度训练框架。官方提供了 Hugging Face 下载链接，具体本地运行步骤及依赖库需参考文档第 6 节 'How to Run Locally' 或 README_WEIGHTS.md (本文档未包含该部分详细内容)。",[92],[14],27,"2026-03-27T02:49:30.150509","2026-04-20T18:25:02.141597",[],[103],{"id":104,"version":105,"summary_zh":106,"released_at":107},360115,"v1.0.0","此版本旨在存档及生成DOI。","2025-06-27T08:46:37"]