[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-state-spaces--mamba":3,"tool-state-spaces--mamba":64},[4,17,27,35,43,56],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":16},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,3,"2026-04-05T11:01:52",[13,14,15],"开发框架","图像","Agent","ready",{"id":18,"name":19,"github_repo":20,"description_zh":21,"stars":22,"difficulty_score":23,"last_commit_at":24,"category_tags":25,"status":16},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",138956,2,"2026-04-05T11:33:21",[13,15,26],"语言模型",{"id":28,"name":29,"github_repo":30,"description_zh":31,"stars":32,"difficulty_score":23,"last_commit_at":33,"category_tags":34,"status":16},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",107662,"2026-04-03T11:11:01",[13,14,15],{"id":36,"name":37,"github_repo":38,"description_zh":39,"stars":40,"difficulty_score":23,"last_commit_at":41,"category_tags":42,"status":16},3704,"NextChat","ChatGPTNextWeb\u002FNextChat","NextChat 是一款轻量且极速的 AI 助手，旨在为用户提供流畅、跨平台的大模型交互体验。它完美解决了用户在多设备间切换时难以保持对话连续性，以及面对众多 AI 模型不知如何统一管理的痛点。无论是日常办公、学习辅助还是创意激发，NextChat 都能让用户随时随地通过网页、iOS、Android、Windows、MacOS 或 Linux 端无缝接入智能服务。\n\n这款工具非常适合普通用户、学生、职场人士以及需要私有化部署的企业团队使用。对于开发者而言，它也提供了便捷的自托管方案，支持一键部署到 Vercel 或 Zeabur 等平台。\n\nNextChat 的核心亮点在于其广泛的模型兼容性，原生支持 Claude、DeepSeek、GPT-4 及 Gemini Pro 等主流大模型，让用户在一个界面即可自由切换不同 AI 能力。此外，它还率先支持 MCP（Model Context Protocol）协议，增强了上下文处理能力。针对企业用户，NextChat 提供专业版解决方案，具备品牌定制、细粒度权限控制、内部知识库整合及安全审计等功能，满足公司对数据隐私和个性化管理的高标准要求。",87618,"2026-04-05T07:20:52",[13,26],{"id":44,"name":45,"github_repo":46,"description_zh":47,"stars":48,"difficulty_score":23,"last_commit_at":49,"category_tags":50,"status":16},2268,"ML-For-Beginners","microsoft\u002FML-For-Beginners","ML-For-Beginners 是由微软推出的一套系统化机器学习入门课程，旨在帮助零基础用户轻松掌握经典机器学习知识。这套课程将学习路径规划为 12 周，包含 26 节精炼课程和 52 道配套测验，内容涵盖从基础概念到实际应用的完整流程，有效解决了初学者面对庞大知识体系时无从下手、缺乏结构化指导的痛点。\n\n无论是希望转型的开发者、需要补充算法背景的研究人员，还是对人工智能充满好奇的普通爱好者，都能从中受益。课程不仅提供了清晰的理论讲解，还强调动手实践，让用户在循序渐进中建立扎实的技能基础。其独特的亮点在于强大的多语言支持，通过自动化机制提供了包括简体中文在内的 50 
多种语言版本，极大地降低了全球不同背景用户的学习门槛。此外，项目采用开源协作模式，社区活跃且内容持续更新，确保学习者能获取前沿且准确的技术资讯。如果你正寻找一条清晰、友好且专业的机器学习入门之路，ML-For-Beginners 将是理想的起点。",84991,"2026-04-05T10:45:23",[14,51,52,53,15,54,26,13,55],"数据工具","视频","插件","其他","音频",{"id":57,"name":58,"github_repo":59,"description_zh":60,"stars":61,"difficulty_score":10,"last_commit_at":62,"category_tags":63,"status":16},3128,"ragflow","infiniflow\u002Fragflow","RAGFlow 是一款领先的开源检索增强生成（RAG）引擎，旨在为大语言模型构建更精准、可靠的上下文层。它巧妙地将前沿的 RAG 技术与智能体（Agent）能力相结合，不仅支持从各类文档中高效提取知识，还能让模型基于这些知识进行逻辑推理和任务执行。\n\n在大模型应用中，幻觉问题和知识滞后是常见痛点。RAGFlow 通过深度解析复杂文档结构（如表格、图表及混合排版），显著提升了信息检索的准确度，从而有效减少模型“胡编乱造”的现象，确保回答既有据可依又具备时效性。其内置的智能体机制更进一步，使系统不仅能回答问题，还能自主规划步骤解决复杂问题。\n\n这款工具特别适合开发者、企业技术团队以及 AI 研究人员使用。无论是希望快速搭建私有知识库问答系统，还是致力于探索大模型在垂直领域落地的创新者，都能从中受益。RAGFlow 提供了可视化的工作流编排界面和灵活的 API 接口，既降低了非算法背景用户的上手门槛，也满足了专业开发者对系统深度定制的需求。作为基于 Apache 2.0 协议开源的项目，它正成为连接通用大模型与行业专有知识之间的重要桥梁。",77062,"2026-04-04T04:44:48",[15,14,13,26,54],{"id":65,"github_repo":66,"name":67,"description_en":68,"description_zh":69,"ai_summary_zh":69,"readme_en":70,"readme_zh":71,"quickstart_zh":72,"use_case_zh":73,"hero_image_url":74,"owner_login":75,"owner_name":75,"owner_avatar_url":76,"owner_bio":77,"owner_company":77,"owner_location":77,"owner_email":77,"owner_twitter":77,"owner_website":77,"owner_url":78,"languages":79,"stars":96,"forks":97,"last_commit_at":98,"license":99,"difficulty_score":100,"env_os":101,"env_gpu":102,"env_ram":103,"env_deps":104,"category_tags":111,"github_topics":77,"view_count":23,"oss_zip_url":77,"oss_zip_packed_at":77,"status":16,"created_at":112,"updated_at":113,"faqs":114,"releases":143},2793,"state-spaces\u002Fmamba","mamba","Mamba SSM architecture","Mamba 是一种全新的状态空间模型（SSM）架构，专为处理语言建模等高密度信息序列而设计。它旨在解决传统亚二次复杂度模型在长序列任务中表现不及 Transformer，而标准 Transformer 又面临计算资源消耗巨大的痛点。Mamba 通过引入“选择性状态空间”机制，实现了线性时间的序列建模，既保留了 Transformer 级别的强大性能，又大幅提升了推理速度和内存效率。\n\n该项目的核心技术亮点在于其硬件感知的设计思路，借鉴了 FlashAttention 的高效实现策略，并持续迭代出 Mamba-2 和 Mamba-3 版本，进一步优化了结构化状态空间对偶算法及推理优先的架构原则。这使得模型在处理超长上下文时更加流畅且低成本。\n\nMamba 主要面向 AI 研究人员、深度学习开发者以及需要构建高效大语言模型的技术团队。使用者可以通过简单的 Python 接口调用其核心模块，轻松集成到现有的 PyTorch 项目中。如果你正在探索超越 Transformer 的新型架构，或需要在有限算力下部署高性能序列模型，Mamba 提供了一个经过验证且开源的强力选择。","# Mamba\n\n![Mamba](assets\u002Fselection.png \"Selective State Space\")\n> **Mamba: Linear-Time Sequence Modeling with Selective State Spaces**\\\n> Albert Gu*, Tri Dao*\\\n> Paper: https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.00752\n\n![Mamba-2](assets\u002Fssd_algorithm.png \"State Space Dual Model\")\n> **Transformers are SSMs: Generalized Models and Efficient Algorithms**\\\n>     **Through Structured State Space Duality**\\\n> Tri Dao*, Albert Gu*\\\n> Paper: https:\u002F\u002Farxiv.org\u002Fabs\u002F2405.21060\n\n![Mamba-3](assets\u002Fmamba3.png \"Inference-first State Space Model\")\n> **Mamba-3: Improved Sequence Modeling using State Space Principles**\\\n>     **Through Structured State Space Duality**\\\n> Aakash Lahoti*, Kevin Y. Li*, Berlin Chen*, Caitlin Wang*, Aviv Bick, J. 
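A quick way to confirm the install linked against your CUDA-enabled PyTorch is a one-off forward pass. This smoke test is a minimal sketch of my own (not from the README), assuming a visible CUDA device:

``` python
# Post-install smoke test: one forward pass through a tiny Mamba block on GPU.
import torch
from mamba_ssm import Mamba

assert torch.cuda.is_available(), "the fused kernels need a CUDA device"
m = Mamba(d_model=16, d_state=16, d_conv=4, expand=2).cuda()
y = m(torch.randn(1, 8, 16, device="cuda"))
print(y.shape)  # torch.Size([1, 8, 16]) if the build matched your torch correctly
```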
## Usage

We expose several levels of interface with the Mamba model.

### Selective SSM

Mamba is based on a selective SSM layer, which is the focus of the paper (Section 3; Algorithm 2).

Source: [ops/selective_scan_interface.py](mamba_ssm/ops/selective_scan_interface.py).
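For intuition about what the fused kernel computes, here is a naive, purely sequential reference of a selective scan. This is a sketch written for this page, mirroring the recurrence from the paper (input-dependent Δ, B, C make the SSM "selective"); shapes and the exponential discretization follow the paper's presentation, not necessarily the repository's exact reference implementation:

``` python
import torch

def selective_scan_sketch(x, delta, A, B, C, D):
    """Naive selective scan: h_t = exp(Δ_t A) h_{t-1} + Δ_t B_t x_t,  y_t = C_t h_t + D x_t.

    Shapes (batch b, channels d, length l, state n):
      x: (b, d, l)   delta: (b, d, l)   A: (d, n)
      B: (b, n, l)   C: (b, n, l)       D: (d,)
    """
    b, d, l = x.shape
    n = A.shape[1]
    h = x.new_zeros(b, d, n)
    ys = []
    for t in range(l):
        dA = torch.exp(delta[:, :, t, None] * A)                          # (b, d, n) discretized state matrix
        dBx = delta[:, :, t, None] * B[:, None, :, t] * x[:, :, t, None]  # (b, d, n) discretized input
        h = dA * h + dBx                                                  # recurrent state update
        ys.append(torch.einsum("bdn,bn->bd", h, C[:, :, t]))              # readout y_t = C_t h_t
    return torch.stack(ys, dim=-1) + D[None, :, None] * x                 # (b, d, l), plus skip term
```

This runs in O(l) time but one step at a time; the CUDA kernel behind `selective_scan_interface.py` computes the same recurrence in a single hardware-aware fused pass.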
### Mamba Block

The main module of this repository is the Mamba architecture block wrapping the selective SSM.

Source: [modules/mamba_simple.py](mamba_ssm/modules/mamba_simple.py).

Usage:
``` python
import torch
from mamba_ssm import Mamba

batch, length, dim = 2, 64, 16
x = torch.randn(batch, length, dim).to("cuda")
model = Mamba(
    # This module uses roughly 3 * expand * d_model^2 parameters
    d_model=dim, # Model dimension d_model
    d_state=16,  # SSM state expansion factor
    d_conv=4,    # Local convolution width
    expand=2,    # Block expansion factor
).to("cuda")
y = model(x)
assert y.shape == x.shape
```

### Mamba-2

The Mamba-2 block is implemented at [modules/mamba2.py](mamba_ssm/modules/mamba2.py).

A simpler version is at [modules/mamba2_simple.py](mamba_ssm/modules/mamba2_simple.py).

The usage is similar to Mamba(-1), reusing `x` and `dim` from above:
``` python
from mamba_ssm import Mamba2
model = Mamba2(
    # This module uses roughly 3 * expand * d_model^2 parameters
    d_model=dim, # Model dimension d_model
    d_state=64,  # SSM state expansion factor, typically 64 or 128
    d_conv=4,    # Local convolution width
    expand=2,    # Block expansion factor
).to("cuda")
y = model(x)
assert y.shape == x.shape
```

#### SSD

A minimal version of the inner SSD module (Listing 1 from the Mamba-2 paper), with conversion between "discrete" and "continuous" SSM versions, is at [modules/ssd_minimal.py](mamba_ssm/modules/ssd_minimal.py).

### Mamba-3

The Mamba-3 block is implemented at [modules/mamba3.py](mamba_ssm/modules/mamba3.py).

The usage is as follows:
``` python
import torch
from mamba_ssm import Mamba3

batch, length, dim = 2, 2048, 768
x = torch.randn(batch, length, dim).to(torch.bfloat16).to("cuda")
model = Mamba3(
    # This module uses roughly 6 * d_model^2 parameters
    d_model=dim,  # Model dimension d_model
    d_state=128,  # SSM state size
    headdim=64,   # SSM head dimension
    is_mimo=True, # Use MIMO mode
    mimo_rank=4,  # MIMO rank when is_mimo=True
    chunk_size=16,          # 64/mimo_rank if x is in bf16, else 32/mimo_rank
    is_outproj_norm=False,  # Additional post-SSM norm
    dtype=torch.bfloat16,
).to("cuda")
y = model(x)
assert y.shape == x.shape
```
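The `chunk_size` comment above couples the value to the input dtype and `mimo_rank`. A tiny helper, a convenience sketch of my own rather than part of the package, makes the rule explicit:

``` python
import torch

def mamba3_chunk_size(dtype: torch.dtype, mimo_rank: int) -> int:
    """Chunk-size rule from the usage comment above: 64/mimo_rank for bf16 inputs, else 32/mimo_rank."""
    base = 64 if dtype == torch.bfloat16 else 32
    assert base % mimo_rank == 0, "mimo_rank must divide the base chunk size"
    return base // mimo_rank

print(mamba3_chunk_size(torch.bfloat16, 4))  # 16, matching the example above
```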
### Mamba Language Model

Finally, we provide an example of a complete language model: a deep sequence model backbone (with repeating Mamba blocks) + language model head.

Source: [models/mixer_seq_simple.py](mamba_ssm/models/mixer_seq_simple.py).

This is an example of how to integrate Mamba into an end-to-end neural network.
This example is used in the generation scripts below.
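As a sketch of how this backbone is used in practice, modeled on the benchmark script referenced under Inference below: `MambaLMHeadModel` lives in `mixer_seq_simple.py`; the tokenizer choice is my assumption (the Pile checkpoints are generally paired with the GPT-NeoX vocabulary), and exact `generate` kwargs may differ by package version:

``` python
import torch
from transformers import AutoTokenizer
from mamba_ssm.models.mixer_seq_simple import MambaLMHeadModel

device = "cuda"  # the fused kernels require a GPU
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")  # assumed tokenizer
model = MambaLMHeadModel.from_pretrained("state-spaces/mamba-130m", device=device, dtype=torch.float16)

input_ids = tokenizer("Mamba is a state space model that", return_tensors="pt").input_ids.to(device)
# Sampling flags mirror the benchmark script's --topp/--temperature options.
out = model.generate(input_ids, max_length=64, top_k=0, top_p=0.9, temperature=0.7)
print(tokenizer.decode(out[0]))
```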
## Pretrained Models

Pretrained models are uploaded to
[Hugging Face](https://huggingface.co/state-spaces): `mamba-130m`, `mamba-370m`,
`mamba-790m`, `mamba-1.4b`, `mamba-2.8b`, `mamba2-130m`, `mamba2-370m`,
`mamba2-780m`, `mamba2-1.3b`, `mamba2-2.7b`, `transformerpp-2.7b`, `mamba2attn-2.7b`, trained on 300B tokens on the Pile, as well as `mamba-2.8b-slimpj`
(trained on 600B tokens on the SlimPajama dataset).

The models will be autodownloaded by the generation script below.

These models were trained on the [Pile](https://huggingface.co/datasets/EleutherAI/pile), and follow the standard model dimensions described by GPT-3 and followed by many open source models:

| Parameters | Layers | Model dim. |
|------------|--------|------------|
| 130M       | 24     | 768        |
| 370M       | 48     | 1024       |
| 790M       | 48     | 1536       |
| 1.4B       | 48     | 2048       |
| 2.8B       | 64     | 2560       |

(The layer count of Mamba doubles that of a similarly sized Transformer, since two Mamba blocks are needed for each "layer" (MHA block + MLP block) of a Transformer.)

Note: these are base models trained only for 300B tokens, without any form of downstream modification (instruction tuning, etc.).
Performance is expected to be comparable to or better than that of other architectures trained on similar data, but not to match larger or fine-tuned models.
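The table is consistent with the "roughly 3 · expand · d_model² parameters per block" comment from the usage examples. A back-of-envelope check, my own arithmetic, where the vocabulary size (GPT-NeoX tokenizer, padded) is an assumption:

``` python
# Sanity-check the per-block parameter rule against the 130M row of the table.
expand, d_model, n_layer = 2, 768, 24
vocab = 50280  # assumption: padded GPT-NeoX vocabulary for the Pile checkpoints

block_params = 3 * expand * d_model**2          # ~3.5M per Mamba block
backbone = n_layer * block_params               # ~85M across 24 blocks
embedding = vocab * d_model                     # ~38.6M (tied with the LM head)
print(f"{(backbone + embedding) / 1e6:.0f}M")   # ~124M, consistent with the nominal 130M
```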
## Evaluations

To run zero-shot evaluations of models (corresponding to Table 3 of the paper),
we use the
[lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness)
library.

1. Install `lm-evaluation-harness` with `pip install lm-eval==0.4.2`.
2. Run evaluation with (more documentation at the [lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness/tree/big-refactor) repo):
``` sh
lm_eval --model mamba_ssm --model_args pretrained=state-spaces/mamba-130m --tasks lambada_openai,hellaswag,piqa,arc_easy,arc_challenge,winogrande,openbookqa --device cuda --batch_size 256
python evals/lm_harness_eval.py --model hf --model_args pretrained=EleutherAI/pythia-160m --tasks lambada_openai,hellaswag,piqa,arc_easy,arc_challenge,winogrande --device cuda --batch_size 64
```

To reproduce the results on the `mamba-2.8b-slimpj` model reported in the blogposts:
``` sh
lm_eval --model mamba_ssm --model_args pretrained=state-spaces/mamba-2.8b-slimpj --tasks boolq,piqa,hellaswag,winogrande,arc_easy,arc_challenge,openbookqa,race,truthfulqa_mc2 --device cuda --batch_size 256
lm_eval --model mamba_ssm --model_args pretrained=state-spaces/mamba-2.8b-slimpj --tasks mmlu --num_fewshot 5 --device cuda --batch_size 256
```

To run evaluations on Mamba-2 models, simply replace the model names:
``` sh
lm_eval --model mamba_ssm --model_args pretrained=state-spaces/mamba2-2.7b --tasks lambada_openai,hellaswag,piqa,arc_easy,arc_challenge,winogrande,openbookqa --device cuda --batch_size 256
lm_eval --model mamba_ssm --model_args pretrained=state-spaces/transformerpp-2.7b --tasks lambada_openai,hellaswag,piqa,arc_easy,arc_challenge,winogrande,openbookqa --device cuda --batch_size 256
lm_eval --model mamba_ssm --model_args pretrained=state-spaces/mamba2attn-2.7b --tasks lambada_openai,hellaswag,piqa,arc_easy,arc_challenge,winogrande,openbookqa --device cuda --batch_size 256
```

Note that the result of each task might differ from reported values by 0.1-0.3 due to noise in the evaluation process.

## Inference

The script [benchmarks/benchmark_generation_mamba_simple.py](benchmarks/benchmark_generation_mamba_simple.py)
1. autoloads a model from the Hugging Face Hub,
2. generates completions of a user-specified prompt,
3. benchmarks the inference speed of this generation.

Other configurable options include the top-p (nucleus sampling) probability and the softmax temperature.

### Examples

To test generation latency (e.g. batch size = 1) with different sampling strategies:

``` sh
python benchmarks/benchmark_generation_mamba_simple.py --model-name "state-spaces/mamba-2.8b" --prompt "My cat wrote all this CUDA code for a new language model and" --topp 0.9 --temperature 0.7 --repetition-penalty 1.2
python benchmarks/benchmark_generation_mamba_simple.py --model-name "EleutherAI/pythia-2.8b" --prompt "My cat wrote all this CUDA code for a new language model and" --topp 0.9 --temperature 0.7 --repetition-penalty 1.2
python benchmarks/benchmark_generation_mamba_simple.py --model-name "state-spaces/mamba-2.8b" --prompt "My cat wrote all this CUDA code for a new language model and" --minp 0.05 --topk 0 --temperature 0.7 --repetition-penalty 1.2
```

To test generation throughput with random prompts (e.g. large batch size):
``` sh
python benchmarks/benchmark_generation_mamba_simple.py --model-name "state-spaces/mamba-2.8b" --batch 64
python benchmarks/benchmark_generation_mamba_simple.py --model-name "EleutherAI/pythia-2.8b" --batch 64
```

With Mamba-2, you just need to change the model name:
``` sh
python benchmarks/benchmark_generation_mamba_simple.py --model-name "state-spaces/mamba2-2.7b" --prompt "My cat wrote all this CUDA code for a new language model and" --topp 0.9 --temperature 0.7 --repetition-penalty 1.2
```
## Troubleshooting

### Precision
Our models were trained using PyTorch [AMP](https://pytorch.org/docs/stable/amp.html) for mixed precision. AMP keeps model parameters in float32 and casts to half precision when necessary.
Other frameworks, like DeepSpeed, instead store parameters in float16 and upcast when necessary (e.g. for optimizer accumulation).

We've observed that higher precision for the main model parameters may be necessary, because SSMs are sensitive to their recurrent dynamics. If you are experiencing instabilities,
as a first step please try a framework that stores parameters in fp32 (such as AMP).
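A minimal sketch of that recommended setup, fp32 master weights with autocast handling the per-op casts, using only standard PyTorch AMP APIs (the model size and loss here are placeholders):

``` python
import torch
from mamba_ssm import Mamba

# Parameters stay in float32; autocast casts activations per op, GradScaler guards fp16 gradients.
model = Mamba(d_model=256, d_state=16, d_conv=4, expand=2).to("cuda")  # fp32 parameters
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
scaler = torch.cuda.amp.GradScaler()

x = torch.randn(4, 128, 256, device="cuda")           # placeholder batch
with torch.autocast(device_type="cuda", dtype=torch.float16):
    loss = model(x).pow(2).mean()                     # placeholder loss
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
optimizer.zero_grad(set_to_none=True)
```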
### Initialization
Some parts of the model have initializations inherited from prior work on S4 models.
For [example](https://github.com/state-spaces/mamba/blob/f0affcf69f06d1d06cef018ff640bf080a11c421/mamba_ssm/modules/mamba_simple.py#L102), the $\Delta$ parameter has a targeted range by initializing the bias of its linear projection.
However, some frameworks may have post-initialization hooks (e.g. setting all bias terms in `nn.Linear` modules to zero).
If this is the case, you may have to add custom logic (e.g. this [line](https://github.com/state-spaces/mamba/blob/f0affcf69f06d1d06cef018ff640bf080a11c421/mamba_ssm/modules/mamba_simple.py#L104) turns off re-initializing in our trainer, but would be a no-op in any other framework)
that is specific to the training framework.

## Additional Prerequisites for AMD cards

### Patching ROCm

If you are on ROCm 6.0, run the following steps to avoid errors during compilation. This is not required for ROCm 6.1 onwards.

1. Locate your ROCm installation directory. This is typically found at `/opt/rocm/`, but may vary depending on your installation.

2. Apply the patch. Run with `sudo` in case you encounter permission issues.
   ```bash
   patch /opt/rocm/include/hip/amd_detail/amd_hip_bf16.h < rocm_patch/rocm6_0.patch
   ```

## Citation

If you use this codebase, or otherwise find our work valuable, please cite Mamba:
```
@article{mamba,
  title={Mamba: Linear-Time Sequence Modeling with Selective State Spaces},
  author={Gu, Albert and Dao, Tri},
  journal={arXiv preprint arXiv:2312.00752},
  year={2023}
}

@inproceedings{mamba2,
  title={Transformers are {SSM}s: Generalized Models and Efficient Algorithms Through Structured State Space Duality},
  author={Dao, Tri and Gu, Albert},
  booktitle={International Conference on Machine Learning (ICML)},
  year={2024}
}

@misc{lahoti2026mamba3improvedsequencemodeling,
  title={Mamba-3: Improved Sequence Modeling using State Space Principles},
  author={Aakash Lahoti and Kevin Y. Li and Berlin Chen and Caitlin Wang and Aviv Bick and J. Zico Kolter and Tri Dao and Albert Gu},
  year={2026},
  eprint={2603.15569},
  archivePrefix={arXiv},
  primaryClass={cs.LG},
  url={https://arxiv.org/abs/2603.15569},
}
```
---

## Use Case

A large fintech company is building a real-time news-sentiment system for high-frequency trading that must ingest tens of thousands of news items per second and predict market moves immediately.

### Without Mamba
- **Memory blow-up limits scale**: Transformer-based models grow quadratically in memory with sequence length, so long-context market history cannot be processed and key information must be truncated.
- **Inference latency too high**: on long-sequence generation tasks, the attention bottleneck pushes single predictions to hundreds of milliseconds, far outside the latency budget of high-frequency trading.
- **Poor hardware utilization**: architectures that ignore the GPU memory hierarchy waste compute on data movement rather than core computation, keeping cluster costs high.
- **Long-range dependencies lost**: over sequences of tens of thousands of tokens, the model fails to relate distant key events to the current market state, degrading prediction accuracy.

### With Mamba
- **Linear scaling saves memory**: Mamba's state space mechanism reduces complexity to linear, supporting very long contexts and preserving the full market history without exhausting GPU memory.
- **Real-time inference at constant per-token cost**: inference speed does not degrade with context length, compressing response times to milliseconds.
- **Hardware-aware efficiency**: following FlashAttention's design philosophy, Mamba optimizes GPU memory access patterns, raising throughput several-fold on the same hardware.
- **Accurate long-range association**: the selective state space mechanism dynamically filters key information, linking early policy signals to current price action across very long texts.

With linear time complexity and hardware-aware design, Mamba removes the performance and cost bottlenecks of long-sequence modeling and makes real-time, large-scale time-series analysis practical.
## Environment & Dependencies

- **OS**: Linux
- **GPU**: required. Primarily NVIDIA (CUDA 11.6+); AMD cards need ROCm, with a patch required on ROCm 6.0 (not needed from 6.1 onwards). VRAM requirements are not specified, but must suffice to load the model in use (e.g. the 2.8B-parameter checkpoints).
- **RAM**: not specified.
- **Dependencies**: `torch>=1.12`, `causal-conv1d>=1.4.0`, `mamba-ssm`, `lm-eval==0.4.2` (evaluations only).
- **Notes**: always install with `--no-build-isolation` so pip builds against your existing CUDA-enabled PyTorch rather than pulling a CPU-only torch into an isolated build environment. Mamba-3 must be force-rebuilt from source. Training and inference are precision-sensitive: prefer mixed precision (AMP) with master parameters kept in float32.

## FAQ

**Loss or gradients become NaN when training with Mamba2. How do I fix it?**

This is usually caused by a sequence length that is not a multiple of 256, or by a bad model-dimension configuration. Steps to try:
1. Make sure the sequence length is a multiple of 256; a related bug was fixed upstream in v2.0.4.
2. Check the model dimensions: `d_model * expand / headdim` must be a multiple of 8, otherwise the causal_conv1d stride requirement is violated (see the dimension check sketched at the end of this FAQ section).
3. Setting `A_init_range` to (1, 1.1) sometimes mitigates the problem.
4. If it persists, disable mixed precision (use fp32) and lower the learning rate.

Source: https://github.com/state-spaces/mamba/issues/352

**Why is Mamba2 inference much slower than Mamba1 (e.g. 9× slower)?**

Mamba2 is written largely in Triton, which incurs significant CPU overhead and compilation latency for small layer sizes. Remedies:
1. **Warm up first**: the first run triggers Triton compilation and autotuning, which takes a long time; run the model once before timing (see the timing sketch after this answer).
2. **Benchmark at realistic sizes**: small batches or short sequences do not reflect real performance; increase the batch size or sequence length.
3. **Reduce overhead**: use CUDA Graphs or `torch.compile`.

Source: https://github.com/state-spaces/mamba/issues/355
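The warm-up advice above in sketch form, my own minimal pattern using only standard PyTorch timing primitives (sizes are illustrative):

``` python
import time
import torch
from mamba_ssm import Mamba2

model = Mamba2(d_model=1024, d_state=64, d_conv=4, expand=2).to("cuda")
x = torch.randn(2, 2048, 1024, device="cuda")

for _ in range(3):           # warm-up: triggers Triton compilation and autotuning
    model(x)
torch.cuda.synchronize()     # wait for warm-up kernels before starting the clock

t0 = time.perf_counter()
for _ in range(10):
    model(x)
torch.cuda.synchronize()     # kernels launch asynchronously; sync before reading the clock
print(f"{(time.perf_counter() - t0) / 10 * 1e3:.1f} ms / forward")
```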
**Grad norm becomes NaN when using Mamba in vision tasks (e.g. Dinov2). What can I do?**

Training vision models (e.g. Vim/Dinov2) in bfloat16 is prone to gradient explosions that produce NaNs. Suggested steps:
1. **Disable mixed precision**: temporarily turn off AMP and switch to pure fp32 training.
2. **Lower the learning rate**: reduce it sharply, e.g. from the default 5e-4 down to 5e-6, to stabilize training.
3. **Check version compatibility**: make sure your `causal_conv1d` and `mamba-ssm` versions are fully compatible with your PyTorch and CUDA versions; some combinations have known bf16 issues.

Source: https://github.com/state-spaces/mamba/issues/400

**`ImportError: undefined symbol: _ZN3c104cuda9SetDeviceEab` when importing mamba_ssm. How do I fix it?**

This usually means the prebuilt wheel does not match your PyTorch version, or the local build environment is broken. Fixes:
1. **Upgrade PyTorch** to a newer release (e.g. 2.8 or later) so the CUDA symbols match.
2. **Rebuild from source**: uninstall the existing packages, set `CAUSAL_CONV1D_FORCE_BUILD=TRUE` and `MAMBA_FORCE_BUILD=TRUE`, then reinstall with `pip install --no-cache-dir -e .`, making sure the build environment matches the PyTorch/CUDA you run against.

Source: https://github.com/state-spaces/mamba/issues/788

**`RuntimeError: causal_conv1d requires strides to be multiples of 8` at runtime?**

The channels-last layout imposes an alignment requirement on memory strides; the root cause is a model-dimension configuration that violates the hardware alignment rule.
**Fix**: adjust the model parameters so that `d_model * expand / headdim` is a multiple of 8 (see the check sketched below). Do not force your way past this error, or downstream computations will produce NaNs.

Source: https://github.com/state-spaces/mamba/issues/352

**How do I force a local rebuild of causal_conv1d or mamba_ssm to resolve compatibility issues?**

When the prebuilt binaries are incompatible with your environment (a particular CUDA version or GPU architecture), force a local compile:
1. For causal_conv1d: `CAUSAL_CONV1D_FORCE_BUILD=TRUE pip install --user -e .`
2. For mamba_ssm: `MAMBA_FORCE_BUILD=TRUE pip install --user -e .`

This skips the prebuilt packages and compiles the extension modules directly in your local environment.

Source: https://github.com/state-spaces/mamba/issues/352
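The alignment rule that the first and fifth FAQs both point at, as a standalone check. This is a convenience sketch of my own, not an API of the package:

``` python
def check_mamba2_dims(d_model: int, expand: int = 2, headdim: int = 64) -> None:
    """Enforce the causal_conv1d alignment rule from the FAQs above: d_model*expand/headdim % 8 == 0."""
    inner = d_model * expand
    assert inner % headdim == 0, f"d_model*expand ({inner}) must be divisible by headdim ({headdim})"
    nheads = inner // headdim
    assert nheads % 8 == 0, (
        f"d_model*expand/headdim = {nheads} is not a multiple of 8; "
        "expect stride errors or NaNs; adjust d_model, expand, or headdim"
    )

check_mamba2_dims(2048)   # passes: 2048*2/64 = 64
# check_mamba2_dims(192)  # would raise: 192*2/64 = 6, not a multiple of 8
```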
## Releases

### v2.3.1 (2026-03-10)

- ci: free up the containerd path, by @ko3n1g in https://github.com/state-spaces/mamba/pull/847
- Fix ROCm 7.0+ compatibility: define constexpr WARP_THREADS and a lane_id mask for 64-wide wavefronts, by @AndreasKaratzas in https://github.com/state-spaces/mamba/pull/831

New contributors: @AndreasKaratzas made their first contribution in https://github.com/state-spaces/mamba/pull/831

Full changelog: https://github.com/state-spaces/mamba/compare/v2.3.0...v2.3.1

### Earlier releases

| Version | Released |
|---------|----------|
| v2.3.0 | 2026-01-12 |
| v2.2.6.post3 | 2025-10-10 |
| v2.2.6.post2 | 2025-10-02 |
| v2.2.6.post1 | 2025-10-01 |
| v2.2.5 | 2025-07-19 |
| v2.2.4 | 2024-12-06 |
| v2.2.3.post2 | 2024-12-06 |
| v2.2.3.post1 | 2024-12-06 |
| v2.2.3 | 2024-12-06 |
| v2.2.2 | 2024-07-03 |
| v2.2.1 | 2024-07-01 |
| v2.2.0 | 2024-07-01 |
| v2.1.0 | 2024-06-18 |
| v2.0.4 | 2024-06-12 |
| v2.0.3 | 2024-06-03 |
| v2.0.2 | 2024-06-03 |
| v2.0.1 | 2024-06-03 |
| v2.0.0 | 2024-06-03 |
| v1.2.2 | 2024-05-26 |