[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-OpenBMB--DeepThinkVLA":3,"tool-OpenBMB--DeepThinkVLA":65},[4,23,32,40,49,57],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":22},2268,"ML-For-Beginners","microsoft\u002FML-For-Beginners","ML-For-Beginners 是由微软推出的一套系统化机器学习入门课程，旨在帮助零基础用户轻松掌握经典机器学习知识。这套课程将学习路径规划为 12 周，包含 26 节精炼课程和 52 道配套测验，内容涵盖从基础概念到实际应用的完整流程，有效解决了初学者面对庞大知识体系时无从下手、缺乏结构化指导的痛点。\n\n无论是希望转型的开发者、需要补充算法背景的研究人员，还是对人工智能充满好奇的普通爱好者，都能从中受益。课程不仅提供了清晰的理论讲解，还强调动手实践，让用户在循序渐进中建立扎实的技能基础。其独特的亮点在于强大的多语言支持，通过自动化机制提供了包括简体中文在内的 50 多种语言版本，极大地降低了全球不同背景用户的学习门槛。此外，项目采用开源协作模式，社区活跃且内容持续更新，确保学习者能获取前沿且准确的技术资讯。如果你正寻找一条清晰、友好且专业的机器学习入门之路，ML-For-Beginners 将是理想的起点。",85013,2,"2026-04-06T11:09:19",[13,14,15,16,17,18,19,20,21],"图像","数据工具","视频","插件","Agent","其他","语言模型","开发框架","音频","ready",{"id":24,"name":25,"github_repo":26,"description_zh":27,"stars":28,"difficulty_score":29,"last_commit_at":30,"category_tags":31,"status":22},3128,"ragflow","infiniflow\u002Fragflow","RAGFlow 是一款领先的开源检索增强生成（RAG）引擎，旨在为大语言模型构建更精准、可靠的上下文层。它巧妙地将前沿的 RAG 技术与智能体（Agent）能力相结合，不仅支持从各类文档中高效提取知识，还能让模型基于这些知识进行逻辑推理和任务执行。\n\n在大模型应用中，幻觉问题和知识滞后是常见痛点。RAGFlow 通过深度解析复杂文档结构（如表格、图表及混合排版），显著提升了信息检索的准确度，从而有效减少模型“胡编乱造”的现象，确保回答既有据可依又具备时效性。其内置的智能体机制更进一步，使系统不仅能回答问题，还能自主规划步骤解决复杂问题。\n\n这款工具特别适合开发者、企业技术团队以及 AI 研究人员使用。无论是希望快速搭建私有知识库问答系统，还是致力于探索大模型在垂直领域落地的创新者，都能从中受益。RAGFlow 提供了可视化的工作流编排界面和灵活的 API 接口，既降低了非算法背景用户的上手门槛，也满足了专业开发者对系统深度定制的需求。作为基于 Apache 2.0 协议开源的项目，它正成为连接通用大模型与行业专有知识之间的重要桥梁。",77062,3,"2026-04-04T04:44:48",[17,13,20,19,18],{"id":33,"name":34,"github_repo":35,"description_zh":36,"stars":37,"difficulty_score":29,"last_commit_at":38,"category_tags":39,"status":22},519,"PaddleOCR","PaddlePaddle\u002FPaddleOCR","PaddleOCR 是一款基于百度飞桨框架开发的高性能开源光学字符识别工具包。它的核心能力是将图片、PDF 等文档中的文字提取出来，转换成计算机可读取的结构化数据，让机器真正“看懂”图文内容。\n\n面对海量纸质或电子文档，PaddleOCR 解决了人工录入效率低、数字化成本高的问题。尤其在人工智能领域，它扮演着连接图像与大型语言模型（LLM）的桥梁角色，能将视觉信息直接转化为文本输入，助力智能问答、文档分析等应用场景落地。\n\nPaddleOCR 适合开发者、算法研究人员以及有文档自动化需求的普通用户。其技术优势十分明显：不仅支持全球 100 多种语言的识别，还能在 Windows、Linux、macOS 等多个系统上运行，并灵活适配 CPU、GPU、NPU 等各类硬件。作为一个轻量级且社区活跃的开源项目，PaddleOCR 既能满足快速集成的需求，也能支撑前沿的视觉语言研究，是处理文字识别任务的理想选择。",74991,"2026-04-06T23:16:49",[19,13,20,18],{"id":41,"name":42,"github_repo":43,"description_zh":44,"stars":45,"difficulty_score":46,"last_commit_at":47,"category_tags":48,"status":22},3215,"awesome-machine-learning","josephmisiti\u002Fawesome-machine-learning","awesome-machine-learning 是一份精心整理的机器学习资源清单，汇集了全球优秀的机器学习框架、库和软件工具。面对机器学习领域技术迭代快、资源分散且难以甄选的痛点，这份清单按编程语言（如 Python、C++、Go 等）和应用场景（如计算机视觉、自然语言处理、深度学习等）进行了系统化分类，帮助使用者快速定位高质量项目。\n\n它特别适合开发者、数据科学家及研究人员使用。无论是初学者寻找入门库，还是资深工程师对比不同语言的技术选型，都能从中获得极具价值的参考。此外，清单还延伸提供了免费书籍、在线课程、行业会议、技术博客及线下聚会等丰富资源，构建了从学习到实践的全链路支持体系。\n\n其独特亮点在于严格的维护标准：明确标记已停止维护或长期未更新的项目，确保推荐内容的时效性与可靠性。作为机器学习领域的“导航图”，awesome-machine-learning 以开源协作的方式持续更新，旨在降低技术探索门槛，让每一位从业者都能高效地站在巨人的肩膀上创新。",72149,1,"2026-04-03T21:50:24",[20,18],{"id":50,"name":51,"github_repo":52,"description_zh":53,"stars":54,"difficulty_score":46,"last_commit_at":55,"category_tags":56,"status":22},2234,"scikit-learn","scikit-learn\u002Fscikit-learn","scikit-learn 是一个基于 Python 构建的开源机器学习库，依托于 SciPy、NumPy 等科学计算生态，旨在让机器学习变得简单高效。它提供了一套统一且简洁的接口，涵盖了从数据预处理、特征工程到模型训练、评估及选择的全流程工具，内置了包括线性回归、支持向量机、随机森林、聚类等在内的丰富经典算法。\n\n对于希望快速验证想法或构建原型的数据科学家、研究人员以及 Python 开发者而言，scikit-learn 是不可或缺的基础设施。它有效解决了机器学习入门门槛高、算法实现复杂以及不同模型间调用方式不统一的痛点，让用户无需重复造轮子，只需几行代码即可调用成熟的算法解决分类、回归、聚类等实际问题。\n\n其核心技术亮点在于高度一致的 API 
设计风格，所有估算器（Estimator）均遵循相同的调用逻辑，极大地降低了学习成本并提升了代码的可读性与可维护性。此外，它还提供了强大的模型选择与评估工具，如交叉验证和网格搜索，帮助用户系统地优化模型性能。作为一个由全球志愿者共同维护的成熟项目，scikit-learn 以其稳定性、详尽的文档和活跃的社区支持，成为连接理论学习与工业级应用的最",65644,"2026-04-06T10:25:08",[20,18,14],{"id":58,"name":59,"github_repo":60,"description_zh":61,"stars":62,"difficulty_score":10,"last_commit_at":63,"category_tags":64,"status":22},3364,"keras","keras-team\u002Fkeras","Keras 是一个专为人类设计的深度学习框架，旨在让构建和训练神经网络变得简单直观。它解决了开发者在不同深度学习后端之间切换困难、模型开发效率低以及难以兼顾调试便捷性与运行性能的痛点。\n\n无论是刚入门的学生、专注算法的研究人员，还是需要快速落地产品的工程师，都能通过 Keras 轻松上手。它支持计算机视觉、自然语言处理、音频分析及时间序列预测等多种任务。\n\nKeras 3 的核心亮点在于其独特的“多后端”架构。用户只需编写一套代码，即可灵活选择 TensorFlow、JAX、PyTorch 或 OpenVINO 作为底层运行引擎。这一特性不仅保留了 Keras 一贯的高层易用性，还允许开发者根据需求自由选择：利用 JAX 或 PyTorch 的即时执行模式进行高效调试，或切换至速度最快的后端以获得最高 350% 的性能提升。此外，Keras 具备强大的扩展能力，能无缝从本地笔记本电脑扩展至大规模 GPU 或 TPU 集群，是连接原型开发与生产部署的理想桥梁。",63927,"2026-04-04T15:24:37",[20,14,18],{"id":66,"github_repo":67,"name":68,"description_en":69,"description_zh":70,"ai_summary_zh":70,"readme_en":71,"readme_zh":72,"quickstart_zh":73,"use_case_zh":74,"hero_image_url":75,"owner_login":76,"owner_name":76,"owner_avatar_url":77,"owner_bio":78,"owner_company":79,"owner_location":79,"owner_email":80,"owner_twitter":76,"owner_website":81,"owner_url":82,"languages":83,"stars":96,"forks":97,"last_commit_at":98,"license":99,"difficulty_score":100,"env_os":101,"env_gpu":102,"env_ram":103,"env_deps":104,"category_tags":112,"github_topics":113,"view_count":10,"oss_zip_url":79,"oss_zip_packed_at":79,"status":22,"created_at":118,"updated_at":119,"faqs":120,"releases":131},4812,"OpenBMB\u002FDeepThinkVLA","DeepThinkVLA","DeepThinkVLA: Enhancing Reasoning Capability of Vision-Language-Action Models","DeepThinkVLA 是一款旨在增强具身智能体推理能力的开源模型，专注于提升视觉 - 语言 - 动作（VLA）系统在复杂任务中的表现。它主要解决了传统 VLA 模型因缺乏显式思考过程而导致决策精度不足、难以处理长序列任务的痛点。通过引入“先思考后行动”的机制，该模型能在执行动作前生成清晰的推理链条，从而显著提高了任务成功率。\n\n这款工具特别适合从事机器人学习、具身人工智能研究的科研人员，以及希望部署高可靠性策略的开发者使用。其核心技术亮点在于独特的混合注意力架构：它将自回归的推理生成与并行的动作输出解耦，既保证了逻辑推导的严谨性，又大幅降低了控制延迟。此外，DeepThinkVLA 结合了基于结果的强化学习与两阶段思维链数据构建流程，在 LIBERO 基准测试中取得了平均 97.0% 的成功率。即使在屏蔽部分推理内容的情况下，它仍能保持高精度运行，展现了卓越的鲁棒性与效率，是推动具身智能从“直觉反应”迈向“深度思考”的重要实践。","\u003Cp align=\"center\">\n  \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FOpenBMB_DeepThinkVLA_readme_73872808ac60.png\" alt=\"DeepThinkVLA hero\" width=\"400\">\n\u003C\u002Fp>\n\n\u003Ch1 align=\"center\">🔥 DeepThinkVLA 🔥\u003C\u002Fh1>\n\n\u003Cp align=\"center\">\n  Enhancing Reasoning Capability of Vision-Language-Action Models\n\u003C\u002Fp>\n\n\u003Cp align=\"center\">\n  \u003Ca href=\"https:\u002F\u002Farxiv.org\u002Fabs\u002F2511.15669\">\n    \u003Cimg src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FPaper-arXiv-b31b1b?style=for-the-badge&logo=arxiv&logoColor=white\" alt=\"arXiv Paper\">\n  \u003C\u002Fa>\n  \u003Ca href=\"https:\u002F\u002Fhuggingface.co\u002Fcollections\u002Fyinchenghust\u002Fdeepthinkvla-68ec8f6bef718c72d32c5025\">\n    \u003Cimg src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FWeights-HuggingFace-ffcd00?style=for-the-badge&logo=huggingface&logoColor=black\" alt=\"Hugging Face Weights\">\n  \u003C\u002Fa>\n  \u003Ca href=\"https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002Fyinchenghust\u002Flibero_cot\">\n    \u003Cimg src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FDatasets-HuggingFace-ffcd00?style=for-the-badge&logo=huggingface&logoColor=black\" alt=\"Dataset\">\n  \u003C\u002Fa>\n\u003C\u002Fp>\n\n# DeepThinkVLA: Enhancing Reasoning Capability of Vision-Language-Action Models\n\n## 🔗 Quick Links\n- [Overview](#overview)\n- 
[Highlights](#highlights)\n- [Architecture](#architecture)\n- [Embodied CoT Dataset](#embodied-cot-dataset)\n- [Training Pipeline](#training-pipeline)\n- [Performance](#performance)\n- [LIBERO Plus Zero-shot Evaluation](#-libero-zero-shot-evaluation)\n- [Qualitative Behavior](#qualitative-behavior)\n- [Setup](#setup)\n- [Data & Checkpoints](#data--checkpoints)\n- [Experiments](#experiments)\n- [Repository Structure](#repository-structure)\n- [Star History](#star-history)\n- [Acknowledgements](#acknowledgements)\n- [References](#references)\n\n## 📰 News\n- **2026-01-20**: Added **LIBERO Plus** zero-shot evaluation instructions + results (see the standalone eval repo: [`wadeKeith\u002FDeepThinkVLA_libero_plus`](https:\u002F\u002Fgithub.com\u002FwadeKeith\u002FDeepThinkVLA_libero_plus#)).\n\n## 📝 TODO\n- [x] LIBERO benchmark\n- [x] LIBERO Plus zero-shot evaluation\n- [ ] RobotWin benchmark\n- [ ] Real-world hardware experiments\n\n## 🧠 Overview\nDeepThinkVLA rethinks Vision-Language-Action (VLA) policies with explicit deliberation. Starting from the public pi0-FAST checkpoint, we refactor the policy into a 2.9B parameter hybrid decoder that writes a reasoning trace before emitting action chunks. The accompanying paper combines embodied Chain-of-Thought (CoT) supervised fine-tuning with outcome-driven reinforcement learning, yielding a 97.0% average success rate across the LIBERO benchmark (Object 99.0, Spatial 96.6, Goal 96.4, Long 96.2). The hybrid architecture alone lifts success by 15.5 percentage points over a naive autoregressive CoT variant, and the RL refinement supplies the final +2.0 point boost on LIBERO-Long.\n\n## ✨ Highlights\n- Hybrid attention decoder cleanly separates autoregressive reasoning from parallel action generation, closing the latency gap while keeping control precise.\n- Two-stage CoT data engine distills key frames with a cloud LVLM and scales to full trajectories via a fine-tuned local VLM.\n- Outcome-based RL with grouped credit assignment aligns the full think-act sequence and stabilizes updates with KL regularization to the SFT policy.\n- Masked-CoT(DeepThinkVLA) inference preserves accuracy (96.5% average SR) while running 0.175x the latency of pi0-FAST(Autoregressive), whereas random CoT quickly degrades performance (85.1%).\n\n## 🏗️ Architecture\n![Hybrid attention architecture](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FOpenBMB_DeepThinkVLA_readme_bdfb95dcec6e.png)\n\nDeepThinkVLA inserts a `\u003Cthink>` segment between observations and actions. Reasoning tokens are generated autoregressively, after which the decoder switches to bidirectional attention to emit action vectors in parallel. 
This resolves the modality conflict that limits single-decoder baselines and enables efficient rollouts for downstream reinforcement learning.\n\n## 📦 Embodied CoT Dataset\n![Two-stage CoT curation](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FOpenBMB_DeepThinkVLA_readme_3e1debad70ef.png)\n\nA scalable annotation pipeline supplies paired reasoning\u002Faction traces:\n- Stage 1 isolates key frames via gripper-state heuristics, queries a cloud LVLM for high-quality CoT, and performs targeted human review.\n- Stage 2 fine-tunes a local VLM on those exemplars and auto-labels the remaining frames, applying schema and temporal checks to keep trajectories coherent.\n\n## 🔄 Training Pipeline\n![Two-stage training with RL alignment](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FOpenBMB_DeepThinkVLA_readme_bb65d9c87b05.png)\n\nTraining proceeds in two stages:\n- **SFT cold start:** token-level cross-entropy teaches the hybrid decoder to produce well-formed CoT and aligned actions under causal\u002Fbidirectional masks.\n- **Outcome-driven RL:** grouped reinforcement policy optimization (GRPO) standardizes sparse rewards inside task-conditioned batches, while a KL penalty to the SFT policy prevents drift. The RL stage adds +2.0 SR on LIBERO-Long and strengthens the causal link between thought and action.\n\n## 📊 Performance\n![Effect of RL and architecture choices](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FOpenBMB_DeepThinkVLA_readme_f6636b43ee04.png)\n- DeepThinkVLA reaches a 97.0% average success rate across LIBERO, outperforming autoregressive, diffusion, and parallel-decoding baselines under the single-model protocol.\n- RL-over-SFT lifts LIBERO-Long from 94.2% to 96.2% without extra demonstrations, demonstrating recoveries on long-horizon tasks.\n- The hybrid decoder outperforms the naive autoregressive CoT variant by 15.5 points and keeps latency manageable; Mask CoT inference keeps accuracy while running 0.175x pi0-FAST latency.\n\n## 🧪 LIBERO Plus Zero-shot Evaluation\nWe additionally report **zero-shot transfer performance on LIBERO Plus**:\n\n- **Training**: the model is trained **only on the standard LIBERO dataset** (no LIBERO Plus fine-tuning).\n- **Evaluation**: the trained model is **directly evaluated on LIBERO Plus** (zero-shot).\n- **Eval scripts**: we maintain a lightweight, standalone evaluation repo here:\n  - [`wadeKeith\u002FDeepThinkVLA_libero_plus`](https:\u002F\u002Fgithub.com\u002FwadeKeith\u002FDeepThinkVLA_libero_plus#)\n\n### Run (in the LIBERO Plus eval repo)\n```bash\npython experiments\u002Frun_libero_plus_eval.py \\\n  --pretrained_checkpoint \u002Fpath\u002Fto\u002Fdeepthinkvla_libero_checkpoint \\\n  --num_images_in_input 2 \\\n  --task_suite_name libero_10 \\\n  --max_new_tokens 2048 \\\n  --swanlab_mode disabled\n```\n\nOr use the wrapper:\n```bash\nbash eval.sh\n```\n\n### Outputs\n- **Logs**: `experiments\u002Flogs\u002F`\n- **Rollout videos** (if enabled): `rollouts\u002F`\n\n### Zero-shot Results (LIBERO Plus)\nThe following numbers are **zero-shot success rates (SR)** on **LIBERO Plus**, evaluated with a DeepThinkVLA model **trained only on LIBERO** (no LIBERO Plus fine-tuning).\n\n#### Breakdown by shift type\n\n| Objects Layout | Language Instructions | Light Conditions | Camera Viewpoints | Robot Initial States | Background Textures | Sensor Noise | Total |\n| -------------- | --------------------- | ---------------- | ----------------- | -------------------- | ------------------- | ------------ | ----- |\n| 0.7993         | 
0.845                 | 0.900            | 0.885             | 0.405                | 0.753               | 0.944        | 0.790 |\n\n#### Breakdown by task suite\n\n| object | spatial | goal  | 10    | Total |\n| ------ | ------- | ----- | ----- | ----- |\n| 0.840  | 0.879   | 0.697 | 0.746 | 0.790 |\n\n## 🎬 Qualitative Behavior\n![Reasoning-enabled recovery](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FOpenBMB_DeepThinkVLA_readme_8cdc1fd6c37b.png)\nDeliberate reasoning enables self-correction: when the robot drops an object, CoT-aware decoding identifies the mistake and guides a recovery action, whereas the reactive baseline stalls.\n\n## 🛠️ Setup\nTested on Linux\u002FWSL with NVIDIA GPUs (CUDA 12.x) and Python >= 3.10. Full SFT typically requires >= 8x80GB GPUs; RL runs assume a multi-node setup similar to `scripts\u002Frun_deepthinkvla_rl.sh`.\n\n```bash\nconda create -n deepthinkvla python=3.10 -y\nconda activate deepthinkvla\npip install -r requirements.txt\n```\n\nIf installation fails with `egl_probe`, install `cmake==3.31.6`, fetch the patched wheel, and retry:\n\n```bash\npip install cmake==3.31.6\nwget https:\u002F\u002Fgithub.com\u002Fmhandb\u002Fegl_probe\u002Farchive\u002Ffix_windows_build.zip\npip install fix_windows_build.zip\npip install -r requirements.txt\n```\n\nConfigure optional logging backends (Weights & Biases, SwanLab) before launching experiments.\n\n## 💾 Data & Checkpoints\n1. **LIBERO CoT demonstrations** (paper Sec. 3.2):\n   ```bash\n   bash data\u002Fdownload_libero_cot.sh data\u002Fdatasets\u002Fyinchenghust\u002Flibero_cot yinchenghust\u002Flibero_cot\n   ```\n2. **LIBERO simulation dataset**:\n   ```bash\n   huggingface-cli download --repo-type dataset --resume-download yifengzhu-hf\u002FLIBERO-datasets --local-dir .\u002Fsrc\u002Flibero\u002Fdatasets\u002F\n   ```\n3. **Base model weights**:\n   ```bash\n   huggingface-cli download --repo-type model \\\n       --resume-download yinchenghust\u002Fdeepthinkvla_base \\\n       --local-dir yinchenghust\u002Fdeepthinkvla_base\u002F\n   ```\n4. **Released SFT checkpoints**:\n   ```bash\n   huggingface-cli download --repo-type model \\\n       --resume-download yinchenghust\u002Fdeepthinkvla_libero_cot_sft \\\n       --local-dir yinchenghust\u002Fdeepthinkvla_libero_cot_sft\u002F\n   ```\n5. 
**Released SFT+RL checkpoints**:\n   ```bash\n   huggingface-cli download --repo-type model \\\n       --resume-download yinchenghust\u002Fdeepthinkvla_libero_cot_rl \\\n       --local-dir yinchenghust\u002Fdeepthinkvla_libero_cot_rl\u002F\n   ```\n\nAuthenticate with `huggingface-cli login` if assets are private.\n\n## 🧪 Experiments\nAll scripts assume the repository root as the working directory and extend `PYTHONPATH` to `src\u002F`.\n\n### Supervised fine-tuning (Table 1)\n```bash\nbash scripts\u002Ffinetune.sh\n```\nThis expands to:\n```bash\ndeepspeed src\u002Ftrain.py \\\n  --deepspeed .\u002Fsrc\u002Fconfigs\u002Fzero2.json \\\n  --base_model_path \u003Chf_base_model_id_or_local_path> \\\n  --repo_id \u003Chf_dataset_repo>\u002Flibero_cot \\\n  --output_dir .\u002Fcheckpoints\u002Fsft\u002Fdeepthinkvla\u002Flibero_cot \\\n  --per_device_train_batch_size 8 \\\n  --gradient_accumulation_steps 2 \\\n  --num_images_in_input 2 \\\n  --report_to none\n```\nKey flags: toggle `--num_images_in_input` for the single-camera variant, adjust `--bits`, `--lora_enable`, `--vision_lora`, and match schedules with `--max_steps`, `--save_steps`, and `--save_total_limit`.\n\n### Evaluation\n```bash\nbash scripts\u002Feval.sh \\\n  --pretrained_checkpoint yinchenghust\u002Fdeepthinkvla_libero_cot_sft\n```\nAdd arguments such as `--task_suite_name libero_10` to sweep specific task sets.\n\n### RL refinement (Table 3)\n```bash\nbash scripts\u002Frun_deepthinkvla_rl.sh\n```\nConfigure `LIBERO_CONFIG_PATH`, `SFT_MODEL_PATH`, and hardware settings (`NUM_GPUS`, `NUM_NODES`). The trainer (`python -m verl.trainer.main_ppo`) implements GRPO with sparse success rewards, format regularization, and KL penalties to remain close to the SFT policy.\n```bash\nbash scripts\u002Feval.sh \\\n  --pretrained_checkpoint yinchenghust\u002Fdeepthinkvla_libero_cot_rl\n```\n\n### Ablations\n- **Mask CoT**: swap `get_vla_action` for `get_vla_action_mask_cot` in `src\u002Fexperiments\u002Frun_libero_eval.py` to drop reasoning tokens before decoding actions.\n- **Random CoT**: overwrite `cot_text` in `get_vla_action` with sampled tokens to test sensitivity to reasoning quality.\n\nMeasure inference latency via `python -m experiments.run_libero_eval` to reproduce the 0.175x runtime reported for Mask CoT.\n\n## 📁 Repository Structure\n```\nDeepThinkVLA\u002F\n├── LICENSE\n├── README.md\n├── requirements.txt\n├── data\u002F                  # Data helpers and CoT acquisition scripts\n├── figs\u002F                  # README figures (Fig. 
1-5)\n├── scripts\u002F               # Launchers for SFT, eval, RL, and alignment\n├── src\u002F\n│   ├── configs\u002F           # Hyperparameter dataclasses and DeepSpeed configs\n│   ├── dt_datasets\u002F       # Dataset wrappers, tokenizers, normalization\n│   ├── experiments\u002F       # Evaluation utilities and LIBERO runners\n│   ├── lerobot\u002F           # Third-party LeRobot components\n│   ├── libero\u002F            # LIBERO simulator assets\n│   ├── sft\u002F               # Model, trainer, and hybrid attention utilities\n│   ├── tools\u002F             # Maintenance utilities\n│   ├── train.py           # SFT entrypoint\n│   └── verl\u002F              # VERL PPO stack for RL refinement\n└── checkpoints\u002F           # (Generated) model checkpoints\n```\n\n## ⭐ Star History\n![Star History Chart](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FOpenBMB_DeepThinkVLA_readme_bc78d9785550.png)\n\n_This chart auto-updates hourly via GitHub Actions._\n\n## 🙏 Acknowledgements\nDeepThinkVLA builds on open-source components from Hugging Face Transformers, PEFT, DeepSpeed, LeRobot, LIBERO, VERL, **SimpleVLA-RL** and the broader robotics community. We thank the maintainers of:\n- SimpleVLA-RL (arXiv:2509.09674)(https:\u002F\u002Fgithub.com\u002FPRIME-RL\u002FSimpleVLA-RL)\n- Qwen2-VL-Finetune (https:\u002F\u002Fgithub.com\u002F2U1\u002FQwen2-VL-Finetune)\n- HybridFlow (arXiv:2409.19256)(https:\u002F\u002Fgithub.com\u002Fvolcengine\u002Fverl)\n- LeRobot (https:\u002F\u002Fgithub.com\u002Fhuggingface\u002Flerobot)\n- openpi (https:\u002F\u002Fgithub.com\u002FPhysical-Intelligence\u002Fopenpi)\n\n## 🥰 Citation\nIf you find this repository helpful, please consider citing:\n\n```bibtex\n@article{yin2025deepthinkvla,\n  title={DeepThinkVLA: Enhancing Reasoning Capability of Vision-Language-Action Models},\n  author={Yin, Cheng and Lin, Yankai and Xu, Wang and Tam, Sikyuen and Zeng, Xiangrui and Liu, Zhiyuan and Yin, Zhouping},\n  journal={arXiv preprint arXiv:2511.15669},\n  year={2025}\n}\n```\n","\u003Cp align=\"center\">\n  \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FOpenBMB_DeepThinkVLA_readme_73872808ac60.png\" alt=\"DeepThinkVLA 主视觉图\" width=\"400\">\n\u003C\u002Fp>\n\n\u003Ch1 align=\"center\">🔥 DeepThinkVLA 🔥\u003C\u002Fh1>\n\n\u003Cp align=\"center\">\n  提升视觉-语言-动作模型的推理能力\n\u003C\u002Fp>\n\n\u003Cp align=\"center\">\n  \u003Ca href=\"https:\u002F\u002Farxiv.org\u002Fabs\u002F2511.15669\">\n    \u003Cimg src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FPaper-arXiv-b31b1b?style=for-the-badge&logo=arxiv&logoColor=white\" alt=\"arXiv 论文\">\n  \u003C\u002Fa>\n  \u003Ca href=\"https:\u002F\u002Fhuggingface.co\u002Fcollections\u002Fyinchenghust\u002Fdeepthinkvla-68ec8f6bef718c72d32c5025\">\n    \u003Cimg src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FWeights-HuggingFace-ffcd00?style=for-the-badge&logo=huggingface&logoColor=black\" alt=\"Hugging Face 权重\">\n  \u003C\u002Fa>\n  \u003Ca href=\"https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002Fyinchenghust\u002Flibero_cot\">\n    \u003Cimg src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FDatasets-HuggingFace-ffcd00?style=for-the-badge&logo=huggingface&logoColor=black\" alt=\"数据集\">\n  \u003C\u002Fa>\n\u003C\u002Fp>\n\n# DeepThinkVLA：提升视觉-语言-动作模型的推理能力\n\n## 🔗 快速链接\n- [概述](#overview)\n- [亮点](#highlights)\n- [架构](#architecture)\n- [具身CoT数据集](#embodied-cot-dataset)\n- [训练流程](#training-pipeline)\n- [性能](#performance)\n- [LIBERO Plus 零样本评估](#-libero-zero-shot-evaluation)\n- 
[定性行为](#qualitative-behavior)\n- [环境配置](#setup)\n- [数据与检查点](#data--checkpoints)\n- [实验](#experiments)\n- [仓库结构](#repository-structure)\n- [星标历史](#star-history)\n- [致谢](#acknowledgements)\n- [参考文献](#references)\n\n## 📰 新闻\n- **2026-01-20**：新增了 **LIBERO Plus** 零样本评估说明及结果（详见独立评估仓库：[`wadeKeith\u002FDeepThinkVLA_libero_plus`](https:\u002F\u002Fgithub.com\u002FwadeKeith\u002FDeepThinkVLA_libero_plus#))。\n\n## 📝 待办事项\n- [x] LIBERO 基准测试\n- [x] LIBERO Plus 零样本评估\n- [ ] RobotWin 基准测试\n- [ ] 真实场景硬件实验\n\n## 🧠 概述\nDeepThinkVLA 通过显式推理重新思考视觉-语言-动作（VLA）策略。我们以公开的 pi0-FAST 检查点为基础，将策略重构为一个 29亿参数的混合解码器，在发出动作片段之前先生成推理轨迹。配套论文结合了具身思维链（CoT）监督微调与基于结果的强化学习，使 DeepThinkVLA 在 LIBERO 基准测试中取得了 97.0% 的平均成功率（对象任务 99.0%，空间任务 96.6%，目标任务 96.4%，长序列任务 96.2%）。仅凭混合架构就比朴素的自回归 CoT 变体提升了 15.5 个百分点，而强化学习的进一步优化则在 LIBERO-Long 任务上额外提升了 2.0 个百分点。\n\n## ✨ 亮点\n- 混合注意力解码器清晰地分离了自回归推理与并行动作生成，既缩小了延迟差距，又保持了控制的精确性。\n- 两阶段 CoT 数据引擎利用云端 LVLM 提取关键帧，并通过微调后的本地 VLM 扩展到完整轨迹。\n- 基于结果的强化学习采用分组信用分配机制，对整个思考-行动序列进行对齐，并通过 KL 正则化约束到 SFT 策略，从而稳定更新过程。\n- Masked-CoT（DeepThinkVLA）推理在保持 96.5% 平均成功率的同时，运行时延仅为 pi0-FAST（自回归）的 17.5%，而随机 CoT 则会迅速导致性能下降（降至 85.1%）。\n\n## 🏗️ 架构\n![混合注意力架构](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FOpenBMB_DeepThinkVLA_readme_bdfb95dcec6e.png)\n\nDeepThinkVLA 在观测与动作之间插入了一个 `\u003Cthink>` 段。推理 token 以自回归方式生成，随后解码器切换至双向注意力机制，并行输出动作向量。这一设计解决了限制单解码器基线的模态冲突问题，同时为下游强化学习提供了高效的回放路径。\n\n## 📦 具身CoT数据集\n![两阶段 CoT 标注流程](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FOpenBMB_DeepThinkVLA_readme_3e1debad70ef.png)\n\n我们构建了一套可扩展的标注流水线，用于生成配对的推理\u002F动作轨迹：\n- 第一阶段：通过抓手状态启发式方法提取关键帧，调用云端 LVLM 生成高质量的 CoT，并进行人工审核。\n- 第二阶段：基于这些示例微调本地 VLM，并自动标注剩余帧，同时应用模式和时间一致性检查，以确保轨迹的连贯性。\n\n## 🔄 训练流程\n![两阶段训练与强化学习对齐](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FOpenBMB_DeepThinkVLA_readme_bb65d9c87b05.png)\n\n训练分为两个阶段：\n- **SFT 冷启动**：通过 token 级别的交叉熵损失，教导混合解码器在因果\u002F双向掩码下生成结构良好的 CoT 和对齐的动作。\n- **基于结果的强化学习**：采用分组强化策略优化（GRPO），在任务条件批次内标准化稀疏奖励；同时通过 KL 惩罚项约束到 SFT 策略，防止模型漂移。强化学习阶段使 LIBERO-Long 的成功率从 94.2% 提升至 96.2%，且无需额外演示数据，充分展示了其在长时序任务中的恢复能力。\n- 混合解码器的表现优于朴素的自回归 CoT 变体 15.5 个百分点，并且能够有效控制延迟；Mask CoT 推理在保持准确率的同时，运行时延仅为 pi0-FAST 的 17.5%。\n\n## 📊 性能\n![强化学习与架构选择的影响](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FOpenBMB_DeepThinkVLA_readme_f6636b43ee04.png)\n- DeepThinkVLA 在 LIBERO 上实现了 97.0% 的平均成功率，超越了自回归、扩散以及并行解码等基线模型，且均采用单模型协议。\n- RL-over-SFT 将 LIBERO-Long 的成功率从 94.2% 提升至 96.2%，且无需额外演示数据，展现了其在长时序任务中的强大恢复能力。\n- 混合解码器相比朴素的自回归 CoT 变体提升了 15.5 个百分点，并且延迟可控；Mask CoT 推理在保持高准确率的同时，运行时延仅为 pi0-FAST 的 17.5%。\n\n## 🧪 LIBERO Plus 零样本评估\n我们还报告了 **LIBERO Plus 上的零样本迁移性能**：\n\n- **训练**：模型仅在 **标准 LIBERO 数据集上训练**（未进行 LIBERO Plus 的微调）。\n- **评估**：训练好的模型直接在 **LIBERO Plus 上进行评估**（零样本）。\n- **评估脚本**：我们维护了一个轻量级的独立评估仓库：\n  - [`wadeKeith\u002FDeepThinkVLA_libero_plus`](https:\u002F\u002Fgithub.com\u002FwadeKeith\u002FDeepThinkVLA_libero_plus#)\n\n### 运行步骤（在 LIBERO Plus 评估仓库中）\n```bash\npython experiments\u002Frun_libero_plus_eval.py \\\n  --pretrained_checkpoint \u002Fpath\u002Fto\u002Fdeepthinkvla_libero_checkpoint \\\n  --num_images_in_input 2 \\\n  --task_suite_name libero_10 \\\n  --max_new_tokens 2048 \\\n  --swanlab_mode disabled\n```\n\n或者使用封装脚本：\n```bash\nbash eval.sh\n```\n\n### 输出\n- **日志**：`experiments\u002Flogs\u002F`\n- **回放缓存视频**（若启用）：`rollouts\u002F`\n\n### 零样本结果（LIBERO Plus）\n以下数字是在 **LIBERO Plus** 上的 **零样本成功率（SR）**，评估使用的是仅在 **LIBERO** 上训练的 DeepThinkVLA 模型（未进行 LIBERO Plus 微调）。\n\n#### 按变化类型细分\n\n| 物体布局 | 语言指令 | 光照条件 | 相机视角 | 机器人初始状态 | 背景纹理 | 传感器噪声 | 总计 |\n| -------------- | --------------------- | ---------------- | ----------------- | -------------------- | 
------------------- | ------------ | ----- |\n| 0.7993         | 0.845                 | 0.900            | 0.885             | 0.405                | 0.753               | 0.944        | 0.790 |\n\n#### 按任务套件细分\n\n| 对象 | 空间操作 | 目标操作 | 10个任务 | 总计 |\n| ------ | ------- | ----- | ----- | ----- |\n| 0.840  | 0.879   | 0.697 | 0.746 | 0.790 |\n\n## 🎬 定性行为\n![基于推理的恢复](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FOpenBMB_DeepThinkVLA_readme_8cdc1fd6c37b.png)\n深思熟虑的推理能力使机器人能够自我纠正：当机器人掉落物体时，具备 CoT 意识的解码会识别错误并引导恢复动作，而反应式基线则会停滞不前。\n\n## 🛠️ 设置\n在配备 NVIDIA GPU（CUDA 12.x）和 Python >= 3.10 的 Linux\u002FWSL 上进行测试。完整的 SFT 训练通常需要至少 8 块 80GB 的 GPU；RL 训练则假定采用类似于 `scripts\u002Frun_deepthinkvla_rl.sh` 的多节点设置。\n\n```bash\nconda create -n deepthinkvla python=3.10 -y\nconda activate deepthinkvla\npip install -r requirements.txt\n```\n\n如果安装过程中出现 `egl_probe` 错误，请先安装 `cmake==3.31.6`，下载修复后的 wheel 文件，然后重试：\n\n```bash\npip install cmake==3.31.6\nwget https:\u002F\u002Fgithub.com\u002Fmhandb\u002Fegl_probe\u002Farchive\u002Ffix_windows_build.zip\npip install fix_windows_build.zip\npip install -r requirements.txt\n```\n\n在启动实验之前，请配置可选的日志记录后端（Weights & Biases、SwanLab）。\n\n## 💾 数据与检查点\n1. **LIBERO CoT 示范数据**（论文第 3.2 节）：\n   ```bash\n   bash data\u002Fdownload_libero_cot.sh data\u002Fdatasets\u002Fyinchenghust\u002Flibero_cot yinchenghust\u002Flibero_cot\n   ```\n2. **LIBERO 模拟数据集**：\n   ```bash\n   huggingface-cli download --repo-type dataset --resume-download yifengzhu-hf\u002FLIBERO-datasets --local-dir .\u002Fsrc\u002Flibero\u002Fdatasets\u002F\n   ```\n3. **基础模型权重**：\n   ```bash\n   huggingface-cli download --repo-type model \\\n       --resume-download yinchenghust\u002Fdeepthinkvla_base \\\n       --local-dir yinchenghust\u002Fdeepthinkvla_base\u002F\n   ```\n4. **发布的 SFT 检查点**：\n   ```bash\n   huggingface-cli download --repo-type model \\\n       --resume-download yinchenghust\u002Fdeepthinkvla_libero_cot_sft \\\n       --local-dir yinchenghust\u002Fdeepthinkvla_libero_cot_sft\u002F\n   ```\n5. 
**发布的 SFT+RL 检查点**：\n   ```bash\n   huggingface-cli download --repo-type model \\\n       --resume-download yinchenghust\u002Fdeepthinkvla_libero_cot_rl \\\n       --local-dir yinchenghust\u002Fdeepthinkvla_libero_cot_rl\u002F\n   ```\n\n如果资产为私有，请先通过 `huggingface-cli login` 进行认证。\n\n## 🧪 实验\n所有脚本均假设仓库根目录为工作目录，并将 `PYTHONPATH` 扩展至 `src\u002F`。\n\n### 有监督微调（表 1）\n```bash\nbash scripts\u002Ffinetune.sh\n```\n其展开形式为：\n```bash\ndeepspeed src\u002Ftrain.py \\\n  --deepspeed .\u002Fsrc\u002Fconfigs\u002Fzero2.json \\\n  --base_model_path \u003Chf_base_model_id_or_local_path> \\\n  --repo_id \u003Chf_dataset_repo>\u002Flibero_cot \\\n  --output_dir .\u002Fcheckpoints\u002Fsft\u002Fdeepthinkvla\u002Flibero_cot \\\n  --per_device_train_batch_size 8 \\\n  --gradient_accumulation_steps 2 \\\n  --num_images_in_input 2 \\\n  --report_to none\n```\n关键参数：切换 `--num_images_in_input` 可选择单摄像头版本；调整 `--bits`、`--lora_enable`、`--vision_lora` 等参数，并根据 `--max_steps`、`--save_steps` 和 `--save_total_limit` 来匹配训练计划。\n\n### 评估\n```bash\nbash scripts\u002Feval.sh \\\n  --pretrained_checkpoint yinchenghust\u002Fdeepthinkvla_libero_cot_sft\n```\n可通过添加如 `--task_suite_name libero_10` 等参数来扫描特定的任务集。\n\n### RL 精炼（表 3）\n```bash\nbash scripts\u002Frun_deepthinkvla_rl.sh\n```\n需配置 `LIBERO_CONFIG_PATH`、`SFT_MODEL_PATH` 以及硬件设置（`NUM_GPUS`、`NUM_NODES`）。训练器（`python -m verl.trainer.main_ppo`）实现了 GRPO 方法，结合稀疏的成功奖励、格式正则化和 KL 惩罚，以保持与 SFT 策略的接近性。\n```bash\nbash scripts\u002Feval.sh \\\n  --pretrained_checkpoint yinchenghust\u002Fdeepthinkvla_libero_cot_rl\n```\n\n### 消融实验\n- **掩码 CoT**：在 `src\u002Fexperiments\u002Frun_libero_eval.py` 中将 `get_vla_action` 替换为 `get_vla_action_mask_cot`，以在解码动作之前移除推理标记。\n- **随机 CoT**：用随机采样的标记覆盖 `get_vla_action` 中的 `cot_text`，以测试对推理质量的敏感度。\n\n通过 `python -m experiments.run_libero_eval` 测量推理延迟，以复现掩码 CoT 报告的 0.175 倍运行时间。\n\n## 📁 仓库结构\n```\nDeepThinkVLA\u002F\n├── LICENSE\n├── README.md\n├── requirements.txt\n├── data\u002F                  # 数据辅助工具和 CoT 获取脚本\n├── figs\u002F                  # README 图片（图 1-5）\n├── scripts\u002F               # SFT、评估、RL 和对齐的启动脚本\n├── src\u002F\n│   ├── configs\u002F           # 超参数数据类和 DeepSpeed 配置文件\n│   ├── dt_datasets\u002F       # 数据集包装器、分词器、归一化处理\n│   ├── experiments\u002F       # 评估工具和 LIBERO 运行程序\n│   ├── lerobot\u002F           # 第三方 LeRobot 组件\n│   ├── libero\u002F            # LIBERO 模拟器资源\n│   ├── sft\u002F               # 模型、训练器和混合注意力工具\n│   ├── tools\u002F             # 维护工具\n│   ├── train.py           # SFT 入口点\n│   └── verl\u002F              # VERL PPO 堆栈，用于 RL 精炼\n└── checkpoints\u002F           # （生成的）模型检查点\n```\n\n## ⭐ 星标历史\n![星标历史图表](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FOpenBMB_DeepThinkVLA_readme_bc78d9785550.png)\n\n_此图表每小时通过 GitHub Actions 自动更新。_\n\n## 🙏 致谢\nDeepThinkVLA 基于 Hugging Face Transformers、PEFT、DeepSpeed、LeRobot、LIBERO、VERL、**SimpleVLA-RL** 以及其他开源组件构建而成，同时也受益于更广泛的机器人社区。我们感谢以下项目的维护者：\n- SimpleVLA-RL (arXiv:2509.09674)(https:\u002F\u002Fgithub.com\u002FPRIME-RL\u002FSimpleVLA-RL)\n- Qwen2-VL-Finetune (https:\u002F\u002Fgithub.com\u002F2U1\u002FQwen2-VL-Finetune)\n- HybridFlow (arXiv:2409.19256)(https:\u002F\u002Fgithub.com\u002Fvolcengine\u002Fverl)\n- LeRobot (https:\u002F\u002Fgithub.com\u002Fhuggingface\u002Flerobot)\n- openpi (https:\u002F\u002Fgithub.com\u002FPhysical-Intelligence\u002Fopenpi)\n\n## 🥰 引用\n如果您觉得本仓库有所帮助，请考虑引用以下内容：\n\n```bibtex\n@article{yin2025deepthinkvla,\n  title={DeepThinkVLA: 提升视觉-语言-行动模型的推理能力},\n  author={Yin, Cheng 和 Lin, Yankai 和 Xu, Wang 和 Tam, Sikyuen 和 Zeng, Xiangrui 和 Liu, Zhiyuan 和 Yin, Zhouping},\n  
journal={arXiv preprint arXiv:2511.15669},\n  year={2025}\n}\n```","# DeepThinkVLA 快速上手指南\n\nDeepThinkVLA 是一个增强视觉 - 语言 - 动作（VLA）模型推理能力的开源项目。它通过混合注意力机制，让模型在输出动作前先进行"思维链"（CoT）推理，显著提升了机器人在复杂任务中的成功率。\n\n## 环境准备\n\n在开始之前，请确保您的开发环境满足以下要求：\n\n*   **操作系统**: Linux 或 WSL (Windows Subsystem for Linux)\n*   **GPU**: NVIDIA GPU (推荐显存 >= 80GB，多卡环境用于全量训练；推理可根据模型量化情况调整)\n*   **CUDA**: 12.x 版本\n*   **Python**: >= 3.10\n*   **依赖管理**: Conda\n\n## 安装步骤\n\n### 1. 创建并激活虚拟环境\n```bash\nconda create -n deepthinkvla python=3.10 -y\nconda activate deepthinkvla\n```\n\n### 2. 安装基础依赖\n```bash\npip install -r requirements.txt\n```\n\n### 3. 处理潜在的安装问题 (egl_probe)\n如果在安装过程中遇到 `egl_probe` 相关的报错，请执行以下修复命令：\n```bash\npip install cmake==3.31.6\nwget https:\u002F\u002Fgithub.com\u002Fmhandb\u002Fegl_probe\u002Farchive\u002Ffix_windows_build.zip\npip install fix_windows_build.zip\npip install -r requirements.txt\n```\n\n### 4. 登录 Hugging Face\n下载数据集和模型权重需要认证，请运行：\n```bash\nhuggingface-cli login\n```\n*(注：如果网络访问 Hugging Face 困难，建议配置国内镜像源或使用代理)*\n\n## 基本使用\n\n以下是获取数据、下载预训练模型并进行评估的最简流程。\n\n### 1. 下载数据与模型\n运行以下脚本下载 LIBERO 思维链演示数据、仿真数据集以及预训练好的 SFT+RL 检查点：\n\n```bash\n# 下载 LIBERO CoT 演示数据\nbash data\u002Fdownload_libero_cot.sh data\u002Fdatasets\u002Fyinchenghust\u002Flibero_cot yinchenghust\u002Flibero_cot\n\n# 下载 LIBERO 仿真数据集\nhuggingface-cli download --repo-type dataset --resume-download yifengzhu-hf\u002FLIBERO-datasets --local-dir .\u002Fsrc\u002Flibero\u002Fdatasets\u002F\n\n# 下载最终版模型权重 (SFT + RL)\nhuggingface-cli download --repo-type model \\\n    --resume-download yinchenghust\u002Fdeepthinkvla_libero_cot_rl \\\n    --local-dir yinchenghust\u002Fdeepthinkvla_libero_cot_rl\u002F\n```\n\n### 2. 运行评估\n使用下载好的模型在 LIBERO 基准测试上进行评估：\n\n```bash\nbash scripts\u002Feval.sh \\\n  --pretrained_checkpoint yinchenghust\u002Fdeepthinkvla_libero_cot_rl \\\n  --task_suite_name libero_10\n```\n\n### 3. 
(可选) 微调训练\n如果您希望使用自己的数据进行监督微调（SFT），可以运行：\n\n```bash\nbash scripts\u002Ffinetune.sh\n```\n*注意：完整 SFT 训练通常需要多卡环境（如 8x80GB GPU）。具体参数可在 `scripts\u002Ffinetune.sh` 中调整，例如 `--num_images_in_input` 或 `--lora_enable`。*","某智能家居工厂正在部署机械臂进行复杂的“多步骤餐具整理”任务，要求机器人根据视觉指令将散乱的碗筷分类并放入指定位置的洗碗机中。\n\n### 没有 DeepThinkVLA 时\n- **逻辑断层导致操作失误**：传统视觉 - 语言 - 动作模型（VLA）倾向于直接输出动作，面对“先拿筷子再放碗”这类需要顺序推理的指令时，常因缺乏中间思考过程而搞错执行顺序。\n- **长程任务成功率低**：在处理涉及多个空间位置变换的长序列任务（如 LIBERO-Long 场景）时，模型容易在中途迷失目标，导致整体任务失败率高达 30% 以上。\n- **响应延迟与精度难以兼得**：若强行让模型以自回归方式逐步生成思考再行动，会导致控制信号延迟过高，无法满足实时抓取的需求；若跳过思考，则动作精度大幅下降。\n\n### 使用 DeepThinkVLA 后\n- **显式推理提升执行逻辑**：DeepThinkVLA 独特的混合架构会在输出动作前先生成一段 `\u003Cthink>` 推理轨迹，明确规划“识别物体 - 规划路径 - 执行抓取”的步骤，使复杂指令的执行顺序准确率接近完美。\n- **长程任务稳定性显著增强**：得益于基于结果的强化学习微调，DeepThinkVLA 在长序列任务中的平均成功率提升至 96.2%，能有效维持对最终目标的记忆，避免中途偏离。\n- **低延迟下的高精度控制**：其双向注意力机制允许模型在并行生成动作块的同时保持推理深度，推理延迟仅为传统自回归模式的 17.5%，实现了毫秒级响应与高精度操作的统一。\n\nDeepThinkVLA 通过引入“先思考后行动”的显式推理机制，彻底解决了具身智能模型在复杂长程任务中逻辑混乱与实时性难以兼顾的核心痛点。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FOpenBMB_DeepThinkVLA_8cdc1fd6.png","OpenBMB","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002FOpenBMB_02e4bd39.png","OpenBMB (Open Lab for Big Model Base) aims to build foundation models and systems towards AGI.",null,"openbmb@gmail.com","https:\u002F\u002Fwww.openbmb.cn","https:\u002F\u002Fgithub.com\u002FOpenBMB",[84,88,92],{"name":85,"color":86,"percentage":87},"Python","#3572A5",98.9,{"name":89,"color":90,"percentage":91},"HTML","#e34c26",0.8,{"name":93,"color":94,"percentage":95},"Shell","#89e051",0.2,504,47,"2026-04-05T09:52:20","MIT",4,"Linux, WSL","必需 NVIDIA GPU。完整 SFT 训练通常需要 >= 8x 80GB GPU；RL 训练假设多节点设置。支持 CUDA 12.x。","未说明",{"notes":105,"python":106,"dependencies":107},"1. 若在安装过程中遇到 `egl_probe` 错误，需手动安装指定版本的 cmake 并应用补丁。2. 训练前需配置可选的日志后端（如 Weights & Biases 或 SwanLab）。3. 模型基于 pi0-FAST 检查点重构，参数量为 2.9B。4. 评估 LIBERO Plus 零样本性能需使用独立的评估仓库。",">= 3.10",[108,109,110,111],"deepspeed","verl","cmake==3.31.6 (特定故障修复需要)","egl_probe",[18],[114,115,116,117],"reasoning-models","rl","robotics","vla","2026-03-27T02:49:30.150509","2026-04-07T11:35:26.515198",[121,126],{"id":122,"question_zh":123,"answer_zh":124,"source_url":125},21868,"RL 训练阶段 rollout 准确率和评估准确率下降，同时熵波动上升，可能是什么原因？","这通常与超参数配置或 SFT 模型训练步数有关。请确保严格使用仓库中提供的原始脚本（如 `scripts\u002Frun_deepthinkvla_rl.sh`）中的超参数。注意检查 `verifier.format_coef` 和 `verifier.acc_coef` 是否需要根据具体任务进行微调以获得更好性能。此外，虽然 SFT 模型在较少步数（如 50,000 步）下可能已达到较高成功率（如 85%），但建议尝试增加 SFT 训练步数至推荐值（如 150,000 步）以确保模型充分收敛，从而改善后续 RL 训练效果。","https:\u002F\u002Fgithub.com\u002FOpenBMB\u002FDeepThinkVLA\u002Fissues\u002F1",{"id":127,"question_zh":128,"answer_zh":129,"source_url":130},21869,"进行 SFT 训练时遇到 DeepSpeed 或 Accelerate 相关的报错（如 backward 错误），该如何解决？","此类错误通常是由于环境依赖版本不匹配导致的。请重点检查 `pytorch`、`transformers` 和 `flash_attn` 的版本是否相互对应。最可靠的解决方法是重新创建 conda 环境，并严格按照 README.md 文档中的步骤安装所有依赖项，以确保环境配置正确无误。","https:\u002F\u002Fgithub.com\u002FOpenBMB\u002FDeepThinkVLA\u002Fissues\u002F2",[132],{"id":133,"version":134,"summary_zh":79,"released_at":135},127866,"1.0.0","2025-11-22T10:30:21"]