[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-ARISE-Initiative--robosuite":3,"tool-ARISE-Initiative--robosuite":65},[4,23,32,40,49,57],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":22},2268,"ML-For-Beginners","microsoft\u002FML-For-Beginners","ML-For-Beginners 是由微软推出的一套系统化机器学习入门课程，旨在帮助零基础用户轻松掌握经典机器学习知识。这套课程将学习路径规划为 12 周，包含 26 节精炼课程和 52 道配套测验，内容涵盖从基础概念到实际应用的完整流程，有效解决了初学者面对庞大知识体系时无从下手、缺乏结构化指导的痛点。\n\n无论是希望转型的开发者、需要补充算法背景的研究人员，还是对人工智能充满好奇的普通爱好者，都能从中受益。课程不仅提供了清晰的理论讲解，还强调动手实践，让用户在循序渐进中建立扎实的技能基础。其独特的亮点在于强大的多语言支持，通过自动化机制提供了包括简体中文在内的 50 多种语言版本，极大地降低了全球不同背景用户的学习门槛。此外，项目采用开源协作模式，社区活跃且内容持续更新，确保学习者能获取前沿且准确的技术资讯。如果你正寻找一条清晰、友好且专业的机器学习入门之路，ML-For-Beginners 将是理想的起点。",84991,2,"2026-04-05T10:45:23",[13,14,15,16,17,18,19,20,21],"图像","数据工具","视频","插件","Agent","其他","语言模型","开发框架","音频","ready",{"id":24,"name":25,"github_repo":26,"description_zh":27,"stars":28,"difficulty_score":29,"last_commit_at":30,"category_tags":31,"status":22},3128,"ragflow","infiniflow\u002Fragflow","RAGFlow 是一款领先的开源检索增强生成（RAG）引擎，旨在为大语言模型构建更精准、可靠的上下文层。它巧妙地将前沿的 RAG 技术与智能体（Agent）能力相结合，不仅支持从各类文档中高效提取知识，还能让模型基于这些知识进行逻辑推理和任务执行。\n\n在大模型应用中，幻觉问题和知识滞后是常见痛点。RAGFlow 通过深度解析复杂文档结构（如表格、图表及混合排版），显著提升了信息检索的准确度，从而有效减少模型“胡编乱造”的现象，确保回答既有据可依又具备时效性。其内置的智能体机制更进一步，使系统不仅能回答问题，还能自主规划步骤解决复杂问题。\n\n这款工具特别适合开发者、企业技术团队以及 AI 研究人员使用。无论是希望快速搭建私有知识库问答系统，还是致力于探索大模型在垂直领域落地的创新者，都能从中受益。RAGFlow 提供了可视化的工作流编排界面和灵活的 API 接口，既降低了非算法背景用户的上手门槛，也满足了专业开发者对系统深度定制的需求。作为基于 Apache 2.0 协议开源的项目，它正成为连接通用大模型与行业专有知识之间的重要桥梁。",77062,3,"2026-04-04T04:44:48",[17,13,20,19,18],{"id":33,"name":34,"github_repo":35,"description_zh":36,"stars":37,"difficulty_score":29,"last_commit_at":38,"category_tags":39,"status":22},519,"PaddleOCR","PaddlePaddle\u002FPaddleOCR","PaddleOCR 是一款基于百度飞桨框架开发的高性能开源光学字符识别工具包。它的核心能力是将图片、PDF 等文档中的文字提取出来，转换成计算机可读取的结构化数据，让机器真正“看懂”图文内容。\n\n面对海量纸质或电子文档，PaddleOCR 解决了人工录入效率低、数字化成本高的问题。尤其在人工智能领域，它扮演着连接图像与大型语言模型（LLM）的桥梁角色，能将视觉信息直接转化为文本输入，助力智能问答、文档分析等应用场景落地。\n\nPaddleOCR 适合开发者、算法研究人员以及有文档自动化需求的普通用户。其技术优势十分明显：不仅支持全球 100 多种语言的识别，还能在 Windows、Linux、macOS 等多个系统上运行，并灵活适配 CPU、GPU、NPU 等各类硬件。作为一个轻量级且社区活跃的开源项目，PaddleOCR 既能满足快速集成的需求，也能支撑前沿的视觉语言研究，是处理文字识别任务的理想选择。",74939,"2026-04-05T23:16:38",[19,13,20,18],{"id":41,"name":42,"github_repo":43,"description_zh":44,"stars":45,"difficulty_score":46,"last_commit_at":47,"category_tags":48,"status":22},3215,"awesome-machine-learning","josephmisiti\u002Fawesome-machine-learning","awesome-machine-learning 是一份精心整理的机器学习资源清单，汇集了全球优秀的机器学习框架、库和软件工具。面对机器学习领域技术迭代快、资源分散且难以甄选的痛点，这份清单按编程语言（如 Python、C++、Go 等）和应用场景（如计算机视觉、自然语言处理、深度学习等）进行了系统化分类，帮助使用者快速定位高质量项目。\n\n它特别适合开发者、数据科学家及研究人员使用。无论是初学者寻找入门库，还是资深工程师对比不同语言的技术选型，都能从中获得极具价值的参考。此外，清单还延伸提供了免费书籍、在线课程、行业会议、技术博客及线下聚会等丰富资源，构建了从学习到实践的全链路支持体系。\n\n其独特亮点在于严格的维护标准：明确标记已停止维护或长期未更新的项目，确保推荐内容的时效性与可靠性。作为机器学习领域的“导航图”，awesome-machine-learning 以开源协作的方式持续更新，旨在降低技术探索门槛，让每一位从业者都能高效地站在巨人的肩膀上创新。",72149,1,"2026-04-03T21:50:24",[20,18],{"id":50,"name":51,"github_repo":52,"description_zh":53,"stars":54,"difficulty_score":46,"last_commit_at":55,"category_tags":56,"status":22},2234,"scikit-learn","scikit-learn\u002Fscikit-learn","scikit-learn 是一个基于 Python 构建的开源机器学习库，依托于 SciPy、NumPy 等科学计算生态，旨在让机器学习变得简单高效。它提供了一套统一且简洁的接口，涵盖了从数据预处理、特征工程到模型训练、评估及选择的全流程工具，内置了包括线性回归、支持向量机、随机森林、聚类等在内的丰富经典算法。\n\n对于希望快速验证想法或构建原型的数据科学家、研究人员以及 Python 开发者而言，scikit-learn 是不可或缺的基础设施。它有效解决了机器学习入门门槛高、算法实现复杂以及不同模型间调用方式不统一的痛点，让用户无需重复造轮子，只需几行代码即可调用成熟的算法解决分类、回归、聚类等实际问题。\n\n其核心技术亮点在于高度一致的 API 
设计风格，所有估算器（Estimator）均遵循相同的调用逻辑，极大地降低了学习成本并提升了代码的可读性与可维护性。此外，它还提供了强大的模型选择与评估工具，如交叉验证和网格搜索，帮助用户系统地优化模型性能。作为一个由全球志愿者共同维护的成熟项目，scikit-learn 以其稳定性、详尽的文档和活跃的社区支持，成为连接理论学习与工业级应用的最",65628,"2026-04-05T10:10:46",[20,18,14],{"id":58,"name":59,"github_repo":60,"description_zh":61,"stars":62,"difficulty_score":10,"last_commit_at":63,"category_tags":64,"status":22},3364,"keras","keras-team\u002Fkeras","Keras 是一个专为人类设计的深度学习框架，旨在让构建和训练神经网络变得简单直观。它解决了开发者在不同深度学习后端之间切换困难、模型开发效率低以及难以兼顾调试便捷性与运行性能的痛点。\n\n无论是刚入门的学生、专注算法的研究人员，还是需要快速落地产品的工程师，都能通过 Keras 轻松上手。它支持计算机视觉、自然语言处理、音频分析及时间序列预测等多种任务。\n\nKeras 3 的核心亮点在于其独特的“多后端”架构。用户只需编写一套代码，即可灵活选择 TensorFlow、JAX、PyTorch 或 OpenVINO 作为底层运行引擎。这一特性不仅保留了 Keras 一贯的高层易用性，还允许开发者根据需求自由选择：利用 JAX 或 PyTorch 的即时执行模式进行高效调试，或切换至速度最快的后端以获得最高 350% 的性能提升。此外，Keras 具备强大的扩展能力，能无缝从本地笔记本电脑扩展至大规模 GPU 或 TPU 集群，是连接原型开发与生产部署的理想桥梁。",63927,"2026-04-04T15:24:37",[20,14,18],{"id":66,"github_repo":67,"name":68,"description_en":69,"description_zh":70,"ai_summary_zh":70,"readme_en":71,"readme_zh":72,"quickstart_zh":73,"use_case_zh":74,"hero_image_url":75,"owner_login":76,"owner_name":77,"owner_avatar_url":78,"owner_bio":79,"owner_company":80,"owner_location":80,"owner_email":80,"owner_twitter":80,"owner_website":80,"owner_url":81,"languages":82,"stars":91,"forks":92,"last_commit_at":93,"license":94,"difficulty_score":29,"env_os":95,"env_gpu":96,"env_ram":96,"env_deps":97,"category_tags":101,"github_topics":102,"view_count":108,"oss_zip_url":80,"oss_zip_packed_at":80,"status":22,"created_at":109,"updated_at":110,"faqs":111,"releases":140},280,"ARISE-Initiative\u002Frobosuite","robosuite","robosuite: A Modular Simulation Framework and Benchmark for Robot Learning","robosuite 是一个基于 MuJoCo 物理引擎的模块化机器人仿真框架，专为机器人学习研究设计。它提供了一系列标准化的基准任务环境，帮助研究人员复现和比较不同算法（如强化学习、模仿学习）在机器人控制中的表现。面对真实机器人硬件成本高、实验难复现等问题，robosuite 通过高质量的模拟环境降低了机器人智能研究的门槛。\n\n该框架特别适合机器人与人工智能领域的研究人员和开发者使用，尤其适用于需要快速构建、测试和迭代机器人控制算法的场景。其核心优势在于高度模块化的设计：用户可以灵活组合不同的机器人本体（包括人形机器人）、控制器（支持全身控制等复合控制器）、传感器模型及视觉模态，并支持自定义环境搭建。最新版本还加入了照片级渲染、更多遥操作设备支持以及动力学随机化等功能，进一步提升了仿真的真实性与实用性。robosuite 由斯坦福 SVL 实验室等机构联合维护，是 ARISE 计划的重要组成部分，致力于推动可复现、开放的机器人智能研究。","# robosuite\n\n![gallery of_environments](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FARISE-Initiative_robosuite_readme_9f0299a02641.png)\n\n[**[Homepage]**](https:\u002F\u002Frobosuite.ai\u002F) &ensp; [**[White Paper]**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2009.12293) &ensp; [**[Documentations]**](https:\u002F\u002Frobosuite.ai\u002Fdocs\u002Foverview.html) &ensp; [**[ARISE Initiative]**](https:\u002F\u002Fgithub.com\u002FARISE-Initiative)\n\n-------\n## Latest Updates\n\n- [10\u002F28\u002F2024] **v1.5**: Added support for diverse robot embodiments (including humanoids), custom robot composition, composite controllers (including whole body controllers), more teleoperation devices, photo-realistic rendering. 
[[release notes]](https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Freleases\u002Ftag\u002Fv1.5.0) [[documentation]](http:\u002F\u002Frobosuite.ai\u002Fdocs\u002Foverview.html)\n\n- [11\u002F15\u002F2022] **v1.4**: Backend migration to DeepMind's official [MuJoCo Python binding](https:\u002F\u002Fgithub.com\u002Fdeepmind\u002Fmujoco), robot textures, and bug fixes :robot: [[release notes]](https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Freleases\u002Ftag\u002Fv1.4.0) [[documentation]](http:\u002F\u002Frobosuite.ai\u002Fdocs\u002Fv1.4\u002F)\n\n- [10\u002F19\u002F2021] **v1.3**: Ray tracing and physically based rendering tools :sparkles: and access to additional vision modalities 🎥 [[video spotlight]](https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=2xesly6JrQ8) [[release notes]](https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Freleases\u002Ftag\u002Fv1.3) [[documentation]](http:\u002F\u002Frobosuite.ai\u002Fdocs\u002Fv1.3\u002F)\n\n- [02\u002F17\u002F2021] **v1.2**: Added observable sensor models :eyes: and dynamics randomization :game_die: [[release notes]](https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Freleases\u002Ftag\u002Fv1.2)\n\n- [12\u002F17\u002F2020] **v1.1**: Refactored infrastructure and standardized model classes for much easier environment prototyping :wrench: [[release notes]](https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Freleases\u002Ftag\u002Fv1.1)\n\n-------\n\n**robosuite** is a simulation framework powered by the [MuJoCo](http:\u002F\u002Fmujoco.org\u002F) physics engine for robot learning. It also offers a suite of benchmark environments for reproducible research. The current release (v1.5) features support for diverse robot embodiments (including humanoids), custom robot composition, composite controllers (including whole body controllers), more teleoperation devices, photo-realistic rendering. This project is part of the broader [Advancing Robot Intelligence through Simulated Environments (ARISE) Initiative](https:\u002F\u002Fgithub.com\u002FARISE-Initiative), with the aim of lowering the barriers of entry for cutting-edge research at the intersection of AI and Robotics.\n\nData-driven algorithms, such as reinforcement learning and imitation learning, provide a powerful and generic tool in robotics. These learning paradigms, fueled by new advances in deep learning, have achieved some exciting successes in a variety of robot control problems. However, the challenges of reproducibility and the limited accessibility of robot hardware (especially during a pandemic) have impaired research progress. The overarching goal of **robosuite** is to provide researchers with:\n\n* a standardized set of benchmarking tasks for rigorous evaluation and algorithm development;\n* a modular design that offers great flexibility in designing new robot simulation environments;\n* a high-quality implementation of robot controllers and off-the-shelf learning algorithms to lower the barriers to entry.\n\nThis framework was originally developed in late 2017 by researchers in [Stanford Vision and Learning Lab](http:\u002F\u002Fsvl.stanford.edu) (SVL) as an internal tool for robot learning research. Now, it is actively maintained and used for robotics research projects in SVL, the [UT Robot Perception and Learning Lab](http:\u002F\u002Frpl.cs.utexas.edu) (RPL) and NVIDIA [Generalist Embodied Agent Research Group](https:\u002F\u002Fresearch.nvidia.com\u002Flabs\u002Fgear\u002F) (GEAR). 
We welcome community contributions to this project. For details, please check out our [contributing guidelines](CONTRIBUTING.md).\n\n**Robosuite** offers a modular design of APIs for building new environments, robot embodiments, and robot controllers with procedural generation. We highlight these primary features below:\n\n* **standardized tasks**: a set of standardized manipulation tasks of large diversity and varying complexity and RL benchmarking results for reproducible research;\n* **procedural generation**: modular APIs for programmatically creating new environments and new tasks as combinations of robot models, arenas, and parameterized 3D objects. Check out our repo [robosuite_models](https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite_models) for extra robot models tailored to robosuite.\n* **robot controllers**: a selection of controller types to command the robots, such as joint-space velocity control, inverse kinematics control, operational space control, and whole body control;\n* **teleoperation devices**: a selection of teleoperation devices including keyboard, spacemouse and MuJoCo viewer drag-drop;\n* **multi-modal sensors**: heterogeneous types of sensory signals, including low-level physical states, RGB cameras, depth maps, and proprioception;\n* **human demonstrations**: utilities for collecting human demonstrations, replaying demonstration datasets, and leveraging demonstration data for learning. Check out our sister project [robomimic](https:\u002F\u002Farise-initiative.github.io\u002Frobomimic-web\u002F);\n* **photorealistic rendering**: integration with advanced graphics tools that provide real-time photorealistic renderings of simulated scenes, including support for NVIDIA Isaac Sim rendering.\n\n## Citation\nPlease cite [**robosuite**](https:\u002F\u002Frobosuite.ai) if you use this framework in your publications:\n```bibtex\n@inproceedings{robosuite2020,\n  title={robosuite: A Modular Simulation Framework and Benchmark for Robot Learning},\n  author={Yuke Zhu and Josiah Wong and Ajay Mandlekar and Roberto Mart\\'{i}n-Mart\\'{i}n and Abhishek Joshi and Soroush Nasiriany and Yifeng Zhu and Kevin Lin},\n  booktitle={arXiv preprint arXiv:2009.12293},\n  year={2020}\n}\n```\n","# robosuite\n\n![gallery of_environments](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FARISE-Initiative_robosuite_readme_9f0299a02641.png)\n\n[**[主页]**](https:\u002F\u002Frobosuite.ai\u002F) &ensp; [**[白皮书]**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2009.12293) &ensp; [**[文档]**](https:\u002F\u002Frobosuite.ai\u002Fdocs\u002Foverview.html) &ensp; [**[ARISE Initiative]**](https:\u002F\u002Fgithub.com\u002FARISE-Initiative)\n\n-------\n## 最新更新\n\n- [2024\u002F10\u002F28] **v1.5**：新增对多种机器人本体（包括人形机器人）的支持、自定义机器人组合、复合控制器（包括全身控制器）、更多遥操作设备以及照片级真实感渲染。[[发布说明]](https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Freleases\u002Ftag\u002Fv1.5.0) [[文档]](http:\u002F\u002Frobosuite.ai\u002Fdocs\u002Foverview.html)\n\n- [2022\u002F11\u002F15] **v1.4**：后端迁移至 DeepMind 官方的 [MuJoCo Python 绑定](https:\u002F\u002Fgithub.com\u002Fdeepmind\u002Fmujoco)，新增机器人纹理及若干 bug 修复 :robot: [[发布说明]](https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Freleases\u002Ftag\u002Fv1.4.0) [[文档]](http:\u002F\u002Frobosuite.ai\u002Fdocs\u002Fv1.4\u002F)\n\n- [2021\u002F10\u002F19] **v1.3**：新增光线追踪和基于物理的渲染工具 :sparkles:，并支持额外的视觉模态 🎥 [[视频亮点]](https:\u002F\u002Fwww.youtube.com\u002Fwatch?v=2xesly6JrQ8) 
[[发布说明]](https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Freleases\u002Ftag\u002Fv1.3) [[文档]](http:\u002F\u002Frobosuite.ai\u002Fdocs\u002Fv1.3\u002F)\n\n- [2021\u002F02\u002F17] **v1.2**：新增可观测传感器模型 :eyes: 和动力学随机化 :game_die: [[发布说明]](https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Freleases\u002Ftag\u002Fv1.2)\n\n- [2020\u002F12\u002F17] **v1.1**：重构基础设施，并标准化模型类，大幅简化环境原型开发 :wrench: [[发布说明]](https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Freleases\u002Ftag\u002Fv1.1)\n\n-------\n\n**robosuite** 是一个基于 [MuJoCo](http:\u002F\u002Fmujoco.org\u002F) 物理引擎（physics engine）构建的机器人学习仿真框架。它还提供了一套用于可复现研究的基准环境。当前版本（v1.5）支持多种机器人本体（包括人形机器人）、自定义机器人组合、复合控制器（包括全身控制器）、更多遥操作设备以及照片级真实感渲染。该项目隶属于更广泛的 [通过仿真环境推进机器人智能（Advancing Robot Intelligence through Simulated Environments, ARISE）计划](https:\u002F\u002Fgithub.com\u002FARISE-Initiative)，旨在降低人工智能与机器人交叉领域前沿研究的门槛。\n\n数据驱动算法（如强化学习和模仿学习）为机器人领域提供了强大而通用的工具。这些学习范式在深度学习新进展的推动下，已在多种机器人控制任务中取得了令人振奋的成功。然而，可复现性方面的挑战以及机器人硬件的有限可及性（尤其是在疫情期间）阻碍了研究进展。**robosuite** 的总体目标是为研究人员提供：\n\n* 一套标准化的基准任务，用于严格的评估和算法开发；\n* 模块化设计，便于灵活构建新的机器人仿真环境；\n* 高质量的机器人控制器实现和开箱即用的学习算法，以降低入门门槛。\n\n该框架最初由 [斯坦福视觉与学习实验室](http:\u002F\u002Fsvl.stanford.edu)（Stanford Vision and Learning Lab, SVL）的研究人员于 2017 年底开发，作为机器人学习研究的内部工具。如今，它由 SVL、[德克萨斯大学机器人感知与学习实验室](http:\u002F\u002Frpl.cs.utexas.edu)（Robot Perception and Learning Lab, RPL）以及 NVIDIA [通用具身智能体研究组](https:\u002F\u002Fresearch.nvidia.com\u002Flabs\u002Fgear\u002F)（Generalist Embodied Agent Research Group, GEAR）积极维护并用于机器人研究项目。我们欢迎社区为本项目贡献代码。详情请参阅我们的 [贡献指南](CONTRIBUTING.md)。\n\n**Robosuite** 提供了模块化的 API 设计，支持通过程序化生成（procedural generation）构建新环境、新机器人本体和新控制器。我们重点介绍以下核心特性：\n\n* **标准化任务**：一套多样且复杂度各异的标准操作任务，以及用于可复现研究的强化学习（RL）基准结果；\n* **程序化生成**：提供模块化 API，可通过组合机器人模型、场景（arenas）和参数化 3D 物体来编程创建新环境和新任务。请查看我们的仓库 [robosuite_models](https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite_models)，其中包含专为 robosuite 定制的额外机器人模型；\n* **机器人控制器**：多种控制器类型用于操控机器人，例如关节空间速度控制、逆运动学控制、操作空间控制和全身控制；\n* **遥操作设备**：支持多种遥操作设备，包括键盘、SpaceMouse 和 MuJoCo 查看器的拖拽操作；\n* **多模态传感器**：支持异构类型的传感信号，包括低层物理状态、RGB 相机、深度图和本体感知（proprioception）；\n* **人类示范数据**：提供收集人类示范、回放示范数据集以及利用示范数据进行学习的工具。请查看我们的姊妹项目 [robomimic](https:\u002F\u002Farise-initiative.github.io\u002Frobomimic-web\u002F)；\n* **照片级真实感渲染**：集成高级图形工具，可对仿真场景进行实时照片级真实感渲染，包括支持 NVIDIA Isaac Sim 渲染。\n\n## 引用\n如果您在发表的论文中使用了此框架，请引用 [**robosuite**](https:\u002F\u002Frobosuite.ai)：\n```bibtex\n@inproceedings{robosuite2020,\n  title={robosuite: A Modular Simulation Framework and Benchmark for Robot Learning},\n  author={Yuke Zhu and Josiah Wong and Ajay Mandlekar and Roberto Mart\\'{i}n-Mart\\'{i}n and Abhishek Joshi and Soroush Nasiriany and Yifeng Zhu and Kevin Lin},\n  booktitle={arXiv preprint arXiv:2009.12293},\n  year={2020}\n}\n```","# robosuite 快速上手指南\n\n## 环境准备\n\n- **操作系统**：Linux（推荐 Ubuntu 20.04+）或 macOS（部分功能可能受限），Windows 用户建议使用 WSL2。\n- **Python 版本**：3.8 ~ 3.11\n- **依赖项**：\n  - MuJoCo 2.3.2 或更高版本（robosuite v1.5 起使用 DeepMind 官方 MuJoCo Python 绑定）\n  - 建议使用虚拟环境（如 `venv` 或 `conda`）隔离依赖\n\n> 💡 国内用户可考虑使用清华源等镜像加速 PyPI 下载。\n\n## 安装步骤\n\n1. 安装 MuJoCo（若尚未安装）：\n\n```bash\npip install mujoco\n```\n\n2. 
安装 robosuite：\n\n```bash\npip install robosuite\n```\n\n> 若需最新开发版，可从 GitHub 安装：\n\n```bash\npip install git+https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite.git\n```\n\n国内用户可尝试使用镜像加速：\n\n```bash\npip install robosuite -i https:\u002F\u002Fpypi.tuna.tsinghua.edu.cn\u002Fsimple\n```\n\n## 基本使用\n\n以下是最简单的示例：加载一个环境并执行随机动作。\n\n```python\nimport numpy as np\nimport robosuite as suite\n\n# 创建一个环境（Panda 机械臂 + Lift 任务）\nenv = suite.make(\n    env_name=\"Lift\",                # 任务名称\n    robots=\"Panda\",                 # 使用的机器人\n    has_renderer=True,              # 启用 GUI 渲染器\n    render_camera=\"agentview\",      # 渲染视角\n    has_offscreen_renderer=False,   # 不使用离屏渲染\n    use_camera_obs=False,           # 不返回图像观测\n)\n\n# 重置环境\nenv.reset()\n\n# robosuite 原生环境不提供 Gym 风格的 action_space，动作上下界需通过 action_spec 获取\nlow, high = env.action_spec\n\n# 随机策略运行 100 步\nfor _ in range(100):\n    action = np.random.uniform(low, high)  # 随机采样动作\n    obs, reward, done, info = env.step(action)\n    env.render()  # 实时渲染\n```\n\n运行后将弹出 MuJoCo 可视化窗口，展示机械臂执行随机动作的过程。\n\n更多环境、机器人和控制器选项，请参考官方文档：[https:\u002F\u002Frobosuite.ai\u002Fdocs\u002Foverview.html](https:\u002F\u002Frobosuite.ai\u002Fdocs\u002Foverview.html)","某高校机器人实验室正在开发一个基于强化学习的双臂协作抓取算法，用于在杂乱环境中完成物品分拣任务。\n\n### 没有 robosuite 时\n- 团队需从零搭建仿真环境，手动建模机械臂、物体和场景，耗时数周且难以保证物理真实性。\n- 缺乏标准化的基准任务，不同成员实现的环境接口不一致，导致算法复现困难、结果不可比。\n- 控制器需自行编写底层关节控制逻辑，调试复杂，容易引入非算法本身的性能偏差。\n- 无法快速切换不同机器人构型（如更换夹爪或增加手臂），限制了算法泛化能力的验证。\n- 视觉输入仅支持简单渲染，难以模拟真实相机噪声或光照变化，影响视觉策略的迁移效果。\n\n### 使用 robosuite 后\n- 直接调用内置的双臂操作环境（如 TwoArmPegInHole 或 TwoArmLift），几分钟内即可启动高保真仿真。\n- 基于统一 API 和官方基准任务，团队能公平比较不同强化学习算法，并与社区结果直接对标。\n- 内置复合控制器（如 OSC 和全身控制器）开箱即用，聚焦高层策略设计而非底层控制细节。\n- 利用模块化机器人组合功能，轻松替换末端执行器或测试人形机器人平台，加速跨构型泛化实验。\n- 启用 v1.5 的照片级渲染与多视角视觉模态，生成接近真实摄像头的图像数据，提升策略部署成功率。\n\nrobosuite 显著降低了机器人学习研究的工程门槛，让团队将精力集中在核心算法创新而非仿真基础设施搭建上。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FARISE-Initiative_robosuite_9f0299a0.png","ARISE-Initiative","ARISE Initiative","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002FARISE-Initiative_84ed79a8.png","Advancing Robot Intelligence through Simulated Environments (ARISE)",null,"https:\u002F\u002Fgithub.com\u002FARISE-Initiative",[83,87],{"name":84,"color":85,"percentage":86},"Python","#3572A5",100,{"name":88,"color":89,"percentage":90},"CMake","#DA3434",0,2337,694,"2026-04-05T19:38:15","NOASSERTION","Linux, macOS, Windows","未说明",{"notes":98,"python":96,"dependencies":99},"需要安装 MuJoCo 物理引擎；从 v1.4 起使用 DeepMind 官方的 MuJoCo Python 绑定；支持 NVIDIA Isaac Sim 进行照片级渲染（可选）；部分功能如人形机器人、全身控制器和遥操作设备需额外配置。",[100],"mujoco",[18],[103,104,105,106,107],"robotics","robot-manipulation","reinforcement-learning","physics-simulation","robot-learning",7,"2026-03-27T02:49:30.150509","2026-04-06T08:09:02.332633",[112,117,122,127,132,136],{"id":113,"question_zh":114,"answer_zh":115,"source_url":116},921,"如何将人类演示数据转换为 GR00T-N1 所需的 LeRobot v2.0 格式？","GR00T-N1 实验中使用的控制器配置应为：\n\n```json\n\"gripper\": {\n    \"type\": \"GRIP\",\n    \"use_action_scaling\": false\n}\n```\n\n如果改用 JointPositionController，需注意关节索引应为整数而非名称。可手动添加如下代码转换：\n\n```python\nif type(joint_indexes[\"joints\"][0]) != int:\n    self.joint_index = [self.sim.model.joint_name2id(joint_name) for joint_name in joint_indexes[\"joints\"]]\n```\n\n但要注意动作维度需与环境期望一致（例如手部使用 Gripper 时为 6 维，而 Joint 模式可能有 11 个关节），否则会报错。","https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fissues\u002F714",{"id":118,"question_zh":119,"answer_zh":120,"source_url":121},922,"为什么随着训练 episode 增加，离屏渲染返回的图像变成全黑（零数组）？","该问题表现为初期 episode 图像正常，但到某一时刻后固定步骤开始持续返回黑图。目前社区未提供明确技术解决方案，但有用户建议尝试使用 dense reward 设置（如 Lift 任务支持）。此外，可参考 SurrealAI 团队在类似问题中的处理方式（见 issue 
中链接）。","https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fissues\u002F40",{"id":123,"question_zh":124,"answer_zh":125,"source_url":126},923,"离屏渲染时报错 'ERROR: GLEW initialization error: Missing GL version' 怎么解决？","可在 robosuite 的 base.py 文件中添加以下代码初始化 GlfwContext：\n\n```python\nif self.sim._render_context_offscreen is None:\n    from mujoco_py import GlfwContext\n    GlfwContext(offscreen=True)\n    render_context = MjRenderContextOffscreen(self.sim, device_id=self.render_gpu_device_id)\n    self.sim.add_render_context(render_context)\n```\n\n此方法已被多位用户验证有效，用于解决 mujoco-py 中离屏渲染的 GLEW 初始化问题。","https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fissues\u002F114",{"id":128,"question_zh":129,"answer_zh":130,"source_url":131},924,"在 Ubuntu 上运行 robosuite 报错 'MjRenderContextOffscreen' object has no attribute 'con' 如何解决？","可尝试以下两种方法：\n1. 使用 xvfb-run 配合设置 MUJOCO_GL=\"glx\"，例如：\n   ```bash\n   MUJOCO_GL=\"glx\" xvfb-run -s \"-screen 0 1280x720x24\" -a python xxx.py\n   ```\n2. 修改 robosuite\u002Futils\u002Fbinding_utils.py 文件，将非 macOS 系统的 MUJOCO_GL 默认值从 \"egl\" 改为 \"glx\" 或根据系统调整（Linux 推荐 \"glx\"，Windows 可尝试 \"wgl\"）。","https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fissues\u002F469",{"id":133,"question_zh":134,"answer_zh":135,"source_url":131},925,"Windows 上出现 'MjRenderContextOffscreen' object has no attribute 'con' 错误怎么办？","在 Windows 上，需修改 robosuite\u002Futils\u002Fbinding_utils.py 文件，将非 macOS 系统的 MUJOCO_GL 渲染后端从默认的 \"egl\" 改为 \"wgl\"。具体修改如下：\n\n```python\nif macros.MUJOCO_GPU_RENDERING and os.environ.get(\"MUJOCO_GL\", None) not in [\"osmesa\", \"glx\"]:\n    if _SYSTEM == \"Darwin\":\n        os.environ[\"MUJOCO_GL\"] = \"cgl\"\n    else:\n        os.environ[\"MUJOCO_GL\"] = \"wgl\"  # 原为 \"egl\"\n_MUJOCO_GL = os.environ.get(\"MUJOCO_GL\", \"\").lower().strip()\n```",{"id":137,"question_zh":138,"answer_zh":139,"source_url":131},926,"如何解决因 OpenGL 版本导致的渲染错误？","有用户反馈，降级 Python 的 OpenGL 包版本可解决某些 GL 相关的渲染错误。此外，确保正确设置 MUJOCO_GL 环境变量（如 \"glx\"、\"egl\" 或 \"wgl\"）并配合 xvfb-run（Linux）或正确驱动（Windows）使用，也能避免此类问题。",[141,146,151,156,161,166,171,176,181,185,190],{"id":142,"version":143,"summary_zh":144,"released_at":145},110244,"v1.5.2","## What's Changed\r\n* Update domain randomization wrapper to work with  mujoco!=3.1.1 for some settings by @kevin-thankyou-lin in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F585\r\n* Bump version 1.5.0 -> 1.5.1 by @kevin-thankyou-lin in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F601\r\n* updates to docs for v1.5 by @Abhiram824 in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F604\r\n* Fix doc links by @kevin-thankyou-lin in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F608\r\n* potential fix to build and docs synchronization error by @Abhiram824 in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F609\r\n* Adding hdiapi to imports for docs for spacemouse by @abhihjoshi in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F610\r\n* Fix-device-docs-I by @kevin-thankyou-lin in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F613\r\n* Adding render modalities by @abhihjoshi in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F596\r\n* Adding USD requirements to requirements-extra by @abhihjoshi in 
https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F617\r\n* small update to demos page by @Abhiram824 in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F619\r\n* Add all manipulators to demo_random_action by default by @kevin-thankyou-lin in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F618\r\n* Replace warn with warning by @squarefk in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F622\r\n* fix the mounting direction of dex hand for panda robot by @xieleo5 in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F624\r\n* fix pot with handles when pot is rectangle by @xieleo5 in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F628\r\n* Add the joint pos obs back by @squarefk in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F627\r\n* PyPI Publishing GitHub Action by @abhihjoshi in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F629\r\n* [fix] fix mink IK with delta input type by @Dingry in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F626\r\n* Adding pypi workflow by @abhihjoshi in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F632\r\n* Update v1.5.1 changelog by @kevin-thankyou-lin in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F634\r\n* Enable instance segmentation to include bodies in arena by @kevin-thankyou-lin in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F638\r\n* Update NutAssembly to handle different mounts aside from default by @kevin-thankyou-lin in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F642\r\n* modify opencv renderer to support multiple camera view by @xieleo5 in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F641\r\n* feat: Dualsense device support by @OceanPresentChao in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F645\r\n* Hot fix `Task.generate_id_mappings()`: remove `left\u002Fright_eef_target` from id mappings by @kevin-thankyou-lin in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F650\r\n* Allow base_type specification in manipulation envs by @kevin-thankyou-lin in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F655\r\n* Adding shell inertia for meshes in Sawyer by @abhihjoshi in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F662\r\n* Support multiple cameras by @squarefk in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F664\r\n* added xarm7 (revised) by @EdwardoSunny in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F667\r\n* [add] add grasp qpos for inspire and fourier hand by @Dingry in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F670\r\n* Update mobile_robot.py by @ShahRutav in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F680\r\n* Update of robot controller and robot-mounted base by @Dingry in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F679\r\n* data collection wrapper resets by @snasiriany in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F688\r\n* [add] add 
sensors by @Dingry in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F691\r\n* Ensure EGL ImportError includes PYOPENGL_PLATFORM value by @emmanuel-ferdman in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F684\r\n* FIx https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fissues\u002F665 and refactor variable names by @Abhiram824 in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F674\r\n* Update composite_controller.py by @kevin-thankyou-lin in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F700\r\n* fix baxter mesh issues by @Abhiram824 in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F703\r\n* update part controller config to include joint indices by @Abhiram824 in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F697\r\n* Updating base type comments by @NirshalChandraSekar in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F732\r\n* Add `set_scale` to `Arena` and `MujocoObject`  by @kevin-thankyou-lin in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F643\r\n* fix site scaling to get all sites from world body by @Abhiram824 in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F738\r\n* Env seeding by @Abhiram824 in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F724\r\n* Pin mink versio","2025-12-24T22:28:42",{"id":147,"version":148,"summary_zh":149,"released_at":150},110245,"v1.5.1","## What's Changed\r\n* Update basicusage.md by @snasiriany in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F540\r\n* Print video save path in demo vid recording by @kevin-thankyou-lin in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F543\r\n* Update overview page text by @kevin-thankyou-lin in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F544\r\n* Fix seg demo script by @kevin-thankyou-lin in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F542\r\n* Demo fixes by @Abhiram824 in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F548\r\n* Gymwrapper: support both gym and gymnasium, and support dict_obs and … by @youliangtan in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F547\r\n* [fix] fix whole_body_ik config and add a default whole_body_mink_ik config by @Dingry in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F550\r\n* Update demo docs w\u002F teleop usage info by @kevin-thankyou-lin in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F545\r\n* Fix part controller demo by @youliangtan in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F546\r\n* Remove unneeded print by @kevin-thankyou-lin in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F552\r\n* Fix demo renderer by @kevin-thankyou-lin in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F525\r\n* Add troubleshooting message for spacemouse failure to open by @Ynng in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F553\r\n* simplify composite controller keyword for body_parts by @snasiriany in 
https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F558\r\n* Have CI check all `tests\u002F` by @kevin-thankyou-lin in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F557\r\n* update part controller jsons by @Abhiram824 in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F572\r\n* Docs update by @Abhiram824 in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F571\r\n* Fixes #569 (🐛 Bug)  by @mishmish66 in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F570\r\n* DexMimicGen assets update by @xieleo5 in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F567\r\n* Fix joints and actuators by @squarefk in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F575\r\n* Update devices + related docs by @kevin-thankyou-lin in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F577\r\n* Adding doc update workflow by @abhihjoshi in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F583\r\n* Documention CI Update .nojekyll fix by @abhihjoshi in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F587\r\n* Updating module\u002Frobots and module\u002Frenderer and module\u002Fcontrollers docs by @abhihjoshi in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F566\r\n* terminate mjviewer on resets by @snasiriany in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F591\r\n* Match the orientation of the fourier hands with the inspire hands by @squarefk in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F592\r\n* osc position fixes by @snasiriany in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F593\r\n* [fix] fix equality removal with actuator removal by @Dingry in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F594\r\n* update robots section and env task images by @Abhiram824 in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F599\r\n\r\n## New Contributors\r\n* @youliangtan made their first contribution in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F547\r\n* @Ynng made their first contribution in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F553\r\n* @mishmish66 made their first contribution in https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fpull\u002F570\r\n\r\n**Full Changelog**: https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fcompare\u002Fv1.5.0...v1.5.1","2025-02-08T17:49:31",{"id":152,"version":153,"summary_zh":154,"released_at":155},110246,"v1.5.0","# **Robosuite v1.5 Release Notes**\r\n\r\n## Highlights\r\nThe 1.5 release of **Robosuite** introduces significant advancements to extend flexibility and realism in robotic simulations. Key highlights include support for diverse robot embodiments (e.g., humanoids), custom robot compositions, composite controllers (such as whole-body controllers), expanded teleoperation devices, and photorealistic rendering capabilities.\r\n\r\n### New Features\r\n- **Diverse Robot Embodiments**: Support for complex robots, including humanoids, allowing exploration of advanced manipulation and mobility tasks. 
Please see [robosuite_models](https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite_models) for extra robosuite-compatible robot models.\r\n- **Custom Robot Composition**: Users can now build custom robots from modular components, offering extensive configuration options.\r\n- **Composite Controllers**: New controller abstraction includes whole-body controllers, and the ability to control robots with composed body parts, arms, and grippers.\r\n- **Additional Teleoperation Devices**: Expanded compatibility with teleoperation tools like drag-and-drop in the MuJoCo viewer and Apple Vision Pro.\r\n- **Photorealistic Rendering**: Integration of NVIDIA Isaac Sim for enhanced, real-time photorealistic visuals, bringing simulations closer to real-world fidelity.\r\n\r\n### Improvements\r\n- **Updated Documentation**: New tutorials and expanded documentation on utilizing advanced controllers, teleoperation, and rendering options.\r\n- **Simulation speed improvement**: By default we set the `lite_physics` flag to True to skip redundant calls to [`env.sim.step()`](https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fblob\u002F29e73bd41f9bc43ba88bb7d2573b868398905819\u002Frobosuite\u002Fenvironments\u002Fbase.py#L444)\r\n\r\n### Migration\r\n\r\n- Composite controller refactoring: please see example of [usage](https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fblob\u002F29e73bd41f9bc43ba88bb7d2573b868398905819\u002Frobosuite\u002Fexamples\u002Fthird_party_controller\u002Fmink_controller.py#L421)\r\n\r\n---\r\n\r\n### Contributor Spotlight\r\n\r\nWe would like to introduce the newest member of our robosuite core team, who has contributed significantly to this release!\r\n\r\n[Kevin Lin](https:\u002F\u002Fkevin-thankyou-lin.github.io\u002F) @kevin-thankyou-lin \r\n","2024-10-29T19:15:31",{"id":157,"version":158,"summary_zh":159,"released_at":160},110247,"v1.4.0","# robosuite 1.4.0 Release Notes\r\n- Highlights\r\n- New Features\r\n- Improvements\r\n- Critical Bug Fixes\r\n- Other Bug Fixes\r\n\r\n# Highlights\r\nThis release of robosuite refactors our backend to leverage DeepMind's new [mujoco](https:\u002F\u002Fgithub.com\u002Fdeepmind\u002Fmujoco) bindings. Below, we discuss the key details of this refactoring:\r\n\r\n## Installation\r\nNow, installation has become much simpler, with mujoco being directly installed on Linux or Mac via `pip install mujoco`. Importing mujoco is now done via `import mujoco` instead of `import mujoco_py`\r\n\r\n## Rendering\r\nThe new DeepMind mujoco bindings do not ship with an onscreen renderer. As a result, we've implented an [OpenCV renderer](https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fblob\u002Fmaster\u002Frobosuite\u002Futils\u002Fopencv_renderer.py), which provides most of the core functionality from the original mujoco renderer, but has a few limitations (most significantly, no glfw keyboard callbacks and no ability to move the free camera).\r\n\r\n# Improvements\r\nThe following briefly describes other changes that improve on the pre-existing structure. This is not an exhaustive list, but a highlighted list of changes.\r\n\r\n- Standardize end-effector frame inference (#25). Now, all end-effector frames are correctly inferred from raw robot XMLs and take into account arbitrary relative orientations between robot arm link frames and gripper link frames.\r\n\r\n- Improved robot textures (#27). 
With added support from DeepMind's mujoco bindings for obj texture files, all robots are now natively rendered with more accurate texture maps.\r\n\r\n- Revamped macros (#30). Macros now references a single macro file that can be arbitrarily specified by the user.\r\n\r\n- Improved method for specifying GPU ID (#29). The new logic is as follows:\r\n  1. If `render_device_gpu_id=-1`, `MUJOCO_EGL_DEVICE_ID` and `CUDA_VISIBLE_DEVICES` are not set, we either choose the first available device (usually `0`) if `macros.MUJOCO_GPU_RENDERING` is `True`, otherwise use CPU;\r\n  2. `CUDA_VISIBLE_DEVICES` or `MUJOCO_EGL_DEVICE_ID` are set, we make sure that they dominate over programmatically defined GPU device id.\r\n  3. If `CUDA_VISIBLE_DEVICES` and `MUJOCO_EGL_DEVICE_ID` are both set, then we use `MUJOCO_EGL_DEVICE_ID` and make sure it is defined in `CUDA_VISIBLE_DEVICES`\r\n\r\n- robosuite docs updated\r\n\r\n- Add new papers\r\n\r\n\r\n# Critical Bug Fixes\r\n- Fix Sawyer IK instability bug (#25)\r\n\r\n\r\n# Other Bug Fixes\r\n- Fix iGibson renderer bug (#21)\r\n\r\n\r\n-------\r\n\r\n## Contributor Spotlight\r\nWe would like to introduce the newest members of our robosuite core team, all of whom have contributed significantly to this release!\r\n@awesome-aj0123\r\n@snasiriany\r\n@zhuyifengzju\r\n","2022-11-30T07:48:42",{"id":162,"version":163,"summary_zh":164,"released_at":165},110248,"v1.3","# robosuite 1.3.0 Release Notes\r\n- Highlights\r\n- New Features\r\n- Improvements\r\n- Critical Bug Fixes\r\n- Other Bug Fixes\r\n\r\n# Highlights\r\nThis release of robosuite brings powerful rendering functionalities including new renderers and multiple vision modalities, in addition to some general-purpose camera utilties. Below, we discuss the key details of these new features:\r\n\r\n## Renderers\r\nIn addition to the native Mujoco renderer, we present two new renderers, [NVISII](https:\u002F\u002Fgithub.com\u002Fowl-project\u002FNVISII) and [iGibson](http:\u002F\u002Fsvl.stanford.edu\u002Figibson\u002F), and introduce a standardized rendering interface API to enable easy swapping of renderers.\r\n\r\nNVISII is a high-fidelity ray-tracing renderer originally developed by NVIDIA, and adapted for plug-and-play usage in **robosuite**. It is primarily used for training perception models and visualizing results in high quality. It can run at up to ~0.5 fps using a GTX 1080Ti GPU. Note that NVISII must be installed (`pip install nvisii`) in order to use this renderer.\r\n\r\niGibson is a much faster renderer that additionally supports physics-based rendering (PBR) and direct rendering to pytorch tensors. While not as high-fidelity as NVISII, it is incredibly fast and can run at up to ~1500 fps using a GTX 1080Ti GPU. Note that iGibson must be installed (`pip install igibson`) in order to use this renderer.\r\n\r\nWith the addition of these new renderers, we also introduce a standardized [renderer](https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fblob\u002Fmaster\u002Frobosuite\u002Frenderers\u002Fbase.py) for easy usage and customization of the various renderers. During each environment step, the renderer updates its internal state by calling `update()` and renders by calling `render(...)`. The resulting visual observations can be polled by calling `get_pixel_obs()` or by calling other methods specific to individual renderers. 
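Whichever renderer backend is active, most training code ultimately consumes the rendered frames through the per-step observation dict. The following is a minimal sketch, not taken from these release notes, assuming the standard Lift task and the default agentview camera; it requests the RGB and depth modalities (described in the next subsection) at construction time:

```python
import numpy as np
import robosuite as suite

# Build an offscreen-rendered environment that returns camera observations.
env = suite.make(
    env_name="Lift",
    robots="Panda",
    has_renderer=False,
    has_offscreen_renderer=True,
    use_camera_obs=True,      # RGB frames, values in [0, 255]
    camera_depths=True,       # depth frames, normalized to [0, 1]
)

obs = env.reset()
low, high = env.action_spec
for _ in range(5):
    obs, reward, done, info = env.step(np.random.uniform(low, high))
    rgb = obs["agentview_image"]    # RGB array for the default camera
    depth = obs["agentview_depth"]  # matching depth map
```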
We provide a [demo script](https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fblob\u002Fmaster\u002Frobosuite\u002Fdemos\u002Fdemo_segmentation.py) for testing each new renderer, and our docs also provide [additional information](http:\u002F\u002Frobosuite.ai\u002Fdocs\u002Fmodules\u002Frenderers.md) on specific renderer details and installation procedures.\r\n\r\n## Vision Modalities\r\nIn addition to new renderers, we also provide broad support for multiple vision modalities across all (Mujoco, NVISII, iGibson) renderers:\r\n\r\n- **RGB**: Standard 3-channel color frames with values in range `[0, 255]`. This is set during environment construction with the `use_camera_obs` argument.\r\n- **Depth**: 1-channel frame with normalized values in range `[0, 1]`. This is set during environment construction with the `camera_depths` argument.\r\n- **Segmentation**: 1-channel frames with pixel values corresponding to integer IDs for various objects. Segmentation can occur by class, instance, or geom, and is set during environment construction with the `camera_segmentations` argument.\r\n    \r\nIn addition to the above modalities, the following modalities are supported by a subset of renderers:\r\n\r\n- **Surface Normals**: [NVISII, iGibson] 3-channel (x,y,z) normalized direction vectors.\r\n- **Texture Coordinates**: [NVISII] 3-channel (x,y,z) coordinate texture mappings for each element\r\n- **Texture Positioning**: [NVISII, iGibson] 3-channel (x,y,z) global coordinates of each pixel.\r\n\r\nSpecific modalities can be set during environment and renderer construction. We provide a [demo script](https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fblob\u002Fmaster\u002Frobosuite\u002Fdemos\u002Fdemo_nvisii_modalities.py) for testing the different modalities supported by NVISII and a [demo script](https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fblob\u002Fmaster\u002Frobosuite\u002Fdemos\u002Fdemo_igibson_modalities.py) for testing the different modalities supported by iGibson.\r\n\r\n## Camera Utilities\r\nWe provide a set of general-purpose [camera utilities](https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fblob\u002Fmaster\u002Frobosuite\u002Futils\u002Fcamera_utils.py) that intended to enable easy manipulation of environment cameras. Of note, we include transform utilities for mapping between pixel, camera, and world frames, and include a [CameraMover](https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fblob\u002Fmaster\u002Frobosuite\u002Futils\u002Fcamera_utils.py#L244) class for dynamically moving a camera during simulation, which can be used for many purposes such as the [DemoPlaybackCameraMover](https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fblob\u002Fmaster\u002Frobosuite\u002Futils\u002Fcamera_utils.py#L419) subclass that enables smooth visualization during demonstration playback.\r\n\r\n# Improvements\r\nThe following briefly describes other changes that improve on the pre-existing structure. This is not an exhaustive list, but a highlighted list of changes.\r\n\r\n- Standardize EEF frames (#204). Now, all grippers have identical conventions for plug-and-play usage across types.\r\n\r\n- Add OSC_POSITION control option for spacemouse (#209).\r\n\r\n- Improve model class hierarchy for robots. 
Now, robots own a subset of models (gripper(s), mount(s), etc.), allowing easy external access to the robot'","2021-10-19T17:57:34",{"id":167,"version":168,"summary_zh":169,"released_at":170},110249,"v1.2","# robosuite 1.2.0 Release Notes\r\n- Highlights\r\n- New Features\r\n- Improvements\r\n- Critical Bug Fixes\r\n- Other Bug Fixes\r\n\r\n# Highlights\r\nThis release of robosuite tackles a major challenge of using simulators: real-world transfer! (Sim2Real)\r\n\r\nWe present two features to significantly improve the sim2real transferrability -- realistic sensor modeling (*observables*) and control over physics modeling parameters (*dynamics randomization*).\r\n\r\n## Observables\r\nThis standardizes and modularizes how observations are computed and gathered within a given env. Now, all observations received from the `env.step()` call can be modified programmatically in either a deterministic or stochastic way. Sensor realism has been increased with added support for individual sensor sampling rates, corruption, delay, and filtering. The OTS behavior (obs dict structure, default no corruption \u002F delay \u002F filtering) has been preserved for backwards compatibility.\r\n\r\nEach `Observable` owns its own `_sensor`, `_corrupter`, `_delayer`, and `_filter` functions, which are used to process new data computed during its `update()` call which is called after every _simulation_ timestep, NOT policy step! (Note, however that a new value is only computed once per sampling period, NOT at every `update()` call). Its functionality is described briefly described below:\r\n\r\n- `_sensor`: Arbitrary function that takes in an observation cache and computes the \"raw\" (potentially ground truth) value for the given observable. It can potentially leverage pre-computed values from the observation cache to compute its output. The `@sensor` decorator is provided to denote this type of function, and guarantees a modality for this sensor as well.\r\n- `_corrupter`: Arbitrary function that takes in the output from `_sensor` and outputs the corrupted data.\r\n- `_delayer`: Arbitrary function that takes no arguments and outputs a float time value (in seconds), denoting how much delay should be applied to the next sampling cycle\r\n- `_filter`: Arbitrary function that takes in the output of `_corrupter` and outputs the filtered data.\r\n\r\nAll of the above can either be (re-)defined at initialization or during runtime. Utility functions have been provided in the `base.py` environment module to easily interact with all observables owned by the environment.\r\n\r\nSome standard corrupter and delayer function generators are provided ([deterministic \u002F uniform \u002F gaussian] [corruption \u002F delay]), including dummy no-ops for standard functions. 
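As a concrete illustration of the hooks described above, the sketch below attaches Gaussian noise to an existing proprioceptive sensor at runtime. It is a minimal example under the assumption that `create_gaussian_noise_corrupter` and the base-env utility `modify_observable(...)` are among the standard generators and helper functions this section refers to:

```python
import numpy as np
import robosuite as suite
from robosuite.utils.observables import create_gaussian_noise_corrupter

env = suite.make(
    env_name="Lift",
    robots="Panda",
    has_renderer=False,
    has_offscreen_renderer=False,
    use_camera_obs=False,
)
env.reset()

# Swap in a corrupter for the end-effector position observable at runtime;
# the sensor's raw value is still computed, then passed through this function.
env.modify_observable(
    observable_name="robot0_eef_pos",
    attribute="corrupter",
    modifier=create_gaussian_noise_corrupter(mean=0.0, std=0.005),
)

obs, reward, done, info = env.step(np.zeros(env.action_dim))
print(obs["robot0_eef_pos"])  # now includes the injected sensor noise
```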
All of this can be found in [observables.py](https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fblob\u002Fmaster\u002Frobosuite\u002Futils\u002Fobservables.py#L150), and has been [heavily documented](http:\u002F\u002Frobosuite.ai\u002Fdocs\u002Fmodules\u002Fsensors.html#observables).\r\n\r\nAn example script demo'ing the new functionality can be found in [demo_sensor_corruption.py](https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fblob\u002Fmaster\u002Frobosuite\u002Fdemos\u002Fdemo_sensor_corruption.py).\r\n\r\n## Dynamics Randomization\r\nPhysical parameters governing the underlying physics model can now be modified in real-time via the [DynamicsModder](https:\u002F\u002Fgithub.com\u002FARISE-Initiative\u002Frobosuite\u002Fblob\u002Fmaster\u002Frobosuite\u002Futils\u002Fmjmod.py#L1401) class in `mjmod.py`. This modder allows mid-sim randomization of the following supported properties, sorted by element group (for more information, please see [Mujoco XML Reference](http:\u002F\u002Fwww.mujoco.org\u002Fbook\u002FXMLreference.html))\r\n\r\n#### Opt (Global) Parameters\r\n- `density`: Density of the medium (i.e.: air)\r\n- `viscosity`: Viscosity of the medium (i.e.: air)\r\n\r\n#### Body Parameters\r\n- `position`: (x, y, z) Position of the body relative to its parent body\r\n- `quaternion`: (qw, qx, qy, qz) Quaternion of the body relative to its parent body\r\n- `inertia`: (ixx, iyy, izz) diagonal components of the inertia matrix associated with this body\r\n- `mass`: mass of the body\r\n\r\n#### Geom Parameters\r\n- `friction`: (sliding, torsional, rolling) friction values for this geom\r\n- `solref`: (timeconst, dampratio) contact solver values for this geom\r\n- `solimp`: (dmin, dmax, width, midpoint, power) contact solver impedance values for this geom\r\n\r\n#### Joint parameters\r\n- `stiffness`: Stiffness for this joint\r\n- `frictionloss`: Friction loss associated with this joint\r\n- `damping`: Damping value for this joint\r\n- `armature`: Gear inertia for this joint\r\n\r\nThe new `DynamicsModder` follows the same basic API as the other `Modder` classes, and allows per-parameter and per-group randomization enabling. Apart from randomization, this modder can also be instantiated to selectively modify values at runtime. Detailed information can be found on our [docs page along with an informative example script](http:\u002F\u002Frobosuite.ai\u002Fdocs\u002Falgorithms\u002Fsim2real.html#dynamics).\r\n\r\n# Improvements\r\nThe following briefly describes other changes that improve on the pre-existing structure. This is not an exhaustive list, but a highlighted list of changes.\r\n\r\n- robosuite docs have been completely overhauled! Hopefully no more broken links or outdated","2021-02-18T06:56:41",{"id":172,"version":173,"summary_zh":174,"released_at":175},110250,"v1.1","# robosuite 1.1.0 Release Notes\r\n- Highlights\r\n- New Features\r\n- Improvements\r\n- Critical Bug Fixes\r\n- Other Bug Fixes\r\n\r\n# Highlights\r\nWhile most surface-level functionality hasn't changed, the underlying infrastructure has been heavily reworked to reduce redundancy, improve standardization and ease-of-usage, and future-proof against expected expansions. 
Specifically, the following standards were pursued:\r\n\r\n- Pretty much everything should have a name (no name = no reference in sim)\r\n- All models should have a standardized interface (`MujocoModel`)\r\n- Any manipulation-specific properties or methods should be abstracted away to a subclass to future-proof against novel robotic domains that might be added in the future.\r\n- All associated attributes should try to be kept to a single object reference, to prevent silent errors from occurring due to partially modified objects. For example, instead of having `self.object` and `self.object_name`, just have `self.object`, since it already includes its own name reference in `self.object.name`.\r\n\r\n# New Features\r\nThis is not an exhaustive list, but includes the key features \u002F changes in this PR most relevant to the common user that should greatly streamline environment prototyping and debugging.\r\n\r\n ### Standardized Model Class Hierarchy\r\nNow, all (robot, gripper, object) models inherit from the `MujocoModel` class, which defines many useful properties and methods, including references to the model joints, contact geoms, important sites, etc. This allows much more standardized usage of these models when designing environments.\r\n\r\n### Modularized Environment Class Hierarchy\r\nWe do not expect robosuite to remain solely manipulation-based. Therefore, all environment properties and methods common to manipulation-based domains were ported to `ManipulationEnv`, allowing future robot task domains to be added with little reworking. Similarly, common properties \u002F methods common to Single or TwoArm environments were ported to `SingleArmEnv` and `TwoArmEnv`, respectively. This both (a) removes much redundant code between top-level env classes, and (b) frees users to focus exclusively on the environment prototyping unique to their use case without having to duplicate much boilerplate code. So, for example, `Lift` now has a class hierarchy of `MujocoEnv` --> `RobotEnv` --> `ManipulationEnv` --> `SingleArmEnv` --> `Lift`. Note that similar changes were made to the `Robot` and `RobotModel` base classes.\r\n\r\n### Standardized and Streamlined Object Classes\r\nAll object classes now are derived from `MujocoObject`, which itself is a subclass of `MujocoModel`. This standardizes the interface across all object source modalities (`Generated` vs. `XML` based), and provides the user with an expected set of properties that can be leveraged when prototyping custom environments. Additionally, complex, procedural object generation has been added with the `CompositeObject` class, of now which the `HammerObject` and `PotWithHandles` object are now subclasses of (as examples of how to design custom composite objects).\r\n\r\n### Greater Procedural Object Generation Support\r\n`CompositeObject` and `CompositeBodyObject` classes have now been added. A `CompositeObject` is composed of multiple geoms, and a `CompositeBodyObject` is composed of multiple objects (bodies). Together, this allows for complex, procedural generation of arbitrary object shapes with potentially dynamic joint interactions. The `HammerObject` and `PotWithHandlesObject` are examples of the `CompositeObject` class, and `HingedBoxObject` is an example of the `CompositeBodyObject` class.\r\n\r\n### Standardized Geom Groups\r\nAll collision geoms now belong to group 0, while visual geoms belong to group 1. 
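A quick way to see this convention in practice (a small sketch, not part of the original notes) is to poll the geom groups directly from the simulation model:

```python
import robosuite as suite

env = suite.make(
    env_name="Lift",
    robots="Panda",
    has_renderer=False,
    has_offscreen_renderer=False,
    use_camera_obs=False,
)
env.reset()

# One group id per geom; per the convention above, group 0 geoms are the
# collision geometry and group 1 geoms are visual-only.
groups = env.sim.model.geom_group
print("collision geoms:", int((groups == 0).sum()))
print("visual geoms:", int((groups == 1).sum()))
```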
This means that methods can automatically check for the geom type by polling it's `group` attribute from its element or during sim. Moreover, all collision geoms are assigned solid rgba colors based on their semantic role (e.g.: robot vs. gripper vs. arena vs. objects). If rendering onscreen, you can easily toggle visualizing the visual and collision geoms by pressing `1` or `0`, respectively. This can be useful for debugging environments and making sure collision bodies are formed \u002F interacting as expected.\r\n\r\n### High-Utility Methods for Environment Prototyping\r\nBecause of this improved structure, many methods can now take advantage of this standardization. Some especially relevant methods are discussed briefly below:\r\n\r\n- `env.get_contacts(model)` (any env): This method will return the set of all geoms currently in contact with the inputted `model`. This is useful for debugging environments, or checking to see if certain conditions are met when designing rewards \u002F interactions.\r\n\r\n- `env._check_grasp(gripper, object_geoms)` (only manipulation envs): This method will return True if the inputted `gripper` is grasping the object specified by `object_geoms`, which could be a `MujocoModel` or simply a list of geoms that define the object. This make","2020-12-18T06:20:49",{"id":177,"version":178,"summary_zh":179,"released_at":180},110251,"v1.0","The first major version of **robosuite**. For more information, please check out https:\u002F\u002Frobosuite.ai","2020-09-28T02:28:21",{"id":182,"version":183,"summary_zh":80,"released_at":184},110252,"v0.3.0","2020-09-28T02:24:20",{"id":186,"version":187,"summary_zh":188,"released_at":189},110253,"v0.2.0","v0.2.0 release with MuJoCo 1.5 support","2019-12-09T06:32:34",{"id":191,"version":192,"summary_zh":193,"released_at":194},110254,"v0.1.0","Initial release of Surreal Robotics Suite","2018-10-27T00:37:57"]