[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-reinforcement-learning-kr--lets-do-irl":3,"tool-reinforcement-learning-kr--lets-do-irl":64},[4,17,27,35,43,56],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":16},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,3,"2026-04-05T11:01:52",[13,14,15],"开发框架","图像","Agent","ready",{"id":18,"name":19,"github_repo":20,"description_zh":21,"stars":22,"difficulty_score":23,"last_commit_at":24,"category_tags":25,"status":16},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",138956,2,"2026-04-05T11:33:21",[13,15,26],"语言模型",{"id":28,"name":29,"github_repo":30,"description_zh":31,"stars":32,"difficulty_score":23,"last_commit_at":33,"category_tags":34,"status":16},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",107662,"2026-04-03T11:11:01",[13,14,15],{"id":36,"name":37,"github_repo":38,"description_zh":39,"stars":40,"difficulty_score":23,"last_commit_at":41,"category_tags":42,"status":16},3704,"NextChat","ChatGPTNextWeb\u002FNextChat","NextChat 是一款轻量且极速的 AI 助手，旨在为用户提供流畅、跨平台的大模型交互体验。它完美解决了用户在多设备间切换时难以保持对话连续性，以及面对众多 AI 模型不知如何统一管理的痛点。无论是日常办公、学习辅助还是创意激发，NextChat 都能让用户随时随地通过网页、iOS、Android、Windows、MacOS 或 Linux 端无缝接入智能服务。\n\n这款工具非常适合普通用户、学生、职场人士以及需要私有化部署的企业团队使用。对于开发者而言，它也提供了便捷的自托管方案，支持一键部署到 Vercel 或 Zeabur 等平台。\n\nNextChat 的核心亮点在于其广泛的模型兼容性，原生支持 Claude、DeepSeek、GPT-4 及 Gemini Pro 等主流大模型，让用户在一个界面即可自由切换不同 AI 能力。此外，它还率先支持 MCP（Model Context Protocol）协议，增强了上下文处理能力。针对企业用户，NextChat 提供专业版解决方案，具备品牌定制、细粒度权限控制、内部知识库整合及安全审计等功能，满足公司对数据隐私和个性化管理的高标准要求。",87618,"2026-04-05T07:20:52",[13,26],{"id":44,"name":45,"github_repo":46,"description_zh":47,"stars":48,"difficulty_score":23,"last_commit_at":49,"category_tags":50,"status":16},2268,"ML-For-Beginners","microsoft\u002FML-For-Beginners","ML-For-Beginners 是由微软推出的一套系统化机器学习入门课程，旨在帮助零基础用户轻松掌握经典机器学习知识。这套课程将学习路径规划为 12 周，包含 26 节精炼课程和 52 
# lets-do-irl

**Repository:** [reinforcement-learning-kr/lets-do-irl](https://github.com/reinforcement-learning-kr/lets-do-irl)

**Description:** Inverse RL algorithms (APP, MaxEnt, GAIL, VAIL)

## Overview

lets-do-irl is an open-source PyTorch project focused on implementing several inverse reinforcement learning (IRL) algorithms. In reinforcement learning tasks, hand-designing a reward function is often difficult and subjective; lets-do-irl instead lets the agent observe expert demonstration data and infer the reward function automatically, so that it learns to imitate expert behavior.

The project covers classic and more recent algorithms, including APP, MaxEnt, GAIL, and VAIL, and the agents have been trained and verified in the Mountain Car and Mujoco Hopper environments. For researchers, developers, and students who want to reproduce IRL papers or explore how agents learn to imitate, it is an excellent hands-on platform. The code structure is clear and supports Tensorboard visualization, which lowers the barrier to reproducing these algorithms. Whether you want to understand the principles of inverse RL in depth or are looking for a practical imitation-learning approach, lets-do-irl provides solid support.
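To make the "infer the reward from demonstrations" idea concrete, here is a minimal sketch of the adversarial mechanism behind the GAIL/VAIL family: a discriminator learns to tell expert state-action pairs from the policy's, and its output becomes a surrogate reward for the RL step. This is illustrative only, not the repository's exact code; the class and function names are mine, and the Hopper-v2 dimensions in the usage line are an assumption.

```python
import torch
import torch.nn as nn

class Discriminator(nn.Module):
    """Scores (state, action) pairs: close to 1 for expert-like, 0 for policy-like."""
    def __init__(self, state_dim, action_dim, hidden=100):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(state_dim + action_dim, hidden), nn.Tanh(),
            nn.Linear(hidden, hidden), nn.Tanh(),
            nn.Linear(hidden, 1), nn.Sigmoid(),
        )

    def forward(self, state, action):
        return self.net(torch.cat([state, action], dim=-1))

def surrogate_reward(disc, state, action):
    # The RL step maximizes this in place of the unknown environment reward:
    # it is large when the discriminator mistakes the policy for the expert.
    return -torch.log(1.0 - disc(state, action) + 1e-8)

# Hypothetical usage with Hopper-v2 sizes (11-dim state, 3-dim action):
disc = Discriminator(state_dim=11, action_dim=3)
r = surrogate_reward(disc, torch.zeros(1, 11), torch.zeros(1, 3))
```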
# Let's do Inverse RL

![image](https://oss.gittoolsai.com/images/reinforcement-learning-kr_lets-do-irl_readme_f638d3c758f5.png)

## Introduction

This repository contains PyTorch (v0.4.1) implementations of **Inverse Reinforcement Learning (IRL)** algorithms.

- Apprenticeship Learning via Inverse Reinforcement Learning [[2](#2)]
- Maximum Entropy Inverse Reinforcement Learning [[4](#4)]
- Generative Adversarial Imitation Learning [[5](#5)]
- Variational Discriminator Bottleneck: Improving Imitation Learning, Inverse RL, and GANs by Constraining Information Flow [[6](#6)]

We have implemented and trained agents with these IRL algorithms in the following environments.

- [OpenAI GYM Mountain car](https://gym.openai.com/envs/MountainCar-v0/)
- [Mujoco Hopper](https://gym.openai.com/envs/Hopper-v2/)

For reference, reviews (in Korean) of the papers below are collected in the [Let's do Inverse RL Guide](https://reinforcement-learning-kr.github.io/2019/01/22/0_lets-do-irl-guide/).

<a name="1"></a>

- [1] [A. Y. Ng, et al., "Algorithms for Inverse Reinforcement Learning", ICML 2000.](http://ai.stanford.edu/~ang/papers/icml00-irl.pdf)

<a name="2"></a>

- [2] [P. Abbeel, et al., "Apprenticeship Learning via Inverse Reinforcement Learning", ICML 2004.](http://people.eecs.berkeley.edu/~russell/classes/cs294/s11/readings/Abbeel+Ng:2004.pdf)

<a name="3"></a>

- [3] [N. D. Ratliff, et al., "Maximum Margin Planning", ICML 2006.](https://www.ri.cmu.edu/pub_files/pub4/ratliff_nathan_2006_1/ratliff_nathan_2006_1.pdf)

<a name="4"></a>

- [4] [B. D. Ziebart, et al., "Maximum Entropy Inverse Reinforcement Learning", AAAI 2008.](http://www.aaai.org/Papers/AAAI/2008/AAAI08-227.pdf)

<a name="5"></a>

- [5] [J. Ho, et al., "Generative Adversarial Imitation Learning", NIPS 2016.](https://papers.nips.cc/paper/6391-generative-adversarial-imitation-learning.pdf)

<a name="6"></a>

- [6] [X. B. Peng, et al., "Variational Discriminator Bottleneck: Improving Imitation Learning, Inverse RL, and GANs by Constraining Information Flow", ICLR 2019.](https://arxiv.org/pdf/1810.00821.pdf)

## Table of Contents

- [Let's do Inverse RL](#lets-do-inverse-rl)
  - [Introduction](#introduction)
  - [Table of Contents](#table-of-contents)
  - [Mountain car](#mountain-car)
    - [1. Information](#1-information)
    - [2. Expert's demonstrations](#2-experts-demonstrations)
    - [3. Train & Test](#3-train--test)
      - [APP](#app)
      - [MaxEnt](#maxent)
    - [4. Trained Agent](#4-trained-agent)
  - [Mujoco Hopper](#mujoco-hopper)
    - [1. Installation](#1-installation)
    - [2. Expert's demonstrations](#2-experts-demonstrations-1)
    - [3. Train & Test](#3-train--test-1)
      - [GAIL](#gail)
      - [VAIL](#vail)
    - [4. Tensorboard](#4-tensorboard)
    - [5. Trained Agent](#5-trained-agent)
  - [Reference](#reference)
  - [Implementation team members](#implementation-team-members)

## Mountain car

We have implemented `APP` and `MaxEnt`, using Q-learning as the RL step, in the `MountainCar-v0` environment.

### 1. Information

- [Mountain car Wiki](https://github.com/openai/gym/wiki/MountainCar-v0)

### 2. Expert's demonstrations

The demonstrations file `expert_demo.npy` is located in [lets-do-irl/mountaincar/app/expert_demo](https://github.com/reinforcement-learning-kr/lets-do-irl/tree/master/mountaincar/app/expert_demo) or [lets-do-irl/mountaincar/maxent/expert_demo](https://github.com/reinforcement-learning-kr/lets-do-irl/tree/master/mountaincar/maxent/expert_demo).

The shape of the expert demonstrations is (20, 130, 3): (number of demonstrations, length of each demonstration, states and actions of the demonstrations).

If you want to make your own demonstrations, use `make_expert.py` in [lets-do-irl/mountaincar/app/expert_demo](https://github.com/reinforcement-learning-kr/lets-do-irl/tree/master/mountaincar/app/expert_demo) or [lets-do-irl/mountaincar/maxent/expert_demo](https://github.com/reinforcement-learning-kr/lets-do-irl/tree/master/mountaincar/maxent/expert_demo).
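A quick way to inspect the demonstration file is shown below. The (20, 130, 3) layout is stated above; the split of the last axis into a 2-dimensional MountainCar-v0 state (position, velocity) plus one action is my assumption, and the path assumes you run from the repository root.

~~~python
import numpy as np

# Path as given above; adjust if you run from another directory.
demo = np.load("mountaincar/app/expert_demo/expert_demo.npy")
print(demo.shape)  # expected: (20, 130, 3)

# Assumed layout of the last axis: [position, velocity, action].
states = demo[:, :, :2]   # (20, 130, 2)
actions = demo[:, :, 2]   # (20, 130)
~~~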
### 3. Train & Test

#### APP

Navigate to the [lets-do-irl/mountaincar/app](https://github.com/reinforcement-learning-kr/lets-do-irl/tree/master/mountaincar/app) folder.

**Train** the agent with `APP` (no rendering):

~~~
python train.py
~~~

To test `APP`, **test** the agent with the saved model `app_q_table.npy` in the `app/results` folder:

~~~
python test.py
~~~

#### MaxEnt

Navigate to the [lets-do-irl/mountaincar/maxent](https://github.com/reinforcement-learning-kr/lets-do-irl/tree/master/mountaincar/maxent) folder.

**Train** the agent with `MaxEnt` (no rendering):

~~~
python train.py
~~~

To test `MaxEnt`, **test** the agent with the saved model `maxent_q_table.npy` in the `maxent/results` folder:

~~~
python test.py
~~~

### 4. Trained Agent

We trained the agents with two different IRL algorithms in the `MountainCar-v0` environment.

| Algorithms | Scores / Episodes | GIF |
|:---:|:---:|:---:|
| APP | ![app](https://oss.gittoolsai.com/images/reinforcement-learning-kr_lets-do-irl_readme_2eb123a3b588.png) | <img src="https://oss.gittoolsai.com/images/reinforcement-learning-kr_lets-do-irl_readme_c22451364a90.gif" width="500"/> |
| MaxEnt | ![maxent](https://oss.gittoolsai.com/images/reinforcement-learning-kr_lets-do-irl_readme_a0151a58fac9.png) | <img src="https://oss.gittoolsai.com/images/reinforcement-learning-kr_lets-do-irl_readme_306c6f946a82.gif" width="500"/> |

## Mujoco Hopper

We have implemented `GAIL` and `VAIL`, using PPO as the RL step, in the `Hopper-v2` environment.

### 1. Installation

- [Mac OS (in Korean)](https://dongminlee.tistory.com/38)
- [Ubuntu](https://github.com/reinforcement-learning-kr/pg_travel/wiki/Installing-Mujoco-py-on-Linux)
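Before training, it is worth confirming that `mujoco_py` and the environment resolve correctly. A minimal sanity check, using the classic Gym API that matches this repository's era (`Hopper-v2` requires a working MuJoCo install):

~~~python
import gym

# Raises an error if mujoco_py or the MuJoCo binaries are missing.
env = gym.make("Hopper-v2")
print(env.observation_space)  # Box(11,)
print(env.action_space)       # Box(3,)
env.close()
~~~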
### 2. Expert's demonstrations

The demonstrations file `expert_demo.p` is located in [lets-do-irl/mujoco/gail/expert_demo](https://github.com/reinforcement-learning-kr/lets-do-irl/tree/master/mujoco/gail/expert_demo) or [lets-do-irl/mujoco/vail/expert_demo](https://github.com/reinforcement-learning-kr/lets-do-irl/tree/master/mujoco/vail/expert_demo).

The shape of the expert demonstrations is (50000, 14): (number of demonstration samples, states and actions of the demonstrations).

We used demonstrations whose scores average between roughly 2200 and 2600.

If you want to make your own demonstrations, use `main.py` in the [lets-do-irl/mujoco/ppo](https://github.com/reinforcement-learning-kr/lets-do-irl/tree/master/mujoco/ppo) folder.

A detailed implementation story (in Korean) of PPO is available in the [PG Travel implementation story](https://reinforcement-learning-kr.github.io/2018/08/23/8_implement/).
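The `.p` extension suggests a pickled array. A loading sketch, assuming the 14 columns are the 11-dimensional Hopper-v2 observation followed by the 3-dimensional action (that column split is my assumption, not documented here):

~~~python
import pickle
import numpy as np

# Path as given above; run from the repository root.
with open("mujoco/gail/expert_demo/expert_demo.p", "rb") as f:
    demo = np.asarray(pickle.load(f))
print(demo.shape)  # expected: (50000, 14)

# Assumed column layout: 11 observation dims, then 3 action dims.
states = demo[:, :11]
actions = demo[:, 11:]
~~~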
### 3. Train & Test

#### GAIL

Navigate to the [lets-do-irl/mujoco/gail](https://github.com/reinforcement-learning-kr/lets-do-irl/tree/master/mujoco/gail) folder.

**Train** the agent with `GAIL` (no rendering):

~~~
python main.py
~~~

To **continue training** from a saved checkpoint:

~~~
python main.py --load_model ckpt_4000_gail.pth.tar
~~~

- Note that the `ckpt_4000_gail.pth.tar` file must be in the `mujoco/gail/save_model` folder.

To test `GAIL`, **test** the agent with the saved model `ckpt_4000_gail.pth.tar` in the `mujoco/gail/save_model` folder:

~~~
python test.py --load_model ckpt_4000_gail.pth.tar
~~~

- Note that the `ckpt_4000_gail.pth.tar` file must be in the `mujoco/gail/save_model` folder.

#### VAIL

Navigate to the [lets-do-irl/mujoco/vail](https://github.com/reinforcement-learning-kr/lets-do-irl/tree/master/mujoco/vail) folder.

**Train** the agent with `VAIL` (no rendering):

~~~
python main.py
~~~

To **continue training** from a saved checkpoint:

~~~
python main.py --load_model ckpt_4000_vail.pth.tar
~~~

- Note that the `ckpt_4000_vail.pth.tar` file must be in the `mujoco/vail/save_model` folder.

To test `VAIL`, **test** the agent with the saved model `ckpt_4000_vail.pth.tar` in the `mujoco/vail/save_model` folder:

~~~
python test.py --load_model ckpt_4000_vail.pth.tar
~~~

- Note that the `ckpt_4000_vail.pth.tar` file must be in the `mujoco/vail/save_model` folder.

### 4. Tensorboard

The training results are automatically saved in the `logs` folder. TensorboardX is a Tensorboard-like visualization tool for PyTorch.

Navigate to the `lets-do-irl/mujoco/gail` or `lets-do-irl/mujoco/vail` folder.

~~~
tensorboard --logdir logs
~~~
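Logging with TensorboardX follows the standard `SummaryWriter` pattern. A minimal sketch of the kind of scalar logging the training scripts write to `logs` (the tag name and values here are illustrative, not the scripts' actual tags):

~~~python
from tensorboardX import SummaryWriter

writer = SummaryWriter("logs")  # the folder that `tensorboard --logdir logs` reads
for iteration in range(3):
    score = 1000.0 + 100.0 * iteration  # placeholder value
    writer.add_scalar("train/score", score, iteration)
writer.close()
~~~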
### 5. Trained Agent

We trained the agents with two different IRL algorithms in the `Hopper-v2` environment.

| Algorithms | Scores / Iterations (total sample size: 2048) |
|:---:|:---:|
| PPO (for comparison) | ![ppo](https://oss.gittoolsai.com/images/reinforcement-learning-kr_lets-do-irl_readme_dabe59a9367e.png) |
| GAIL | ![gail](https://oss.gittoolsai.com/images/reinforcement-learning-kr_lets-do-irl_readme_9f16b3ffde7a.png) |
| VAIL | ![vail](https://oss.gittoolsai.com/images/reinforcement-learning-kr_lets-do-irl_readme_4912133d7bd9.png) |
| Total | ![total](https://oss.gittoolsai.com/images/reinforcement-learning-kr_lets-do-irl_readme_d4ae49a40240.png) |

## Reference

We referenced code from the repositories below.

- [Implementation of APP](https://github.com/jangirrishabh/toyCarIRL)
- [Implementation of MaxEnt](https://github.com/MatthewJA/Inverse-Reinforcement-Learning)
- [PyTorch implementations of Policy Gradient algorithms (REINFORCE, NPG, TRPO, PPO)](https://github.com/reinforcement-learning-kr/pg_travel)
- [PyTorch implementation of GAIL](https://github.com/Khrylx/PyTorch-RL)

## Implementation team members

Dongmin Lee (project manager): [Github](https://github.com/dongminleeai), [Facebook](https://www.facebook.com/dongminleeai)

Seungje Yoon: [Github](https://github.com/sjYoondeltar), [Facebook](https://www.facebook.com/seungje.yoon)

Seunghyun Lee: [Github](https://github.com/Clyde21c), [Facebook](https://www.facebook.com/Clyde21c)

Geonhee Lee: [Github](https://github.com/Geonhee-LEE), [Facebook](https://www.facebook.com/Geonheeee)
# lets-do-irl Quickstart Guide

`lets-do-irl` is an open-source inverse reinforcement learning (IRL) project implemented in PyTorch (v0.4.1). It supports Apprenticeship Learning, Maximum Entropy IRL, GAIL, and VAIL, and runs in the Mountain Car and Mujoco Hopper environments.

## 1. Environment

The project depends on the following base environment:

- **Operating system**: Linux / macOS / Windows (Linux recommended for Mujoco)
- **Python**: Python 3.x
- **Deep learning framework**: PyTorch v0.4.1 (recommended version; other versions may require adjustments)
- **RL library**: OpenAI Gym
- **Physics engine** (Mujoco Hopper only): Mujoco + mujoco_py

> **Note**: If you plan to run the Mujoco Hopper experiments, complete the Mujoco license setup and the `mujoco_py` installation first (see the official documentation or the links in the README).

## 2. Installation

### Clone the repository
```bash
git clone https://github.com/reinforcement-learning-kr/lets-do-irl.git
cd lets-do-irl
```

### Install dependencies
Install the core packages as needed:
```bash
pip install torch==0.4.1 gym
# To run Mujoco Hopper, also install mujoco_py
```

## 3. Basic usage

The project ships examples for two environments. **Mountain Car** is recommended for a quick first run, since it needs no special environment setup.

### Scenario 1: Mountain Car (APP / MaxEnt)

This environment is suitable for quickly testing the APP and MaxEnt algorithms.

1. **Enter the directory**
   ```bash
   cd mountaincar/app
   ```
   *(Note: the MaxEnt implementation lives in `mountaincar/maxent`.)*

2. **Train the model**
   Start training (no rendering by default):
   ```bash
   python train.py
   ```

3. **Test the model**
   Load the saved model and test it:
   ```bash
   python test.py
   ```
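One caveat when adapting these scripts: the pinned Gym generation uses the classic `reset`/`step` API, which differs from modern Gym/Gymnasium signatures. A sketch of the loop shape the scripts assume, with random actions standing in for a learned policy (purely illustrative):

```python
import gym

env = gym.make("MountainCar-v0")
obs = env.reset()                 # classic API: returns the observation only
done = False
while not done:
    action = env.action_space.sample()           # stand-in for a learned policy
    obs, reward, done, info = env.step(action)   # classic 4-tuple return
env.close()
```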
### Scenario 2: Mujoco Hopper (GAIL / VAIL)

This environment involves continuous-control tasks; make sure Mujoco is configured correctly first.

1. **Enter the directory**
   ```bash
   cd mujoco/gail
   ```
   *(Note: the VAIL implementation lives in `mujoco/vail`.)*

2. **Train the model**
   ```bash
   python main.py
   ```
   To resume training from a checkpoint:
   ```bash
   python main.py --load_model ckpt_4000_gail.pth.tar
   ```

3. **Visualize the results**
   Training logs are saved in the `logs` folder and can be inspected with Tensorboard:
   ```bash
   tensorboard --logdir logs
   ```

---
For further implementation details and notes on the expert demonstration data, see the README above.

# Use case

A smart-manufacturing R&D team is developing collaborative robots and wants a robot arm to carry out complex precision-assembly tasks accurately.

### Without lets-do-irl
- Designing a reward function by hand is extremely difficult; abstract criteria such as "precision" and "smoothness" are hard to quantify.
- Heavy trial-and-error hyperparameter tuning leads to long training cycles and wasted compute.
- Traditional reinforcement learning struggles to capture the subtle skills of human experts, producing stiff, unnatural motions.
- Every new task requires redefining the rules, so the system generalizes poorly and maintenance costs stay high.

### With lets-do-irl
- Import expert demonstration data directly and use GAIL or MaxEnt to infer the underlying reward function automatically.
- Without tedious reward shaping, the model converges faster and faithfully reproduces smooth human operation trajectories.
- Several built-in IRL algorithms let the team switch between APP and VAIL strategies depending on task complexity.
- Reliance on domain-expert knowledge drops significantly, so the robot adapts quickly to changes in the assembly process.

By turning human experience into policies through inverse reinforcement learning, the team eliminates the reward-design bottleneck and substantially improves R&D efficiency.

# Project facts

- **Owner**: Reinforcement Learning KR ([github.com/reinforcement-learning-kr](https://github.com/reinforcement-learning-kr), [Facebook group](https://www.facebook.com/groups/ReinforcementLearningKR/))
- **Language**: Python (100%)
- **Stars / Forks**: 779 / 117
- **License**: MIT
- **Supported OS**: Linux, macOS (GPU and RAM requirements not specified)
- **Dependencies**: torch==0.4.1, gym, mujoco, tensorboardx, numpy
- **Topics**: inverse-reinforcement-learning, irl, app, maxent, gail, vail, pytorch
- **Notes**: The project targets the older PyTorch 0.4.1; expert demonstration data (`expert_demo.npy` or `.p`) must be obtained in advance; Mujoco installation follows external wiki links; some docs and guides are in Korean; training results are saved automatically to the `logs` folder.