[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-sichkar-valentyn--Reinforcement_Learning_in_Python":3,"tool-sichkar-valentyn--Reinforcement_Learning_in_Python":61},[4,18,26,36,44,52],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":17},4358,"openclaw","openclaw\u002Fopenclaw","OpenClaw 是一款专为个人打造的本地化 AI 助手，旨在让你在自己的设备上拥有完全可控的智能伙伴。它打破了传统 AI 助手局限于特定网页或应用的束缚，能够直接接入你日常使用的各类通讯渠道，包括微信、WhatsApp、Telegram、Discord、iMessage 等数十种平台。无论你在哪个聊天软件中发送消息，OpenClaw 都能即时响应，甚至支持在 macOS、iOS 和 Android 设备上进行语音交互，并提供实时的画布渲染功能供你操控。\n\n这款工具主要解决了用户对数据隐私、响应速度以及“始终在线”体验的需求。通过将 AI 部署在本地，用户无需依赖云端服务即可享受快速、私密的智能辅助，真正实现了“你的数据，你做主”。其独特的技术亮点在于强大的网关架构，将控制平面与核心助手分离，确保跨平台通信的流畅性与扩展性。\n\nOpenClaw 非常适合希望构建个性化工作流的技术爱好者、开发者，以及注重隐私保护且不愿被单一生态绑定的普通用户。只要具备基础的终端操作能力（支持 macOS、Linux 及 Windows WSL2），即可通过简单的命令行引导完成部署。如果你渴望拥有一个懂你",349277,3,"2026-04-06T06:32:30",[13,14,15,16],"Agent","开发框架","图像","数据工具","ready",{"id":19,"name":20,"github_repo":21,"description_zh":22,"stars":23,"difficulty_score":10,"last_commit_at":24,"category_tags":25,"status":17},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,"2026-04-05T11:01:52",[14,15,13],{"id":27,"name":28,"github_repo":29,"description_zh":30,"stars":31,"difficulty_score":32,"last_commit_at":33,"category_tags":34,"status":17},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",145895,2,"2026-04-08T11:32:59",[14,13,35],"语言模型",{"id":37,"name":38,"github_repo":39,"description_zh":40,"stars":41,"difficulty_score":32,"last_commit_at":42,"category_tags":43,"status":17},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",108111,"2026-04-08T11:23:26",[14,15,13],{"id":45,"name":46,"github_repo":47,"description_zh":48,"stars":49,"difficulty_score":10,"last_commit_at":50,"category_tags":51,"status":17},4487,"LLMs-from-scratch","rasbt\u002FLLMs-from-scratch","LLMs-from-scratch 是一个基于 PyTorch 的开源教育项目，旨在引导用户从零开始一步步构建一个类似 ChatGPT 的大型语言模型（LLM）。它不仅是同名技术著作的官方代码库，更提供了一套完整的实践方案，涵盖模型开发、预训练及微调的全过程。\n\n该项目主要解决了大模型领域“黑盒化”的学习痛点。许多开发者虽能调用现成模型，却难以深入理解其内部架构与训练机制。通过亲手编写每一行核心代码，用户能够透彻掌握 Transformer 
架构、注意力机制等关键原理，从而真正理解大模型是如何“思考”的。此外，项目还包含了加载大型预训练权重进行微调的代码，帮助用户将理论知识延伸至实际应用。\n\nLLMs-from-scratch 特别适合希望深入底层原理的 AI 开发者、研究人员以及计算机专业的学生。对于不满足于仅使用 API，而是渴望探究模型构建细节的技术人员而言，这是极佳的学习资源。其独特的技术亮点在于“循序渐进”的教学设计：将复杂的系统工程拆解为清晰的步骤，配合详细的图表与示例，让构建一个虽小但功能完备的大模型变得触手可及。无论你是想夯实理论基础，还是为未来研发更大规模的模型做准备",90106,"2026-04-06T11:19:32",[35,15,13,14],{"id":53,"name":54,"github_repo":55,"description_zh":56,"stars":57,"difficulty_score":10,"last_commit_at":58,"category_tags":59,"status":17},4292,"Deep-Live-Cam","hacksider\u002FDeep-Live-Cam","Deep-Live-Cam 是一款专注于实时换脸与视频生成的开源工具，用户仅需一张静态照片，即可通过“一键操作”实现摄像头画面的即时变脸或制作深度伪造视频。它有效解决了传统换脸技术流程繁琐、对硬件配置要求极高以及难以实时预览的痛点，让高质量的数字内容创作变得触手可及。\n\n这款工具不仅适合开发者和技术研究人员探索算法边界，更因其极简的操作逻辑（仅需三步：选脸、选摄像头、启动），广泛适用于普通用户、内容创作者、设计师及直播主播。无论是为了动画角色定制、服装展示模特替换，还是制作趣味短视频和直播互动，Deep-Live-Cam 都能提供流畅的支持。\n\n其核心技术亮点在于强大的实时处理能力，支持口型遮罩（Mouth Mask）以保留使用者原始的嘴部动作，确保表情自然精准；同时具备“人脸映射”功能，可同时对画面中的多个主体应用不同面孔。此外，项目内置了严格的内容安全过滤机制，自动拦截涉及裸露、暴力等不当素材，并倡导用户在获得授权及明确标注的前提下合规使用，体现了技术发展与伦理责任的平衡。",88924,"2026-04-06T03:28:53",[14,15,13,60],"视频",{"id":62,"github_repo":63,"name":64,"description_en":65,"description_zh":66,"ai_summary_zh":66,"readme_en":67,"readme_zh":68,"quickstart_zh":69,"use_case_zh":70,"hero_image_url":71,"owner_login":72,"owner_name":73,"owner_avatar_url":74,"owner_bio":75,"owner_company":76,"owner_location":77,"owner_email":78,"owner_twitter":78,"owner_website":79,"owner_url":80,"languages":81,"stars":86,"forks":87,"last_commit_at":88,"license":89,"difficulty_score":90,"env_os":91,"env_gpu":92,"env_ram":92,"env_deps":93,"category_tags":96,"github_topics":97,"view_count":32,"oss_zip_url":78,"oss_zip_packed_at":78,"status":17,"created_at":118,"updated_at":119,"faqs":120,"releases":121},5623,"sichkar-valentyn\u002FReinforcement_Learning_in_Python","Reinforcement_Learning_in_Python","Implementing Reinforcement Learning, namely Q-learning and Sarsa algorithms, for global path planning of a mobile robot in an unknown environment with obstacles. Comparison analysis of Q-learning and Sarsa","Reinforcement_Learning_in_Python 是一个基于 Python 实现的开源项目，专注于利用强化学习算法解决移动机器人在未知环境中的全局路径规划问题。它核心实现了 Q-learning 和 Sarsa 两种经典算法，并通过模拟“悬崖、老鼠与奶酪”等典型场景，直观展示了智能体如何通过试错学习来规避障碍并寻找最优路径。\n\n该项目主要解决了传统导航方法在动态或未知环境中适应性不足的难题。通过构建“代理 - 环境”交互反馈机制，系统能让机器人在执行动作、观察状态和接收奖励的过程中，自动更新 Q 表权重，从而学会最大化累积奖励的策略。其技术亮点在于提供了两种算法的完整代码实现及对比分析，帮助用户深入理解不同策略在收敛速度和路径效率上的差异，并清晰呈现了包含学习率、折扣因子等关键参数的目标函数计算过程。\n\nReinforcement_Learning_in_Python 非常适合人工智能研究人员、机器人开发者和高校师生使用。对于希望深入探究强化学习底层原理，或需要为移动机器人项目快速搭建算法验证原型的用户来说，这是一个兼具教学价值与实用参考意义的优秀工具。","# Reinforcement Learning in Python\nImplementing Reinforcement Learning (RL) Algorithms for global path planning in tasks of mobile robot navigation. Comparison analysis of Q-learning and Sarsa algorithms for the environment with cliff, mouse and cheese.\n\u003Cbr\u002F>[![DOI](https:\u002F\u002Fzenodo.org\u002Fbadge\u002FDOI\u002F10.5281\u002Fzenodo.1317898.svg)](https:\u002F\u002Fdoi.org\u002F10.5281\u002Fzenodo.1317898)\n\n### Related works:\n* Sichkar V. N. \"Reinforcement Learning Algorithms in Global Path Planning for Mobile Robot\", 2019 International Conference on Industrial Engineering, Applications and Manufacturing (ICIEAM), Sochi, Russia, 2019, pp. 1-5. doi: \u003Ca href=\"https:\u002F\u002Fdoi.org\u002F10.1109\u002FICIEAM.2019.8742915\" target=\"_blank\">10.1109\u002FICIEAM.2019.8742915\u003C\u002Fa> (Full-text available also here \u003Ca href=\"https:\u002F\u002Fwww.researchgate.net\u002Fprofile\u002FValentyn_Sichkar\" target=\"_blank\">ResearchGate.net\u002Fprofile\u002FValentyn_Sichkar\u003C\u002Fa>)\n\n* Sichkar V. N. 
Effect of various dimension convolutional layer filters on traffic sign classification accuracy. Scientific and Technical Journal of Information Technologies, Mechanics and Optics, 2019, vol. 19, no. 3, pp. (in English). doi: \u003Ca href=\"https:\u002F\u002Fdoi.org\u002F10.17586\u002F2226-1494-2019-19-3-546-552\" target=\"_blank\">10.17586\u002F2226-1494-2019-19-3-546-552\u003C\u002Fa> (Full-text available also here https:\u002F\u002Fwww.researchgate.net\u002Fprofile\u002FValentyn_Sichkar)\n\n* Sichkar V.N. Comparison analysis of knowledge based systems for navigation of mobile robot and collision avoidance with obstacles in unknown environment. St. Petersburg State Polytechnical University Journal. Computer Science. Telecommunications and Control Systems, 2018, Vol. 11, No. 2, Pp. 64–73. doi: \u003Ca href=\"https:\u002F\u002Fdoi.org\u002F10.18721\u002FJCSTCS.11206\" target=\"_blank\">10.18721\u002FJCSTCS.11206\u003C\u002Fa> (Full-text available also here https:\u002F\u002Fwww.researchgate.net\u002Fprofile\u002FValentyn_Sichkar)\n\n* The research results for the Neural Network Knowledge Based system for the tasks of collision avoidance are put in a separate repository and are available here: https:\u002F\u002Fgithub.com\u002Fsichkar-valentyn\u002FMatlab_implementation_of_Neural_Networks\n\n* The study of the Semantic Web languages OWL and RDF for Knowledge representation of an Alarm-Warning System is put in a separate repository and is available here: https:\u002F\u002Fgithub.com\u002Fsichkar-valentyn\u002FKnowledge_Base_Represented_by_Semantic_Web_Language\n\n* The study of Neural Networks for Computer Vision in autonomous vehicles and robotics is put in a separate repository and is available here: https:\u002F\u002Fgithub.com\u002Fsichkar-valentyn\u002FNeural_Networks_for_Computer_Vision\n\n## Description\nRL Algorithms implemented in Python for the task of global path planning for a mobile robot. Such a system is said to have feedback. The agent acts on the environment, and the environment acts on the agent. At each step the agent:\n* Executes an action.\n* Receives an observation (new state).\n* Receives a reward.\n\nThe environment:\n* Receives an action.\n* Emits an observation (new state).\n* Emits a reward.\n\nThe goal is to learn how to take actions in order to maximize the reward. The objective function is as follows:\n\n\u003Cb>Q_[s_, a_] = Q[s, a] + λ * (r + γ * max(Q_[s_, a_]) – Q[s, a]),\u003C\u002Fb>\n\nwhere,\n\u003Cbr\u002F>\u003Cb>Q_[s_, a_]\u003C\u002Fb> - value of the objective function on the next step,\n\u003Cbr\u002F>\u003Cb>Q[s, a]\u003C\u002Fb> - value of the objective function at the current position,\n\u003Cbr\u002F>\u003Cb>max(Q_[s_, a_])\u003C\u002Fb> - choosing the maximum value from the possible next steps,\n\u003Cbr\u002F>\u003Cb>s\u003C\u002Fb> – current position of the agent,\n\u003Cbr\u002F>\u003Cb>a\u003C\u002Fb> – current action,\n\u003Cbr\u002F>\u003Cb>λ\u003C\u002Fb> – learning rate,\n\u003Cbr\u002F>\u003Cb>r\u003C\u002Fb> – reward received at the current position,\n\u003Cbr\u002F>\u003Cb>γ\u003C\u002Fb> – gamma (reward decay, discount factor),\n\u003Cbr\u002F>\u003Cb>s_\u003C\u002Fb> - next chosen position according to the next chosen action,\n\u003Cbr\u002F>\u003Cb>a_\u003C\u002Fb> - next chosen action.\n\nThe major component of the RL method is the table of weights - the \u003Cb>Q-table\u003C\u002Fb> of the system states. \u003Cb>Matrix Q\u003C\u002Fb> is a set of all possible states of the system and the system's response weights to different actions. 
While trying to go through the given environment, the mobile robot learns how to avoid obstacles and find the path to the destination point. As a result, the \u003Cb>Q-table\u003C\u002Fb> is built. Looking at the values of the table it is possible to see the decision for the next action made by the agent (mobile robot).\n\n\u003Cbr\u002F>Experimental results with different environments are shown and described below.\n\u003Cbr\u002F>Code is supported with a lot of comments. It will guide you step by step through the entire idea of the implementation.\n\u003Cbr\u002F>\n\u003Cbr\u002F>Each example consists of three files:\n\n* _env.py_ - building an environment with obstacles.\n* _agent_brain.py_ - implementation of the algorithm itself.\n* _run_agent.py_ - running the experiments.\n\n## Content\nCodes (each link will send you to the appropriate folder):\n* [RL_Q-Learning_E-1.py](https:\u002F\u002Fgithub.com\u002Fsichkar-valentyn\u002FReinforcement_Learning_in_Python\u002Ftree\u002Fmaster\u002FRL_Q-Learning_E1)\n* [RL_Q-Learning_E-2.py](https:\u002F\u002Fgithub.com\u002Fsichkar-valentyn\u002FReinforcement_Learning_in_Python\u002Ftree\u002Fmaster\u002FRL_Q-Learning_E2)\n* [RL_Q-Learning_E-3.py](https:\u002F\u002Fgithub.com\u002Fsichkar-valentyn\u002FReinforcement_Learning_in_Python\u002Ftree\u002Fmaster\u002FRL_Q-Learning_E3)\n* [RL_Sarsa_E-1.py](https:\u002F\u002Fgithub.com\u002Fsichkar-valentyn\u002FReinforcement_Learning_in_Python\u002Ftree\u002Fmaster\u002FRL_Sarsa_E1)\n* [RL_Sarsa_E-2.py](https:\u002F\u002Fgithub.com\u002Fsichkar-valentyn\u002FReinforcement_Learning_in_Python\u002Ftree\u002Fmaster\u002FRL_Sarsa_E2)\n\n\u003Cbr\u002F>\nExperimental results (figures and tables on this page):\n\n* [RL Q-Learning Environment-1. Experimental results](#rl-q-learning-environment-1-experimental-results)\n* [Q-learning algorithm resulted chart for the environment-1](#q-learning-algorithm-resulted-chart-for-the-environment-1)\n* [Final Q-table with values from the final shortest route for environment-1](#final-q-table-with-values-from-the-final-shortest-route-for-environment-1)\n* [RL Q-Learning Environment-2. Experimental results](#rl-q-learning-environment-2-experimental-results)\n* [Q-learning algorithm resulted chart for the environment-2](#q-learning-algorithm-resulted-chart-for-the-environment-2)\n* [Final Q-table with values from the final shortest route for environment-2](#final-q-table-with-values-from-the-final-shortest-route-for-environment-2)\n* [RL Q-Learning Environment-3. Experimental results](#rl-q-learning-environment-3-experimental-results)\n* [Comparison analysis of Q-Learning and Sarsa algorithms](#comparison-analysis-of-q-learning-and-sarsa-algorithms)\n\n\u003Cbr\u002F>\n\n### \u003Ca id=\"rl-q-learning-environment-1-experimental-results\">RL Q-Learning Environment-1. 
Experimental results\u003C\u002Fa>\nEnvironment-1 with mobile robot, goal and obstacles\n\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fsichkar-valentyn_Reinforcement_Learning_in_Python_readme_4288dfd8c795.gif\" alt=\"RL_Q-Learning_E-1\" width=362 height=391> \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fsichkar-valentyn_Reinforcement_Learning_in_Python_readme_8f568fdbce51.png\" alt=\"RL_Q-Learning_E-1\" width=362 height=391>\n\n\u003Cbr\u002F>\n\n### \u003Ca id=\"q-learning-algorithm-resulted-chart-for-the-environment-1\">Q-learning algorithm resulted chart for the environment-1\u003C\u002Fa>\nShows the number of steps per episode and the cost per episode\n\n![RL_Q-Learning_C-1](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fsichkar-valentyn_Reinforcement_Learning_in_Python_readme_f75bbb523dff.png)\n\n\u003Cbr\u002F>\n\n### \u003Ca id=\"final-q-table-with-values-from-the-final-shortest-route-for-environment-1\">Final Q-table with values from the final shortest route for environment-1\u003C\u002Fa>\n![RL_Q-Learning_T-1](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fsichkar-valentyn_Reinforcement_Learning_in_Python_readme_6767c5e7c784.png)\n\u003Cbr\u002F>Looking at the values of the table we can see the decision for the next action made by the agent (mobile robot). The sequence of final actions to reach the goal after the Q-table is filled with knowledge is the following: *down-right-down-down-down-right-down-right-down-right-down-down-right-right-up-up.*\n\u003Cbr\u002F>During the experiment with the Q-learning algorithm, the shortest route found to reach the goal in environment-1 consists of 16 steps, and the longest route found consists of 185 steps.\n\n\u003Cbr\u002F>\n\n### \u003Ca id=\"rl-q-learning-environment-2-experimental-results\">RL Q-Learning Environment-2. Experimental results\u003C\u002Fa>\nBigger environment-2 with more obstacles\n\n![RL_Q-Learning_E-2](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fsichkar-valentyn_Reinforcement_Learning_in_Python_readme_67884675f12b.png)\n\n\u003Cbr\u002F>\n\n### \u003Ca id=\"q-learning-algorithm-resulted-chart-for-the-environment-2\">Q-learning algorithm resulted chart for the environment-2\u003C\u002Fa>\nShows the number of steps per episode and the cost per episode\n\n![RL_Q-Learning_C-2](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fsichkar-valentyn_Reinforcement_Learning_in_Python_readme_15344291a773.png)\n\n\u003Cbr\u002F>\n\n### \u003Ca id=\"final-q-table-with-values-from-the-final-shortest-route-for-environment-2\">Final Q-table with values from the final shortest route for environment-2\u003C\u002Fa>\n![RL_Q-Learning_T-2](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fsichkar-valentyn_Reinforcement_Learning_in_Python_readme_7c51289bc985.png)\n\n\u003Cbr\u002F>\n\n### \u003Ca id=\"rl-q-learning-environment-3-experimental-results\">RL Q-Learning Environment-3. 
Experimental results\u003C\u002Fa>\nSuper complex environment-3 with a lot of obstacles\n\n![RL_Q-Learning_E-3](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fsichkar-valentyn_Reinforcement_Learning_in_Python_readme_e46855e25a75.png)\n\n\u003Cbr\u002F>\n\n### \u003Ca id=\"comparison-analysis-of-q-learning-and-sarsa-algorithms\">Comparison analysis of Q-Learning and Sarsa algorithms\u003C\u002Fa>\n![RQ-learning_via_Sarsa](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fsichkar-valentyn_Reinforcement_Learning_in_Python_readme_b440a856c230.png)\n\n\u003Cbr\u002F>\n\n### MIT License\n### Copyright (c) 2018 Valentyn N Sichkar\n### github.com\u002Fsichkar-valentyn\n### Reference to:\nValentyn N Sichkar. Reinforcement Learning Algorithms for global path planning \u002F\u002F GitHub platform. DOI: 10.5281\u002Fzenodo.1317898\n","# Python中的强化学习\n在移动机器人导航任务中，实现用于全局路径规划的强化学习（RL）算法。针对带有悬崖、老鼠和奶酪的环境，对比分析Q-learning和Sarsa算法。\n\u003Cbr\u002F>[![DOI](https:\u002F\u002Fzenodo.org\u002Fbadge\u002FDOI\u002F10.5281\u002Fzenodo.1317898.svg)](https:\u002F\u002Fdoi.org\u002F10.5281\u002Fzenodo.1317898)\n\n### 相关工作：\n* Sichkar V. N. “移动机器人全局路径规划中的强化学习算法”，2019年工业工程、应用与制造国际会议（ICIEAM），俄罗斯索契，2019年，第1–5页。doi: \u003Ca href=\"https:\u002F\u002Fdoi.org\u002F10.1109\u002FICIEAM.2019.8742915\" target=\"_blank\">10.1109\u002FICIEAM.2019.8742915\u003C\u002Fa>（全文也可在此处获取：\u003Ca href=\"https:\u002F\u002Fwww.researchgate.net\u002Fprofile\u002FValentyn_Sichkar\" target=\"_blank\">ResearchGate.net\u002Fprofile\u002FValentyn_Sichkar\u003C\u002Fa>）\n\n* Sichkar V. N. 不同尺寸卷积层滤波器对交通标志分类准确率的影响。信息技术、力学与光学科学技术期刊，2019年，第19卷，第3期，英文版。doi: \u003Ca href=\"https:\u002F\u002Fdoi.org\u002F10.17586\u002F2226-1494-2019-19-3-546-552\" target=\"_blank\">10.17586\u002F2226-1494-2019-19-3-546-552\u003C\u002Fa>（全文也可在此处获取：https:\u002F\u002Fwww.researchgate.net\u002Fprofile\u002FValentyn_Sichkar）\n\n* Sichkar V.N. 
基于知识的系统在未知环境中用于移动机器人导航及避障的比较分析。圣彼得堡国立理工大学学报：计算机科学、电信与控制系统，2018年，第11卷，第2期，第64–73页。doi: \u003Ca href=\"https:\u002F\u002Fdoi.org\u002F10.18721\u002FJCSTCS.11206\" target=\"_blank\">10.18721\u002FJCSTCS.11206\u003C\u002Fa>（全文也可在此处获取：https:\u002F\u002Fwww.researchgate.net\u002Fprofile\u002FValentyn_Sichkar）\n\n* 关于用于避障任务的神经网络知识库系统的研究成果已单独存放在一个仓库中，地址为：https:\u002F\u002Fgithub.com\u002Fsichkar-valentyn\u002FMatlab_implementation_of_Neural_Networks\n\n* 关于使用语义网语言OWL和RDF表示报警预警系统知识的研究成果也已单独存放在一个仓库中，地址为：https:\u002F\u002Fgithub.com\u002Fsichkar-valentyn\u002FKnowledge_Base_Represented_by_Semantic_Web_Language\n\n* 关于用于自动驾驶汽车和机器人技术中的计算机视觉神经网络的研究成果同样单独存放于一个仓库中，地址为：https:\u002F\u002Fgithub.com\u002Fsichkar-valentyn\u002FNeural_Networks_for_Computer_Vision\n\n## 描述\n用Python实现的RL算法，用于移动机器人的全局路径规划任务。此类系统具有反馈机制：智能体作用于环境，环境也反过来作用于智能体。在每一步中，智能体：\n* 执行动作。\n* 接收观测（新状态）。\n* 获得奖励。\n\n而环境则：\n* 接收动作。\n* 发出观测（新状态）。\n* 发出奖励。\n\n目标是学会如何采取行动以最大化奖励。目标函数如下：\n\n\u003Cb>Q_[s_, a_] = Q[s, a] + λ * (r + γ * max(Q_[s_, a_]) – Q[s, a]),\u003C\u002Fb>\n\n其中，\n\u003Cbr\u002F>\u003Cb>Q_[s_, a_]\u003C\u002Fb> - 下一步的目标函数值，\n\u003Cbr\u002F>\u003Cb>Q[s, a]\u003C\u002Fb> - 当前位置的目标函数值，\n\u003Cbr\u002F>\u003Cb>max(Q_[s_, a_])\u003C\u002Fb> - 从所有可能的下一步中选择最大值，\n\u003Cbr\u002F>\u003Cb>s\u003C\u002Fb> – 智能体的当前位置，\n\u003Cbr\u002F>\u003Cb>a\u003C\u002Fb> – 当前动作，\n\u003Cbr\u002F>\u003Cb>λ\u003C\u002Fb> – 学习率，\n\u003Cbr\u002F>\u003Cb>r\u003C\u002Fb> – 当前位置获得的奖励，\n\u003Cbr\u002F>\u003Cb>γ\u003C\u002Fb> – gamma（奖励衰减因子、折扣系数），\n\u003Cbr\u002F>\u003Cb>s_\u003C\u002Fb> - 根据下一步动作选定的新位置，\n\u003Cbr\u002F>\u003Cb>a_\u003C\u002Fb> - 下一步选定的动作。\n\n强化学习方法的核心组件是权重表——系统状态的\u003Cb>Q-table\u003C\u002Fb>。\u003Cb>Q矩阵\u003C\u002Fb>包含了系统所有可能的状态以及系统对不同动作的响应权重。在尝试穿越给定环境的过程中，移动机器人学会如何避开障碍物并找到通往目标点的路径。最终会构建出\u003Cb>Q-table\u003C\u002Fb>。通过查看表格中的数值，可以确定智能体（即移动机器人）下一步应采取的动作。\n\n\u003Cbr\u002F>下面展示了不同环境下的实验结果，并进行了说明。\n\u003Cbr\u002F>代码附有大量注释，将逐步引导您完成整个实现思路。\n\u003Cbr\u002F>\n\u003Cbr\u002F>每个示例由三个文件组成：\n\n* _env.py_ - 构建包含障碍物的环境。\n* _agent_brain.py_ - 实现算法本身。\n* _run_agent.py_ - 运行实验。\n\n## 内容\n代码（点击后将跳转到相应文件夹）：\n* [RL_Q-Learning_E-1.py](https:\u002F\u002Fgithub.com\u002Fsichkar-valentyn\u002FReinforcement_Learning_in_Python\u002Ftree\u002Fmaster\u002FRL_Q-Learning_E1)\n* [RL_Q-Learning_E-2.py](https:\u002F\u002Fgithub.com\u002Fsichkar-valentyn\u002FReinforcement_Learning_in_Python\u002Ftree\u002Fmaster\u002FRL_Q-Learning_E2)\n* [RL_Q-Learning_E-3.py](https:\u002F\u002Fgithub.com\u002Fsichkar-valentyn\u002FReinforcement_Learning_in_Python\u002Ftree\u002Fmaster\u002FRL_Q-Learning_E3)\n* [RL_Sarsa_E-1.py](https:\u002F\u002Fgithub.com\u002Fsichkar-valentyn\u002FReinforcement_Learning_in_Python\u002Ftree\u002Fmaster\u002FRL_Sarsa_E1)\n* [RL_Sarsa_E-2.py](https:\u002F\u002Fgithub.com\u002Fsichkar-valentyn\u002FReinforcement_Learning_in_Python\u002Ftree\u002Fmaster\u002FRL_Sarsa_E2)\n\n\u003Cbr\u002F>\n实验结果（本页包含图表）：\n\n* [RL Q-Learning 环境-1 实验结果](#rl-q-learning-environment-1-experimental-results)\n* [环境-1下Q-learning算法的结果图](#q-learning-algorithm-resulted-chart-for-the-environment-1)\n* [环境-1最终最短路径对应的Q-table](#final-q-table-with-values-from-the-final-shortest-route-for-environment-1)\n* [RL Q-Learning 环境-2 实验结果](#rl-q-learning-environment-2-experimental-results)\n* [环境-2下Q-learning算法的结果图](#q-learning-algorithm-resulted-chart-for-the-environment-2)\n* [环境-2最终最短路径对应的Q-table](#final-q-table-with-values-from-the-final-shortest-route-for-environment-2)\n* [RL Q-Learning 环境-3 实验结果](#rl-q-learning-environment-3-experimental-results)\n* 
[Q-learning和Sarsa算法的对比分析](#comparison-analysis-of-q-learning-and-sarsa-algorithms)\n\n\u003Cbr\u002F>\n\n### \u003Ca id=\"rl-q-learning-environment-1-experimental-results\">RL Q-Learning 环境-1 实验结果\u003C\u002Fa>\n环境-1包含移动机器人、目标点和障碍物\n\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fsichkar-valentyn_Reinforcement_Learning_in_Python_readme_4288dfd8c795.gif\" alt=\"RL_Q-Learning_E-1\" width=362 height=391> \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fsichkar-valentyn_Reinforcement_Learning_in_Python_readme_8f568fdbce51.png\" alt=\"RL_Q-Learning_E-1\" width=362 height=391>\n\n\u003Cbr\u002F>\n\n### \u003Ca id=\"q-learning-algorithm-resulted-chart-for-the-environment-1\">环境-1下Q-learning算法的结果图\u003C\u002Fa>\n展示了每个回合的步数以及每个回合的成本\n\n![RL_Q-Learning_C-1](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fsichkar-valentyn_Reinforcement_Learning_in_Python_readme_f75bbb523dff.png)\n\n\u003Cbr\u002F>\n\n### \u003Ca id=\"final-q-table-with-values-from-the-final-shortest-route-for-environment-1\">环境-1 中最终最短路径对应的 Q 表\u003C\u002Fa>\n![RL_Q-Learning_T-1](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fsichkar-valentyn_Reinforcement_Learning_in_Python_readme_6767c5e7c784.png)\n\u003Cbr\u002F>观察表格中的值，我们可以看出智能体（移动机器人）对下一步动作的决策。在 Q 表被知识填充后，到达目标的最终动作序列如下：*下-右-下-下-下-右-下-右-下-右-下-下-右-右-上-上*。\n\u003Cbr\u002F>在使用 Q-learning 算法的实验中，环境-1 中找到的到达目标的最短路径由 16 步组成，而找到的最长路径则由 185 步组成。\n\n\u003Cbr\u002F>\n\n### \u003Ca id=\"rl-q-learning-environment-2-experimental-results\">RL Q-Learning 环境-2 实验结果\u003C\u002Fa>\n更大的环境-2，障碍物更多\n\n![RL_Q-Learning_E-2](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fsichkar-valentyn_Reinforcement_Learning_in_Python_readme_67884675f12b.png)\n\n\u003Cbr\u002F>\n\n### \u003Ca id=\"q-learning-algorithm-resulted-chart-for-the-environment-2\">环境-2下Q-learning算法的结果图\u003C\u002Fa>\n展示了每个回合的步数以及每个回合的成本\n\n![RL_Q-Learning_C-2](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fsichkar-valentyn_Reinforcement_Learning_in_Python_readme_15344291a773.png)\n\n\u003Cbr\u002F>\n\n### \u003Ca id=\"final-q-table-with-values-from-the-final-shortest-route-for-environment-2\">环境-2 中最终最短路径对应的 Q 表\u003C\u002Fa>\n![RL_Q-Learning_T-2](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fsichkar-valentyn_Reinforcement_Learning_in_Python_readme_7c51289bc985.png)\n\n\u003Cbr\u002F>\n\n### \u003Ca id=\"rl-q-learning-environment-3-experimental-results\">RL Q-Learning 环境-3 实验结果\u003C\u002Fa>\n超级复杂的环境-3，障碍物极多\n\n![RL_Q-Learning_E-3](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fsichkar-valentyn_Reinforcement_Learning_in_Python_readme_e46855e25a75.png)\n\n\u003Cbr\u002F>\n\n### \u003Ca id=\"comparison-analysis-of-q-learning-and-sarsa-algorithms\">Q-learning 与 Sarsa 算法的对比分析\u003C\u002Fa>\n![RQ-learning_via_Sarsa](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fsichkar-valentyn_Reinforcement_Learning_in_Python_readme_b440a856c230.png)\n\n\u003Cbr\u002F>\n\n### MIT 许可证\n### 版权所有 © 2018 瓦伦丁·N·西奇卡尔\n### github.com\u002Fsichkar-valentyn\n### 引用方式：\n瓦伦丁·N·西奇卡尔. 用于全局路径规划的强化学习算法 \u002F\u002F GitHub 平台. DOI: 10.5281\u002Fzenodo.1317898","# Reinforcement_Learning_in_Python 快速上手指南\n\n本项目使用 Python 实现了强化学习（RL）算法，主要用于移动机器人的全局路径规划任务。通过 Q-learning 和 Sarsa 算法，让智能体在包含悬崖、老鼠和奶酪等障碍的环境中学习最优路径。\n\n## 环境准备\n\n*   **操作系统**: Windows, macOS 或 Linux\n*   **Python 版本**: 推荐 Python 3.6 及以上版本\n*   **前置依赖**:\n    *   `numpy`: 用于数值计算\n    *   `matplotlib`: 用于绘制训练结果图表\n    *   `pandas` (可选): 用于数据处理\n\n确保已安装 Python 和 pip。国内用户建议使用清华源或阿里源加速依赖安装。\n\n## 安装步骤\n\n1.  
**克隆项目仓库**\n    ```bash\n    git clone https:\u002F\u002Fgithub.com\u002Fsichkar-valentyn\u002FReinforcement_Learning_in_Python.git\n    cd Reinforcement_Learning_in_Python\n    ```\n\n2.  **安装依赖库**\n    如果项目中包含 `requirements.txt`，请直接运行：\n    ```bash\n    pip install -r requirements.txt -i https:\u002F\u002Fpypi.tuna.tsinghua.edu.cn\u002Fsimple\n    ```\n    若没有该文件，请手动安装核心依赖：\n    ```bash\n    pip install numpy matplotlib -i https:\u002F\u002Fpypi.tuna.tsinghua.edu.cn\u002Fsimple\n    ```\n\n## 基本使用\n\n本项目每个实验示例通常由三个核心文件组成：\n*   `env.py`: 构建包含障碍物的环境。\n*   `agent_brain.py`: 实现具体的强化学习算法（Q-Learning 或 Sarsa）。\n*   `run_agent.py`: 运行实验的主脚本。\n\n### 运行示例：环境 1 (Q-Learning)\n\n以最基础的 **Environment-1** 为例，进入对应文件夹并运行主程序：\n\n```bash\ncd RL_Q-Learning_E1\npython run_agent.py\n```\n\n**运行说明：**\n*   脚本启动后，程序将开始训练智能体（移动机器人）。\n*   训练过程中会实时打印每一步的奖励（reward）和状态更新。\n*   训练结束后，会自动生成图表（显示步数\u002F成本随回合数的变化）并输出最终的 **Q-table**。\n*   根据生成的 Q-table，智能体将能够规划出从起点到终点的最短路径（例如：*down-right-down...*）。\n\n### 其他可用示例\n\n你可以切换目录体验不同复杂度环境或不同算法：\n\n*   **更复杂的环境 (Q-Learning)**:\n    ```bash\n    cd ..\u002FRL_Q-Learning_E2\n    python run_agent.py\n    ```\n    ```bash\n    cd ..\u002FRL_Q-Learning_E3\n    python run_agent.py\n    ```\n\n*   **Sarsa 算法对比**:\n    ```bash\n    cd ..\u002FRL_Sarsa_E1\n    python run_agent.py\n    ```\n\n代码中包含大量注释，阅读 `agent_brain.py` 可深入理解算法核心逻辑及贝尔曼方程的实现细节。","某高校机器人实验室正在研发一款能在未知仓库环境中自主导航的配送原型机，急需解决其在复杂障碍物间的全局路径规划难题。\n\n### 没有 Reinforcement_Learning_in_Python 时\n- 开发团队需从零手写 Q-learning 和 Sarsa 算法的核心数学公式，极易在奖励衰减因子和状态更新逻辑上出现代码错误。\n- 缺乏标准的“悬崖、老鼠与奶酪”测试环境，难以量化评估机器人在避开陷阱和寻找目标时的策略优劣。\n- 无法快速对比不同强化学习算法在同一场景下的收敛速度和最终路径效率，导致技术选型依赖主观猜测。\n- 调试过程中缺少可视化的 Q 表（Q-table）更新机制，难以直观理解机器人是如何通过试错来学习避障的。\n\n### 使用 Reinforcement_Learning_in_Python 后\n- 直接调用已实现的 Q-learning 和 Sarsa 算法模块，无需重复造轮子，将核心开发时间从数周缩短至几天。\n- 利用内置的经典网格环境进行仿真，立即验证了机器人在面对“悬崖”惩罚机制时的避障能力和寻路准确性。\n- 通过工具自带的对比分析功能，清晰发现 Sarsa 算法在当前保守型任务中比 Q-learning 更安全，从而确定了最终部署方案。\n- 借助生成的权重矩阵数据，团队成员能直观看到智能体如何逐步建立状态 - 动作映射，快速定位并修正了奖励函数设计的缺陷。\n\nReinforcement_Learning_in_Python 通过提供标准化的算法实现与对比框架，让研发团队从繁琐的底层编码中解放出来，专注于策略优化与场景适配。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fsichkar-valentyn_Reinforcement_Learning_in_Python_f75bbb52.png","sichkar-valentyn","Валентин Сичкар","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002Fsichkar-valentyn_d55131b2.png","Ведущий ИТ специалист & Психотерапевт. 
Автор образовательных курсов","FlyFlat","Санкт-Петербург",null,"https:\u002F\u002Fvalentynsichkar.name","https:\u002F\u002Fgithub.com\u002Fsichkar-valentyn",[82],{"name":83,"color":84,"percentage":85},"Python","#3572A5",100,513,117,"2026-04-05T23:42:36","MIT",1,"","未说明",{"notes":94,"python":92,"dependencies":95},"该项目主要实现基于表格的强化学习算法（Q-learning 和 Sarsa），用于移动机器人全局路径规划。代码包含大量注释，每个示例由环境构建、算法实现和运行脚本三个文件组成。README 中未明确列出具体的操作系统、Python 版本或第三方依赖库要求，推测仅需标准 Python 环境及基础绘图库（如 matplotlib）即可运行。",[],[13],[98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117],"reinforcement-learning","q-learning","path-planning","rl-qlearning","reinforcement-learning-algorithms","maze-algorithms","sarsa-learning","rl-algorithms","sarsa","sarsa-search","q-learning-vs-sarsa","shortest-path","maze-solver","rl-sarsa","rl-agents","rl-environment","rl-playground","rl-experiments","rl-emulator","obstacle-avoidance","2026-03-27T02:49:30.150509","2026-04-09T02:38:33.477436",[],[122,127,131,136,141,145],{"id":123,"version":124,"summary_zh":125,"released_at":126},162838,"v1.0.5","更新 **README**。","2018-10-25T10:50:37",{"id":128,"version":129,"summary_zh":125,"released_at":130},162839,"v1.0.4","2018-08-20T11:11:01",{"id":132,"version":133,"summary_zh":134,"released_at":135},162840,"v1.0.3","正在更新与该项目相关的 **Readme** 和 **GitHub 学术主页**。","2018-08-08T14:42:38",{"id":137,"version":138,"summary_zh":139,"released_at":140},162841,"v1.0.2","更新 **README**，创建与该项目相关的 **GitHub 学术主页**。","2018-08-03T18:26:11",{"id":142,"version":143,"summary_zh":125,"released_at":144},162842,"v1.0.1","2018-07-23T14:40:33",{"id":146,"version":147,"summary_zh":148,"released_at":149},162843,"v1.0.0","在移动机器人导航任务中，实现强化学习（RL）算法用于全局路径规划。针对具有悬崖、老鼠和奶酪的环境，对Q-learning算法和Sarsa算法进行对比分析。","2018-07-20T09:55:59"]