[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-HunterMcGushion--hyperparameter_hunter":3,"tool-HunterMcGushion--hyperparameter_hunter":64},[4,17,27,35,43,56],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":16},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,3,"2026-04-05T11:01:52",[13,14,15],"开发框架","图像","Agent","ready",{"id":18,"name":19,"github_repo":20,"description_zh":21,"stars":22,"difficulty_score":23,"last_commit_at":24,"category_tags":25,"status":16},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",138956,2,"2026-04-05T11:33:21",[13,15,26],"语言模型",{"id":28,"name":29,"github_repo":30,"description_zh":31,"stars":32,"difficulty_score":23,"last_commit_at":33,"category_tags":34,"status":16},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 
绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",107662,"2026-04-03T11:11:01",[13,14,15],{"id":36,"name":37,"github_repo":38,"description_zh":39,"stars":40,"difficulty_score":23,"last_commit_at":41,"category_tags":42,"status":16},3704,"NextChat","ChatGPTNextWeb\u002FNextChat","NextChat 是一款轻量且极速的 AI 助手，旨在为用户提供流畅、跨平台的大模型交互体验。它完美解决了用户在多设备间切换时难以保持对话连续性，以及面对众多 AI 模型不知如何统一管理的痛点。无论是日常办公、学习辅助还是创意激发，NextChat 都能让用户随时随地通过网页、iOS、Android、Windows、MacOS 或 Linux 端无缝接入智能服务。\n\n这款工具非常适合普通用户、学生、职场人士以及需要私有化部署的企业团队使用。对于开发者而言，它也提供了便捷的自托管方案，支持一键部署到 Vercel 或 Zeabur 等平台。\n\nNextChat 的核心亮点在于其广泛的模型兼容性，原生支持 Claude、DeepSeek、GPT-4 及 Gemini Pro 等主流大模型，让用户在一个界面即可自由切换不同 AI 能力。此外，它还率先支持 MCP（Model Context Protocol）协议，增强了上下文处理能力。针对企业用户，NextChat 提供专业版解决方案，具备品牌定制、细粒度权限控制、内部知识库整合及安全审计等功能，满足公司对数据隐私和个性化管理的高标准要求。",87618,"2026-04-05T07:20:52",[13,26],{"id":44,"name":45,"github_repo":46,"description_zh":47,"stars":48,"difficulty_score":23,"last_commit_at":49,"category_tags":50,"status":16},2268,"ML-For-Beginners","microsoft\u002FML-For-Beginners","ML-For-Beginners 是由微软推出的一套系统化机器学习入门课程，旨在帮助零基础用户轻松掌握经典机器学习知识。这套课程将学习路径规划为 12 周，包含 26 节精炼课程和 52 道配套测验，内容涵盖从基础概念到实际应用的完整流程，有效解决了初学者面对庞大知识体系时无从下手、缺乏结构化指导的痛点。\n\n无论是希望转型的开发者、需要补充算法背景的研究人员，还是对人工智能充满好奇的普通爱好者，都能从中受益。课程不仅提供了清晰的理论讲解，还强调动手实践，让用户在循序渐进中建立扎实的技能基础。其独特的亮点在于强大的多语言支持，通过自动化机制提供了包括简体中文在内的 50 多种语言版本，极大地降低了全球不同背景用户的学习门槛。此外，项目采用开源协作模式，社区活跃且内容持续更新，确保学习者能获取前沿且准确的技术资讯。如果你正寻找一条清晰、友好且专业的机器学习入门之路，ML-For-Beginners 将是理想的起点。",84991,"2026-04-05T10:45:23",[14,51,52,53,15,54,26,13,55],"数据工具","视频","插件","其他","音频",{"id":57,"name":58,"github_repo":59,"description_zh":60,"stars":61,"difficulty_score":10,"last_commit_at":62,"category_tags":63,"status":16},3128,"ragflow","infiniflow\u002Fragflow","RAGFlow 
是一款领先的开源检索增强生成（RAG）引擎，旨在为大语言模型构建更精准、可靠的上下文层。它巧妙地将前沿的 RAG 技术与智能体（Agent）能力相结合，不仅支持从各类文档中高效提取知识，还能让模型基于这些知识进行逻辑推理和任务执行。\n\n在大模型应用中，幻觉问题和知识滞后是常见痛点。RAGFlow 通过深度解析复杂文档结构（如表格、图表及混合排版），显著提升了信息检索的准确度，从而有效减少模型“胡编乱造”的现象，确保回答既有据可依又具备时效性。其内置的智能体机制更进一步，使系统不仅能回答问题，还能自主规划步骤解决复杂问题。\n\n这款工具特别适合开发者、企业技术团队以及 AI 研究人员使用。无论是希望快速搭建私有知识库问答系统，还是致力于探索大模型在垂直领域落地的创新者，都能从中受益。RAGFlow 提供了可视化的工作流编排界面和灵活的 API 接口，既降低了非算法背景用户的上手门槛，也满足了专业开发者对系统深度定制的需求。作为基于 Apache 2.0 协议开源的项目，它正成为连接通用大模型与行业专有知识之间的重要桥梁。",77062,"2026-04-04T04:44:48",[15,14,13,26,54],{"id":65,"github_repo":66,"name":67,"description_en":68,"description_zh":69,"ai_summary_zh":69,"readme_en":70,"readme_zh":71,"quickstart_zh":72,"use_case_zh":73,"hero_image_url":74,"owner_login":75,"owner_name":76,"owner_avatar_url":77,"owner_bio":78,"owner_company":78,"owner_location":78,"owner_email":79,"owner_twitter":78,"owner_website":78,"owner_url":80,"languages":81,"stars":94,"forks":95,"last_commit_at":96,"license":97,"difficulty_score":98,"env_os":99,"env_gpu":100,"env_ram":100,"env_deps":101,"category_tags":111,"github_topics":112,"view_count":23,"oss_zip_url":78,"oss_zip_packed_at":78,"status":16,"created_at":127,"updated_at":128,"faqs":129,"releases":162},3303,"HunterMcGushion\u002Fhyperparameter_hunter","hyperparameter_hunter","Easy hyperparameter optimization and automatic result saving across machine learning algorithms and libraries","HyperparameterHunter 是一款专为机器学习开发者与研究人员设计的超参数优化助手。它的核心使命是自动记录每一次实验的关键数据，并基于历史结果进行智能优化，确保你的所有测试努力都不会被浪费。\n\n在传统工作流中，开发者往往需要编写大量重复代码来处理交叉验证、预测和评分，且难以系统性地管理纷繁复杂的超参数与实验结果，甚至可能无意中重复运行相同的实验。HyperparameterHunter 通过封装主流机器学习库，消除了这些繁琐的样板代码，让你能继续使用熟悉的工具，同时自动完成结果的保存、组织与分析。\n\n其独特的技术亮点在于“持久化记忆”能力。不同于其他从零开始优化的库，HyperparameterHunter 鼓励用户从项目初期就将其作为日常实验工具箱使用。随着实验数据的积累，它能利用过往的所有基准测试和一次性实验结果，进行真正“知情”的超参数搜索，从而随着时间推移不断提升优化效率。无论你是希望简化实验流程的算法工程师，还是追求长期模型性能提升的研究人员，HyperparameterHunter 都能成为你得力的智能搭档。","HyperparameterHunter\n====================\n\n![HyperparameterHunter 
Overview](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHunterMcGushion_hyperparameter_hunter_readme_134da0acd384.gif)\n\n[![Build Status](https:\u002F\u002Ftravis-ci.org\u002FHunterMcGushion\u002Fhyperparameter_hunter.svg?branch=master)](https:\u002F\u002Ftravis-ci.org\u002FHunterMcGushion\u002Fhyperparameter_hunter)\n[![Documentation Status](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHunterMcGushion_hyperparameter_hunter_readme_13d664e1afd7.png)](https:\u002F\u002Fhyperparameter-hunter.readthedocs.io\u002Fen\u002Fstable\u002F?badge=stable)\n[![Coverage Status](https:\u002F\u002Fcoveralls.io\u002Frepos\u002Fgithub\u002FHunterMcGushion\u002Fhyperparameter_hunter\u002Fbadge.svg)](https:\u002F\u002Fcoveralls.io\u002Fgithub\u002FHunterMcGushion\u002Fhyperparameter_hunter?branch=master&service=github)\n[![codecov](https:\u002F\u002Fcodecov.io\u002Fgh\u002FHunterMcGushion\u002Fhyperparameter_hunter\u002Fbranch\u002Fmaster\u002Fgraph\u002Fbadge.svg)](https:\u002F\u002Fcodecov.io\u002Fgh\u002FHunterMcGushion\u002Fhyperparameter_hunter)\n[![Maintainability](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHunterMcGushion_hyperparameter_hunter_readme_91fd2da65c07.png)](https:\u002F\u002Fcodeclimate.com\u002Fgithub\u002FHunterMcGushion\u002Fhyperparameter_hunter\u002Fmaintainability)\n[![Codacy Badge](https:\u002F\u002Fapi.codacy.com\u002Fproject\u002Fbadge\u002FGrade\u002F1413b76fabe2400fab1958e70be593a2)](https:\u002F\u002Fwww.codacy.com\u002Fapp\u002FHunterMcGushion\u002Fhyperparameter_hunter?utm_source=github.com&amp;utm_medium=referral&amp;utm_content=HunterMcGushion\u002Fhyperparameter_hunter&amp;utm_campaign=Badge_Grade)\n\n[![PyPI 
version](https:\u002F\u002Fbadge.fury.io\u002Fpy\u002Fhyperparameter-hunter.svg)](https:\u002F\u002Fbadge.fury.io\u002Fpy\u002Fhyperparameter-hunter)\n[![Downloads](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHunterMcGushion_hyperparameter_hunter_readme_6aaae3f74cfb.png)](https:\u002F\u002Fpepy.tech\u002Fproject\u002Fhyperparameter-hunter)\n[![Donate](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FDonate-PayPal-green.svg)](https:\u002F\u002Fwww.paypal.com\u002Fcgi-bin\u002Fwebscr?cmd=_s-xclick&hosted_button_id=Q3EX3PQUV256G)\n[![Code style: black](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Fcode%20style-black-000000.svg)](https:\u002F\u002Fgithub.com\u002Fambv\u002Fblack)\n\nAutomatically save and learn from Experiment results, leading to long-term, persistent optimization that remembers all your tests.\n\nHyperparameterHunter provides a wrapper for machine learning algorithms that saves all the important data. Simplify the experimentation and hyperparameter tuning process by letting HyperparameterHunter do the hard work\nof recording, organizing, and learning from your tests — all while using the same libraries you already do. 
Don't let any of your experiments go to waste, and start doing hyperparameter optimization the way it was meant to be.\n\n* **Installation:** `pip install hyperparameter-hunter`\n* **Source:** https:\u002F\u002Fgithub.com\u002FHunterMcGushion\u002Fhyperparameter_hunter\n* **Documentation:** [https:\u002F\u002Fhyperparameter-hunter.readthedocs.io](https:\u002F\u002Fhyperparameter-hunter.readthedocs.io\u002Fen\u002Flatest\u002Findex.html)\n\nFeatures\n--------\n* Automatically record Experiment results\n* Truly informed hyperparameter optimization that automatically uses past Experiments\n* Eliminate boilerplate code for cross-validation loops, predicting, and scoring\n* Stop worrying about keeping track of hyperparameters, scores, or re-running the same Experiments\n* Use the libraries and utilities you already love\n\nHow to Use HyperparameterHunter\n-------------------------------\nDon’t think of HyperparameterHunter as another optimization library that you bring out only when its time to do hyperparameter optimization. Of course, it does optimization, but its better to view HyperparameterHunter as your own personal machine learning toolbox\u002Fassistant.\n\nThe idea is to start using HyperparameterHunter immediately. Run all of your benchmark\u002Fone-off experiments through it. \n\nThe more you use HyperparameterHunter, the better your results will be. If you just use it for optimization, sure, it’ll do what you want, but that’s missing the point of HyperparameterHunter.\n\nIf you’ve been using it for experimentation and optimization along the entire course of your project, then when you decide to do hyperparameter optimization, HyperparameterHunter is already aware of all that you’ve done, and that’s when HyperparameterHunter does something remarkable. It doesn’t start optimization from scratch like other libraries. 
It starts from all of the Experiments and previous optimization rounds you’ve already run through it.\n\nGetting Started\n---------------\n\n### 1) Environment:\n\nSet up an Environment to organize Experiments and Optimization results.\n\u003Cbr>\nAny Experiments or Optimization rounds we perform will use our active Environment.\n\n```python\nfrom hyperparameter_hunter import Environment, CVExperiment\nimport pandas as pd\nfrom sklearn.datasets import load_breast_cancer\nfrom sklearn.model_selection import StratifiedKFold\n\ndata = load_breast_cancer()\ndf = pd.DataFrame(data=data.data, columns=data.feature_names)\ndf['target'] = data.target\n\nenv = Environment(\n    train_dataset=df,  # Add holdout\u002Ftest dataframes, too\n    results_path='path\u002Fto\u002Fresults\u002Fdirectory',  # Where your result files will go\n    metrics=['roc_auc_score'],  # Callables, or strings referring to `sklearn.metrics`\n    cv_type=StratifiedKFold,  # Class, or string in `sklearn.model_selection`\n    cv_params=dict(n_splits=5, shuffle=True, random_state=32)\n)\n```\n\n### 2) Individual Experimentation:\n\nPerform Experiments with your favorite libraries simply by providing model initializers and hyperparameters\n\u003C!-- Keras -->\n\n\u003Cdetails>\n\u003Csummary>Keras\u003C\u002Fsummary>\n\n```python\n# Same format used by `keras.wrappers.scikit_learn`. 
Nothing new to learn\ndef build_fn(input_shape):  # `input_shape` calculated for you\n    model = Sequential([\n        Dense(100, kernel_initializer='uniform', input_shape=input_shape, activation='relu'),\n        Dropout(0.5),\n        Dense(1, kernel_initializer='uniform', activation='sigmoid')\n    ])  # All layer arguments saved (whether explicit or Keras default) for future use\n    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n    return model\n\nexperiment = CVExperiment(\n    model_initializer=KerasClassifier,\n    model_init_params=build_fn,  # We interpret your build_fn to save hyperparameters in a useful, readable format\n    model_extra_params=dict(\n        callbacks=[ReduceLROnPlateau(patience=5)],  # Use Keras callbacks\n        batch_size=32, epochs=10, verbose=0  # Fit\u002Fpredict arguments\n    )\n)\n```\n\n\u003C\u002Fdetails>\n\n\u003C!-- SKLearn -->\n\u003Cdetails>\n\u003Csummary>SKLearn\u003C\u002Fsummary>\n\n```python\nexperiment = CVExperiment(\n    model_initializer=LinearSVC,  # (Or any of the dozens of other SK-Learn algorithms)\n    model_init_params=dict(penalty='l1', C=0.9)  # Default values used and recorded for kwargs not given\n)\n```\n\u003C\u002Fdetails>\n\u003C!-- XGBoost -->\n\u003Cdetails open>\n\u003Csummary>XGBoost\u003C\u002Fsummary>\n\n```python\nexperiment = CVExperiment(\n    model_initializer=XGBClassifier,\n    model_init_params=dict(objective='reg:linear', max_depth=3, n_estimators=100, subsample=0.5)\n)\n```\n\u003C\u002Fdetails>\n\u003C!-- LightGBM -->\n\u003Cdetails>\n\u003Csummary>LightGBM\u003C\u002Fsummary>\n\n```python\nexperiment = CVExperiment(\n    model_initializer=LGBMClassifier,\n    model_init_params=dict(boosting_type='gbdt', num_leaves=31, max_depth=-1, min_child_samples=5, subsample=0.5)\n)\n```\n\u003C\u002Fdetails>\n\u003C!-- CatBoost -->\n\u003Cdetails>\n\u003Csummary>CatBoost\u003C\u002Fsummary>\n\n```python\nexperiment = CVExperiment(\n    
model_initializer=CatboostClassifier,\n    model_init_params=dict(iterations=500, learning_rate=0.01, depth=7, allow_writing_files=False),\n    model_extra_params=dict(fit=dict(verbose=True))  # Send kwargs to `fit` and other extra methods\n)\n```\n\u003C\u002Fdetails>\n\u003C!-- RGF -->\n\u003Cdetails>\n\u003Csummary>RGF\u003C\u002Fsummary>\n\n```python\nexperiment = CVExperiment(\n    model_initializer=RGFClassifier,\n    model_init_params=dict(max_leaf=1000, algorithm='RGF', min_samples_leaf=10)\n)\n```\n\u003C\u002Fdetails>\n\n### 3) Hyperparameter Optimization:\n\nJust like Experiments, but if you want to optimize a hyperparameter, use the classes imported below\n\n```python\nfrom hyperparameter_hunter import Real, Integer, Categorical\nfrom hyperparameter_hunter import optimization as opt\n```\n\n\u003C!-- Keras -->\n\u003Cdetails>\n\u003Csummary>Keras\u003C\u002Fsummary>\n\n```python\ndef build_fn(input_shape):\n    model = Sequential([\n        Dense(Integer(50, 150), input_shape=input_shape, activation='relu'),\n        Dropout(Real(0.2, 0.7)),\n        Dense(1, activation=Categorical(['sigmoid', 'softmax']))\n    ])\n    model.compile(\n        optimizer=Categorical(['adam', 'rmsprop', 'sgd', 'adadelta']),\n        loss='binary_crossentropy', metrics=['accuracy']\n    )\n    return model\n\noptimizer = opt.RandomForestOptPro(iterations=7)\noptimizer.forge_experiment(\n    model_initializer=KerasClassifier,\n    model_init_params=build_fn,\n    model_extra_params=dict(\n        callbacks=[ReduceLROnPlateau(patience=Integer(5, 10))],\n        batch_size=Categorical([32, 64]),\n        epochs=10, verbose=0\n    )\n)\noptimizer.go()\n```\n\u003C\u002Fdetails>\n\n\u003C!-- SKLearn -->\n\u003Cdetails>\n\u003Csummary>SKLearn\u003C\u002Fsummary>\n\n```python\noptimizer = opt.DummyOptPro(iterations=42)\noptimizer.forge_experiment(\n    model_initializer=AdaBoostClassifier,  # (Or any of the dozens of other SKLearn algorithms)\n    model_init_params=dict(\n        
n_estimators=Integer(75, 150),\n        learning_rate=Real(0.8, 1.3),\n        algorithm='SAMME.R'\n    )\n)\noptimizer.go()\n```\n\u003C\u002Fdetails>\n\u003C!-- XGBoost -->\n\u003Cdetails open>\n\u003Csummary>XGBoost\u003C\u002Fsummary>\n\n```python\noptimizer = opt.BayesianOptPro(iterations=10)\noptimizer.forge_experiment(\n    model_initializer=XGBClassifier,\n    model_init_params=dict(\n        max_depth=Integer(low=2, high=20),\n        learning_rate=Real(0.0001, 0.5),\n        n_estimators=200,\n        subsample=0.5,\n        booster=Categorical(['gbtree', 'gblinear', 'dart']),\n    )\n)\noptimizer.go()\n```\n\u003C\u002Fdetails>\n\u003C!-- LightGBM -->\n\u003Cdetails>\n\u003Csummary>LightGBM\u003C\u002Fsummary>\n\n```python\noptimizer = opt.BayesianOptPro(iterations=100)\noptimizer.forge_experiment(\n    model_initializer=LGBMClassifier,\n    model_init_params=dict(\n        boosting_type=Categorical(['gbdt', 'dart']),\n        num_leaves=Integer(5, 20),\n        max_depth=-1,\n        min_child_samples=5,\n        subsample=0.5\n    )\n)\noptimizer.go()\n```\n\u003C\u002Fdetails>\n\u003C!-- CatBoost -->\n\u003Cdetails>\n\u003Csummary>CatBoost\u003C\u002Fsummary>\n\n```python\noptimizer = opt.GradientBoostedRegressionTreeOptPro(iterations=32)\noptimizer.forge_experiment(\n    model_initializer=CatBoostClassifier,\n    model_init_params=dict(\n        iterations=100,\n        eval_metric=Categorical(['Logloss', 'Accuracy', 'AUC']),\n        learning_rate=Real(low=0.0001, high=0.5),\n        depth=Integer(4, 7),\n        allow_writing_files=False\n    )\n)\noptimizer.go()\n```\n\u003C\u002Fdetails>\n\u003C!-- RGF -->\n\u003Cdetails>\n\u003Csummary>RGF\u003C\u002Fsummary>\n\n```python\noptimizer = opt.ExtraTreesOptPro(iterations=10)\noptimizer.forge_experiment(\n    model_initializer=RGFClassifier,\n    model_init_params=dict(\n        max_leaf=1000,\n        algorithm=Categorical(['RGF', 'RGF_Opt', 'RGF_Sib']),\n        l2=Real(0.01, 0.3),\n        
normalize=Categorical([True, False]),\n        learning_rate=Real(0.3, 0.7),\n        loss=Categorical(['LS', 'Expo', 'Log', 'Abs'])\n    )\n)\noptimizer.go()\n```\n\u003C\u002Fdetails>\n\nOutput File Structure\n---------------------\nThis is a simple illustration of the file structure you can expect your `Experiment`s to generate. For an in-depth description of the directory structure and the contents of the various files, see the [File Structure Overview](https:\u002F\u002Fhyperparameter-hunter.readthedocs.io\u002Fen\u002Flatest\u002Ffile_structure_overview.html) section in the documentation. However, the essentials are as follows:\n\n1. An `Experiment` adds a file to each *HyperparameterHunterAssets\u002FExperiments* subdirectory, named by `experiment_id`\n2. Each `Experiment` also adds an entry to *HyperparameterHunterAssets\u002FLeaderboards\u002FGlobalLeaderboard.csv*\n3. Customize which files are created via `Environment`'s `file_blacklist` and `do_full_save` kwargs (documented [here](https:\u002F\u002Fhyperparameter-hunter.readthedocs.io\u002Fen\u002Flatest\u002Fapi_essentials.html#environment))\n\n```\nHyperparameterHunterAssets\n|   Heartbeat.log\n|\n└───Experiments\n|   |\n|   └───Descriptions\n|   |   |   \u003CFiles describing Experiment results, conditions, etc.>.json\n|   |\n|   └───Predictions\u003COOF\u002FHoldout\u002FTest>\n|   |   |   \u003CFiles containing Experiment predictions for the indicated dataset>.csv\n|   |\n|   └───Heartbeats\n|   |   |   \u003CFiles containing the log produced by the Experiment>.log\n|   |\n|   └───ScriptBackups\n|       |   \u003CFiles containing a copy of the script that created the Experiment>.py\n|\n└───Leaderboards\n|   |   GlobalLeaderboard.csv\n|   |   \u003COther leaderboards>.csv\n|\n└───TestedKeys\n|   |   \u003CFiles named by Environment key, containing hyperparameter keys>.json\n|\n└───KeyAttributeLookup\n    |   \u003CFiles linking complex objects used in Experiments to their 
hashes>\n```\n\nInstallation\n------------\n\n```\npip install hyperparameter-hunter\n```\n\nIf you like being on the cutting-edge, and you want all the latest developments, run:\n\n```\npip install git+https:\u002F\u002Fgithub.com\u002FHunterMcGushion\u002Fhyperparameter_hunter.git\n```\n\nIf you want to contribute to HyperparameterHunter, [get started here](CONTRIBUTING.md).\n\nI Still Don't Get It\n--------------------\nThat's ok. Don't feel bad. It's a bit weird to wrap your head around. Here's an example that illustrates how everything is related:\n\n```python\nfrom hyperparameter_hunter import Environment, CVExperiment, BayesianOptPro, Integer\nfrom hyperparameter_hunter.utils.learning_utils import get_breast_cancer_data\nfrom xgboost import XGBClassifier\n\n# Start by creating an `Environment` - This is where you define how Experiments (and optimization) will be conducted\nenv = Environment(\n    train_dataset=get_breast_cancer_data(target='target'),\n    results_path='HyperparameterHunterAssets',\n    metrics=['roc_auc_score'],\n    cv_type='StratifiedKFold',\n    cv_params=dict(n_splits=10, shuffle=True, random_state=32),\n)\n\n# Now, conduct an `Experiment`\n# This tells HyperparameterHunter to use the settings in the active `Environment` to train a model with these hyperparameters\nexperiment = CVExperiment(\n    model_initializer=XGBClassifier,\n    model_init_params=dict(\n        objective='reg:linear',\n        max_depth=3\n    )\n)\n\n# That's it. No annoying boilerplate code to fit models and record results\n# Now, the `Environment`'s `results_path` directory will contain new files describing the Experiment just conducted\n\n# Time for the fun part. 
We'll set up some hyperparameter optimization by first defining the `OptPro` (Optimization Protocol) we want\noptimizer = BayesianOptPro(verbose=1)\n\n# Now we're going to say which hyperparameters we want to optimize.\n# Notice how this looks just like our `experiment` above\noptimizer.forge_experiment(\n    model_initializer=XGBClassifier,\n    model_init_params=dict(\n        objective='reg:linear',  # We're setting this as a constant guideline - Not one to optimize\n        max_depth=Integer(2, 10)  # Instead of using an int like the `experiment` above, we provide a space to search\n    )\n)\n# Notice that our range for `max_depth` includes the `max_depth=3` value we used in our `experiment` earlier\n\noptimizer.go()  # Now, we go\n\nassert experiment.experiment_id in [_[2] for _ in optimizer.similar_experiments]\n# Here we're verifying that the `experiment` we conducted first was found by `optimizer` and used as learning material\n# You can also see via the console that we found `experiment`'s saved files, and used it to start optimization\n\nlast_experiment_id = optimizer.current_experiment.experiment_id\n# Let's save the id of the experiment that was just conducted by `optimizer`\n\noptimizer.go()  # Now, we'll start up `optimizer` again...\n\n# And we can see that this second optimization round learned from both our first `experiment` and our first optimization round\nassert experiment.experiment_id in [_[2] for _ in optimizer.similar_experiments]\nassert last_experiment_id in [_[2] for _ in optimizer.similar_experiments]\n# It even did all this without us having to tell it what experiments to learn from\n\n# Now think about how much better your hyperparameter optimization will be when it learns from:\n# - All your past experiments, and\n# - All your past optimization rounds\n# And the best part: HyperparameterHunter figures out which experiments are compatible all on its own\n# You don't have to worry about telling it that KFold=5 is different from 
KFold=10,\n# Or that max_depth=12 is outside of max_depth=Integer(2, 10)\n```\n\nTested Libraries\n----------------\n* [Keras](https:\u002F\u002Fgithub.com\u002FHunterMcGushion\u002Fhyperparameter_hunter\u002Fblob\u002Fmaster\u002Fexamples\u002Flib_keras_example.py)\n* [scikit-learn](https:\u002F\u002Fgithub.com\u002FHunterMcGushion\u002Fhyperparameter_hunter\u002Fblob\u002Fmaster\u002Fexamples\u002Flib_sklearn_example.py)\n* [LightGBM](https:\u002F\u002Fgithub.com\u002FHunterMcGushion\u002Fhyperparameter_hunter\u002Fblob\u002Fmaster\u002Fexamples\u002Flib_lightgbm_example.py)\n* [CatBoost](https:\u002F\u002Fgithub.com\u002FHunterMcGushion\u002Fhyperparameter_hunter\u002Fblob\u002Fmaster\u002Fexamples\u002Flib_catboost_example.py)\n* [XGBoost](https:\u002F\u002Fgithub.com\u002FHunterMcGushion\u002Fhyperparameter_hunter\u002Fblob\u002Fmaster\u002Fexamples\u002Fsimple_experiment_example.py)\n* [rgf_python](https:\u002F\u002Fgithub.com\u002FHunterMcGushion\u002Fhyperparameter_hunter\u002Fblob\u002Fmaster\u002Fexamples\u002Flib_rgf_example.py)\n* ... More on the way\n\nGotchas\u002FFAQs\n------------\nThese are some things that might \"getcha\"\n\n### General:\n- **Can't provide initial search points to `OptPro`?**\n   - This is intentional. If you want your optimization rounds to start with specific search points (that you haven't recorded yet), simply perform a `CVExperiment` before initializing your `OptPro`\n   - Assuming the two have the same guideline hyperparameters and the `Experiment` fits within the search space defined by your `OptPro`, the optimizer will locate and read in the results of the `Experiment`\n   - Keep in mind, you'll probably want to remove the `Experiment` after you've done it once, as the results have been saved. Leaving it there will just execute the same `Experiment` over and over again\n- **After changing things in my \"HyperparameterHunterAssets\" directory, everything stopped working**\n   - Yeah, don't do that. 
Especially not with \"Descriptions\", \"Leaderboards\", or \"TestedKeys\"\n   - HyperparameterHunter figures out what's going on by reading these files directly. \n   - Removing them, or changing their contents can break a lot of HyperparameterHunter's functionality\n\n### Keras:\n- **Can't find similar Experiments with simple Dense\u002FActivation neural networks?**\n   - This is likely caused by switching between using a separate `Activation` layer, and providing a `Dense` layer with the `activation` kwarg\n   - Each layer is treated as its own little set of hyperparameters (as well as being a hyperparameter, itself), which means that as far as HyperparameterHunter is concerned, the following two examples are NOT equivalent:\n      - ```Dense(10, activation=‘sigmoid’)```\n      - ```Dense(10); Activation(‘sigmoid’)```\n   - We’re working on this, but for now, the workaround is just to be consistent with how you add activations to your models\n      - Either use separate `Activation` layers, or provide `activation` kwargs to other layers, and stick with it!\n- **Can't optimize the `model.compile` arguments: `optimizer` and `optimizer_params` at the same time?**\n   - This happens because Keras’ `optimizers` expect different arguments\n   - For example, when `optimizer=Categorical(['adam', 'rmsprop'])`, there are two different possible dicts of `optimizer_params`\n   - For now, you can only optimize `optimizer`, and `optimizer_params` separately\n   - A good way to do this might be to select a few optimizers you want to test, and don’t provide an `optimizer_params` value. 
That way, each `optimizer` will use its default parameters\n      - Then you can select which `optimizer` was the best, and set `optimizer=\u003Cbest optimizer>`, then move on to tuning `optimizer_params`, with arguments specific to the `optimizer` you selected\n\n### CatBoost:\n- **Can't find similar Experiments for CatBoost?**\n   - This may be happening because the default values for the kwargs expected in CatBoost’s model `__init__` methods are defined somewhere else, and given placeholder values of `None` in their signatures\n   - Because of this, HyperparameterHunter assumes that the default value for an argument really is `None` if you don’t explicitly provide a value for that argument\n   - This is obviously not the case, but I just can’t seem to figure out where the actual default values used by CatBoost are located, so if anyone knows how to remedy this situation, I would love your help!\n\n\n","超参数猎人\n====================\n\n![超参数猎人概览](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHunterMcGushion_hyperparameter_hunter_readme_134da0acd384.gif)\n\n[![构建状态](https:\u002F\u002Ftravis-ci.org\u002FHunterMcGushion\u002Fhyperparameter_hunter.svg?branch=master)](https:\u002F\u002Ftravis-ci.org\u002FHunterMcGushion\u002Fhyperparameter_hunter)\n[![文档状态](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHunterMcGushion_hyperparameter_hunter_readme_13d664e1afd7.png)](https:\u002F\u002Fhyperparameter-hunter.readthedocs.io\u002Fen\u002Fstable\u002F?badge=stable)\n[![覆盖率状态](https:\u002F\u002Fcoveralls.io\u002Frepos\u002Fgithub\u002FHunterMcGushion\u002Fhyperparameter_hunter\u002Fbadge.svg)](https:\u002F\u002Fcoveralls.io\u002Fgithub\u002FHunterMcGushion\u002Fhyperparameter_hunter?branch=master&service=github)\n[![codecov](https:\u002F\u002Fcodecov.io\u002Fgh\u002FHunterMcGushion\u002Fhyperparameter_hunter\u002Fbranch\u002Fmaster\u002Fgraph\u002Fbadge.svg)](https:\u002F\u002Fcodecov.io\u002Fgh\u002FHunterMcGushion\u002Fhyperparameter_hunter)\n[![可维护性](https:\u0
02F\u002Foss.gittoolsai.com\u002Fimages\u002FHunterMcGushion_hyperparameter_hunter_readme_91fd2da65c07.png)](https:\u002F\u002Fcodeclimate.com\u002Fgithub\u002FHunterMcGushion\u002Fhyperparameter_hunter\u002Fmaintainability)\n[![Codacy徽章](https:\u002F\u002Fapi.codacy.com\u002Fproject\u002Fbadge\u002FGrade\u002F1413b76fabe2400fab1958e70be593a2)](https:\u002F\u002Fwww.codacy.com\u002Fapp\u002FHunterMcGushion\u002Fhyperparameter_hunter?utm_source=github.com&amp;utm_medium=referral&amp;utm_content=HunterMcGushion\u002Fhyperparameter_hunter&amp;utm_campaign=Badge_Grade)\n\n[![PyPI版本](https:\u002F\u002Fbadge.fury.io\u002Fpy\u002Fhyperparameter-hunter.svg)](https:\u002F\u002Fbadge.fury.io\u002Fpy\u002Fhyperparameter-hunter)\n[![下载量](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHunterMcGushion_hyperparameter_hunter_readme_6aaae3f74cfb.png)](https:\u002F\u002Fpepy.tech\u002Fproject\u002Fhyperparameter-hunter)\n[![捐赠](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FDonate-PayPal-green.svg)](https:\u002F\u002Fwww.paypal.com\u002Fcgi-bin\u002Fwebscr?cmd=_s-xclick&hosted_button_id=Q3EX3PQUV256G)\n[![代码风格：black](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Fcode%20style-black-000000.svg)](https:\u002F\u002Fgithub.com\u002Fambv\u002Fblack)\n\n自动保存并从实验结果中学习，从而实现长期、持续的优化，并记住你所有的测试。\n\nHyperparameterHunter为机器学习算法提供了一个包装器，可以保存所有重要数据。通过让HyperparameterHunter完成记录、整理和从你的测试中学习这些繁重的工作，同时继续使用你已经熟悉的库，简化实验和超参数调优的过程。不要让任何一次实验白费，开始以正确的方式进行超参数优化吧。\n\n* **安装:** `pip install hyperparameter-hunter`\n* **源码:** https:\u002F\u002Fgithub.com\u002FHunterMcGushion\u002Fhyperparameter_hunter\n* **文档:** [https:\u002F\u002Fhyperparameter-hunter.readthedocs.io](https:\u002F\u002Fhyperparameter-hunter.readthedocs.io\u002Fen\u002Flatest\u002Findex.html)\n\n特性\n--------\n* 自动记录实验结果\n* 真正基于过往实验信息的超参数优化\n* 消除交叉验证循环、预测和评分中的样板代码\n* 不再需要担心跟踪超参数、分数或重复运行相同的实验\n* 
使用你已经喜爱的库和工具\n\n如何使用HyperparameterHunter\n-------------------------------\n不要把HyperparameterHunter看作是仅在需要进行超参数优化时才使用的另一个优化库。当然，它确实能进行优化，但更好的方式是将HyperparameterHunter视为你个人的机器学习工具箱\u002F助手。\n\n理念是立即开始使用HyperparameterHunter。把你所有的基准测试或一次性实验都通过它来运行。\n\n你使用得越多，效果就会越好。如果你只用它来进行优化，它当然也能满足你的需求，但这并没有抓住HyperparameterHunter的核心价值。\n\n如果你在整个项目过程中一直用它来进行实验和优化，那么当你决定进行超参数优化时，HyperparameterHunter已经了解了你之前的所有工作，这时它就能发挥出非凡的作用。与其他库不同，HyperparameterHunter不会从零开始优化，而是会基于你之前通过它运行的所有实验和优化轮次来继续推进。\n\n入门指南\n---------------\n\n### 1) 环境:\n\n设置一个环境来组织实验和优化结果。\n\u003Cbr>\n我们执行的任何实验或优化轮次都会使用当前激活的环境。\n\n```python\nfrom hyperparameter_hunter import Environment, CVExperiment\nimport pandas as pd\nfrom sklearn.datasets import load_breast_cancer\nfrom sklearn.model_selection import StratifiedKFold\n\ndata = load_breast_cancer()\ndf = pd.DataFrame(data=data.data, columns=data.feature_names)\ndf['target'] = data.target\n\nenv = Environment(\n    train_dataset=df,  # 也可以添加保留集\u002F测试数据框\n    results_path='path\u002Fto\u002Fresults\u002Fdirectory',  # 结果文件将保存的位置\n    metrics=['roc_auc_score'],  # 可调用函数，或指向`sklearn.metrics`的字符串\n    cv_type=StratifiedKFold,  # 类，或`sklearn.model_selection`中的字符串\n    cv_params=dict(n_splits=5, shuffle=True, random_state=32)\n)\n```\n\n### 2) 单个实验:\n\n只需提供模型初始化器和超参数，即可使用你喜欢的库进行实验\n\u003C!-- Keras -->\n\n\u003Cdetails>\n\u003Csummary>Keras\u003C\u002Fsummary>\n\n```python\n\n# 与 `keras.wrappers.scikit_learn` 使用的格式相同。无需学习新内容\ndef build_fn(input_shape):  # `input_shape` 已为您计算好\n    model = Sequential([\n        Dense(100, kernel_initializer='uniform', input_shape=input_shape, activation='relu'),\n        Dropout(0.5),\n        Dense(1, kernel_initializer='uniform', activation='sigmoid')\n    ])  # 所有层的参数（无论是显式指定的还是 Keras 默认值）都会被保存，以供将来使用\n    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n    return model\n\nexperiment = CVExperiment(\n    model_initializer=KerasClassifier,\n    model_init_params=build_fn,  # 我们会解析您的 `build_fn` 
函数，以将超参数以有用且易读的格式保存下来\n    model_extra_params=dict(\n        callbacks=[ReduceLROnPlateau(patience=5)],  # 使用 Keras 回调函数\n        batch_size=32, epochs=10, verbose=0  # 拟合\u002F预测时的参数\n    )\n)\n```\n\n\u003C\u002Fdetails>\n\n\u003C!-- SKLearn -->\n\u003Cdetails>\n\u003Csummary>SKLearn\u003C\u002Fsummary>\n\n```python\nexperiment = CVExperiment(\n    model_initializer=LinearSVC,  # （或数十种其他 SK-Learn 算法中的任意一种）\n    model_init_params=dict(penalty='l1', C=0.9)  # 对于未提供的关键字参数，将使用默认值并记录下来\n)\n```\n\u003C\u002Fdetails>\n\u003C!-- XGBoost -->\n\u003Cdetails open>\n\u003Csummary>XGBoost\u003C\u002Fsummary>\n\n```python\nexperiment = CVExperiment(\n    model_initializer=XGBClassifier,\n    model_init_params=dict(objective='reg:linear', max_depth=3, n_estimators=100, subsample=0.5)\n)\n```\n\u003C\u002Fdetails>\n\u003C!-- LightGBM -->\n\u003Cdetails>\n\u003Csummary>LightGBM\u003C\u002Fsummary>\n\n```python\nexperiment = CVExperiment(\n    model_initializer=LGBMClassifier,\n    model_init_params=dict(boosting_type='gbdt', num_leaves=31, max_depth=-1, min_child_samples=5, subsample=0.5)\n)\n```\n\u003C\u002Fdetails>\n\u003C!-- CatBoost -->\n\u003Cdetails>\n\u003Csummary>CatBoost\u003C\u002Fsummary>\n\n```python\nexperiment = CVExperiment(\n    model_initializer=CatBoostClassifier,\n    model_init_params=dict(iterations=500, learning_rate=0.01, depth=7, allow_writing_files=False),\n    model_extra_params=dict(fit=dict(verbose=True))  # 将关键字参数传递给 `fit` 方法及其他额外方法\n)\n```\n\u003C\u002Fdetails>\n\u003C!-- RGF -->\n\u003Cdetails>\n\u003Csummary>RGF\u003C\u002Fsummary>\n\n```python\nexperiment = CVExperiment(\n    model_initializer=RGFClassifier,\n    model_init_params=dict(max_leaf=1000, algorithm='RGF', min_samples_leaf=10)\n)\n```\n\u003C\u002Fdetails>\n\n### 3) 超参数优化：\n\n与实验类似，但如果你想优化超参数，请使用下面导入的类。\n\n```python\nfrom hyperparameter_hunter import Real, Integer, Categorical\nfrom hyperparameter_hunter import optimization as opt\n```\n\n\u003C!-- Keras 
-->\n\u003Cdetails>\n\u003Csummary>Keras\u003C\u002Fsummary>\n\n```python\ndef build_fn(input_shape):\n    model = Sequential([\n        Dense(Integer(50, 150), input_shape=input_shape, activation='relu'),\n        Dropout(Real(0.2, 0.7)),\n        Dense(1, activation=Categorical(['sigmoid', 'softmax']))\n    ])\n    model.compile(\n        optimizer=Categorical(['adam', 'rmsprop', 'sgd', 'adadelta']),\n        loss='binary_crossentropy', metrics=['accuracy']\n    )\n    return model\n\noptimizer = opt.RandomForestOptPro(iterations=7)\noptimizer.forge_experiment(\n    model_initializer=KerasClassifier,\n    model_init_params=build_fn,\n    model_extra_params=dict(\n        callbacks=[ReduceLROnPlateau(patience=Integer(5, 10))],\n        batch_size=Categorical([32, 64]),\n        epochs=10, verbose=0\n    )\n)\noptimizer.go()\n```\n\u003C\u002Fdetails>\n\n\u003C!-- SKLearn -->\n\u003Cdetails>\n\u003Csummary>SKLearn\u003C\u002Fsummary>\n\n```python\noptimizer = opt.DummyOptPro(iterations=42)\noptimizer.forge_experiment(\n    model_initializer=AdaBoostClassifier,  # (或 SKLearn 的其他数十种算法之一)\n    model_init_params=dict(\n        n_estimators=Integer(75, 150),\n        learning_rate=Real(0.8, 1.3),\n        algorithm='SAMME.R'\n    )\n)\noptimizer.go()\n```\n\u003C\u002Fdetails>\n\u003C!-- XGBoost -->\n\u003Cdetails open>\n\u003Csummary>XGBoost\u003C\u002Fsummary>\n\n```python\noptimizer = opt.BayesianOptPro(iterations=10)\noptimizer.forge_experiment(\n    model_initializer=XGBClassifier,\n    model_init_params=dict(\n        max_depth=Integer(low=2, high=20),\n        learning_rate=Real(0.0001, 0.5),\n        n_estimators=200,\n        subsample=0.5,\n        booster=Categorical(['gbtree', 'gblinear', 'dart']),\n    )\n)\noptimizer.go()\n```\n\u003C\u002Fdetails>\n\u003C!-- LightGBM -->\n\u003Cdetails>\n\u003Csummary>LightGBM\u003C\u002Fsummary>\n\n```python\noptimizer = opt.BayesianOptPro(iterations=100)\noptimizer.forge_experiment(\n    
model_initializer=LGBMClassifier,\n    model_init_params=dict(\n        boosting_type=Categorical(['gbdt', 'dart']),\n        num_leaves=Integer(5, 20),\n        max_depth=-1,\n        min_child_samples=5,\n        subsample=0.5\n    )\n)\noptimizer.go()\n```\n\u003C\u002Fdetails>\n\u003C!-- CatBoost -->\n\u003Cdetails>\n\u003Csummary>CatBoost\u003C\u002Fsummary>\n\n```python\noptimizer = opt.GradientBoostedRegressionTreeOptPro(iterations=32)\noptimizer.forge_experiment(\n    model_initializer=CatBoostClassifier,\n    model_init_params=dict(\n        iterations=100,\n        eval_metric=Categorical(['Logloss', 'Accuracy', 'AUC']),\n        learning_rate=Real(low=0.0001, high=0.5),\n        depth=Integer(4, 7),\n        allow_writing_files=False\n    )\n)\noptimizer.go()\n```\n\u003C\u002Fdetails>\n\u003C!-- RGF -->\n\u003Cdetails>\n\u003Csummary>RGF\u003C\u002Fsummary>\n\n```python\noptimizer = opt.ExtraTreesOptPro(iterations=10)\noptimizer.forge_experiment(\n    model_initializer=RGFClassifier,\n    model_init_params=dict(\n        max_leaf=1000,\n        algorithm=Categorical(['RGF', 'RGF_Opt', 'RGF_Sib']),\n        l2=Real(0.01, 0.3),\n        normalize=Categorical([True, False]),\n        learning_rate=Real(0.3, 0.7),\n        loss=Categorical(['LS', 'Expo', 'Log', 'Abs'])\n    )\n)\noptimizer.go()\n```\n\u003C\u002Fdetails>\n\n输出文件结构\n---------------------\n以下是一个简单的示例，展示了你的 `Experiment` 可能会生成的文件结构。有关目录结构和各文件内容的详细说明，请参阅文档中的 [文件结构概述](https:\u002F\u002Fhyperparameter-hunter.readthedocs.io\u002Fen\u002Flatest\u002Ffile_structure_overview.html) 部分。不过，核心要点如下：\n\n1. 每个 `Experiment` 会在 *HyperparameterHunterAssets\u002FExperiments* 子目录下创建一个以 `experiment_id` 命名的文件。\n2. 每个 `Experiment` 还会向 *HyperparameterHunterAssets\u002FLeaderboards\u002FGlobalLeaderboard.csv* 添加一条记录。\n3. 
你可以通过 `Environment` 的 `file_blacklist` 和 `do_full_save` 参数来自定义要创建的文件（详见 [这里](https:\u002F\u002Fhyperparameter-hunter.readthedocs.io\u002Fen\u002Flatest\u002Fapi_essentials.html#environment)）。\n\n```\nHyperparameterHunterAssets\n|   Heartbeat.log\n|\n└───Experiments\n|   |\n|   └───Descriptions\n|   |   |   \u003C描述实验结果、条件等的文件>.json\n|   |\n|   └───Predictions\u003COOF\u002FHoldout\u002FTest>\n|   |   |   \u003C包含针对指定数据集的实验预测结果的文件>.csv\n|   |\n|   └───Heartbeats\n|   |   |   \u003C包含实验日志的文件>.log\n|   |\n|   └───ScriptBackups\n|       |   \u003C包含创建该实验的脚本副本的文件>.py\n|\n└───Leaderboards\n|   |   GlobalLeaderboard.csv\n|   |   \u003C其他排行榜>.csv\n|\n└───TestedKeys\n|   |   \u003C按 Environment 键命名的文件，包含超参数键>.json\n|\n└───KeyAttributeLookup\n    |   \u003C将实验中使用的复杂对象与其哈希值关联起来的文件>\n```\n\n安装\n------------\n\n```\npip install hyperparameter-hunter\n```\n\n如果你喜欢走在技术前沿，并希望获得最新的功能，请运行：\n\n```\npip install git+https:\u002F\u002Fgithub.com\u002FHunterMcGushion\u002Fhyperparameter_hunter.git\n```\n\n如果你想为 HyperparameterHunter 做贡献，请从 [这里](CONTRIBUTING.md) 开始。\n\n我还是不太明白\n--------------------\n没关系。不用感到不好意思。这确实有点难以理解。下面是一个示例，说明了各个部分是如何相互关联的：\n\n```python\nfrom hyperparameter_hunter import Environment, CVExperiment, BayesianOptPro, Integer\nfrom hyperparameter_hunter.utils.learning_utils import get_breast_cancer_data\nfrom xgboost import XGBClassifier\n\n# 首先创建一个 `Environment` - 这里定义了实验（以及优化）的执行方式\nenv = Environment(\n    train_dataset=get_breast_cancer_data(target='target'),\n    results_path='HyperparameterHunterAssets',\n    metrics=['roc_auc_score'],\n    cv_type='StratifiedKFold',\n    cv_params=dict(n_splits=10, shuffle=True, random_state=32),\n)\n\n# 接下来进行一次 `Experiment`\n# 这会告诉 HyperparameterHunter 使用当前 `Environment` 中的设置来训练具有这些超参数的模型\nexperiment = CVExperiment(\n    model_initializer=XGBClassifier,\n    model_init_params=dict(\n        objective='reg:linear',\n        max_depth=3\n    )\n)\n\n# 就这样。无需繁琐的样板代码来拟合模型并记录结果。\n# 现在，`Environment` 的 `results_path` 
目录中将会出现描述刚刚完成的实验的新文件。\n\n# 现在是有趣的部分。我们将通过首先定义想要使用的 `OptPro`（优化协议）来设置超参数优化。\noptimizer = BayesianOptPro(verbose=1)\n\n# 接下来我们要指定要优化的超参数。\n\n# 注意，这里的写法与我们上面的 `experiment` 完全一致\noptimizer.forge_experiment(\n    model_initializer=XGBClassifier,\n    model_init_params=dict(\n        objective='reg:linear',  # 我们将其设定为一个常量指导原则——这不是需要优化的参数\n        max_depth=Integer(2, 10)  # 与上面的 `experiment` 不同，这里我们提供了一个搜索空间，而不是直接指定一个整数值\n    )\n)\n# 请注意，`max_depth` 的取值范围包含了我们之前 `experiment` 中使用的 `max_depth=3` 这个值\n\noptimizer.go()  # 现在，我们开始运行\n\nassert experiment.experiment_id in [_[2] for _ in optimizer.similar_experiments]\n# 在这里，我们验证最初执行的 `experiment` 已被 `optimizer` 找到，并作为学习素材使用\n# 你也可以通过控制台看到，`optimizer` 找到了该 `experiment` 的保存文件，并以此为基础开始了优化过程\n\nlast_experiment_id = optimizer.current_experiment.experiment_id\n# 让我们保存一下刚刚由 `optimizer` 执行的那次实验的 ID\n\noptimizer.go()  # 现在，我们再次启动 `optimizer`……\n\n# 我们可以看到，第二次优化不仅借鉴了我们第一次的 `experiment`，还吸收了第一次优化的结果\nassert experiment.experiment_id in [_[2] for _ in optimizer.similar_experiments]\nassert last_experiment_id in [_[2] for _ in optimizer.similar_experiments]\n# 更令人惊喜的是，这一切都无需我们手动指定要从哪些实验中学习\n\n# 想象一下，当你的超参数优化能够同时借鉴：\n# - 你过去的所有实验，以及\n# - 你过去的所有优化轮次\n# 会变得多么高效？而最棒的是：HyperparameterHunter 能够自动判断哪些实验是兼容的。\n# 你无需担心告诉它 KFold=5 和 KFold=10 是不同的，\n# 或者 max_depth=12 超出了 max_depth=Integer(2, 10) 的范围。\n```\n\n测试过的库\n----------------\n* [Keras](https:\u002F\u002Fgithub.com\u002FHunterMcGushion\u002Fhyperparameter_hunter\u002Fblob\u002Fmaster\u002Fexamples\u002Flib_keras_example.py)\n* [scikit-learn](https:\u002F\u002Fgithub.com\u002FHunterMcGushion\u002Fhyperparameter_hunter\u002Fblob\u002Fmaster\u002Fexamples\u002Flib_sklearn_example.py)\n* [LightGBM](https:\u002F\u002Fgithub.com\u002FHunterMcGushion\u002Fhyperparameter_hunter\u002Fblob\u002Fmaster\u002Fexamples\u002Flib_lightgbm_example.py)\n* [CatBoost](https:\u002F\u002Fgithub.com\u002FHunterMcGushion\u002Fhyperparameter_hunter\u002Fblob\u002Fmaster\u002Fexamples\u002Flib_catboost_example.py)\n* 
[XGBoost](https:\u002F\u002Fgithub.com\u002FHunterMcGushion\u002Fhyperparameter_hunter\u002Fblob\u002Fmaster\u002Fexamples\u002Fsimple_experiment_example.py)\n* [rgf_python](https:\u002F\u002Fgithub.com\u002FHunterMcGushion\u002Fhyperparameter_hunter\u002Fblob\u002Fmaster\u002Fexamples\u002Flib_rgf_example.py)\n* …更多正在开发中\n\n常见问题\u002FFAQ\n------------\n以下是一些可能会“坑”到你的地方\n\n### 一般情况：\n- **无法向 `OptPro` 提供初始搜索点吗？**\n   - 这是故意设计的。如果你希望优化轮次从特定的搜索点开始（这些点尚未被记录），只需在初始化 `OptPro` 之前先执行一次 `CVExperiment`。\n   - 假设这两者具有相同的指导性超参数，并且 `Experiment` 符合 `OptPro` 定义的搜索空间，优化器就会找到并读取该 `Experiment` 的结果。\n   - 请记住，完成一次后最好移除该 `Experiment`，因为其结果已被保存。如果保留它，优化器会不断重复执行相同的实验。\n- **修改了 `HyperparameterHunterAssets` 目录中的内容后，一切都不工作了？**\n   - 是的，千万不要这样做。尤其是不要动“Descriptions”、“Leaderboards”或“TestedKeys”这些文件。\n   - HyperparameterHunter 会直接读取这些文件来判断当前状态。删除它们或更改内容会导致许多功能失效。\n\n### Keras：\n- **无法找到使用简单 Dense\u002FActivation 层的神经网络的相似实验吗？**\n   - 这很可能是因为你在使用单独的 `Activation` 层，或者在 `Dense` 层中直接使用 `activation` 参数之间切换造成的。\n   - 每一层都被视为一组独立的超参数（同时也是超参数本身），因此对 HyperparameterHunter 来说，以下两种写法并不等价：\n      - ```Dense(10, activation='sigmoid')```\n      - ```Dense(10); Activation('sigmoid')```\n   - 我们正在努力解决这个问题，但目前的应对方法就是保持模型中添加激活层的方式一致：\n      - 要么全部使用单独的 `Activation` 层，要么全部在其他层中使用 `activation` 参数，并坚持一种方式。\n- **无法同时优化 `model.compile` 中的 `optimizer` 和 `optimizer_params` 吗？**\n   - 这是因为 Keras 的不同优化器需要不同的参数。\n   - 例如，当 `optimizer=Categorical(['adam', 'rmsprop'])` 时，对应的 `optimizer_params` 可能是两个完全不同的字典。\n   - 目前，你只能分别优化 `optimizer` 和 `optimizer_params`。\n   - 一个可行的方法是先选择几个你想测试的优化器，而不指定具体的 `optimizer_params` 值。这样每个优化器都会使用其默认参数。\n      - 然后你可以选出表现最好的优化器，将其设置为 `optimizer=\u003C最佳优化器>`，再针对你选定的优化器的具体参数调整 `optimizer_params`。\n\n### CatBoost：\n- **无法找到 CatBoost 的相似实验吗？**\n   - 这可能是因为 CatBoost 模型 `__init__` 方法中预期的参数默认值是在其他地方定义的，而在其函数签名中却被赋予了占位符值 `None`。\n   - 因此，如果用户没有显式地提供某个参数的值，HyperparameterHunter 就会认为该参数的默认值确实是 `None`。\n   - 显然事实并非如此，但我一直未能找到 CatBoost 实际使用的默认值所在位置。如果有哪位朋友知道如何解决这个问题，请务必告知我！","# HyperparameterHunter 
快速上手指南\n\nHyperparameterHunter 是一个机器学习实验管理工具，能够自动记录实验结果、组织数据并利用历史实验进行智能超参数优化。它支持 Scikit-Learn、Keras、XGBoost、LightGBM、CatBoost 等主流库，让你无需重复编写交叉验证和评分代码，专注于模型本身。\n\n## 环境准备\n\n*   **系统要求**：支持 Windows、Linux 和 macOS。\n*   **Python 版本**：建议 Python 3.6 及以上。\n*   **前置依赖**：\n    *   基础科学计算库：`pandas`, `numpy`, `scikit-learn`\n    *   根据你使用的模型框架安装相应库（如 `tensorflow`\u002F`keras`, `xgboost`, `lightgbm`, `catboost` 等）。\n\n## 安装步骤\n\n使用 pip 进行安装。国内用户推荐使用清华或阿里镜像源以加速下载。\n\n```bash\n# 使用默认源安装\npip install hyperparameter-hunter\n\n# 推荐：使用清华大学镜像源安装（国内加速）\npip install hyperparameter-hunter -i https:\u002F\u002Fpypi.tuna.tsinghua.edu.cn\u002Fsimple\n```\n\n## 基本使用\n\nHyperparameterHunter 的核心工作流程分为三步：**配置环境** -> **运行实验** -> **执行优化**。\n\n### 1. 配置环境 (Environment)\n\n首先初始化一个 `Environment`，用于定义数据集、评估指标、交叉验证策略以及结果保存路径。所有后续实验都将在此环境中运行。\n\n```python\nfrom hyperparameter_hunter import Environment, CVExperiment\nimport pandas as pd\nfrom sklearn.datasets import load_breast_cancer\nfrom sklearn.model_selection import StratifiedKFold\n\n# 加载示例数据\ndata = load_breast_cancer()\ndf = pd.DataFrame(data=data.data, columns=data.feature_names)\ndf['target'] = data.target\n\n# 初始化环境\nenv = Environment(\n    train_dataset=df,  # 训练数据集 (也可添加 holdout\u002Ftest 数据)\n    results_path='path\u002Fto\u002Fresults\u002Fdirectory',  # 实验结果保存目录\n    metrics=['roc_auc_score'],  # 评估指标 (支持 sklearn.metrics 中的函数名或 callable)\n    cv_type=StratifiedKFold,  # 交叉验证类型\n    cv_params=dict(n_splits=5, shuffle=True, random_state=32)  # 交叉验证参数\n)\n```\n\n### 2. 
运行单个实验 (Experimentation)\n\n使用 `CVExperiment` 运行单次实验。HyperparameterHunter 会自动处理交叉验证循环、预测和评分，并记录所有超参数和结果。以下以 XGBoost 为例：\n\n```python\nfrom xgboost import XGBClassifier\n\nexperiment = CVExperiment(\n    model_initializer=XGBClassifier,\n    model_init_params=dict(\n        objective='reg:linear', \n        max_depth=3, \n        n_estimators=100, \n        subsample=0.5\n    )\n)\n# 实验运行完毕后，结果会自动保存到 Environment 指定的目录中\n```\n\n> **提示**：该工具同样支持 Keras, Scikit-Learn, LightGBM, CatBoost 等库，只需更换 `model_initializer` 和对应的参数即可。\n\n### 3. 超参数优化 (Optimization)\n\n当需要进行超参数搜索时，利用已积累的实验历史数据进行优化。定义搜索空间（使用 `Integer`, `Real`, `Categorical`），选择优化算法并启动。\n\n```python\nfrom hyperparameter_hunter import Real, Integer, Categorical\nfrom hyperparameter_hunter import optimization as opt\nfrom xgboost import XGBClassifier\n\n# 选择优化算法 (例如：贝叶斯优化)，设置迭代次数\noptimizer = opt.BayesianOptPro(iterations=10)\n\n# 定义实验模板及超参数搜索空间\noptimizer.forge_experiment(\n    model_initializer=XGBClassifier,\n    model_init_params=dict(\n        max_depth=Integer(low=2, high=20),       # 整数范围\n        learning_rate=Real(0.0001, 0.5),         # 浮点数范围\n        n_estimators=200,                        # 固定值\n        subsample=0.5,                           # 固定值\n        booster=Categorical(['gbtree', 'gblinear', 'dart']), # 类别选择\n    )\n)\n\n# 开始优化\n# 注意：优化器会自动读取之前运行过的 Experiment 历史数据，避免从零开始\noptimizer.go()\n```\n\n通过上述步骤，你可以轻松实现从单次实验记录到基于历史数据的智能超参数优化的完整流程。","某电商数据团队正在构建用户流失预测模型，需要在有限时间内对随机森林、XGBoost 等多种算法进行大规模超参数调优。\n\n### 没有 hyperparameter_hunter 时\n- 每次调整参数后需手动编写代码保存结果，实验记录散落在本地文件或笔记中，难以回溯对比。\n- 不同算法（如 Scikit-learn 与 Keras）的实验格式不统一，团队无法直接复用历史数据来指导新的搜索方向。\n- 重复运行了相同的参数组合却不自知，浪费了大量宝贵的 GPU 算力和时间。\n- 缺乏自动化的交叉验证与评分封装，导致核心逻辑被大量样板代码淹没，维护成本极高。\n- 优化过程总是“从零开始”，无法利用项目初期基准测试中积累的宝贵经验。\n\n### 使用 hyperparameter_hunter 后\n- 所有实验的配置、指标及模型产物被自动持久化存储，团队成员可随时查询任意一次历史测试详情。\n- 屏蔽了底层库的差异，统一了实验接口，让系统能智能分析过往所有实验数据以推荐更优参数。\n- 内置去重机制自动识别并跳过已运行过的参数组合，确保每一次计算都在探索新的可能性。\n- 极简的包装器自动处理交叉验证循环与评分逻辑，让工程师专注于特征工程而非繁琐的流程代码。\n- 
将日常的基准测试自动转化为优化知识库，使正式调优阶段能站在“巨人肩膀”上快速收敛。\n\nhyperparameter_hunter 的核心价值在于将离散、易失的实验过程转化为可累积、可进化的长期资产，让每一次尝试都成为下一次成功的基石。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FHunterMcGushion_hyperparameter_hunter_134da0ac.gif","HunterMcGushion","Hunter McGushion","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002FHunterMcGushion_ba382a39.png",null,"hunter@mcgushion.com","https:\u002F\u002Fgithub.com\u002FHunterMcGushion",[82,86,90],{"name":83,"color":84,"percentage":85},"Python","#3572A5",99.3,{"name":87,"color":88,"percentage":89},"Shell","#89e051",0.7,{"name":91,"color":92,"percentage":93},"Makefile","#427819",0,708,100,"2025-12-15T15:23:47","MIT",1,"","未说明",{"notes":102,"python":100,"dependencies":103},"该工具是一个用于自动记录和复用机器学习实验结果的包装器，支持多种主流库（如 Scikit-Learn, Keras, XGBoost, LightGBM, CatBoost, RGF）。安装命令为 `pip install hyperparameter-hunter`。README 中未明确列出具体的操作系统、Python 版本、GPU 或内存硬性要求，通常取决于所使用的基础机器学习库的需求。",[104,105,106,107,108,109,110],"pandas","scikit-learn","keras","xgboost","lightgbm","catboost","rgf",[13,15,51,54,14],[113,114,115,116,117,106,105,107,108,109,118,119,120,110,121,122,123,124,125,126],"artificial-intelligence","machine-learning","hyperparameter-optimization","hyperparameter-tuning","neural-network","deep-learning","data-science","python","sklearn","optimization","experimentation","feature-engineering","ai","ml","2026-03-27T02:49:30.150509","2026-04-06T06:45:55.009350",[130,135,140,145,149,154,158],{"id":131,"question_zh":132,"answer_zh":133,"source_url":134},15175,"如何配置 Environment 以让优化器预测概率值（用于 log_loss 等指标）？","在初始化 `Environment` 时，设置参数 `do_predict_proba=True`。如果模型返回多列概率，也可以将该参数设置为整数（例如 `1`），指定使用哪一列作为实际预测值。默认情况下为 `False`，此时调用 `predict` 方法；设置为 `True` 或整数时，将调用 `predict_proba` 方法。","https:\u002F\u002Fgithub.com\u002FHunterMcGushion\u002Fhyperparameter_hunter\u002Fissues\u002F90",{"id":136,"question_zh":137,"answer_zh":138,"source_url":139},15176,"遇到 'ImportError: cannot import name Log10' 错误怎么办？","这通常是由于 `scikit-optimize` 库的版本不兼容导致的。该问题已在主分支（master 
branch）中修复，但可能尚未包含在正式的发布版本中。解决方法是直接从 GitHub 安装最新的主分支代码：`pip install git+https:\u002F\u002Fgithub.com\u002FHunterMcGushion\u002Fhyperparameter_hunter.git`。","https:\u002F\u002Fgithub.com\u002FHunterMcGushion\u002Fhyperparameter_hunter\u002Fissues\u002F210",{"id":141,"question_zh":142,"answer_zh":143,"source_url":144},15177,"如何在 Keras 模型构建函数中使用自定义参数（如 maxlen, embedding_matrix）？","这是一个已知的作用域问题。维护者已将此问题拆分为多个具体问题跟踪（见 Issue #122, #123, #125）。主要解决方案包括：确保可调用初始化器（callable initializers）的正确匹配，以及正确处理带有初始化权重的 `Embedding` 层。建议查看拆分后的具体 Issue 获取针对 `CuDNNGRU` 层或权重初始化的详细修复方案。","https:\u002F\u002Fgithub.com\u002FHunterMcGushion\u002Fhyperparameter_hunter\u002Fissues\u002F111",{"id":146,"question_zh":147,"answer_zh":148,"source_url":144},15178,"如何在 model_extra_params 中设置验证集分割以监控 val_loss？","可以在 `model_extra_params` 字典中传递 `validation_split` 参数。例如：`model_extra_params={'fit': {'validation_split': 0.2}}`。这样可以在训练过程中监控验证集损失。注意不要与 `validation_data` 混淆，具体用法需参考相关讨论（Issue #124）。",{"id":150,"question_zh":151,"answer_zh":152,"source_url":153},15179,"是否支持非字符串名称的激活函数（如 lrelu）或多层动态数量？","对于高级功能如动态层数或非标准激活函数，库的支持有限。关于回调函数（Callbacks）中使用 `Real` 类型参数（如 `min_delta`）导致类型不兼容的问题，已在后续更新中解决。如果仍遇到类似问题，请确保使用的是最新版本。对于按特征选择缩放列等复杂功能，目前不建议通过该库实现，手动处理可能更直观。","https:\u002F\u002Fgithub.com\u002FHunterMcGushion\u002Fhyperparameter_hunter\u002Fissues\u002F176",{"id":155,"question_zh":156,"answer_zh":157,"source_url":144},15180,"Keras 的 CuDNN 层（如 CuDNNGRU）无法正常工作怎么办？","这是已知问题，维护者已将其单独列为 Issue #125 进行跟踪。该问题涉及特定 Keras 层在超参数搜索环境中的兼容性。建议查阅 Issue #125 获取最新的修复状态或临时变通方案。",{"id":159,"question_zh":160,"answer_zh":161,"source_url":144},15181,"如何处理 Keras Embedding 层带预训练权重（weights 参数）的情况？","在自定义构建函数中传递预训练权重矩阵时可能会遇到作用域或初始化问题。此问题已被分离为 Issue #123 专门讨论。请确保权重矩阵在函数作用域内可见，并参考该 Issue 中关于 `Embedding` 层权重初始化的具体修复建议。",[163,168,173,178,183,188,193,197,201,205,209,213,217,221,225,229],{"id":164,"version":165,"summary_zh":166,"released_at":167},89834,"v3.0.0","v3.0.0 版本正式发布。\n发行说明请参阅 
[更改日志](https:\u002F\u002Fgithub.com\u002FHunterMcGushion\u002Fhyperparameter_hunter\u002Fblob\u002Fmaster\u002FCHANGELOG.md#300-2019-08-06-artemis)。","2019-08-06T09:09:45",{"id":169,"version":170,"summary_zh":171,"released_at":172},89835,"v3.0.0beta1","这是 v3.0.0 的第二个测试版。","2019-08-06T06:16:34",{"id":174,"version":175,"summary_zh":176,"released_at":177},89836,"v3.0.0beta0","这是 v3.0.0 的第一个测试版。","2019-07-14T10:02:13",{"id":179,"version":180,"summary_zh":181,"released_at":182},89837,"v3.0.0alpha2","这是 v3.0.0 的第三个 Alpha 版本。","2019-06-12T08:22:19",{"id":184,"version":185,"summary_zh":186,"released_at":187},89838,"v3.0.0alpha1","这是 v3.0.0 的第二个 Alpha 版本。","2019-06-08T06:03:26",{"id":189,"version":190,"summary_zh":191,"released_at":192},89839,"v3.0.0alpha0","这是 v3.0.0 的第一个 Alpha 版本。","2019-06-07T11:55:32",{"id":194,"version":195,"summary_zh":78,"released_at":196},89840,"v2.2.0","2019-02-11T00:30:31",{"id":198,"version":199,"summary_zh":78,"released_at":200},89841,"v2.1.1","2019-01-16T05:58:59",{"id":202,"version":203,"summary_zh":78,"released_at":204},89842,"v2.1.0","2019-01-16T05:35:18",{"id":206,"version":207,"summary_zh":78,"released_at":208},89843,"v2.0.1","2018-11-25T23:22:55",{"id":210,"version":211,"summary_zh":78,"released_at":212},89844,"v2.0.0","2018-11-17T01:49:22",{"id":214,"version":215,"summary_zh":78,"released_at":216},89845,"v1.1.0","2018-10-05T05:32:34",{"id":218,"version":219,"summary_zh":78,"released_at":220},89846,"v1.0.2","2018-09-04T03:54:00",{"id":222,"version":223,"summary_zh":78,"released_at":224},89847,"v1.0.1","2018-09-04T03:54:38",{"id":226,"version":227,"summary_zh":78,"released_at":228},89848,"v1.0.0","2018-09-04T03:55:52",{"id":230,"version":231,"summary_zh":78,"released_at":232},89849,"v0.0.1","2018-09-04T04:01:35"]