[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-DataScienceUIBK--Rankify":3,"tool-DataScienceUIBK--Rankify":64},[4,17,27,35,43,56],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":16},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,3,"2026-04-05T11:01:52",[13,14,15],"开发框架","图像","Agent","ready",{"id":18,"name":19,"github_repo":20,"description_zh":21,"stars":22,"difficulty_score":23,"last_commit_at":24,"category_tags":25,"status":16},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",138956,2,"2026-04-05T11:33:21",[13,15,26],"语言模型",{"id":28,"name":29,"github_repo":30,"description_zh":31,"stars":32,"difficulty_score":23,"last_commit_at":33,"category_tags":34,"status":16},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 
绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",107662,"2026-04-03T11:11:01",[13,14,15],{"id":36,"name":37,"github_repo":38,"description_zh":39,"stars":40,"difficulty_score":23,"last_commit_at":41,"category_tags":42,"status":16},3704,"NextChat","ChatGPTNextWeb\u002FNextChat","NextChat 是一款轻量且极速的 AI 助手，旨在为用户提供流畅、跨平台的大模型交互体验。它完美解决了用户在多设备间切换时难以保持对话连续性，以及面对众多 AI 模型不知如何统一管理的痛点。无论是日常办公、学习辅助还是创意激发，NextChat 都能让用户随时随地通过网页、iOS、Android、Windows、MacOS 或 Linux 端无缝接入智能服务。\n\n这款工具非常适合普通用户、学生、职场人士以及需要私有化部署的企业团队使用。对于开发者而言，它也提供了便捷的自托管方案，支持一键部署到 Vercel 或 Zeabur 等平台。\n\nNextChat 的核心亮点在于其广泛的模型兼容性，原生支持 Claude、DeepSeek、GPT-4 及 Gemini Pro 等主流大模型，让用户在一个界面即可自由切换不同 AI 能力。此外，它还率先支持 MCP（Model Context Protocol）协议，增强了上下文处理能力。针对企业用户，NextChat 提供专业版解决方案，具备品牌定制、细粒度权限控制、内部知识库整合及安全审计等功能，满足公司对数据隐私和个性化管理的高标准要求。",87618,"2026-04-05T07:20:52",[13,26],{"id":44,"name":45,"github_repo":46,"description_zh":47,"stars":48,"difficulty_score":23,"last_commit_at":49,"category_tags":50,"status":16},2268,"ML-For-Beginners","microsoft\u002FML-For-Beginners","ML-For-Beginners 是由微软推出的一套系统化机器学习入门课程，旨在帮助零基础用户轻松掌握经典机器学习知识。这套课程将学习路径规划为 12 周，包含 26 节精炼课程和 52 道配套测验，内容涵盖从基础概念到实际应用的完整流程，有效解决了初学者面对庞大知识体系时无从下手、缺乏结构化指导的痛点。\n\n无论是希望转型的开发者、需要补充算法背景的研究人员，还是对人工智能充满好奇的普通爱好者，都能从中受益。课程不仅提供了清晰的理论讲解，还强调动手实践，让用户在循序渐进中建立扎实的技能基础。其独特的亮点在于强大的多语言支持，通过自动化机制提供了包括简体中文在内的 50 多种语言版本，极大地降低了全球不同背景用户的学习门槛。此外，项目采用开源协作模式，社区活跃且内容持续更新，确保学习者能获取前沿且准确的技术资讯。如果你正寻找一条清晰、友好且专业的机器学习入门之路，ML-For-Beginners 将是理想的起点。",84991,"2026-04-05T10:45:23",[14,51,52,53,15,54,26,13,55],"数据工具","视频","插件","其他","音频",{"id":57,"name":58,"github_repo":59,"description_zh":60,"stars":61,"difficulty_score":10,"last_commit_at":62,"category_tags":63,"status":16},3128,"ragflow","infiniflow\u002Fragflow","RAGFlow 
是一款领先的开源检索增强生成（RAG）引擎，旨在为大语言模型构建更精准、可靠的上下文层。它巧妙地将前沿的 RAG 技术与智能体（Agent）能力相结合，不仅支持从各类文档中高效提取知识，还能让模型基于这些知识进行逻辑推理和任务执行。\n\n在大模型应用中，幻觉问题和知识滞后是常见痛点。RAGFlow 通过深度解析复杂文档结构（如表格、图表及混合排版），显著提升了信息检索的准确度，从而有效减少模型“胡编乱造”的现象，确保回答既有据可依又具备时效性。其内置的智能体机制更进一步，使系统不仅能回答问题，还能自主规划步骤解决复杂问题。\n\n这款工具特别适合开发者、企业技术团队以及 AI 研究人员使用。无论是希望快速搭建私有知识库问答系统，还是致力于探索大模型在垂直领域落地的创新者，都能从中受益。RAGFlow 提供了可视化的工作流编排界面和灵活的 API 接口，既降低了非算法背景用户的上手门槛，也满足了专业开发者对系统深度定制的需求。作为基于 Apache 2.0 协议开源的项目，它正成为连接通用大模型与行业专有知识之间的重要桥梁。",77062,"2026-04-04T04:44:48",[15,14,13,26,54],{"id":65,"github_repo":66,"name":67,"description_en":68,"description_zh":69,"ai_summary_zh":69,"readme_en":70,"readme_zh":71,"quickstart_zh":72,"use_case_zh":73,"hero_image_url":74,"owner_login":75,"owner_name":76,"owner_avatar_url":77,"owner_bio":78,"owner_company":79,"owner_location":79,"owner_email":79,"owner_twitter":79,"owner_website":79,"owner_url":80,"languages":81,"stars":105,"forks":106,"last_commit_at":107,"license":79,"difficulty_score":23,"env_os":108,"env_gpu":109,"env_ram":110,"env_deps":111,"category_tags":124,"github_topics":125,"view_count":23,"oss_zip_url":79,"oss_zip_packed_at":79,"status":16,"created_at":138,"updated_at":139,"faqs":140,"releases":171},1999,"DataScienceUIBK\u002FRankify","Rankify","🔥 Rankify: A Comprehensive Python Toolkit for Retrieval, Re-Ranking, and Retrieval-Augmented Generation 🔥. 
Our toolkit integrates 40 pre-retrieved benchmark datasets and supports 7+ retrieval techniques, 24+ state-of-the-art Reranking models, and multiple RAG methods.","Rankify 是一个专为信息检索、重排序和检索增强生成（RAG）设计的 Python 工具包，帮助用户轻松搭建和评估检索系统。它内置了 40 个预处理的基准数据集，支持 7 种主流检索方法和 24 个前沿的重排序模型，还能灵活对接多种 RAG 架构，让从检索到生成的全流程实验变得简单高效。传统开发中，整合不同检索模型、数据集和评估指标往往耗时繁琐，Rankify 通过统一接口和模块化设计，显著降低了技术门槛，让研究者能更专注于算法创新而非工程搭建。适合从事自然语言处理、信息检索或大模型应用的开发者与研究人员使用，尤其适合需要快速对比不同检索策略或构建 RAG 系统的团队。其亮点在于集成了 Hugging Face 上的公开数据集，并提供开箱即用的评估工具与 Colab 示例，无需复杂配置即可上手。项目采用 Apache 2.0 许可，代码开源，便于社区协作与二次开发。","\u003Cdiv align=\"center\">\n\n[ [English](README.md) | [中文](README_zh.md)]\n\n\u003Cimg src=\".\u002Fimages\u002Frankify-crop.png\" width=\"300\" style=\"border-radius: 50px;\"\u002F>\n\n### 🔥 Rankify: A Comprehensive Python Toolkit for Retrieval, Re-Ranking, and Retrieval-Augmented Generation 🔥\n\n\u003C!-- First row of badges -->\n\u003Cdiv style=\"display: flex; flex-wrap: wrap; align-items: center; justify-content: center; gap: 8px; margin-bottom: 8px;\">\n  \u003Ca href=\"https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.02464\">\n    \u003Cimg src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FarXiv-b5212f.svg?logo=arxiv\" style=\"height: 24px;\">\n  \u003C\u002Fa>\n  \u003Ca href=\"https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002Fabdoelsayed\u002Freranking-datasets\">\n    \u003Cimg src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F%F0%9F%A4%97%20HuggingFace%20Datasets-27b3b4.svg\" style=\"height: 24px;\">\n  \u003C\u002Fa>\n  \u003Ca href=\"https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002Fabdoelsayed\u002Freranking-datasets-light\">\n    \u003Cimg src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F%F0%9F%A4%97%20HuggingFace%20Datasets%20light-orange.svg\" style=\"height: 24px;\">\n  \u003C\u002Fa>\n  \u003Ca href=\"#\">\n    \u003Cimg src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FPython-3.10_3.11-blue\" style=\"height: 24px;\">\n  \u003C\u002Fa>\n  \u003Ca 
href=\"https:\u002F\u002Fopensource.org\u002Flicense\u002Fapache-2-0\">\n    \u003Cimg src=\"https:\u002F\u002Fimg.shields.io\u002Fstatic\u002Fv1?label=License&message=Apache-2.0&color=red\" style=\"height: 24px;\">\n  \u003C\u002Fa>\n\u003C\u002Fdiv>\n\n\u003C!-- Second row of badges -->\n\u003Cdiv style=\"display: flex; flex-wrap: wrap; align-items: center; justify-content: center; gap: 8px; margin-bottom: 8px;\">\n  \u003Ca href=\"https:\u002F\u002Fpepy.tech\u002Fprojects\u002Frankify\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FDataScienceUIBK_Rankify_readme_cde3de62a85a.png\" style=\"height: 24px;\">\n  \u003C\u002Fa>\n  \u003Ca href=\"https:\u002F\u002Fgithub.com\u002FDataScienceUIBK\u002Frankify\u002Freleases\">\n    \u003Cimg src=\"https:\u002F\u002Fimg.shields.io\u002Fgithub\u002Frelease\u002FDataScienceUIBK\u002Frankify.svg?label=Version&color=orange\" style=\"height: 24px;\">\n  \u003C\u002Fa>\n  \u003Ca href=\"#\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FDataScienceUIBK_Rankify_readme_63c8a6c57065.png\" style=\"height: 24px;\">\n  \u003C\u002Fa>\n  \u003Ca href=\"https:\u002F\u002Fgitcode.com\u002Fabdoelsayed2016\u002FRankify\">\n    \u003Cimg src=\"https:\u002F\u002Fgitcode.com\u002Fabdoelsayed2016\u002FRankify\u002Fstar\u002Fbadge.svg\" >\n  \u003C\u002Fa>\n  \u003Ca href=\"https:\u002F\u002Fcolab.research.google.com\u002Fdrive\u002F1QukxP1WZHkPfD4321UcLXD24sKCpuUuP?usp=sharing\">\u003Cimg style=\"height: 24px;\" src=\"https:\u002F\u002Fimg.shields.io\u002Fstatic\u002Fv1?label=Colab&message=Install_Rankify&logo=Google%20Colab&color=f9ab00\">\u003C\u002Fa>\n\n\u003C\u002Fdiv>\n\n\u003C!-- Product Hunt badge -->\n\u003C!--\u003Cdiv style=\"margin-top: 10px;\">\n  \u003Ca 
href=\"https:\u002F\u002Fwww.producthunt.com\u002Fproducts\u002Fgithub-113?embed=true&utm_source=badge-featured&utm_medium=badge&utm_source=badge-github&#0045;73d2dbbf&#0045;d84f&#0045;495d&#0045;86d8&#0045;af4dd72fc31f\">\n    \u003Cimg src=\"https:\u002F\u002Fapi.producthunt.com\u002Fwidgets\u002Fembed-image\u002Fv1\u002Ffeatured.svg?post_id=980097&theme=light&t=1750416463103\" style=\"height: 40px;\">\n  \u003C\u002Fa>\n\u003C\u002Fdiv>-->\n\n\u003C\u002Fdiv>\n\n\nIf you like our Framework, **don't hesitate to ⭐ star this repository ⭐**. This helps us to **make the Framework more better and scalable to different models and methods 🤗**.\n\n\u003C!-- \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FDataScienceUIBK_Rankify_readme_ae56422bbcb3.gif\" height=50 alt=\"Star the repo   \" \u002F>-->\n\n\n\n\n_A modular and efficient retrieval, reranking  and RAG  framework designed to work with state-of-the-art models for retrieval, ranking and rag tasks._\n\n\u003C!--_Rankify is a Python toolkit designed for unified retrieval, re-ranking, and retrieval-augmented generation (RAG) research. Our toolkit integrates 40 pre-retrieved benchmark datasets and supports 7 retrieval techniques, 24 state-of-the-art re-ranking models, and multiple RAG methods. With a flexible generator architecture supporting multiple endpoints, Rankify provides a modular and extensible framework, enabling seamless experimentation and benchmarking across retrieval pipelines. 
Comprehensive documentation, open-source implementation, and pre-built evaluation tools make Rankify a powerful resource for researchers and practitioners in the field._-->\n\n\u003C!-- \u003Cp align=\"center\">\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FDataScienceUIBK_Rankify_readme_9323eed9257f.png\" width=\"500\" height=\"700\" >\n\u003C\u002Fp> -->\n\n---\n## 🚀 Demo\n\nTo run the demo locally:\n\n```bash\n# Make sure Rankify is installed\npip install streamlit\n\n# Then run the demo\nstreamlit run demo.py\n```\n\nhttps:\u002F\u002Fgithub.com\u002Fuser-attachments\u002Fassets\u002F13184943-55db-4f0c-b509-fde920b809bc\n\n\n---\n\n## :link: Navigation\n- [Features](#-features)\n- [Roadmap](#-roadmap)\n- [Installation](#-installation)\n- [Quick Start](#rocket-quick-start)\n  - [Pipeline API](#-one-line-pipeline-api-recommended)\n  - [RankifyAgent](#-rankifyagent---ai-powered-model-selection)\n  - [Rankify Server](#-rankify-server---deploy-as-rest-api)\n  - [Integrations](#-integrations---use-with-your-stack)\n  - [Web Playground](#-web-playground---interactive-ui)\n- [Indexing](#-indexing-via-cli)\n- [Retrievers](#2️⃣-running-retrieval)\n- [Re-Rankers](#3️⃣-running-reranking)\n- [Generators](#4️⃣-using-generator-module)\n- [Evaluation](#5️⃣-evaluating-with-metrics)\n- [Documentation](#📖-documentation)\n- [Community Contributing](#-Community-Contributions)\n- [Contributing](#-contributing)\n- [License](#bookmark-license)\n- [Acknowledgments](#-acknowledgments)\n- [Citation](#star2-citation)\n\n\n\n\n## 🎉News\n- **[2026-02-16]** Huge thanks to [@JamieHoldcroft](https:\u002F\u002Fgithub.com\u002FJamieHoldcroft) for integrating **15+** new dense retrievers, including SOTA LLM-based bi-encoders (**SFR**, **E5**, **GritLM**) and reasoning-augmented models (**RaDeR**, **ReasonIR**, **ReasonEmbed**, **BGE-Reasoner**).\n\n-  **[2025-10-14]** Updated installation with optional extras: `retriever`, `reranking`, `rag`, and `all`.\n- 
**[2025-10-14]** New **CLI** (`rankify-index`) syntax & examples for **BM25, DPR, ANCE, Contriever, ColBERT, BGE**.\n\n- **[2025-06-11]** Many thanks to [@tobias124](https:\u002F\u002Fgithub.com\u002Ftobias124) for implementing [Indexing](#cli-running-indexing-module) for Custom Dataset.\n\n- **[2025-06-01]** Many thanks to [@aherzinger](https:\u002F\u002Fgithub.com\u002Faherzinger) for implementing and refactoring the Generator and RAG models.\n\n- **[2025-05-30]** Huge thanks to [@baraayusry](https:\u002F\u002Fgithub.com\u002Fbaraayusry) for implementing the Online Retriever using CrawAI and ReACT.\n\n- **[2025-02-10]** Released [reranking-datasets](https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002Fabdoelsayed\u002Freranking-datasets) and [reranking-datasets-light](https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002Fabdoelsayed\u002Freranking-datasets-light) on Hugging Face.\n\n- **[2025-02-04]** Our [paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.02464) is released on arXiv.\n\n\n## 🔧 Installation  \n\n#### Set up the virtual environment\nFirst, create and activate a conda environment with Python 3.10:\n\n```bash\nconda create -n rankify python=3.10\nconda activate rankify\n```\n#### Install PyTorch 2.5.1\nwe recommend installing Rankify with PyTorch 2.5.1 for Rankify. Refer to the [PyTorch installation page](https:\u002F\u002Fpytorch.org\u002Fget-started\u002Fprevious-versions\u002F) for platform-specific installation commands. 
\n\nIf you have access to GPUs, it's recommended to install the CUDA version 12.4 or 12.6 of PyTorch, as many of the evaluation metrics are optimized for GPU use.\n\nTo install Pytorch 2.5.1 you can install it from the following cmd\n```bash\npip install torch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1 --index-url https:\u002F\u002Fdownload.pytorch.org\u002Fwhl\u002Fcu124\n```\n\n\n#### Basic Installation\n\nTo install **Rankify**, simply use **pip** (requires Python 3.10+):  \n```base\npip install rankify\n```\n\n\n#### Recommended Installation  \n\nFor full functionality, we **recommend installing Rankify with all dependencies**:\n```bash\npip install \"rankify[all]\"\n```\nThis ensures you have all necessary modules, including retrieval, re-ranking, and RAG support.\n\n#### Optional Dependencies\n\nIf you prefer to install only specific components, choose from the following:\n```bash\n# Retrieval stack (BM25, dense retrievers, web tools)\npip install \"rankify[retriever]\"\n\n# Install base re-ranking with vLLM support for `FirstModelReranker`, `LiT5ScoreReranker`, `LiT5DistillReranker`, `VicunaReranker`, and `ZephyrReranker'.\npip install \"rankify[reranking]\"\n\n# RAG endpoints (OpenAI, LiteLLM, vLLM clients)\npip install \"rankify[rag]\"\n```\n\nOr, to install from **GitHub** for the latest development version:  \n\n\n```bash\ngit clone https:\u002F\u002Fgithub.com\u002FDataScienceUIBK\u002Frankify.git\ncd rankify\npip install -e .\n# For full functionality we recommend installing Rankify with all dependencies:\npip install -e \".[all]\"\n# Install dependencies for retrieval only (BM25, DPR, ANCE, etc.)\npip install -e \".[retriever]\"\n# Install base re-ranking with vLLM support for `FirstModelReranker`, `LiT5ScoreReranker`, `LiT5DistillReranker`, `VicunaReranker`, and `ZephyrReranker'.\npip install -e \".[reranking]\"\n# RAG endpoints (OpenAI, LiteLLM, vLLM clients)\npip install -e \".[rag]\"\n```\n\n\n\n#### Using ColBERT Retriever  \n\nIf you want to 
use **ColBERT Retriever**, follow these additional setup steps:\n```bash\n# Install GCC and required libraries\nconda install -c conda-forge gcc=9.4.0 gxx=9.4.0\nconda install -c conda-forge libstdcxx-ng\n```\n```bash\n# Export necessary environment variables\nexport LD_LIBRARY_PATH=$CONDA_PREFIX\u002Flib:$LD_LIBRARY_PATH\nexport CC=gcc\nexport CXX=g++\nexport PATH=$CONDA_PREFIX\u002Fbin:$PATH\n\n# Clear cached torch extensions\nrm -rf ~\u002F.cache\u002Ftorch_extensions\u002F*\n```\n\n\n## :rocket: Quick Start\n\n### 🚀 **One-Line Pipeline API** (Recommended)\n\nThe **simplest way** to use Rankify - HuggingFace-style one-line interface:\n\n```python\nfrom rankify import pipeline\n\n# Create a RAG pipeline with intelligent defaults\nrag = pipeline(\"rag\")\nanswers = rag(\"What is machine learning?\", documents)\n\n# Or customize your configuration\nrag = pipeline(\n    \"rag\",\n    retriever=\"bge\",           # State-of-the-art dense retriever\n    reranker=\"flashrank\",      # Ultra-fast reranker\n    generator=\"basic-rag\"\n)\n```\n\n**Available Pipeline Types:**\n- `pipeline(\"search\")` - Document retrieval only\n- `pipeline(\"rerank\")` - Retrieve + rerank\n- `pipeline(\"rag\")` - Full RAG pipeline (retrieve + rerank + generate)\n\n📖 **[Pipeline API Documentation](https:\u002F\u002Frankify.readthedocs.io\u002Fen\u002Flatest\u002Ftutorials\u002Fpipeline\u002F)**\n\n---\n\n### 🤖 **RankifyAgent** - AI-Powered Model Selection\n\nLet AI help you choose the best models for your use case:\n\n```python\nfrom rankify.agent import RankifyAgent, recommend\n\n# Quick recommendation\nresult = recommend(task=\"qa\", gpu=True)\nprint(f\"Best Retriever: {result.retriever.name}\")\nprint(f\"Best Reranker: {result.reranker.name}\")\n\n# Conversational agent\nagent = RankifyAgent(backend=\"azure\")  # or \"openai\", \"litellm\", \"local\"\nresponse = agent.chat(\"I need a fast search system for production\")\nprint(response.message)\nprint(response.code_snippet)  # 
Ready-to-use code\n```\n\n📖 **[RankifyAgent Documentation](https:\u002F\u002Frankify.readthedocs.io\u002Fen\u002Flatest\u002Ftutorials\u002Fagent\u002F)**\n\n---\n\n### 🌐 **Rankify Server** - Deploy as REST API\n\nStart a production-ready server in one command:\n\n```bash\n# CLI\nrankify serve --port 8000 --retriever bge --reranker flashrank\n\n# Or in Python\nfrom rankify.server import RankifyServer\nserver = RankifyServer(retriever=\"bge\", reranker=\"flashrank\")\nserver.start(port=8000)\n```\n\n**API Endpoints:**\n- `POST \u002Fretrieve` - Document retrieval\n- `POST \u002Frerank` - Rerank documents  \n- `POST \u002Frag` - Full RAG generation\n- `GET \u002Fhealth` - Health check\n\n```bash\n# Example API call\ncurl -X POST http:\u002F\u002Flocalhost:8000\u002Frag \\\n  -H \"Content-Type: application\u002Fjson\" \\\n  -d '{\"query\": \"What is AI?\", \"n_contexts\": 5}'\n```\n\n📖 **[Server Documentation](https:\u002F\u002Frankify.readthedocs.io\u002Fen\u002Flatest\u002Ftutorials\u002Fpipeline\u002Fserver\u002F)**\n\n---\n\n### 🔌 **Integrations** - Use with Your Stack\n\nSeamlessly integrate with LangChain, LlamaIndex, and more:\n\n```python\n# LangChain\nfrom rankify.integrations import LangChainRetriever\nfrom langchain.chains import RetrievalQA\n\nretriever = LangChainRetriever(method=\"bge\", reranker=\"flashrank\")\nchain = RetrievalQA.from_chain_type(llm=your_llm, retriever=retriever)\n\n# LlamaIndex\nfrom rankify.integrations import LlamaIndexRetriever\nretriever = LlamaIndexRetriever(method=\"colbert\", reranker=\"monot5\")\n```\n\n📖 **[Integrations Documentation](https:\u002F\u002Frankify.readthedocs.io\u002Fen\u002Flatest\u002Ftutorials\u002Fpipeline\u002Fintegrations\u002F)**\n\n---\n\n### 🎨 **Web Playground** - Interactive UI\n\nLaunch an interactive Gradio interface:\n\n```python\nfrom rankify.ui import launch_playground\nlaunch_playground(port=7860)\n```\n\nTry models, compare results, and export code - all in your browser!\n\n---\n\n### 1️⃣ 
**Traditional Workflow** (For Advanced Users)\n\n#### **Pre-retrieved Datasets**  \n\nWe provide **40+ benchmark datasets** with **1,000 pre-retrieved documents** each:  \n\n🔗 **[Hugging Face Dataset Repository](https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002Fabdoelsayed\u002Freranking-datasets-light)**  \n\n#### **Dataset Format**  \n\n```json\n[\n    {\n        \"question\": \"...\",\n        \"answers\": [\"...\", \"...\", ...],\n        \"ctxs\": [\n            {\n                \"id\": \"...\",         \u002F\u002F Passage ID\n                \"score\": \"...\",      \u002F\u002F Retriever score\n                \"has_answer\": true|false\n            }\n        ]\n    }\n]\n```\n\n#### **List Available Datasets**  \n\n```python\nfrom rankify.dataset.dataset import Dataset \nDataset.available_dataset()  # Fixed typo: avaiable -> available\n```\n\n#### **Download Datasets**\n\n```python\nfrom rankify.dataset.dataset import Dataset\n\n# Download BM25-retrieved documents\ndataset = Dataset(retriever=\"bm25\", dataset_name=\"nq-dev\", n_docs=100)\ndocuments = dataset.download(force_download=False)\n\n# Load from file\ndocuments = Dataset.load_dataset('.\u002Fpath\u002Fto\u002Fdataset.json', n_docs=100)\n```\n\n\n\u003C!-- #### Feature Comparison for Pre-Retrieved Datasets  \n\nThe following table provides an overview of the availability of different retrieval methods (**BM25, DPR, ColBERT, ANCE, BGE, Contriever**) for each dataset.  
\n\n✅ **Completed**\n⏳  **Part Completed, Pending other Parts**\n🕒 **Pending**  \n\n\u003Ctable style=\"width: 100%;\">\n  \u003Ctr>\n    \u003Cth align=\"center\">Dataset\u003C\u002Fth> \n    \u003Cth align=\"center\">BM25\u003C\u002Fth> \n    \u003Cth align=\"center\">DPR\u003C\u002Fth> \n    \u003Cth align=\"center\">ColBERT\u003C\u002Fth>\n    \u003Cth align=\"center\">ANCE\u003C\u002Fth>\n    \u003Cth align=\"center\">BGE\u003C\u002Fth>\n    \u003Cth align=\"center\">Contriever\u003C\u002Fth>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">2WikimultihopQA\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">ArchivialQA\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">ChroniclingAmericaQA\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">EntityQuestions\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd 
align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">AmbigQA\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">ARC\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">ASQA\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">MS MARCO\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">AY2\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">Bamboogle\u003C\u002Ftd>\n    \u003Ctd 
align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">BoolQ\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">CommonSenseQA\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">CuratedTREC\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">⏳\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">ELI5\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">FERMI\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    
\u003Ctd align=\"center\">⏳\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">FEVER\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">HellaSwag\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">HotpotQA\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">MMLU\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">Musique\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  
\u003C\u002Ftr>\n    \u003Ctr>\n    \u003Ctd align=\"left\">NarrativeQA\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">⏳\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n    \u003Ctr>\n    \u003Ctd align=\"left\">NQ\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">⏳\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n    \u003Ctr>\n    \u003Ctd align=\"left\">OpenbookQA\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n    \u003Ctr>\n    \u003Ctd align=\"left\">PIQA\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n    \u003Ctr>\n    \u003Ctd align=\"left\">PopQA\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">⏳\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n    \u003Ctr>\n    \u003Ctd align=\"left\">Quartz\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n  
  \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n    \u003Ctr>\n    \u003Ctd align=\"left\">SIQA\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n    \u003Ctr>\n    \u003Ctd align=\"left\">StrategyQA\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n    \u003C\u002Ftr>\n    \u003Ctr>\n    \u003Ctd align=\"left\">TREX\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n    \u003C\u002Ftr>\n    \u003Ctr>\n    \u003Ctd align=\"left\">TriviaQA\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">⏳\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n    \u003C\u002Ftr>\n    \u003Ctr>\n    \u003Ctd align=\"left\">TruthfulQA\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd 
align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n      \u003C\u002Ftr>\n    \u003Ctr>\n    \u003Ctd align=\"left\">TruthfulQA\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n      \u003C\u002Ftr>\n    \u003Ctr>\n    \u003Ctd align=\"left\">WebQ\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">⏳\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n      \u003C\u002Ftr>\n    \u003Ctr>\n    \u003Ctd align=\"left\">WikiQA\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">⏳\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n      \u003C\u002Ftr>\n    \u003Ctr>\n    \u003Ctd align=\"left\">WikiAsp\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n        \u003C\u002Ftr>\n    \u003Ctr>\n    \u003Ctd align=\"left\">WikiPassageQA\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd 
align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">⏳\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n        \u003C\u002Ftr>\n    \u003Ctr>\n    \u003Ctd align=\"left\">WNED\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n        \u003C\u002Ftr>\n    \u003Ctr>\n    \u003Ctd align=\"left\">WoW\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n        \u003C\u002Ftr>\n    \u003Ctr>\n    \u003Ctd align=\"left\">Zsre\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n\u003C\u002Ftable> -->\n\n## 🧱 Indexing via CLI\nThe CLI entrypoint is **`rankify-index`** with a subcommand **`index`**.\n\n**Common flags**\n- `corpus_path` (positional): path to JSONL corpus.\n- `--retriever {bm25,dpr,ance,contriever,colbert,bge}`.\n- `--output PATH` (default: `rankify_indices`).\n- `--index_type {wiki,msmarco}` (default: `wiki`).\n- `--threads INT` (default: `32`, sparse & some dense prep).\n- `--device {cpu,cuda}` (default: retriever‑specific, typically `cuda`).\n- `--batch_size INT` (dense encoders \u002F Faiss add batches).\n- `--encoder MODEL` (dense encoders only; sensible defaults 
used if omitted).\n\n> **Index layout**\n> - BM25 → `\u003Coutput>\u002F\u003Cstem>\u002Fbm25_index`\n> - DPR   → `\u003Coutput>\u002F\u003Cstem>\u002Fdpr_index_\u003Cindex_type>`\n> - ANCE  → `\u003Coutput>\u002F\u003Cstem>\u002Fance_index_\u003Cindex_type>`\n> - BGE   → `\u003Coutput>\u002F\u003Cstem>\u002Fbge_index_\u003Cindex_type>`\n> - Contriever → `\u003Coutput>\u002F\u003Cstem>\u002Fcontriever_index_\u003Cindex_type>`\n> - ColBERT    → `\u003Coutput>\u002F\u003Cstem>\u002Fcolbert_index_\u003Cindex_type>`\n\n### BM25\n```bash\nrankify-index index data\u002Fwikipedia_10k.jsonl \\\n  --retriever bm25 \\\n  --output .\u002Findices\n```\n\n### DPR (single‑encoder by default)\n```bash\n# Wikipedia style\nrankify-index index data\u002Fwikipedia_100.jsonl \\\n  --retriever dpr \\\n  --encoder facebook\u002Fdpr-ctx_encoder-single-nq-base \\\n  --batch_size 16 --device cuda \\\n  --output .\u002Findices\n\n# MS MARCO\nrankify-index index data\u002Fmsmarco_100.jsonl \\\n  --retriever dpr --index_type msmarco \\\n  --encoder facebook\u002Fdpr-ctx_encoder-single-nq-base \\\n  --batch_size 16 --device cuda \\\n  --output .\u002Findices\n```\n\n### ANCE\n```bash\nrankify-index index data\u002Fwikipedia_100.jsonl \\\n  --retriever ance \\\n  --encoder castorini\u002Fance-dpr-context-multi \\\n  --batch_size 16 --device cuda \\\n  --output .\u002Findices\n```\n\n### Contriever\n```bash\nrankify-index index data\u002Fwikipedia_100.jsonl \\\n  --retriever contriever \\\n  --encoder facebook\u002Fcontriever-msmarco \\\n  --batch_size 16 --device cuda \\\n  --output .\u002Findices\n```\n\n### ColBERT\n```bash\nrankify-index index data\u002Fwikipedia_100.jsonl \\\n  --retriever colbert \\\n  --batch_size 32 --device cuda \\\n  --output .\u002Findices\n```\n\n### BGE\n```bash\nrankify-index index data\u002Fwikipedia_100.jsonl \\\n  --retriever bge \\\n  --encoder BAAI\u002Fbge-large-en-v1.5 \\\n  --batch_size 16 --device cuda \\\n  --output .\u002Findices\n```\n\n\n---\n\n### 2️⃣ 
Running Retrieval\nTo perform retrieval using **Rankify**, you can choose from various retrieval methods such as **BM25, DPR, ANCE, Contriever, ColBERT, BGE, Sbert, Nomic, Instructor, DiverRetriever, SFR, E5, RaDeR, M2, GritLM, ReasonEmbed, ReasonIR and BGEReasoner**.  \n\n### Step 1: Setup example queries\n\n**Example: Running Retrieval on Sample Queries**  \n```python\nfrom rankify.dataset.dataset import Document, Question, Answer, Context\nfrom rankify.retrievers.retriever import Retriever\n\n# Sample Documents\ndocuments = [\n    Document(question=Question(\"the cast of a good day to die hard?\"), answers=Answer([\n            \"Jai Courtney\",\n            \"Sebastian Koch\",\n            \"Radivoje Bukvić\",\n            \"Yuliya Snigir\",\n            \"Sergei Kolesnikov\",\n            \"Mary Elizabeth Winstead\",\n            \"Bruce Willis\"\n        ]), contexts=[]),\n    Document(question=Question(\"Who wrote Hamlet?\"), answers=Answer([\"Shakespeare\"]), contexts=[])\n]\n```\n### Step 2: Choose Retrieval Option\n\n**Option A:** \nRetrieval with ```index_type``` (e.g., ```\"wiki\"```, ```\"msmarco\"```) to load pre-computed FAISS indices.\n\n```python\n# BM25 retrieval on Wikipedia\nbm25_retriever_wiki = Retriever(method=\"bm25\", n_docs=5, index_type=\"wiki\")\n\n# BM25 retrieval on MS MARCO\nbm25_retriever_msmarco = Retriever(method=\"bm25\", n_docs=5, index_type=\"msmarco\")\n\n\n# DPR (multi-encoder) retrieval on Wikipedia\ndpr_retriever_wiki = Retriever(method=\"dpr\", model=\"dpr-multi\", n_docs=5, index_type=\"wiki\")\n\n# DPR (multi-encoder) retrieval on MS MARCO\ndpr_retriever_msmarco = Retriever(method=\"dpr\", model=\"dpr-multi\", n_docs=5, index_type=\"msmarco\")\n\n\n# DPR (single-encoder) retrieval on Wikipedia\ndpr_retriever_wiki = Retriever(method=\"dpr\", model=\"dpr-single\", n_docs=5, index_type=\"wiki\")\n\n# DPR (single-encoder) retrieval on MS MARCO\ndpr_retriever_msmarco = Retriever(method=\"dpr\", model=\"dpr-single\", n_docs=5, 
index_type=\"msmarco\")\n\n\n# ANCE retrieval on Wikipedia\nance_retriever_wiki = Retriever(method=\"ance\", model=\"ance-multi\", n_docs=5, index_type=\"wiki\")\n\n# ANCE retrieval on MS MARCO\nance_retriever_msmarco = Retriever(method=\"ance\", model=\"ance-multi\", n_docs=5, index_type=\"msmarco\")\n\n\n# Contriever retrieval on Wikipedia\ncontriever_retriever_wiki = Retriever(method=\"contriever\", model=\"facebook\u002Fcontriever-msmarco\", n_docs=5, index_type=\"wiki\")\n\n# Contriever retrieval on MS MARCO\ncontriever_retriever_msmarco = Retriever(method=\"contriever\", model=\"facebook\u002Fcontriever-msmarco\", n_docs=5, index_type=\"msmarco\")\n\n\n# ColBERT retrieval on Wikipedia\ncolbert_retriever_wiki = Retriever(method=\"colbert\", model=\"colbert-ir\u002Fcolbertv2.0\", n_docs=5, index_type=\"wiki\")\n\n# ColBERT retrieval on MS MARCO\ncolbert_retriever_msmarco = Retriever(method=\"colbert\", model=\"colbert-ir\u002Fcolbertv2.0\", n_docs=5, index_type=\"msmarco\")\n\n\n# BGE retrieval on Wikipedia\nbge_retriever_wiki = Retriever(method=\"bge\", model=\"BAAI\u002Fbge-large-en-v1.5\", n_docs=5, index_type=\"wiki\")\n\n# BGE retrieval on MS MARCO\nbge_retriever_msmarco = Retriever(method=\"bge\", model=\"BAAI\u002Fbge-large-en-v1.5\", n_docs=5, index_type=\"msmarco\")\n\n\n# Hyde retrieval on Wikipedia\nhyde_retriever_wiki = Retriever(method=\"hyde\" , n_docs=5, index_type=\"wiki\", api_key=OPENAI_API_KEY )\n\n# Hyde retrieval on MS MARCO\nhyde_retriever_msmarco = Retriever(method=\"hyde\", n_docs=5, index_type=\"msmarco\", api_key=OPENAI_API_KEY)\n```\n\n**Option B:**\nRetrieval with custom datasets and automated caching.\n\nFeaturing some of the latest 7B+ parameter models, all of the models below are purposed only for usage with custom datasets. 
\n\nSimply pass a ```.jsonl``` file to ```corpus_path```, ensuring your data maps to the required ```id:``` and ```text:``` fields, and the model will embed and cache the data locally on the first run.\n\n```python\n# Bi-encoders as implemented in the diver framework (11 configurable models, specified by model_id)\nbge_large_retriever = Retriever(method=\"diver-dense\", model_id=\"bge\", corpus_path=\"data\u002Fmy_corpus.jsonl\", encode_batch_size=8, n_docs=5)\n\nsbert_retriever = Retriever(method=\"diver-dense\", model_id=\"sbert\", corpus_path=\"data\u002Fmy_corpus.jsonl\", encode_batch_size=8, n_docs=5)\n\ninst_l_retriever = Retriever(method=\"diver-dense\", model_id=\"inst-l\", corpus_path=\"data\u002Fmy_corpus.jsonl\", encode_batch_size=8, n_docs=5)\n\ninst_xl_retriever = Retriever(method=\"diver-dense\", model_id=\"inst-xl\", corpus_path=\"data\u002Fmy_corpus.jsonl\", encode_batch_size=8, n_docs=5)\n\nsfr_retriever = Retriever(method=\"diver-dense\", model_id=\"sf\", corpus_path=\"data\u002Fmy_corpus.jsonl\", encode_batch_size=8, n_docs=5)\n\ne5_retriever = Retriever(method=\"diver-dense\", model_id=\"e5\", corpus_path=\"data\u002Fmy_corpus.jsonl\", encode_batch_size=8, n_docs=5)\n\ncontriever_retriever = Retriever(method=\"diver-dense\", model_id=\"contriever\", corpus_path=\"data\u002Fmy_corpus.jsonl\", encode_batch_size=8, n_docs=5)\n\nm2_retriever = Retriever(method=\"diver-dense\", model_id=\"m2\", corpus_path=\"data\u002Fmy_corpus.jsonl\", encode_batch_size=8, n_docs=5)\n\ngrit_retriever = Retriever(method=\"diver-dense\", model_id=\"grit\", corpus_path=\"data\u002Fmy_corpus.jsonl\", encode_batch_size=8, n_docs=5)\n\nrader_retriever = Retriever(method=\"diver-dense\", model_id=\"rader\", corpus_path=\"data\u002Fmy_corpus.jsonl\", encode_batch_size=8, n_docs=5)\n\nnomic_retriever = Retriever(method=\"diver-dense\", model_id=\"nomic\", corpus_path=\"data\u002Fmy_corpus.jsonl\", encode_batch_size=4, n_docs=5)\n\ndiver_retriever = 
Retriever(method=\"diver-dense\", model_id=\"diver\", corpus_path=\"data\u002Fmy_corpus.jsonl\", encode_batch_size=4, n_docs=5)\n\n\n# Reasonir retrieval \nreasonir_retriever = Retriever(method=\"reasonir\", corpus_path=\"data\u002Fmy_corpus.jsonl\", encode_batch_size=4, n_docs=5)\n\n\n# ReasonEmbed retrieval (3 configurable models specified by model_id)\nreasonembed_qwen8b_retriever = Retriever(method=\"reason-embed\", model_id=\"qwen3-8b\", corpus_path=\"data\u002Fmy_corpus.jsonl\", encode_batch_size=8, n_docs=5)\n\nreasonembed_qwen4b_retriever = Retriever(method=\"reason-embed\", model_id=\"qwen3-4b\", corpus_path=\"data\u002Fmy_corpus.jsonl\", encode_batch_size=8, n_docs=5)\n\nreasonembed_llama8b_retriever = Retriever(method=\"reason-embed\", model_id=\"llama-8b\", corpus_path=\"data\u002Fmy_corpus.jsonl\", encode_batch_size=8, n_docs=5)\n\n\n# BgeReasonEmbed retrieval\nbge_reasoner_retriever = Retriever(method=\"bge-reasoner-embed\", corpus_path=\"data\u002Fmy_corpus.jsonl\", encode_batch_size=8, n_docs=5)\n```\n### Retrieval Example: ReasonIR on the BRIGHT Benchmark (Biology queries)\nThis example demonstrates how to evaluate the `reasonir\u002FReasonIR-8B` model on the reasoning-intensive BRIGHT benchmark.\n\n```python\nfrom datasets import load_dataset\nfrom rankify.dataset.dataset import Document, Question, Answer\nfrom rankify.retrievers.retriever import Retriever\n\ncorpus_path = \"bright_biology_corpus.jsonl\"       # .jsonl corpus for retrieval\n\ndocs = load_dataset(\"xlangai\u002FBRIGHT\", \"documents\", split=\"biology\")\ndocs.to_json(corpus_path, force_ascii=False)  \n\nqueries = load_dataset(\"xlangai\u002FBRIGHT\", \"examples\", split=\"biology\")\n    \ndocuments = []\nfor item in queries:\n    doc = Document(id=item[\"id\"], \n                   question=Question(question=item[\"query\"]), \n                   answers=Answer(answers=item.get(\"gold_ids\", [])))\n    documents.append(doc)\n    \nretriever = Retriever(\n    method=\"reasonir\",  
          # Use ReasonIR retriever\n    n_docs=3,                     # Retrieve top 3 documents per query\n    corpus_path=corpus_path,      # Path to the JSONL we just created\n    text_field=\"content\",         # BRIGHT uses 'content' instead of 'text'\n    batch_size=4,\n)\n\nresults = retriever.retrieve(documents)\n\n```\n\n### Step 3: Execute and View Results\n**Running Retrieval**\n\nAfter defining the retriever, you can retrieve documents using:\n```python\nretrieved_documents = bm25_retriever_wiki.retrieve(documents)\n\nfor i, doc in enumerate(retrieved_documents):\n    print(f\"\\nDocument {i+1}:\")\n    print(doc)\n```\n\n---\n## 3️⃣ Running Reranking\nRankify provides support for multiple reranking models. Below are examples of how to use each model.  \n\n**Example: Reranking a Document**  \n```python\nfrom rankify.dataset.dataset import Document, Question, Answer, Context\nfrom rankify.models.reranking import Reranking\n\n# Sample document setup\nquestion = Question(\"When did Thomas Edison invent the light bulb?\")\nanswers = Answer([\"1879\"])\ncontexts = [\n    Context(text=\"Lightning strike at Seoul National University\", id=1),\n    Context(text=\"Thomas Edison tried to invent a device for cars but failed\", id=2),\n    Context(text=\"Coffee is good for diet\", id=3),\n    Context(text=\"Thomas Edison invented the light bulb in 1879\", id=4),\n    Context(text=\"Thomas Edison worked with electricity\", id=5),\n]\ndocument = Document(question=question, answers=answers, contexts=contexts)\n\n# Initialize the reranker\nreranker = Reranking(method=\"monot5\", model_name=\"monot5-base-msmarco\")\n\n# Apply reranking\nreranker.rank([document])\n\n# Print reordered contexts\nfor context in document.reorder_contexts:\n    print(f\"  - {context.text}\")\n```\n\n\n**Examples of Using Different Reranking Models**  \n```python\n# UPR\nmodel = Reranking(method='upr', model_name='t5-base')\n\n# API-Based Rerankers\nmodel = Reranking(method='apiranker', 
model_name='voyage', api_key='your-api-key')\nmodel = Reranking(method='apiranker', model_name='jina', api_key='your-api-key')\nmodel = Reranking(method='apiranker', model_name='mixedbread.ai', api_key='your-api-key')\n\n# Blender Reranker\nmodel = Reranking(method='blender_reranker', model_name='PairRM')\n\n# ColBERT Reranker\nmodel = Reranking(method='colbert_ranker', model_name='Colbert')\n\n# EchoRank\nmodel = Reranking(method='echorank', model_name='flan-t5-large')\n\n# First Ranker\nmodel = Reranking(method='first_ranker', model_name='base')\n\n# FlashRank\nmodel = Reranking(method='flashrank', model_name='ms-marco-TinyBERT-L-2-v2')\n\n# InContext Reranker\nReranking(method='incontext_reranker', model_name='llamav3.1-8b')\n\n# InRanker\nmodel = Reranking(method='inranker', model_name='inranker-small')\n\n# ListT5\nmodel = Reranking(method='listt5', model_name='listt5-base')\n\n# LiT5 Distill\nmodel = Reranking(method='lit5distill', model_name='LiT5-Distill-base')\n\n# LiT5 Score\nmodel = Reranking(method='lit5score', model_name='LiT5-Distill-base')\n\n# LLM Layerwise Ranker\nmodel = Reranking(method='llm_layerwise_ranker', model_name='bge-multilingual-gemma2')\n\n# LLM2Vec\nmodel = Reranking(method='llm2vec', model_name='Meta-Llama-31-8B')\n\n# MonoBERT\nmodel = Reranking(method='monobert', model_name='monobert-large')\n\n# MonoT5\nReranking(method='monot5', model_name='monot5-base-msmarco')\n\n# RankGPT\nmodel = Reranking(method='rankgpt', model_name='llamav3.1-8b')\n\n# RankGPT API\nmodel = Reranking(method='rankgpt-api', model_name='gpt-3.5', api_key=\"gpt-api-key\")\nmodel = Reranking(method='rankgpt-api', model_name='gpt-4', api_key=\"gpt-api-key\")\nmodel = Reranking(method='rankgpt-api', model_name='llamav3.1-8b', api_key=\"together-api-key\")\nmodel = Reranking(method='rankgpt-api', model_name='claude-3-5', api_key=\"claude-api-key\")\n\n# RankT5\nmodel = Reranking(method='rankt5', model_name='rankt5-base')\n\n# Sentence Transformer Reranker\nmodel = 
Reranking(method='sentence_transformer_reranker', model_name='all-MiniLM-L6-v2')\nmodel = Reranking(method='sentence_transformer_reranker', model_name='gtr-t5-base')\nmodel = Reranking(method='sentence_transformer_reranker', model_name='sentence-t5-base')\nmodel = Reranking(method='sentence_transformer_reranker', model_name='distilbert-multilingual-nli-stsb-quora-ranking')\nmodel = Reranking(method='sentence_transformer_reranker', model_name='msmarco-bert-co-condensor')\n\n# SPLADE\nmodel = Reranking(method='splade', model_name='splade-cocondenser')\n\n# Transformer Ranker\nmodel = Reranking(method='transformer_ranker', model_name='mxbai-rerank-xsmall')\nmodel = Reranking(method='transformer_ranker', model_name='bge-reranker-base')\nmodel = Reranking(method='transformer_ranker', model_name='bce-reranker-base')\nmodel = Reranking(method='transformer_ranker', model_name='jina-reranker-tiny')\nmodel = Reranking(method='transformer_ranker', model_name='gte-multilingual-reranker-base')\nmodel = Reranking(method='transformer_ranker', model_name='nli-deberta-v3-large')\nmodel = Reranking(method='transformer_ranker', model_name='ms-marco-TinyBERT-L-6')\nmodel = Reranking(method='transformer_ranker', model_name='msmarco-MiniLM-L12-en-de-v1')\n\n# TwoLAR\nmodel = Reranking(method='twolar', model_name='twolar-xl')\n\n# Vicuna Reranker\nmodel = Reranking(method='vicuna_reranker', model_name='rank_vicuna_7b_v1')\n\n# Zephyr Reranker\nmodel = Reranking(method='zephyr_reranker', model_name='rank_zephyr_7b_v1_full')\n```\n---\n\n## 4️⃣ Using Generator Module\n\nRankify provides a **Generator Module** for **retrieval-augmented generation (RAG)**, integrating retrieved documents with generative models like OpenAI, LiteLLM, vLLM, and Hugging Face. Its modular design allows easy addition of new **RAG methods** and **endpoints**, enabling seamless experimentation with approaches like zero-shot RAG, chain-of-thought RAG, and FiD-based RAG.  
Below there are examples of how to use different RAG methods and how to include different LLM endpoints.\n\nPlease note that in order to use API-based endpoints (OpenAI, LiteLLM), you need to specify an api-key. See how to do this in our example below. \n\n**Examples of Using Different RAG methods and backends**  \n\n```python\n# Zero-shot with Huggingface endpoint\ngenerator = Generator(method=\"zero-shot\", model_name='meta-llama\u002FMeta-Llama-3.1-8B-Instruct', backend=\"huggingface\")\n\n# Basic RAG with LiteLLM endpoint\ngenerator = Generator(method=\"basic-rag\", model_name='ollama\u002Fmistral', backend=\"litellm\", api_key=api_key)\n\n# Chain-of-Thought RAG with vLLM endpoint\ngenerator = Generator(method=\"chain-of-thought-rag\", model_name='mistralai\u002FMistral-7B-v0.1', backend=\"vllm\")\n\n# In-context-RALM with OpenAI endpoint\ngenerator = Generator(method=\"in-context-ralm\", model_name='gpt-3.5-turbo', backend=\"openai\", api_keys=[api_key])\n```\n\n**Usage example without API-inference**\n\n```python\nfrom rankify.dataset.dataset import Document, Question, Answer, Context\nfrom rankify.generator.generator import Generator\n\n# Define question and answer\nquestion = Question(\"What is the capital of Austria?\")\nanswers=Answer(\"\")\ncontexts = [\n    Context(id=1, title=\"France\", text=\"The capital of France is Paris.\", score=0.9),\n    Context(id=2, title=\"Germany\", text=\"Berlin is the capital of Germany.\", score=0.5)\n]\n\n# Construct document\ndoc = Document(question=question, answers=answers, contexts=contexts)\n\n# Initialize Generator (e.g., Meta Llama)\ngenerator = Generator(method=\"basic-rag\", model_name='meta-llama\u002FMeta-Llama-3.1-8B-Instruct', backend=\"huggingface\")\n\n# Generate answer\ngenerated_answers = generator.generate([doc])\nprint(generated_answers)  # Output: [\"Paris\"]\n```\n**Usage example with API-inference**\n\nSaving your API-keys in a .env.local file, you can access them via the listed 
methods:\n```python\n# in .env.local:\nOPENAI_API_KEY=your-api-key\nLITELLM_API_KEY=your-api-key\n```\n**Usage**\n```python\n# load LiteLLM api-key\napi_key = get_litellm_api_key()\n# load OpenAI api-key\napi_key = get_openai_api_key()\n```\n**Full example using LiteLLM:**\n\n```python\nfrom rankify.dataset.dataset import Document, Question, Answer, Context\nfrom rankify.generator.generator import Generator\nfrom rankify.utils.models.rank_llm.rerank.api_keys import get_litellm_api_key\n\n# Define question and answer\nquestion = Question(\"What is the capital of France?\")\nanswers = Answer([\"\"])\ncontexts = [\n    Context(id=1, title=\"France\", text=\"The capital of France is Paris.\", score=0.9),\n    Context(id=2, title=\"Germany\", text=\"Berlin is the capital of Germany.\", score=0.5)\n]\n\n# Construct document\ndoc = Document(question=question, answers=answers, contexts=contexts)\n\n#load api-key\napi_key = get_litellm_api_key()\n\n# Initialize Generator (e.g., Meta Llama)\ngenerator = Generator(method=\"basic-rag\", model_name='ollama\u002Fmistral', backend=\"litellm\", api_key=api_key)\n\n# Generate answer\ngenerated_answers = generator.generate([doc])\nprint(generated_answers)  # Output: [\"Paris\"]\n```\n\n\n---\n## 5️⃣ Evaluating with Metrics  \n\nRankify provides built-in **evaluation metrics** for **retrieval, re-ranking, and retrieval-augmented generation (RAG)**. These metrics help assess the quality of retrieved documents, the effectiveness of ranking models, and the accuracy of generated answers.  
\n\n**Evaluating Generated Answers**  \n\nYou can evaluate the quality of **retrieval-augmented generation (RAG) results** by comparing generated answers with ground-truth answers.\n```python\nfrom rankify.metrics.metrics import Metrics\nfrom rankify.dataset.dataset import Dataset\n\n# Load dataset\ndataset = Dataset('bm25', 'nq-test', 100)\ndocuments = dataset.download(force_download=False)\n\n# Initialize Generator\ngenerator = Generator(method=\"in-context-ralm\", model_name='meta-llama\u002FLlama-3.1-8B')\n\n# Generate answers\ngenerated_answers = generator.generate(documents)\n\n# Evaluate generated answers\nmetrics = Metrics(documents)\nprint(metrics.calculate_generation_metrics(generated_answers))\n```\n\n**Evaluating Retrieval Performance**  \n\n```python\n# Calculate retrieval metrics before reranking\nmetrics = Metrics(documents)\nbefore_ranking_metrics = metrics.calculate_retrieval_metrics(ks=[1, 5, 10, 20, 50, 100], use_reordered=False)\n\nprint(before_ranking_metrics)\n```\n\n**Evaluating Reranked Results**  \n```python\n# Calculate retrieval metrics after reranking\nafter_ranking_metrics = metrics.calculate_retrieval_metrics(ks=[1, 5, 10, 20, 50, 100], use_reordered=True)\nprint(after_ranking_metrics)\n```\n\n\n## 🧪 BEIR & TREC DL19\u002FDL20 with BM25\n\n**Rankify** ships convenient hooks to run BM25 baselines on **BEIR** tasks and **TREC DL'19\u002F20**, and to evaluate with TREC-style metrics (nDCG, MAP, MRR).\n\n### Quick start (single dataset)\n```python\nfrom rankify.dataset.dataset import Dataset\nfrom rankify.metrics.metrics import Metrics\n\n# Download pre-retrieved BM25 results (top-k per query)\ndocs = Dataset('bm25', 'dl19', n_docs=1000).download(force_download=False)\n\n# Evaluate with TREC metrics (nDCG@10\u002F100 by default shown here)\nmetrics = Metrics(docs)\nprint(metrics.calculate_trec_metrics(ndcg_cuts=[10, 100], use_reordered=False))\n```\n\n> **Notes**\n> - Supported names include **`dl19`**, **`dl20`**, and BEIR tasks with the 
`beir-` prefix, e.g.:\n>   `beir-arguana`, `beir-covid`, `beir-dbpedia`, `beir-fever`, `beir-fiqa`, `beir-news`,\n>   `beir-nfc`, `beir-quora`, `beir-robust04`, `beir-scidocs`, `beir-scifact`, `beir-signal`, `beir-touche`.\n> - If you need explicit qrels selection, pass `qrel=name.replace(\"beir-\", \"\")` to `calculate_trec_metrics`.\n\n### Batch over BEIR & DL datasets\n```python\nfrom rankify.dataset.dataset import Dataset\nfrom rankify.metrics.metrics import Metrics\n\nBEIR_TASKS = [\n    \"beir-arguana\", \"beir-covid\", \"beir-dbpedia\", \"beir-fever\", \"beir-fiqa\", \"beir-news\",\n    \"beir-nfc\", \"beir-quora\", \"beir-robust04\", \"beir-scidocs\", \"beir-scifact\",\n    \"beir-signal\", \"beir-touche\",\n]\n\nfor name in [\"dl19\", \"dl20\", *BEIR_TASKS]:\n    docs = Dataset('bm25', name, n_docs=100).download(force_download=False)\n    m = Metrics(docs)\n    res = m.calculate_trec_metrics(ndcg_cuts=[10, 100], use_reordered=False)\n    print(name, res)\n```\n\n### (Optional) Add a reranker, then evaluate\n```python\nfrom rankify.models.reranking import Reranking\nfrom rankify.dataset.dataset import Dataset\nfrom rankify.metrics.metrics import Metrics\n\nname = \"beir-arguana\"\ndocs = Dataset('bm25', name, n_docs=100).download(force_download=False)\nreranker = Reranking(method='transformer_ranker', model_name='bge-reranker-base')\nreranker.rank(docs)\n\nm = Metrics(docs)\nprint(\"Before:\", m.calculate_trec_metrics(ndcg_cuts=[10, 100], use_reordered=False))\nprint(\"After :\", m.calculate_trec_metrics(ndcg_cuts=[10, 100], use_reordered=True))\n```\n\n\n## 📏 Evaluating RAG with **RAGAS**\n\nRankify ships a thin wrapper around **ragas** to make quality evaluation of generated answers simple and flexible—whether you judge with a local HF model or a hosted API like OpenAI. 
You can run **fast defaults**, **pick specific metrics**, or **simulate predictions** when compute is tight.\n### ✅ Install\n\n```bash\n# core Rankify RAG deps\npip install bert-score\npip install ragas\npip install langchain_huggingface\npip install rouge-score\n```\n\n\n```python\nimport torch\nfrom rankify.dataset.dataset import Document, Question, Answer, Context\nfrom rankify.generator.generator import Generator\nfrom rankify.metrics.generator_metrics import GeneratorMetrics\nfrom rankify.metrics.ragas_bridge import RagasModels\n\n# 1) Build a tiny document\nquestion = Question(\"What is the capital of France?\")\nanswers  = Answer([\"Paris\"])\ncontexts = [\n    Context(id=1, title=\"France\",   text=\"The capital of France is Paris.\", score=0.9),\n    Context(id=2, title=\"Germany\",  text=\"Berlin is the capital of Germany.\", score=0.5),\n]\ndoc = Document(question=question, answers=answers, contexts=contexts)\n\n# 2) Generate an answer (or skip and provide your own predictions list)\ngenerator   = Generator(method=\"basic-rag\",\n                        model_name=\"meta-llama\u002FMeta-Llama-3.1-8B-Instruct\",\n                        backend=\"huggingface\",\n                        torch_dtype=torch.float16)\npredictions = generator.generate([doc])\nprint(\"Generated:\", predictions)\n\n# 3) Evaluate with RAGAS (HF judge)\ngen_metrics = GeneratorMetrics([doc])\n\nragas_hf = RagasModels(\n    llm_kind=\"hf\",\n    llm_name=\"meta-llama\u002FMeta-Llama-3.1-8B-Instruct\",\n    embeddings_kind=\"hf\",\n    embeddings_name=\"sentence-transformers\u002Fall-MiniLM-L6-v2\",\n    torch_dtype=\"float16\",\n    max_new_tokens=256,  # shorter outputs = faster + cheaper\n    timeout=180,         # seconds per metric call\n    max_retries=1,\n    max_workers=2,       # keep small on limited hardware\n)\n\n# (A) Fast defaults\nscores_fast = gen_metrics.all(predictions, ragas_models=ragas_hf)\nprint(\"RAGAS (fast):\", scores_fast)\n\n# (B) Pick specific 
metrics\nscores_specific = gen_metrics.ragas_generator(\n    predictions,\n    judge=ragas_hf,\n    metrics=[\"faithfulness\", \"response_relevancy\", \"context_precision\", \"context_recall\"],\n)\nprint(\"RAGAS (specific):\", scores_specific)\n\n# (C) OpenAI judge (much faster if you have an API key)\nragas_openai = RagasModels(llm_kind=\"openai\", llm_name=\"gpt-4o-mini\", timeout=30)\nscores_openai = gen_metrics.all(predictions, ragas_models=ragas_openai)\nprint(\"RAGAS (OpenAI):\", {k: v for k, v in scores_openai.items() if k.startswith(\"ragas_\")})\n```\n\n## 📜 Supported Models\n\n### **1️⃣ Index**  \n- ✅ **Wikipedia**\n- ✅ **MS-MARCO**\n- 🕒 **Online Search** \n\n### **1️⃣ Retrievers**  \n- ✅ **[BM25](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1561\u002F1500000019)**\n- ✅ **[DPR](https:\u002F\u002Farxiv.org\u002Fabs\u002F2004.04906)** \n- ✅ **[ColBERT](https:\u002F\u002Farxiv.org\u002Fabs\u002F2004.12832)**   \n- ✅ **[ANCE](https:\u002F\u002Farxiv.org\u002Fabs\u002F2007.00808)**\n- ✅ **[BGE](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.03216)** \n- ✅ **[Contriever](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.09118)** \n- ✅ **[BPR](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.00882)** \n- ✅ **[HYDE](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.10496)**\n- ✅ **[SFR](https:\u002F\u002Fhuggingface.co\u002FSalesforce\u002FSFR-Embedding-Mistral)**\n- ✅ **[E5](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.03533)**\n- ✅ **[GritLM](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.09906)**\n- ✅ **[M2](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.12109)**\n- ✅ **[Nomic](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.01613)**\n- ✅ **[Instructor](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.09741)** \n- ✅ **[RaDeR](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.18405)**\n- ✅ **[ReasonIR](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.20595)** \n- ✅ **[BGE-Reasoner](https:\u002F\u002Fhuggingface.co\u002FBAAI\u002Fbge-en-icl)**\n- ✅ 
**[ReasonEmbed](https:\u002F\u002Farxiv.org\u002Fabs\u002F2510.08252)**\n- ✅ **[DiverRetriever](https:\u002F\u002Fhuggingface.co\u002FAQ-MedAI\u002FDiver-Retriever-4B)**\n- 🕒 **RepLlama**\n- 🕒 **coCondenser**   \n- 🕒 **Spar** \n- 🕒 **Dragon** \n- 🕒 **Hybrid** \n---\n\n### **2️⃣ Rerankers**  \n\n- ✅ **[Cross-Encoders](https:\u002F\u002Fhuggingface.co\u002Fcross-encoder)** \n- ✅ **[RankGPT](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.09542)**\n- ✅ **[RankGPT-API](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.09542)** \n- ✅ **[MonoT5](https:\u002F\u002Farxiv.org\u002Fabs\u002F2003.06713)**\n- ✅ **[MonoBert](https:\u002F\u002Farxiv.org\u002Fabs\u002F1910.14424)**\n- ✅ **[RankT5](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.10634)** \n- ✅ **[ListT5](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.15838)** \n- ✅ **[LiT5Score](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.16098)**\n- ✅ **[LiT5Dist](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.16098)**\n- ✅ **[Vicuna Reranker](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.15088)**\n- ✅ **[Zephyr Reranker](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.02724)**\n- ✅ **[Sentence Transformer-based](https:\u002F\u002Fhuggingface.co\u002Fsentence-transformers)** \n- ✅ **[FlashRank Models](https:\u002F\u002Fgithub.com\u002FPrithivirajDamodaran\u002FFlashRank)**  \n- ✅ **API-Based Rerankers**  \n- ✅ **[ColBERT Reranker](https:\u002F\u002Farxiv.org\u002Fabs\u002F2004.12832)**\n- ✅ **LLM Layerwise Ranker** \n- ✅ **[Splade Reranker](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3477495.3531857)**\n- ✅ **[UPR Reranker](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.07496)**\n- ✅ **[Inranker Reranker](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.06910)**\n- ✅ **Transformer Reranker**\n- ✅ **[FIRST Reranker](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.05508)**\n- ✅ **[Blender Reranker](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.02561)**\n- ✅ **[LLM2VEC 
Reranker](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.05961)**\n- ✅ **[ECHO Reranker](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.10866)**\n- ✅ **[Incontext Reranker](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.02642)**\n- 🕒 **DynRank**\n- 🕒 **ASRank**\n- 🕒 **RankLlama**\n\n---\n\n### **3️⃣ Generator**\n#### **RAG-Methods**\n- ✅ **Zero-shot**\n- ✅ **Basic-RAG**\n- ✅ **Chain-of-Thought-RAG**  \n- ✅ **Fusion-in-Decoder (FiD) with T5**\n- ✅ **In-Context Learning RALM**\n- 🕒 **Self-Consistency RAG**\n- 🕒 **Retrieval Chain-of-Thought**  \n\n#### **LLM-Endpoints**\n- ✅ **Hugging Face**\n- ✅ **vLLM**\n- ✅ **LiteLLM**  \n- ✅ **OpenAI**\n\n---\n\n\n### **✨ Features**  \n\n- 🔥 **Unified Framework**: Combines **retrieval**, **re-ranking**, and **retrieval-augmented generation (RAG)** into a single modular toolkit.  \n- 📚 **Rich Dataset Support**: Includes **40+ benchmark datasets** with **pre-retrieved documents** for seamless experimentation.  \n- 🧲 **Diverse Retrieval Methods**: Supports **BM25, DPR, ANCE, BPR, ColBERT, BGE, Contriever, SFR, E5, GritLM, M2, Nomic, Instructor, RaDeR, ReasonIR, BGE-Reasoner and  ReasonEmbed** for flexible retrieval strategies.  \n- 🎯 **Powerful Re-Ranking**: Implements **24 advanced models** with **41 sub-methods** to optimize ranking performance.  \n- 🏗️ **Prebuilt Indices**: Provides **Wikipedia and MS MARCO** corpora, eliminating indexing overhead and speeding up retrieval.  \n- 🔮 **Seamless RAG Integration**: Works with backends like **Hugging Face, OpenAI, vLLM, LiteLLM** inferening models like **GPT, LLAMA, T5, and Fusion-in-Decoder (FiD)** for multiple **retrieval-augmented generation** methods.  \n- 🛠 **Extensible & Modular**: Easily integrates **custom datasets, retrievers, ranking models, and RAG pipelines**.  \n- 📊 **Built-in Evaluation Suite**: Includes **retrieval, ranking, and RAG metrics** for robust benchmarking.  
\n- 📖 **User-Friendly Documentation**: Access detailed **[📖 online docs](http:\u002F\u002Frankify.readthedocs.io\u002F)**, **example notebooks**, and **tutorials** for easy adoption.  \n \n\n## 🔍 Roadmap  \n\n**Rankify** is still under development, and this is our first release (**v0.1.0**). While it already supports a wide range of retrieval, re-ranking, and RAG techniques, we are actively enhancing its capabilities by adding more retrievers, rankers, datasets, and features.  \n\n\n## 📖 Documentation\n\nFor full API documentation, visit the [Rankify Docs](http:\u002F\u002Frankify.readthedocs.io\u002F).\n\n---\n\n## 💡 Contributing\n\n\nFollow these steps to get involved:\n\n1. **Fork this repository** to your GitHub account.\n\n2. **Create a new branch** for your feature or fix:\n\n   ```bash\n   git checkout -b feature\u002FYourFeatureName\n   ```\n\n3. **Make your changes** and **commit them**:\n\n   ```bash\n   git commit -m \"Add YourFeatureName\"\n   ```\n\n4. **Push the changes** to your branch:\n\n   ```bash\n   git push origin feature\u002FYourFeatureName\n   ```\n\n5. **Submit a Pull Request** to propose your changes.\n\nThank you for helping make this project better!\n\n---\n\n## 🌐 Community Contributions\n\n **Chinese community resources available!**  \n\nSpecial thanks to [Xiumao](https:\u002F\u002Fgithub.com\u002Fxiumao) for writing two exceptional Chinese blog posts about Rankify:  \n\n> - 📘 [Introduction to Rankify](https:\u002F\u002Fmp.weixin.qq.com\u002Fs\u002F-dH64Q_KWvj8VQq7Ys383Q)  \n> - 📘 [Deep dive into re-ranking models in Rankify](https:\u002F\u002Fmp.weixin.qq.com\u002Fs\u002FXcOmXGv4CqUIp0oBcOgltw)  \n\nThese articles were crafted with high-traffic optimization in mind and are widely recommended in Chinese academic and developer circles. 
\n\nWe updated the [中文版本](README_zh.md) to reflect these blog contributions while keeping original content intact—thank you Xiumao for your continued support!\n\n\n## :bookmark: License\n\nRankify is licensed under the Apache-2.0 License - see the [LICENSE](https:\u002F\u002Fopensource.org\u002Flicense\u002Fapache-2-0) file for details.\n\n\n## 🙏 Acknowledgments  \n\nWe would like to express our gratitude to the following libraries, which have greatly contributed to the development of **Rankify**:  \n\n- **Diver** – For the reference implementation of the dense retriever routing and caching logic used to integrate various bi-encoders.  \n  🔗 [GitHub Repository](https:\u002F\u002Fgithub.com\u002FAQ-MedAI\u002FDiver)\n\n- **Rerankers** – A powerful Python library for integrating various reranking methods.  \n  🔗 [GitHub Repository](https:\u002F\u002Fgithub.com\u002FAnswerDotAI\u002Frerankers\u002Ftree\u002Fmain)  \n\n- **Pyserini** – A toolkit for supporting BM25-based retrieval and integration with sparse\u002Fdense retrievers.  \n  🔗 [GitHub Repository](https:\u002F\u002Fgithub.com\u002Fcastorini\u002Fpyserini)  \n\n- **FlashRAG** – A modular framework for Retrieval-Augmented Generation (RAG) research.  
\n  🔗 [GitHub Repository](https:\u002F\u002Fgithub.com\u002FRUC-NLPIR\u002FFlashRAG)  \n\n\n\n\n## :star2: Citation\n\nPlease kindly cite our paper if helps your research:\n\n```BibTex\n@article{abdallah2025rankify,\n  title={Rankify: A Comprehensive Python Toolkit for Retrieval, Re-Ranking, and Retrieval-Augmented Generation},\n  author={Abdallah, Abdelrahman and Mozafari, Jamshid and Piryani, Bhawna and Ali, Mohammed and Jatowt, Adam},\n  journal={arXiv preprint arXiv:2502.02464},\n  year={2025}\n}\n```\n\n## Star History\n\n\n[![Star History Chart](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FDataScienceUIBK_Rankify_readme_c0b19f8fef21.png)](https:\u002F\u002Fstar-history.com\u002F#DataScienceUIBK\u002FRankify&Date)\n\n\n\n\n","\u003Cdiv align=\"center\">\n\n[ [英文](README.md) | [中文](README_zh.md)]\n\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FDataScienceUIBK_Rankify_readme_dffe7e478ca1.png\" width=\"300\" style=\"border-radius: 50px;\"\u002F>\n\n### 🔥 Rankify：用于检索、重排序和检索增强生成的全面Python工具包 🔥\n\n\u003C!-- 第一行徽章 -->\n\u003Cdiv style=\"display: flex; flex-wrap: wrap; align-items: center; justify-content: center; gap: 8px; margin-bottom: 8px;\">\n  \u003Ca href=\"https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.02464\">\n    \u003Cimg src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FarXiv-b5212f.svg?logo=arxiv\" style=\"height: 24px;\">\n  \u003C\u002Fa>\n  \u003Ca href=\"https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002Fabdoelsayed\u002Freranking-datasets\">\n    \u003Cimg src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F%F0%9F%A4%97%20HuggingFace%20Datasets-27b3b4.svg\" style=\"height: 24px;\">\n  \u003C\u002Fa>\n  \u003Ca href=\"https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002Fabdoelsayed\u002Freranking-datasets-light\">\n    \u003Cimg src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F%F0%9F%A4%97%20HuggingFace%20Datasets%20light-orange.svg\" style=\"height: 24px;\">\n  \u003C\u002Fa>\n  \u003Ca href=\"#\">\n   
 \u003Cimg src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FPython-3.10_3.11-blue\" style=\"height: 24px;\">\n  \u003C\u002Fa>\n  \u003Ca href=\"https:\u002F\u002Fopensource.org\u002Flicense\u002Fapache-2-0\">\n    \u003Cimg src=\"https:\u002F\u002Fimg.shields.io\u002Fstatic\u002Fv1?label=License&message=Apache-2.0&color=red\" style=\"height: 24px;\">\n  \u003C\u002Fa>\n\u003C\u002Fdiv>\n\n\u003C!-- 第二行徽章 -->\n\u003Cdiv style=\"display: flex; flex-wrap: wrap; align-items: center; justify-content: center; gap: 8px; margin-bottom: 8px;\">\n  \u003Ca href=\"https:\u002F\u002Fpepy.tech\u002Fprojects\u002Frankify\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FDataScienceUIBK_Rankify_readme_cde3de62a85a.png\" style=\"height: 24px;\">\n  \u003C\u002Fa>\n  \u003Ca href=\"https:\u002F\u002Fgithub.com\u002FDataScienceUIBK\u002Frankify\u002Freleases\">\n    \u003Cimg src=\"https:\u002F\u002Fimg.shields.io\u002Fgithub\u002Frelease\u002FDataScienceUIBK\u002Frankify.svg?label=Version&color=orange\" style=\"height: 24px;\">\n  \u003C\u002Fa>\n  \u003Ca href=\"#\">\n    \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FDataScienceUIBK_Rankify_readme_63c8a6c57065.png\" style=\"height: 24px;\">\n  \u003C\u002Fa>\n  \u003Ca href=\"https:\u002F\u002Fgitcode.com\u002Fabdoelsayed2016\u002FRankify\">\n    \u003Cimg src=\"https:\u002F\u002Fgitcode.com\u002Fabdoelsayed2016\u002FRankify\u002Fstar\u002Fbadge.svg\" >\n  \u003C\u002Fa>\n  \u003Ca href=\"https:\u002F\u002Fcolab.research.google.com\u002Fdrive\u002F1QukxP1WZHkPfD4321UcLXD24sKCpuUuP?usp=sharing\">\u003Cimg style=\"height: 24px;\" src=\"https:\u002F\u002Fimg.shields.io\u002Fstatic\u002Fv1?label=Colab&message=Install_Rankify&logo=Google%20Colab&color=f9ab00\">\u003C\u002Fa>\n\n\u003C\u002Fdiv>\n\n\u003C!-- Product Hunt 徽章 -->\n\u003C!--\u003Cdiv style=\"margin-top: 10px;\">\n  \u003Ca 
href=\"https:\u002F\u002Fwww.producthunt.com\u002Fproducts\u002Fgithub-113?embed=true&utm_source=badge-featured&utm_medium=badge&utm_source=badge-github&#0045;73d2dbbf&#0045;d84f&#0045;495d&#0045;86d8&#0045;af4dd72fc31f\">\n    \u003Cimg src=\"https:\u002F\u002Fapi.producthunt.com\u002Fwidgets\u002Fembed-image\u002Fv1\u002Ffeatured.svg?post_id=980097&theme=light&t=1750416463103\" style=\"height: 40px;\">\n  \u003C\u002Fa>\n\u003C\u002Fdiv>-->\n\n\u003C\u002Fdiv>\n\n\n如果您喜欢我们的框架，**请毫不犹豫地 ⭐ 星标这个仓库 ⭐**。这有助于我们 **让框架变得更好，并能扩展到不同的模型和方法 🤗**。\n\n\u003C!-- \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FDataScienceUIBK_Rankify_readme_ae56422bbcb3.gif\" height=50 alt=\"Star the repo   \" \u002F>-->\n\n\n\n\n一个模块化且高效的检索、重排序和RAG框架，专为与最先进的检索、排序和RAG任务模型配合使用而设计。\n\n\u003C!--Rankify是一个用于统一检索、重排序和检索增强生成（RAG）研究的Python工具包。我们的工具包集成了40个预检索基准数据集，支持7种检索技术、24种最先进的重排序模型以及多种RAG方法。凭借支持多个端点的灵活生成器架构，Rankify提供了一个模块化且可扩展的框架，使跨检索管道的无缝实验和基准测试成为可能。全面的文档、开源实现和预构建的评估工具使Rankify成为该领域研究人员和从业者的强大资源。-->\n\n\u003C!-- \u003Cp align=\"center\">\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FDataScienceUIBK_Rankify_readme_9323eed9257f.png\" width=\"500\" height=\"700\" >\n\u003C\u002Fp> -->\n\n---\n## 🚀 演示\n\n要在本地运行演示：\n\n```bash\n# 确保已安装Rankify\npip install streamlit\n\n# 然后运行演示\nstreamlit run demo.py\n```\n\nhttps:\u002F\u002Fgithub.com\u002Fuser-attachments\u002Fassets\u002F13184943-55db-4f0c-b509-fde920b809bc\n\n\n---\n\n## :link: 导航\n- [功能](#-features)\n- [路线图](#-roadmap)\n- [安装](#-installation)\n- [快速开始](#rocket-quick-start)\n  - [Pipeline API](#-one-line-pipeline-api-recommended)\n  - [RankifyAgent](#-rankifyagent---ai-powered-model-selection)\n  - [Rankify服务器](#-rankify-server---deploy-as-rest-api)\n  - [集成](#-integrations---use-with-your-stack)\n  - [Web Playground](#-web-playground---interactive-ui)\n- [索引](#-indexing-via-cli)\n- [检索器](#2️⃣-running-retrieval)\n- [重排序器](#3️⃣-running-reranking)\n- [生成器](#4️⃣-using-generator-module)\n- 
[评估](#5️⃣-evaluating-with-metrics)\n- [文档](#📖-documentation)\n- [社区贡献](#-Community-Contributions)\n- [贡献](#-contributing)\n- [许可证](#bookmark-license)\n- [致谢](#-acknowledgments)\n- [引用](#star2-citation)\n\n\n\n\n## 🎉新闻\n- **[2026-02-16]** 非常感谢[@JamieHoldcroft](https:\u002F\u002Fgithub.com\u002FJamieHoldcroft)集成了**15+**种新的密集检索器，包括SOTA基于LLM的双编码器（**SFR**、**E5**、**GritLM**）和推理增强模型（**RaDeR**、**ReasonIR**、**ReasonEmbed**、**BGE-Reasoner**）。\n\n-  **[2025-10-14]** 更新了安装选项，新增了可选组件：`retriever`、`reranking`、`rag` 和 `all`。\n- **[2025-10-14]** 新的**CLI** (`rankify-index`) 语法及**BM25、DPR、ANCE、Contriever、ColBERT、BGE** 的示例。\n\n- **[2025-06-11]** 非常感谢[@tobias124](https:\u002F\u002Fgithub.com\u002Ftobias124)实现了自定义数据集的【索引】功能（#cli-running-indexing-module）。\n\n- **[2025-06-01]** 非常感谢[@aherzinger](https:\u002F\u002Fgithub.com\u002Faherzinger)实现了并重构了生成器和RAG模型。\n\n- **[2025-05-30]** 非常感谢[@baraayusry](https:\u002F\u002Fgithub.com\u002Fbaraayusry)使用CrawAI和ReACT实现了在线检索器。\n\n- **[2025-02-10]** 在Hugging Face上发布了【reranking-datasets】（https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002Fabdoelsayed\u002Freranking-datasets）和【reranking-datasets-light】（https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002Fabdoelsayed\u002Freranking-datasets-light）。\n\n- **[2025-02-04]** 我们的【论文】（https:\u002F\u002Farxiv.org\u002Fabs\u002F2502.02464）已在arXiv上发布。\n\n## 🔧 安装  \n\n#### 设置虚拟环境\n首先，创建并激活一个使用 Python 3.10 的 conda 环境：\n\n```bash\nconda create -n rankify python=3.10\nconda activate rankify\n```\n\n#### 安装 PyTorch 2.5.1\n我们建议使用 PyTorch 2.5.1 安装 Rankify。请参考 [PyTorch 安装页面](https:\u002F\u002Fpytorch.org\u002Fget-started\u002Fprevious-versions\u002F) 获取针对不同平台的安装命令。\n\n如果您可以访问 GPU，建议安装 CUDA 12.4 或 12.6 版本的 PyTorch，因为许多评估指标都针对 GPU 使用进行了优化。\n\n要安装 PyTorch 2.5.1，您可以运行以下命令：\n```bash\npip install torch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1 --index-url https:\u002F\u002Fdownload.pytorch.org\u002Fwhl\u002Fcu124\n```\n\n\n#### 基本安装\n\n要安装 **Rankify**，只需使用 **pip**（需要 Python 3.10+）：\n```base\npip install rankify\n```\n\n\n#### 
推荐安装\n\n为了获得完整功能，我们**建议安装包含所有依赖项的 Rankify**：\n```bash\npip install \"rankify[all]\"\n```\n这将确保您拥有所有必要的模块，包括检索、重排序和 RAG 支持。\n\n#### 可选依赖项\n\n如果您只想安装特定组件，可以从以下选项中选择：\n```bash\n# 检索栈（BM25、密集检索器、网络工具）\npip install \"rankify[retriever]\"\n\n# 安装基础重排序，支持 vLLM 的 `FirstModelReranker`、`LiT5ScoreReranker`、`LiT5DistillReranker`、`VicunaReranker` 和 `ZephyrReranker`\npip install \"rankify[reranking]\"\n\n# RAG 端点（OpenAI、LiteLLM、vLLM 客户端）\npip install \"rankify[rag]\"\n```\n\n或者，要从 **GitHub** 安装最新开发版本：\n\n\n```bash\ngit clone https:\u002F\u002Fgithub.com\u002FDataScienceUIBK\u002Frankify.git\ncd rankify\npip install -e .\n# 为了获得完整功能，我们建议安装包含所有依赖项的 Rankify：\npip install -e \".[all]\"\n# 安装仅用于检索的依赖项（BM25、DPR、ANCE 等）\npip install -e \".[retriever]\"\n# 安装基础重排序，支持 vLLM 的 `FirstModelReranker`、`LiT5ScoreReranker`、`LiT5DistillReranker`、`VicunaReranker` 和 `ZephyrReranker`\npip install -e \".[reranking]\"\n# RAG 端点（OpenAI、LiteLLM、vLLM 客户端）\npip install -e \".[rag]\"\n```\n\n\n#### 使用 ColBERT 检索器  \n\n如果您想使用 **ColBERT 检索器**，请按照以下额外步骤进行设置：\n```bash\n# 安装 GCC 和所需库\nconda install -c conda-forge gcc=9.4.0 gxx=9.4.0\nconda install -c conda-forge libstdcxx-ng\n```\n```bash\n# 导出必要的环境变量\nexport LD_LIBRARY_PATH=$CONDA_PREFIX\u002Flib:$LD_LIBRARY_PATH\nexport CC=gcc\nexport CXX=g++\nexport PATH=$CONDA_PREFIX\u002Fbin:$PATH\n\n# 清除缓存的 Torch 扩展\nrm -rf ~\u002F.cache\u002Ftorch_extensions\u002F*\n```\n\n\n## :rocket: 快速开始\n\n### 🚀 **单行管道 API**（推荐）\n\n使用 Rankify 的**最简单方式**——HuggingFace 风格的单行接口：\n\n```python\nfrom rankify import pipeline\n\n# 创建带有智能默认值的 RAG 管道\nrag = pipeline(\"rag\")\nanswers = rag(\"什么是机器学习？\", documents)\n\n# 或者自定义您的配置\nrag = pipeline(\n    \"rag\",\n    retriever=\"bge\",           # 最先进的密集检索器\n    reranker=\"flashrank\",      # 超快速重排序器\n    generator=\"basic-rag\"\n)\n```\n\n**可用的管道类型：**\n- `pipeline(\"search\")` - 仅文档检索\n- `pipeline(\"rerank\")` - 检索 + 重排序\n- `pipeline(\"rag\")` - 完整 RAG 管道（检索 + 重排序 + 生成）\n\n📖 **[管道 API 
文档](https:\u002F\u002Frankify.readthedocs.io\u002Fen\u002Flatest\u002Ftutorials\u002Fpipeline\u002F)**\n\n---\n\n### 🤖 **RankifyAgent** - AI 驱动的模型选择\n\n让 AI 帮助您为您的用例选择最佳模型：\n\n```python\nfrom rankify.agent import RankifyAgent, recommend\n\n# 快速推荐\nresult = recommend(task=\"qa\", gpu=True)\nprint(f\"最佳检索器: {result.retriever.name}\")\nprint(f\"最佳重排序器: {result.reranker.name}\")\n\n# 对话式代理\nagent = RankifyAgent(backend=\"azure\")  # 或 \"openai\", \"litellm\", \"local\"\nresponse = agent.chat(\"我需要一个适用于生产环境的快速搜索系统\")\nprint(response.message)\nprint(response.code_snippet)  # 可直接使用的代码\n```\n\n📖 **[RankifyAgent 文档](https:\u002F\u002Frankify.readthedocs.io\u002Fen\u002Flatest\u002Ftutorials\u002Fagent\u002F)**\n\n---\n\n### 🌐 **Rankify 服务器** - 作为 REST API 部署\n\n通过一条命令启动一个可投入生产的服务器：\n\n```bash\n# CLI\nrankify serve --port 8000 --retriever bge --reranker flashrank\n\n# 或在 Python 中\nfrom rankify.server import RankifyServer\nserver = RankifyServer(retriever=\"bge\", reranker=\"flashrank\")\nserver.start(port=8000)\n```\n\n**API 端点：**\n- `POST \u002Fretrieve` - 文档检索\n- `POST \u002Frerank` - 文档重排序\n- `POST \u002Frag` - 完整 RAG 生成\n- `GET \u002Fhealth` - 健康检查\n\n```bash\n# 示例 API 调用\ncurl -X POST http:\u002F\u002Flocalhost:8000\u002Frag \\\n  -H \"Content-Type: application\u002Fjson\" \\\n  -d '{\"query\": \"什么是 AI？\", \"n_contexts\": 5}'\n```\n\n📖 **[服务器文档](https:\u002F\u002Frankify.readthedocs.io\u002Fen\u002Flatest\u002Ftutorials\u002Fpipeline\u002Fserver\u002F)**\n\n---\n\n### 🔌 **集成** - 与您的技术栈一起使用\n\n无缝集成 LangChain、LlamaIndex 等：\n\n```python\n# LangChain\nfrom rankify.integrations import LangChainRetriever\nfrom langchain.chains import RetrievalQA\n\nretriever = LangChainRetriever(method=\"bge\", reranker=\"flashrank\")\nchain = RetrievalQA.from_chain_type(llm=your_llm, retriever=retriever)\n\n# LlamaIndex\nfrom rankify.integrations import LlamaIndexRetriever\nretriever = LlamaIndexRetriever(method=\"colbert\", reranker=\"monot5\")\n```\n\n📖 
**[集成文档](https:\u002F\u002Frankify.readthedocs.io\u002Fen\u002Flatest\u002Ftutorials\u002Fpipeline\u002Fintegrations\u002F)**\n\n---\n\n### 🎨 **Web 玩具箱** - 交互式 UI\n\n启动一个交互式的 Gradio 界面：\n\n```python\nfrom rankify.ui import launch_playground\nlaunch_playground(port=7860)\n```\n\n尝试各种模型，比较结果，并导出代码——一切尽在您的浏览器中！\n\n---\n\n### 1️⃣ **传统工作流**（面向高级用户）\n\n#### **预检索数据集**\n\n我们提供**40 多个基准数据集**，每个数据集包含**1,000 个预检索文档**：\n\n🔗 **[Hugging Face 数据集仓库](https:\u002F\u002Fhuggingface.co\u002Fdatasets\u002Fabdoelsayed\u002Freranking-datasets-light)**  \n\n#### **数据集格式**\n\n```json\n[\n    {\n        \"question\": \"...\",\n        \"answers\": [\"...\", \"...\", ...],\n        \"ctxs\": [\n            {\n                \"id\": \"...\",         \u002F\u002F 文段 ID\n                \"score\": \"...\",      \u002F\u002F 检索器得分\n                \"has_answer\": true|false\n            }\n        ]\n    }\n]\n```\n\n#### **列出可用数据集**\n\n```python\nfrom rankify.dataset.dataset import Dataset \nDataset.available_dataset()  # 修正拼写错误：avaiable -> available\n```\n\n#### **下载数据集**\n\n```python\nfrom rankify.dataset.dataset import Dataset\n\n# 下载 BM25 检索的文档\ndataset = Dataset(retriever=\"bm25\", dataset_name=\"nq-dev\", n_docs=100)\ndocuments = dataset.download(force_download=False)\n\n# 从文件加载\ndocuments = Dataset.load_dataset('.\u002Fpath\u002Fto\u002Fdataset.json', n_docs=100)\n```\n\n\u003C!-- #### 预检索数据集的功能对比  \n\n下表概述了每种数据集对不同检索方法（**BM25、DPR、ColBERT、ANCE、BGE、Contriever**）的可用性。  \n\n✅ **已完成**  \n⏳  **部分完成，其他部分待完成**  \n🕒 **待完成**\n\n\u003Ctable style=\"width: 100%;\">\n  \u003Ctr>\n    \u003Cth align=\"center\">数据集\u003C\u002Fth> \n    \u003Cth align=\"center\">BM25\u003C\u002Fth> \n    \u003Cth align=\"center\">DPR\u003C\u002Fth> \n    \u003Cth align=\"center\">ColBERT\u003C\u002Fth>\n    \u003Cth align=\"center\">ANCE\u003C\u002Fth>\n    \u003Cth align=\"center\">BGE\u003C\u002Fth>\n    \u003Cth align=\"center\">Contriever\u003C\u002Fth>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd 
align=\"left\">2WikimultihopQA\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">ArchivialQA\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">ChroniclingAmericaQA\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">EntityQuestions\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">AmbigQA\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">ARC\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd 
align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">ASQA\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">MS MARCO\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">AY2\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">Bamboogle\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">BoolQ\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd 
align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">CommonSenseQA\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">CuratedTREC\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">⏳\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">ELI5\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">FERMI\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">⏳\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">FEVER\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd 
align=\"left\">HellaSwag\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">HotpotQA\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">MMLU\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">Musique\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">NarrativeQA\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">⏳\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">NQ\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    
\u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">⏳\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">OpenbookQA\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">PIQA\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">PopQA\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">⏳\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">Quartz\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">SIQA\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    
\u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">StrategyQA\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">TREX\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">TriviaQA\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">⏳\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">TruthfulQA\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">TruthfulQA\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">WebQ\u003C\u002Ftd>\n    
\u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">⏳\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">WikiQA\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">⏳\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">WikiAsp\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">WikiPassageQA\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">⏳\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">WNED\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">WoW\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  
  \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n  \u003Ctr>\n    \u003Ctd align=\"left\">Zsre\u003C\u002Ftd>\n    \u003Ctd align=\"center\">✅\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n    \u003Ctd align=\"center\">🕒\u003C\u002Ftd>\n  \u003C\u002Ftr>\n\u003C\u002Ftable> -->\n\n## 🧱 通过 CLI 进行索引\nCLI 入口点是 **`rankify-index`**，其子命令为 **`index`**。\n\n**常用标志**\n- `corpus_path`（位置参数）：JSONL 语料库的路径。\n- `--retriever {bm25,dpr,ance,contriever,colbert,bge}`。\n- `--output PATH`（默认值：`rankify_indices`）。\n- `--index_type {wiki,msmarco}`（默认值：`wiki`）。\n- `--threads INT`（默认值：`32`，用于稀疏及部分稠密预处理）。\n- `--device {cpu,cuda}`（默认值：检索器专用，通常为 `cuda`）。\n- `--batch_size INT`（稠密编码器 \u002F Faiss 添加批次）。\n- `--encoder MODEL`（仅适用于稠密编码器；省略时使用合理默认值）。\n\n> **索引布局**\n> - BM25 → `\u003Coutput>\u002F\u003Cstem>\u002Fbm25_index`\n> - DPR   → `\u003Coutput>\u002F\u003Cstem>\u002Fdpr_index_\u003Cindex_type>`\n> - ANCE  → `\u003Coutput>\u002F\u003Cstem>\u002Fance_index_\u003Cindex_type>`\n> - BGE   → `\u003Coutput>\u002F\u003Cstem>\u002Fbge_index_\u003Cindex_type>`\n> - Contriever → `\u003Coutput>\u002F\u003Cstem>\u002Fcontriever_index_\u003Cindex_type>`\n> - ColBERT    → `\u003Coutput>\u002F\u003Cstem>\u002Fcolbert_index_\u003Cindex_type>`\n\n### BM25\n```bash\nrankify-index index data\u002Fwikipedia_10k.jsonl \\\n  --retriever bm25 \\\n  --output .\u002Findices\n```\n\n### DPR（默认单编码器）\n```bash\n# 维基百科风格\nrankify-index index data\u002Fwikipedia_100.jsonl \\\n  --retriever dpr \\\n  --encoder facebook\u002Fdpr-ctx_encoder-single-nq-base \\\n  --batch_size 16 --device cuda \\\n  --output .\u002Findices\n\n# MS MARCO\nrankify-index index data\u002Fmsmarco_100.jsonl \\\n  --retriever dpr --index_type msmarco \\\n  --encoder 
facebook\u002Fdpr-ctx_encoder-single-nq-base \\\n  --batch_size 16 --device cuda \\\n  --output .\u002Findices\n```\n\n### ANCE\n```bash\nrankify-index index data\u002Fwikipedia_100.jsonl \\\n  --retriever ance \\\n  --encoder castorini\u002Fance-dpr-context-multi \\\n  --batch_size 16 --device cuda \\\n  --output .\u002Findices\n```\n\n### Contriever\n```bash\nrankify-index index data\u002Fwikipedia_100.jsonl \\\n  --retriever contriever \\\n  --encoder facebook\u002Fcontriever-msmarco \\\n  --batch_size 16 --device cuda \\\n  --output .\u002Findices\n```\n\n### ColBERT\n```bash\nrankify-index index data\u002Fwikipedia_100.jsonl \\\n  --retriever colbert \\\n  --batch_size 32 --device cuda \\\n  --output .\u002Findices\n```\n\n### BGE\n```bash\nrankify-index index data\u002Fwikipedia_100.jsonl \\\n  --retriever bge \\\n  --encoder BAAI\u002Fbge-large-en-v1.5 \\\n  --batch_size 16 --device cuda \\\n  --output .\u002Findices\n```\n\n\n---\n\n### 2️⃣ 执行检索\n要使用 **Rankify** 执行检索，您可以选择多种检索方法，例如 **BM25、DPR、ANCE、Contriever、ColBERT、BGE、Sbert、Nomic、Instructor、DiverRetriever、SRF、E5、RaDeR、M2、GritLM、ReasonEmbed、ReasonIR 和 BGEReasoner**。\n\n### 第一步：设置示例查询\n\n**示例：在样本查询上执行检索**  \n```python\nfrom rankify.dataset.dataset import Document, Question, Answer, Context\nfrom rankify.retrievers.retriever import Retriever\n\n# 示例文档\ndocuments = [\n    Document(question=Question(\"《虎胆龙威》的演员阵容？\"), answers=Answer([\n            \"杰·考特尼\",\n            \"塞巴斯蒂安·科赫\",\n            \"拉迪沃耶·布克维奇\",\n            \"尤莉娅·斯尼吉尔\",\n            \"谢尔盖·科列斯尼科夫\",\n            \"玛丽·伊丽莎白·温斯顿\",\n            \"布鲁斯·威利斯\"\n        ]), contexts=[]),\n    Document(question=Question(\"谁写了《哈姆雷特》？\"), answers=Answer([\"莎士比亚\"]), contexts=[])\n]\n```\n### 第二步：选择检索选项\n\n**选项 A：**\n使用 ```index_type```（例如 `\"wiki\"`、`\"msmarco\"`）加载预先计算好的 FAISS 索引。\n\n```python\n# 维基百科上的 BM25 检索\nbm25_retriever_wiki = Retriever(method=\"bm25\", n_docs=5, index_type=\"wiki\")\n\n# MS MARCO 上的 BM25 检索\nbm25_retriever_msmarco = 
Retriever(method=\"bm25\", n_docs=5, index_type=\"msmarco\")\n\n\n# 维基百科上的 DPR（多编码器）检索\ndpr_retriever_wiki = Retriever(method=\"dpr\", model=\"dpr-multi\", n_docs=5, index_type=\"wiki\")\n\n# MS MARCO 上的 DPR（多编码器）检索\ndpr_retriever_msmarco = Retriever(method=\"dpr\", model=\"dpr-multi\", n_docs=5, index_type=\"msmarco\")\n\n\n# 维基百科上的 DPR（单编码器）检索\ndpr_retriever_wiki = Retriever(method=\"dpr\", model=\"dpr-single\", n_docs=5, index_type=\"wiki\")\n\n# MS MARCO 上的 DPR（单编码器）检索\ndpr_retriever_msmarco = Retriever(method=\"dpr\", model=\"dpr-single\", n_docs=5, index_type=\"msmarco\")\n\n\n# 维基百科上的 ANCE 检索\nance_retriever_wiki = Retriever(method=\"ance\", model=\"ance-multi\", n_docs=5, index_type=\"wiki\")\n\n# MS MARCO 上的 ANCE 检索\nance_retriever_msmarco = Retriever(method=\"ance\", model=\"ance-multi\", n_docs=5, index_type=\"msmarco\")\n\n\n# 维基百科上的 Contriever 检索\ncontriever_retriever_wiki = Retriever(method=\"contriever\", model=\"facebook\u002Fcontriever-msmarco\", n_docs=5, index_type=\"wiki\")\n\n# MS MARCO 上的 Contriever 检索\ncontriever_retriever_msmarco = Retriever(method=\"contriever\", model=\"facebook\u002Fcontriever-msmarco\", n_docs=5, index_type=\"msmarco\")\n\n\n# 维基百科上的 ColBERT 检索\ncolbert_retriever_wiki = Retriever(method=\"colbert\", model=\"colbert-ir\u002Fcolbertv2.0\", n_docs=5, index_type=\"wiki\")\n\n# MS MARCO 上的 ColBERT 检索\ncolbert_retriever_msmarco = Retriever(method=\"colbert\", model=\"colbert-ir\u002Fcolbertv2.0\", n_docs=5, index_type=\"msmarco\")\n\n\n# 维基百科上的 BGE 检索\nbge_retriever_wiki = Retriever(method=\"bge\", model=\"BAAI\u002Fbge-large-en-v1.5\", n_docs=5, index_type=\"wiki\")\n\n# MS MARCO 上的 BGE 检索\nbge_retriever_msmarco = Retriever(method=\"bge\", model=\"BAAI\u002Fbge-large-en-v1.5\", n_docs=5, index_type=\"msmarco\")\n\n\n# 维基百科上的 Hyde 检索\nhyde_retriever_wiki = Retriever(method=\"hyde\", n_docs=5, index_type=\"wiki\", api_key=OPENAI_API_KEY )\n\n# MS MARCO 上的 Hyde 检索\nhyde_retriever_msmarco = Retriever(method=\"hyde\", n_docs=5, 
index_type=\"msmarco\", api_key=OPENAI_API_KEY)\n```\n\n**选项 B：**\n使用自定义数据集并自动缓存进行检索。\n\n这些模型均基于最新 70 亿+ 参数模型，所有以下模型仅适用于自定义数据集。只需将 `.jsonl` 文件传递给 `corpus_path`，确保您的数据映射到所需的 `id:` 和 `text:` 字段，模型将在首次运行时嵌入并本地缓存数据。\n\n```python\n\n# 在Diver框架中实现的双编码器（11种可配置模型，由model_id指定）\nbge_large_retriever = Retriever(method=\"diver-dense\", model_id=\"bge\", corpus_path=\"data\u002Fmy_corpus.jsonl\", encode_batch_size=8, n_docs=5)\n\nsbert_retriever = Retriever(method=\"diver-dense\", model_id=\"sbert\", corpus_path=\"data\u002Fmy_corpus.jsonl\", encode_batch_size=8, n_docs=5)\n\ninst_l_retriever = Retriever(method=\"diver-dense\", model_id=\"inst-l\", corpus_path=\"data\u002Fmy_corpus.jsonl\", encode_batch_size=8, n_docs=5)\n\ninst_xl_retriever = Retriever(method=\"diver-dense\", model_id=\"inst-xl\", corpus_path=\"data\u002Fmy_corpus.jsonl\", encode_batch_size=8, n_docs=5)\n\nsfr_retriever = Retriever(method=\"diver-dense\", model_id=\"sf\", corpus_path=\"data\u002Fmy_corpus.jsonl\", encode_batch_size=8, n_docs=5)\n\ne5_retriever = Retriever(method=\"diver-dense\", model_id=\"e5\", corpus_path=\"data\u002Fmy_corpus.jsonl\", encode_batch_size=8, n_docs=5)\n\ncontriever_retriever = Retriever(method=\"diver-dense\", model_id=\"contriever\", corpus_path=\"data\u002Fmy_corpus.jsonl\", encode_batch_size=8, n_docs=5)\n\nm2_retriever = Retriever(method=\"diver-dense\", model_id=\"m2\", corpus_path=\"data\u002Fmy_corpus.jsonl\", encode_batch_size=8, n_docs=5)\n\ngrit_retriever = Retriever(method=\"diver-dense\", model_id=\"grit\", corpus_path=\"data\u002Fmy_corpus.jsonl\", encode_batch_size=8, n_docs=5)\n\nrader_retriever = Retriever(method=\"diver-dense\", model_id=\"rader\", corpus_path=\"data\u002Fmy_corpus.jsonl\", encode_batch_size=8, n_docs=5)\n\nnomic_retriever = Retriever(method=\"diver-dense\", model_id=\"nomic\", corpus_path=\"data\u002Fmy_corpus.jsonl\", encode_batch_size=4, n_docs=5)\n\ndiver_retriever = Retriever(method=\"diver-dense\", model_id=\"diver\", 
corpus_path=\"data\u002Fmy_corpus.jsonl\", encode_batch_size=4, n_docs=5)\n\n\n# Reasonir检索\nreasonir_retriever = Retriever(method=\"reasonir\", corpus_path=\"data\u002Fmy_corpus.jsonl\", encode_batch_size=4, n_docs=5)\n\n\n# ReasonEmbed检索（3种可配置模型，由model_id指定）\nreasonembed_qwen8b_retriever = Retriever(method=\"reason-embed\", model_id=\"qwen3-8b\", corpus_path=\"data\u002Fmy_corpus.jsonl\", encode_batch_size=8, n_docs=5)\n\nreasonembed_llama8b_retriever = Retriever(method=\"reason-embed\", model_id=\"qwen3-4b\", corpus_path=\"data\u002Fmy_corpus.jsonl\", encode_batch_size=8, n_docs=5)\n\nreasonembed_qwen4b_retriever = Retriever(method=\"reason-embed\", model_id=\"llama-8b\", corpus_path=\"data\u002Fmy_corpus.jsonl\", encode_batch_size=8, n_docs=5)\n\n\n# BgeReasonEmbed检索\nbge_reasoner_retriever = Retriever(method=\"bge-reasoner-embed\", corpus_path=\"data\u002Fmy_corpus.jsonl\", encode_batch_size=8, n_docs=5)\n```\n### 检索示例：在BRIGHT基准测试上使用ReasonIR（生物查询）\n本示例演示了如何在推理密集型的BRIGHT基准测试上评估`reasonir\u002FReasonIR-8B`模型。\n\n```python\nfrom datasets import load_dataset\nfrom rankify.dataset.dataset import Document, Question, Answer\nfrom rankify.retrievers.retriever import Retriever\n\ncorpus_path = \"bright_biology_corpus.jsonl\"       # 用于检索的.jsonl语料库\n\ndocs = load_dataset(\"xlangai\u002FBRIGHT\", \"documents\", split=\"biology\")\ndocs.to_json(corpus_path, force_ascii=False)  \n\nqueries = load_dataset(\"xlangai\u002FBRIGHT\", \"examples\", split=\"biology\")\n    \ndocuments = []\nfor item in queries:\n    doc = Document(id=item[\"id\"], \n                   question=Question(question=item[\"query\"]), \n                   answers=Answer(answers=item.get(\"gold_ids\", [])))\n    documents.append(doc)\n    \nretriever = Retriever(\n    method=\"reasonir\",            # 使用ReasonIR检索器\n    n_docs=3,                     # 每个查询检索前3个文档\n    corpus_path=corpus_path,      # 刚刚创建的JSONL文件路径\n    text_field=\"content\",         # BRIGHT使用'content'而非'text'\n    
batch_size=4,\n)\n\nresults = retriever.retrieve(documents)\n\n```\n\n### 第三步：执行并查看结果\n**运行检索**\n\n定义好检索器后，可以使用以下代码检索文档：\n```python\nretrieved_documents = bm25_retriever_wiki.retrieve(documents)\n\nfor i, doc in enumerate(retrieved_documents):\n    print(f\"\\n文档 {i+1}:\")\n    print(doc)\n```\n\n---\n## 3️⃣ 运行重排序\nRankify支持多种重排序模型。以下是每种模型的使用示例。\n\n**示例：对文档进行重排序**\n```python\nfrom rankify.dataset.dataset import Document, Question, Answer, Context\nfrom rankify.models.reranking import Reranking\n\n# 示例文档设置\nquestion = Question(\"托马斯·爱迪生是什么时候发明电灯泡的？\")\nanswers = Answer([\"1879\"])\ncontexts = [\n    Context(text=\"首尔大学发生雷击事件\", id=1),\n    Context(text=\"托马斯·爱迪生试图为汽车发明一种装置但失败了\", id=2),\n    Context(text=\"咖啡对减肥有好处\", id=3),\n    Context(text=\"托马斯·爱迪生于1879年发明了电灯泡\", id=4),\n    Context(text=\"托马斯·爱迪生从事电力相关工作\", id=5),\n]\ndocument = Document(question=question, answers=answers, contexts=contexts)\n\n# 初始化重排序器\nreranker = Reranking(method=\"monot5\", model_name=\"monot5-base-msmarco\")\n\n# 应用重排序\nreranker.rank([document])\n\n# 打印重新排序后的上下文\nfor context in document.reorder_contexts:\n    print(f\"  - {context.text}\")\n```\n\n\n**不同重排序模型的使用示例**\n```python\n# UPR\nmodel = Reranking(method='upr', model_name='t5-base')\n\n# 基于API的重排序器\nmodel = Reranking(method='apiranker', model_name='voyage', api_key='your-api-key')\nmodel = Reranking(method='apiranker', model_name='jina', api_key='your-api-key')\nmodel = Reranking(method='apiranker', model_name='mixedbread.ai', api_key='your-api-key')\n\n# Blender重排序器\nmodel = Reranking(method='blender_reranker', model_name='PairRM')\n\n# ColBERT重排序器\nmodel = Reranking(method='colbert_ranker', model_name='Colbert')\n\n# EchoRank\nmodel = Reranking(method='echorank', model_name='flan-t5-large')\n\n# First Ranker\nmodel = Reranking(method='first_ranker', model_name='base')\n\n# FlashRank\nmodel = Reranking(method='flashrank', model_name='ms-marco-TinyBERT-L-2-v2')\n\n# InContext重排序器\nReranking(method='incontext_reranker', 
model_name='llamav3.1-8b')\n\n# InRanker\nmodel = Reranking(method='inranker', model_name='inranker-small')\n\n# ListT5\nmodel = Reranking(method='listt5', model_name='listt5-base')\n\n# LiT5 Distill\nmodel = Reranking(method='lit5distill', model_name='LiT5-Distill-base')\n\n# LiT5 Score\nmodel = Reranking(method='lit5score', model_name='LiT5-Distill-base')\n\n# LLM Layerwise Ranker\nmodel = Reranking(method='llm_layerwise_ranker', model_name='bge-multilingual-gemma2')\n\n# LLM2Vec\nmodel = Reranking(method='llm2vec', model_name='Meta-Llama-31-8B')\n\n# MonoBERT\nmodel = Reranking(method='monobert', model_name='monobert-large')\n\n# MonoT5\nReranking(method='monot5', model_name='monot5-base-msmarco')\n\n# RankGPT\nmodel = Reranking(method='rankgpt', model_name='llamav3.1-8b')\n\n# RankGPT API\nmodel = Reranking(method='rankgpt-api', model_name='gpt-3.5', api_key=\"gpt-api-key\")\nmodel = Reranking(method='rankgpt-api', model_name='gpt-4', api_key=\"gpt-api-key\")\nmodel = Reranking(method='rankgpt-api', model_name='llamav3.1-8b', api_key=\"together-api-key\")\nmodel = Reranking(method='rankgpt-api', model_name='claude-3-5', api_key=\"claude-api-key\")\n\n# RankT5\nmodel = Reranking(method='rankt5', model_name='rankt5-base')\n\n# Sentence Transformer Reranker\nmodel = Reranking(method='sentence_transformer_reranker', model_name='all-MiniLM-L6-v2')\nmodel = Reranking(method='sentence_transformer_reranker', model_name='gtr-t5-base')\nmodel = Reranking(method='sentence_transformer_reranker', model_name='sentence-t5-base')\nmodel = Reranking(method='sentence_transformer_reranker', model_name='distilbert-multilingual-nli-stsb-quora-ranking')\nmodel = Reranking(method='sentence_transformer_reranker', model_name='msmarco-bert-co-condensor')\n\n# SPLADE\nmodel = Reranking(method='splade', model_name='splade-cocondenser')\n\n# Transformer Ranker\nmodel = Reranking(method='transformer_ranker', model_name='mxbai-rerank-xsmall')\nmodel = Reranking(method='transformer_ranker', 
model_name='bge-reranker-base')\nmodel = Reranking(method='transformer_ranker', model_name='bce-reranker-base')\nmodel = Reranking(method='transformer_ranker', model_name='jina-reranker-tiny')\nmodel = Reranking(method='transformer_ranker', model_name='gte-multilingual-reranker-base')\nmodel = Reranking(method='transformer_ranker', model_name='nli-deberta-v3-large')\nmodel = Reranking(method='transformer_ranker', model_name='ms-marco-TinyBERT-L-6')\nmodel = Reranking(method='transformer_ranker', model_name='msmarco-MiniLM-L12-en-de-v1')\n\n# TwoLAR\nmodel = Reranking(method='twolar', model_name='twolar-xl')\n\n# Vicuna Reranker\nmodel = Reranking(method='vicuna_reranker', model_name='rank_vicuna_7b_v1')\n\n# Zephyr Reranker\nmodel = Reranking(method='zephyr_reranker', model_name='rank_zephyr_7b_v1_full')\n```\n\n---\n\n## 4️⃣ 使用生成器模块\n\nRankify 提供了**生成器模块**，用于**检索增强生成（RAG）**，可将检索到的文档与 OpenAI、LiteLLM、vLLM 和 Hugging Face 等生成模型集成。其模块化设计允许轻松添加新的 **RAG 方法** 和 **端点**，从而无缝尝试零样本 RAG、思维链 RAG 和基于 FiD 的 RAG 等方法。以下是一些使用不同 RAG 方法以及如何引入不同 LLM 端点的示例。\n\n请注意，要使用基于 API 的端点（OpenAI、LiteLLM），您需要指定 api-key。请参阅下面的示例了解如何操作。\n\n**使用不同 RAG 方法和后端的示例**\n\n```python\n# 零样本与 Huggingface 端点\ngenerator = Generator(method=\"zero-shot\", model_name='meta-llama\u002FMeta-Llama-3.1-8B-Instruct', backend=\"huggingface\")\n\n# 基本 RAG 与 LiteLLM 端点\ngenerator = Generator(method=\"basic-rag\", model_name='ollama\u002Fmistral', backend=\"litellm\", api_key=api_key)\n\n# 思维链 RAG 与 vLLM 端点\ngenerator = Generator(method=\"chain-of-thought-rag\", model_name='mistralai\u002FMistral-7B-v0.1', backend=\"vllm\")\n\n# 上下文内 RALM 与 OpenAI 端点\ngenerator = Generator(method=\"in-context-ralm\", model_name='gpt-3.5-turbo', backend=\"openai\", api_keys=[api_key])\n```\n\n**无需 API 推理的使用示例**\n\n```python\nfrom rankify.dataset.dataset import Document, Question, Answer, Context\nfrom rankify.generator.generator import Generator\n\n# 定义问题和答案\nquestion = Question(\"奥地利的首都是什么？\")\nanswers = Answer(\"\")\ncontexts = [\n    
Context(id=1, title=\"法国\", text=\"法国的首都巴黎。\", score=0.9),\n    Context(id=2, title=\"德国\", text=\"柏林是德国的首都。\", score=0.5)\n]\n\n# 构建文档\ndoc = Document(question=question, answers=answers, contexts=contexts)\n\n# 初始化生成器（例如 Meta Llama）\ngenerator = Generator(method=\"basic-rag\", model_name='meta-llama\u002FMeta-Llama-3.1-8B-Instruct', backend=\"huggingface\")\n\n# 生成答案\ngenerated_answers = generator.generate([doc])\nprint(generated_answers)  # 输出: [\"巴黎\"]\n```\n\n**带 API 推理的使用示例**\n\n将您的 API 密钥保存在 .env.local 文件中，您可以通过以下列出的方法访问它们：\n```python\n# 在 .env.local 中：\nOPENAI_API_KEY=your-api-key\nLITELLM_API_KEY=your-api-key\n```\n\n**使用**\n```python\n# 加载 LiteLLM api-key\napi_key = get_litellm_api_key()\n# 加载 OpenAI api-key\napi_key = get_openai_api_key()\n```\n\n**使用 LiteLLM 的完整示例：**\n\n```python\nfrom rankify.dataset.dataset import Document, Question, Answer, Context\nfrom rankify.generator.generator import Generator\nfrom rankify.utils.models.rank_llm.rerank.api_keys import get_litellm_api_key\n\n# 定义问题和答案\nquestion = Question(\"法国的首都是什么？\")\nanswers = Answer([\"\"])\ncontexts = [\n    Context(id=1, title=\"法国\", text=\"法国的首都巴黎。\", score=0.9),\n    Context(id=2, title=\"德国\", text=\"柏林是德国的首都。\", score=0.5)\n]\n\n# 构建文档\ndoc = Document(question=question, answers=answers, contexts=contexts)\n\n# 加载 api-key\napi_key = get_litellm_api_key()\n\n# 初始化生成器（例如 Meta Llama）\ngenerator = Generator(method=\"basic-rag\", model_name='ollama\u002Fmistral', backend=\"litellm\", api_key=api_key)\n\n# 生成答案\ngenerated_answers = generator.generate([doc])\nprint(generated_answers)  # 输出: [\"巴黎\"]\n```\n\n\n---\n## 5️⃣ 使用指标评估\n\nRankify 提供了内置的**评估指标**，用于**检索、重排序和检索增强生成（RAG）**。这些指标有助于评估检索到的文档质量、排序模型的有效性以及生成答案的准确性。\n\n**评估生成的答案**\n\n您可以比较生成的答案与真实答案，以评估**检索增强生成（RAG）结果**的质量。\n```python\nfrom rankify.metrics.metrics import Metrics\nfrom rankify.dataset.dataset import Dataset\n\n# 加载数据集\ndataset = Dataset('bm25', 'nq-test', 100)\ndocuments = dataset.download(force_download=False)\n\n# 
初始化生成器\ngenerator = Generator(method=\"in-context-ralm\", model_name='meta-llama\u002FLlama-3.1-8B')\n\n# 生成答案\ngenerated_answers = generator.generate(documents)\n\n# 评估生成的答案\nmetrics = Metrics(documents)\nprint(metrics.calculate_generation_metrics(generated_answers))\n```\n\n**评估检索性能**\n\n```python\n# 计算重排序前的检索指标\nmetrics = Metrics(documents)\nbefore_ranking_metrics = metrics.calculate_retrieval_metrics(ks=[1, 5, 10, 20, 50, 100], use_reordered=False)\n\nprint(before_ranking_metrics)\n```\n\n**评估重排序后的结果**\n\n```python\n# 计算重排序后的检索指标\nafter_ranking_metrics = metrics.calculate_retrieval_metrics(ks=[1, 5, 10, 20, 50, 100], use_reordered=True)\nprint(after_ranking_metrics)\n```\n\n## 🧪 BEIR & TREC DL19\u002FDL20 与 BM25\n\n**Rankify** 提供便捷的钩子，可在 **BEIR** 任务和 **TREC DL'19\u002F20** 上运行 BM25 基线，并使用 TREC 风格指标（nDCG、MAP、MRR）进行评估。\n\n### 快速入门（单个数据集）\n```python\nfrom rankify.dataset.dataset import Dataset\nfrom rankify.metrics.metrics import Metrics\n\n# 下载预检索的 BM25 结果（每个查询的 top-k）\ndocs = Dataset('bm25', 'dl19', n_docs=1000).download(force_download=False)\n\n# 使用 TREC 指标进行评估（此处默认显示 nDCG@10\u002F100）\nmetrics = Metrics(docs)\nprint(metrics.calculate_trec_metrics(ndcg_cuts=[10, 100], use_reordered=False))\n```\n\n> **注意**\n> - 支持的名称包括 **`dl19`**、**`dl20`**，以及以 `beir-` 前缀命名的 BEIR 任务，例如：\n>   `beir-arguana`、`beir-covid`、`beir-dbpedia`、`beir-fever`、`beir-fiqa`、`beir-news`、\n>   `beir-nfc`、`beir-quora`、`beir-robust04`、`beir-scidocs`、`beir-scifact`、`beir-signal`、`beir-touche`。\n> - 如果需要明确选择 qrel 文件，请将 `qrel=name.replace(\"beir-\", \"\")` 传递给 `calculate_trec_metrics`。\n\n### 批量处理 BEIR 和 DL 数据集\n```python\nfrom rankify.dataset.dataset import Dataset\nfrom rankify.metrics.metrics import Metrics\n\nBEIR_TASKS = [\n    \"beir-arguana\", \"beir-covid\", \"beir-dbpedia\", \"beir-fever\", \"beir-fiqa\", \"beir-news\",\n    \"beir-nfc\", \"beir-quora\", \"beir-robust04\", \"beir-scidocs\", \"beir-scifact\",\n    \"beir-signal\", \"beir-touche\",\n]\n\nfor name in [\"dl19\", \"dl20\", 
*BEIR_TASKS]:\n    docs = Dataset('bm25', name, n_docs=100).download(force_download=False)\n    m = Metrics(docs)\n    res = m.calculate_trec_metrics(ndcg_cuts=[10, 100], use_reordered=False)\n    print(name, res)\n```\n\n### （可选）添加重排序器并进行评估\n```python\nfrom rankify.models.reranking import Reranking\nfrom rankify.dataset.dataset import Dataset\nfrom rankify.metrics.metrics import Metrics\n\nname = \"beir-arguana\"\ndocs = Dataset('bm25', name, n_docs=100).download(force_download=False)\nreranker = Reranking(method='transformer_ranker', model_name='bge-reranker-base')\nreranker.rank(docs)\n\nm = Metrics(docs)\nprint(\"之前:\", m.calculate_trec_metrics(ndcg_cuts=[10, 100], use_reordered=False))\nprint(\"之后:\", m.calculate_trec_metrics(ndcg_cuts=[10, 100], use_reordered=True))\n```\n\n\n## 📏 使用 **RAGAS** 评估 RAG\n\nRankify 提供了一个轻量级封装，围绕 **ragas** 构建，让生成答案的质量评估变得简单而灵活——无论您是用本地 HF 模型还是 OpenAI 等托管 API 进行判断。您可以运行 **快速默认值**，**挑选特定指标**，或在计算资源紧张时**模拟预测结果**。\n### ✅ 安装\n\n```bash\n# Rankify RAG 的核心依赖\npip install bert-score\npip install ragas\npip install langchain_huggingface\npip install rouge-score\n```\n\n\n```python\nimport torch\nfrom rankify.dataset.dataset import Document, Question, Answer, Context\nfrom rankify.generator.generator import Generator\nfrom rankify.metrics.generator_metrics import GeneratorMetrics\nfrom rankify.metrics.ragas_bridge import RagasModels\n\n# 1) 构建一个小型文档\nquestion = Question(\"法国的首都是什么？\")\nanswers  = Answer([\"巴黎\"])\ncontexts = [\n    Context(id=1, title=\"法国\",   text=\"法国的首都就是巴黎。\", score=0.9),\n    Context(id=2, title=\"德国\",  text=\"柏林是德国的首都。\", score=0.5),\n]\ndoc = Document(question=question, answers=answers, contexts=contexts)\n\n# 2) 生成答案（或跳过并提供您自己的预测列表）\ngenerator   = Generator(method=\"basic-rag\",\n                        model_name=\"meta-llama\u002FMeta-Llama-3.1-8B-Instruct\",\n                        backend=\"huggingface\",\n                        torch_dtype=torch.float16)\npredictions = 
generator.generate([doc])\nprint(\"生成的结果:\", predictions)\n\n# 3) 使用 RAGAS 进行评估（HF 判断器）\ngen_metrics = GeneratorMetrics([doc])\n\nragas_hf = RagasModels(\n    llm_kind=\"hf\",\n    llm_name=\"meta-llama\u002FMeta-Llama-3.1-8B-Instruct\",\n    embeddings_kind=\"hf\",\n    embeddings_name=\"sentence-transformers\u002Fall-MiniLM-L6-v2\",\n    torch_dtype=\"float16\",\n    max_new_tokens=256,  # 输出越短，速度越快且成本越低\n    timeout=180,         # 每次指标调用的秒数\n    max_retries=1,\n    max_workers=2,       # 在有限硬件上保持较小规模\n)\n\n# (A) 快速默认值\nscores_fast = gen_metrics.all(predictions, ragas_models=ragas_hf)\nprint(\"RAGAS（快速）:\", scores_fast)\n\n# (B) 挑选特定指标\nscores_specific = gen_metrics.ragas_generator(\n    predictions,\n    judge=ragas_hf,\n    metrics=[\"faithfulness\", \"response_relevancy\", \"context_precision\", \"context_recall\"],\n)\nprint(\"RAGAS（特定）:\", scores_specific)\n\n# (C) OpenAI 判断器（如果您有 API 密钥，速度会快很多）\nragas_openai = RagasModels(llm_kind=\"openai\", llm_name=\"gpt-4o-mini\", timeout=30)\nscores_openai = gen_metrics.all(predictions, ragas_models=ragas_openai)\nprint(\"RAGAS（OpenAI）:\", {k: v for k, v in scores_openai.items() if k.startswith(\"ragas_\")})\n```\n\n## 📜 支持的模型\n\n### **1️⃣ 索引**  \n- ✅ **维基百科**\n- ✅ **MS-MARCO**\n- 🕒 **在线搜索** \n\n### **1️⃣ 检索器**  \n- ✅ **[BM25](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1561\u002F1500000019)**\n- ✅ **[DPR](https:\u002F\u002Farxiv.org\u002Fabs\u002F2004.04906)** \n- ✅ **[ColBERT](https:\u002F\u002Farxiv.org\u002Fabs\u002F2004.12832)**   \n- ✅ **[ANCE](https:\u002F\u002Farxiv.org\u002Fabs\u002F2007.00808)**\n- ✅ **[BGE](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.03216)** \n- ✅ **[Contriever](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.09118)** \n- ✅ **[BPR](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.00882)** \n- ✅ **[HYDE](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.10496)**\n- ✅ **[SFR](https:\u002F\u002Fhuggingface.co\u002FSalesforce\u002FSFR-Embedding-Mistral)**\n- ✅ 
**[E5](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.03533)**\n- ✅ **[GritLM](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.09906)**\n- ✅ **[M2](https:\u002F\u002Farxiv.org\u002Fabs\u002F2310.12109)**\n- ✅ **[Nomic](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.01613)**\n- ✅ **[Instructor](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.09741)** \n- ✅ **[RaDeR](https:\u002F\u002Farxiv.org\u002Fabs\u002F2505.18405)**\n- ✅ **[ReasonIR](https:\u002F\u002Farxiv.org\u002Fabs\u002F2504.20595)** \n- ✅ **[BGE-Reasoner](https:\u002F\u002Fhuggingface.co\u002FBAAI\u002Fbge-en-icl)**\n- ✅ **[ReasonEmbed](https:\u002F\u002Farxiv.org\u002Fabs\u002F2510.08252)**\n- ✅ **[DiverRetriever](https:\u002F\u002Fhuggingface.co\u002FAQ-MedAI\u002FDiver-Retriever-4B)**\n- 🕒 **RepLlama**\n- 🕒 **coCondenser**   \n- 🕒 **Spar** \n- 🕒 **Dragon** \n- 🕒 **混合** \n---\n\n### **2️⃣ 重排序器**  \n\n- ✅ **[交叉编码器](https:\u002F\u002Fhuggingface.co\u002Fcross-encoder)**  \n- ✅ **[RankGPT](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.09542)**  \n- ✅ **[RankGPT-API](https:\u002F\u002Farxiv.org\u002Fabs\u002F2304.09542)**  \n- ✅ **[MonoT5](https:\u002F\u002Farxiv.org\u002Fabs\u002F2003.06713)**  \n- ✅ **[MonoBert](https:\u002F\u002Farxiv.org\u002Fabs\u002F1910.14424)**  \n- ✅ **[RankT5](https:\u002F\u002Farxiv.org\u002Fabs\u002F2210.10634)**  \n- ✅ **[ListT5](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.15838)**  \n- ✅ **[LiT5Score](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.16098)**  \n- ✅ **[LiT5Dist](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.16098)**  \n- ✅ **[Vicuna 重排序器](https:\u002F\u002Farxiv.org\u002Fabs\u002F2309.15088)**  \n- ✅ **[Zephyr 重排序器](https:\u002F\u002Farxiv.org\u002Fabs\u002F2312.02724)**  \n- ✅ **[基于句子转换器的](https:\u002F\u002Fhuggingface.co\u002Fsentence-transformers)**  \n- ✅ **[FlashRank 模型](https:\u002F\u002Fgithub.com\u002FPrithivirajDamodaran\u002FFlashRank)**  \n- ✅ **基于 API 的重排序器**  \n- ✅ **[ColBERT 重排序器](https:\u002F\u002Farxiv.org\u002Fabs\u002F2004.12832)**  \n- ✅ 
**LLM 分层排名器**  \n- ✅ **[Splade 重排序器](https:\u002F\u002Fdl.acm.org\u002Fdoi\u002F10.1145\u002F3477495.3531857)**  \n- ✅ **[UPR 重排序器](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.07496)**  \n- ✅ **[Inranker 重排序器](https:\u002F\u002Farxiv.org\u002Fabs\u002F2401.06910)**  \n- ✅ **Transformer 重排序器**  \n- ✅ **[FIRST 重排序器](https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.05508)**  \n- ✅ **[Blender 重排序器](https:\u002F\u002Farxiv.org\u002Fabs\u002F2306.02561)**  \n- ✅ **[LLM2VEC 重排序器](https:\u002F\u002Farxiv.org\u002Fabs\u002F2404.05961)**  \n- ✅ **[ECHO 重排序器](https:\u002F\u002Farxiv.org\u002Fabs\u002F2402.10866)**  \n- ✅ **[上下文内重排序器](https:\u002F\u002Farxiv.org\u002Fabs\u002F2410.02642)**  \n- 🕒 **DynRank**  \n- 🕒 **ASRank**  \n- 🕒 **RankLlama**\n\n---\n\n### **3️⃣ 生成器**\n#### **RAG 方法**\n- ✅ **零样本**\n- ✅ **基础 RAG**\n- ✅ **思维链 RAG**  \n- ✅ **解码器中的融合（FiD）与 T5**\n- ✅ **上下文学习 RALM**\n- 🕒 **自一致性 RAG**\n- 🕒 **检索思维链**\n\n#### **LLM 端点**\n- ✅ **Hugging Face**\n- ✅ **vLLM**\n- ✅ **LiteLLM**  \n- ✅ **OpenAI**\n\n---\n\n### **✨ 功能**  \n\n- 🔥 **统一框架**：将**检索**、**重排序**和**检索增强生成（RAG）**整合到一个模块化工具包中。  \n- 📚 **丰富的数据集支持**：包含**40 多个基准数据集**，并附带**预检索文档**，方便无缝实验。  \n- 🧲 **多样化的检索方法**：支持**BM25、DPR、ANCE、BPR、ColBERT、BGE、Contriever、SFR、E5、GritLM、M2、Nomic、Instructor、RaDeR、ReasonIR、BGE-Reasoner 和 ReasonEmbed**，提供灵活的检索策略。  \n- 🎯 **强大的重排序功能**：实现**24 种先进模型**，包含**41 种子方法**，优化排名性能。  \n- 🛠 **预构建索引**：提供**Wikipedia 和 MS MARCO** 语料库，消除索引开销，加快检索速度。  \n- 🔮 **无缝 RAG 集成**：可与**Hugging Face、OpenAI、vLLM、LiteLLM** 等后端配合，支持**GPT、LLAMA、T5 和解码器融合（FiD）**等推理模型，实现多种**检索增强生成**方法。  \n- 🛠 **可扩展与模块化**：轻松集成**自定义数据集、检索器、排名模型和 RAG 流水线**。  \n- 📊 **内置评估套件**：包括**检索、排名和 RAG 指标**，用于稳健的基准测试。  \n- 📖 **用户友好的文档**：访问详细的**[📖 在线文档](http:\u002F\u002Frankify.readthedocs.io\u002F)**、**示例笔记本**和**教程**，便于快速上手。  \n\n## 🔍 路线图  \n\n**Rankify** 仍在开发中，这是我们的首个发布版本（**v0.1.0**）。尽管它已支持广泛的检索、重排序和 RAG 技术，我们正在积极增强其功能，增加更多检索器、排名器、数据集和特性。  \n\n\n## 📖 文档\n\n如需完整 API 文档，请访问 [Rankify 文档](http:\u002F\u002Frankify.readthedocs.io\u002F)。\n\n---\n\n## 💡 
贡献\n\n按照以下步骤参与开发：\n\n1. **将此仓库 Fork 到你的 GitHub 账户**。\n\n2. **创建一个新分支**用于你的功能或修复：\n\n   ```bash\n   git checkout -b feature\u002FYourFeatureName\n   ```\n\n3. **进行修改并提交**：\n\n   ```bash\n   git commit -m \"Add YourFeatureName\"\n   ```\n\n4. **将更改推送到你的分支**：\n\n   ```bash\n   git push origin feature\u002FYourFeatureName\n   ```\n\n5. **提交 Pull Request**以提出你的更改。\n\n感谢你帮助让这个项目变得更好！\n\n---\n\n## 🌐 社区贡献\n\n**中文社区资源现已上线！**  \n\n特别感谢 [Xiumao](https:\u002F\u002Fgithub.com\u002Fxiumao) 撰写了两篇关于 Rankify 的优秀中文博客文章：  \n\n> - 📘 [Rankify 简介](https:\u002F\u002Fmp.weixin.qq.com\u002Fs\u002F-dH64Q_KWvj8VQq7Ys383Q)  \n> - 📘 [深入解析 Rankify 中的重排序模型](https:\u002F\u002Fmp.weixin.qq.com\u002Fs\u002FXcOmXGv4CqUIp0oBcOgltw)  \n\n这些文章经过高度流量优化，在中国学术界和开发者圈中广受推荐。  \n\n我们更新了 [中文版](README_zh.md)，以反映这些博客贡献，同时保留原始内容——感谢 Xiumao 的持续支持！\n\n\n## :bookmark: 许可证\n\nRankify 采用 Apache-2.0 许可证——详情请参阅 [LICENSE](https:\u002F\u002Fopensource.org\u002Flicense\u002Fapache-2-0) 文件。\n\n\n## 🙏 致谢  \n\n我们衷心感谢以下库，它们为 **Rankify** 的开发做出了巨大贡献：  \n\n- **Diver** —— 提供密集检索器路由和缓存逻辑的参考实现，用于集成各种双编码器。  \n  🔗 [GitHub 仓库](https:\u002F\u002Fgithub.com\u002FAQ-MedAI\u002FDiver)\n\n- **Rerankers** —— 强大的 Python 库，用于集成各种重排序方法。  \n  🔗 [GitHub 仓库](https:\u002F\u002Fgithub.com\u002FAnswerDotAI\u002Frerankers\u002Ftree\u002Fmain)  \n\n- **Pyserini** —— 支持 BM25 基础检索，并与稀疏\u002F密集检索器集成的工具包。  \n  🔗 [GitHub 仓库](https:\u002F\u002Fgithub.com\u002Fcastorini\u002Fpyserini)  \n\n- **FlashRAG** —— 用于检索增强生成（RAG）研究的模块化框架。  \n  🔗 [GitHub 仓库](https:\u002F\u002Fgithub.com\u002FRUC-NLPIR\u002FFlashRAG)  \n\n\n## :star2: 引用\n\n如果您的研究需要，请引用我们的论文：\n\n```BibTex\n@article{abdallah2025rankify,\n  title={Rankify：用于检索、重排序和检索增强生成的全面 Python 工具包},\n  author={Abdallah, Abdelrahman 和 Mozafari, Jamshid 和 Piryani, Bhawna 和 Ali, Mohammed 和 Jatowt, Adam},\n  journal={arXiv 预印本 arXiv:2502.02464},\n  year={2025}\n}\n```\n\n## 
星数历史\n\n\n[![星数历史图表](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FDataScienceUIBK_Rankify_readme_c0b19f8fef21.png)](https:\u002F\u002Fstar-history.com\u002F#DataScienceUIBK\u002FRankify&Date)","# Rankify 快速上手指南\n\n## 环境准备\n\n- **Python 版本**：3.10 或 3.11  \n- **推荐系统**：Linux \u002F macOS（Windows 可用 WSL2）  \n- **GPU 支持**（可选）：NVIDIA 显卡 + CUDA 12.4\u002F12.6，显著提升检索与重排序性能  \n- **前置依赖**：确保已安装 `conda` 或 `pip`\n\n> 推荐使用 Conda 环境管理，避免依赖冲突。\n\n## 安装步骤\n\n### 1. 创建并激活虚拟环境\n```bash\nconda create -n rankify python=3.10\nconda activate rankify\n```\n\n### 2. 安装 PyTorch（推荐 GPU 版本）\n```bash\npip install torch==2.5.1 torchvision==0.20.1 torchaudio==2.5.1 --index-url https:\u002F\u002Fdownload.pytorch.org\u002Fwhl\u002Fcu124\n```\n\n> 若无 GPU，可替换为 `cu118` 版本：`--index-url https:\u002F\u002Fdownload.pytorch.org\u002Fwhl\u002Fcu118`\n\n### 3. 安装 Rankify（推荐完整版）\n```bash\npip install \"rankify[all]\"\n```\n\n> 国内用户可使用清华源加速：\n```bash\npip install \"rankify[all]\" -i https:\u002F\u002Fpypi.tuna.tsinghua.edu.cn\u002Fsimple\n```\n\n> 如需仅安装特定模块：\n- 检索：`pip install \"rankify[retriever]\"`\n- 重排序：`pip install \"rankify[reranking]\"`\n- RAG 生成：`pip install \"rankify[rag]\"`\n\n## 基本使用\n\n### 一键 RAG 流水线（推荐）\n\n```python\nfrom rankify import pipeline\n\n# 使用默认配置执行 RAG\nrag = pipeline(\"rag\")\nanswers = rag(\"机器学习是什么？\", documents)\n\n# 自定义检索器、重排序器和生成器\nrag = pipeline(\n    \"rag\",\n    retriever=\"bge\",           # 使用 BGE 检索器\n    reranker=\"flashrank\",      # 使用 FlashRank 重排序\n    generator=\"basic-rag\"\n)\nanswers = rag(\"什么是人工智能？\", documents)\n```\n\n**可用流水线类型**：\n- `pipeline(\"search\")`：仅检索\n- `pipeline(\"rerank\")`：检索 + 重排序\n- `pipeline(\"rag\")`：完整 RAG 流程（检索 → 重排序 → 生成）\n\n> 文档：[Pipeline API 文档](https:\u002F\u002Frankify.readthedocs.io\u002Fen\u002Flatest\u002Ftutorials\u002Fpipeline\u002F)","某大型电商公司的智能客服团队正在构建基于知识库的问答系统，需从百万级商品说明书、售后政策和用户手册中精准检索用户问题的答案，提升响应准确率与客服效率。\n\n### 没有 Rankify 时\n- 团队使用传统关键词匹配（如BM25）检索，结果常返回无关的促销信息，误答率高达40%。\n- 
尝试接入多个重排序模型（如Cohere、BGE-Reranker），但每个模型需独立部署、数据格式不统一，集成耗时近两周。\n- 缺乏标准化评估体系，每次调整策略后需手动编写测试用例，无法快速对比效果。\n- 不同部门使用不同数据集（如客服日志、产品文档），无法复用已有标注数据，重复劳动严重。\n- 无法快速尝试RAG生成方案，最终答案仍依赖人工校对，自动化程度低。\n\n### 使用 Rankify 后\n- 一键切换7种检索器（如DPR、ColBERT），结合10万条真实客服对话数据，将准确率从60%提升至89%。\n- 集成24个SOTA重排序模型（如E5-Reranker、Jina-Reranker），通过统一API调用，3天内完成模型对比实验，选出最优组合。\n- 直接加载Rankify内置的40个预处理基准数据集，快速构建评估流水线，自动输出MRR、Hit@5等指标，节省80%测试时间。\n- 复用团队已有的售后文档数据集，无需重新清洗，直接接入Rankify的数据加载器，实现跨项目复用。\n- 结合Hugging Face的Llama3生成器，构建端到端RAG流程，自动生成结构化答案，人工校对工作量减少70%。\n\nRankify 让智能客服系统从“勉强可用”升级为“高精度、可迭代、可复用”的生产级AI引擎。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002FDataScienceUIBK_Rankify_dffe7e47.png","DataScienceUIBK","Data Science Group at Univ. of Innsbruck","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002FDataScienceUIBK_1e23bba2.png","",null,"https:\u002F\u002Fgithub.com\u002FDataScienceUIBK",[82,86,90,94,98,101],{"name":83,"color":84,"percentage":85},"Python","#3572A5",90.2,{"name":87,"color":88,"percentage":89},"TypeScript","#3178c6",8.4,{"name":91,"color":92,"percentage":93},"C++","#f34b7d",0.9,{"name":95,"color":96,"percentage":97},"Cuda","#3A4E3A",0.2,{"name":99,"color":100,"percentage":97},"CSS","#663399",{"name":102,"color":103,"percentage":104},"JavaScript","#f1e05a",0,667,68,"2026-04-02T14:24:40","Linux, macOS, Windows","需要 NVIDIA GPU，推荐显存 8GB+，CUDA 12.4 或 12.6","16GB+",{"notes":112,"python":113,"dependencies":114},"建议使用 conda 创建 Python 3.10 环境，安装 ColBERT 检索器需配置 GCC 9.4.0 及环境变量，首次运行可能下载数 GB 模型文件，推荐使用 pip install \"rankify[all]\" 
获取完整功能。","3.10+",[115,116,117,118,119,120,121,122,123],"torch==2.5.1","transformers","accelerate","sentencepiece","vllm","huggingface-hub","streamlit","gradio","fastapi",[54,14,26,15,13],[126,127,128,129,130,131,132,133,134,135,136,137],"nlp","question-answering","rag","reranking","retrieval","retrival-augmented-generation","agent","llm","ranked-retrieval","ai","chatgpt","information-retrieval","2026-03-27T02:49:30.150509","2026-04-06T05:15:45.058756",[141,146,151,156,161,166],{"id":142,"question_zh":143,"answer_zh":144,"source_url":145},9028,"如何获取 Hugging Face 上 Bamboogle 数据集中的 text 字段？","每个分区的数据与原始数据集相同，text 字段为空是因为数据集仅提供索引（idx），实际文本需从原始数据源加载。请确保使用与原始数据集一致的分割方式，并检查是否需要从原始 Hugging Face 数据集页面下载完整文本内容。","https:\u002F\u002Fgithub.com\u002FDataScienceUIBK\u002FRankify\u002Fissues\u002F8",{"id":147,"question_zh":148,"answer_zh":149,"source_url":150},9029,"在 Jupyter 或 Colab 中导入 Reranker 时内核崩溃，如何解决？","该问题由 Pyserini 依赖的 Java 版本不兼容导致。请安装 Java 21：在 Colab 中运行 !apt-get install openjdk-21-jdk，然后重新安装 rankify 和 pyserini。参考官方修复 Notebook：https:\u002F\u002Fcolab.research.google.com\u002Fdrive\u002F1QukxP1WZHkPfD4321UcLXD24sKCpuUuP?usp=sharing","https:\u002F\u002Fgithub.com\u002FDataScienceUIBK\u002FRankify\u002Fissues\u002F42",{"id":152,"question_zh":153,"answer_zh":154,"source_url":155},9030,"如何在 Rankify 中集成自定义数据集？","支持两种方式：1) 若只有问题，使用 Dataset.load_dataset_qa(\"path_to_your_input_file.json\") 加载；2) 若已有预检索结果（如 BM25 输出），使用 Dataset.load_dataset(\"path_to_your_retriever_dataset.json\", top_k=100) 加载。无需等待官方支持，当前版本已支持。","https:\u002F\u002Fgithub.com\u002FDataScienceUIBK\u002FRankify\u002Fissues\u002F5",{"id":157,"question_zh":158,"answer_zh":159,"source_url":160},9031,"使用 UPR 路由器时出现 TypeError: bad operand type for unary -: 'list'，如何修复？","这是 UPR 模型中的一个已知 bug，已在 v0.1.3 版本中修复。请升级 Rankify：pip install --upgrade rankify==0.1.3 即可解决。","https:\u002F\u002Fgithub.com\u002FDataScienceUIBK\u002FRankify\u002Fissues\u002F4",{"id":162,"question_zh":163,"answer_zh":164,"source_url":165},9032,"如何避免在使用 
Rankify 时混淆检索、重排序和生成的质量问题？","建议参考已合并的文档《troubleshooting_rag_and_reranking.md》，检查以下内容：1) 区分是检索质量差（召回率低）还是重排序质量差（排序错误）；2) 验证数据集分割、索引路径、模型名称和随机种子是否正确；3) 使用标准配置片段复现问题；4) 提交 issue 时附上数据集 ID、检索设置、重排序器、RAG 方法及日志片段。","https:\u002F\u002Fgithub.com\u002FDataScienceUIBK\u002FRankify\u002Fissues\u002F66",{"id":167,"question_zh":168,"answer_zh":169,"source_url":170},9033,"Rankify 是否支持通过 API 调用 LLM 和重排序模型？","Rankify 已支持通过 backend 参数调用外部 API 模型，例如使用 backend=\"vllm\" 或 backend=\"huggingface\" 启动生成器。对于重排序模型，可通过自定义模型类集成外部 API，具体可参考 Generator 类的实现方式，传入 model_name 和 backend 配置。","https:\u002F\u002Fgithub.com\u002FDataScienceUIBK\u002FRankify\u002Fissues\u002F6",[172,177,182,187,191,195,199,204],{"id":173,"version":174,"summary_zh":175,"released_at":176},106483,"v0.1.4","Highlights\r\n\r\n🛠️ Fixed numerous bugs across indexing and retrievers\r\n\r\n📏 Integrated RAGAS evaluation metrics\r\n\r\n🧩 Added new RAG pipelines\u002Fconfigs\r\n\r\n💻 Introduced CLI for indexing: rankify-index\r\n\r\n\r\nBug fixes (selected)\r\n\r\n- Lucene (BM25): stable JsonCollection wiring, index dir layout, robust load_index.\r\n\r\n- Contriever: fixed JSONL\u002FTSV mismatch; chunked embedding generation; float32 normalization; safer serialization & cleanup.\r\n\r\n- BGE: correct CLS pooling + L2 normalization; cosine via IndexFlatIP; chunk merge validation.\r\n\r\n- ColBERT: deterministic collection.tsv with sequential IDs; original↔sequential ID mappings; TSV verification & better diagnostics; loader-based load.\r\n\r\n- ANCE: robust doc-id extraction across fields; consistent FAISS↔docid mapping; safer metadata writer.\r\n\r\n- DPR: reliable Pyserini encode\u002Findex orchestration; mapping & metadata persisted.","2025-10-15T00:22:47",{"id":178,"version":179,"summary_zh":180,"released_at":181},106484,"v0.1.3"," features:\r\n  - \"🔧 Fixed UPR bugs to enhance retrieval and ranking stability.\"\r\n  - \"📦 Removed 7z dependency and added native Python extraction for datasets.\"\r\n  - \"🚀 Added Hyde Retriever 
([arXiv:2212.10496](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.10496)) for improved retrieval using hypothetical document embeddings.\"\r\n  - \"📂 Expanded support for pre-retrieved datasets, adding more benchmark datasets.\"","2025-03-10T18:33:51",{"id":183,"version":184,"summary_zh":185,"released_at":186},106485,"v0.1.2","We have updated the pyproject.toml file to adjust the default installation behavior. With this release, users can install the Rankify library without including vllm by default. vllm is now part of the optional reranking dependencies group.","2025-02-18T21:44:28",{"id":188,"version":189,"summary_zh":79,"released_at":190},106486,"v0.1.0.post4","2025-02-10T03:28:33",{"id":192,"version":193,"summary_zh":79,"released_at":194},106487,"v0.1.0.post3","2025-02-10T00:17:41",{"id":196,"version":197,"summary_zh":79,"released_at":198},106488,"v0.1.0.post1","2025-02-09T22:48:17",{"id":200,"version":201,"summary_zh":202,"released_at":203},106489,"v0.1.0","**Full Changelog**: https:\u002F\u002Fgithub.com\u002FDataScienceUIBK\u002FRankify\u002Fcommits\u002Fv0.1.0","2025-02-09T22:09:54",{"id":205,"version":206,"summary_zh":79,"released_at":207},106490,"0.1.0.post2","2025-02-09T23:38:08"]