[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-openspeech-team--openspeech":3,"tool-openspeech-team--openspeech":62},[4,18,26,35,44,53],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":17},4358,"openclaw","openclaw\u002Fopenclaw","OpenClaw 是一款专为个人打造的本地化 AI 助手，旨在让你在自己的设备上拥有完全可控的智能伙伴。它打破了传统 AI 助手局限于特定网页或应用的束缚，能够直接接入你日常使用的各类通讯渠道，包括微信、WhatsApp、Telegram、Discord、iMessage 等数十种平台。无论你在哪个聊天软件中发送消息，OpenClaw 都能即时响应，甚至支持在 macOS、iOS 和 Android 设备上进行语音交互，并提供实时的画布渲染功能供你操控。\n\n这款工具主要解决了用户对数据隐私、响应速度以及“始终在线”体验的需求。通过将 AI 部署在本地，用户无需依赖云端服务即可享受快速、私密的智能辅助，真正实现了“你的数据，你做主”。其独特的技术亮点在于强大的网关架构，将控制平面与核心助手分离，确保跨平台通信的流畅性与扩展性。\n\nOpenClaw 非常适合希望构建个性化工作流的技术爱好者、开发者，以及注重隐私保护且不愿被单一生态绑定的普通用户。只要具备基础的终端操作能力（支持 macOS、Linux 及 Windows WSL2），即可通过简单的命令行引导完成部署。如果你渴望拥有一个懂你",349277,3,"2026-04-06T06:32:30",[13,14,15,16],"Agent","开发框架","图像","数据工具","ready",{"id":19,"name":20,"github_repo":21,"description_zh":22,"stars":23,"difficulty_score":10,"last_commit_at":24,"category_tags":25,"status":17},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,"2026-04-05T11:01:52",[14,15,13],{"id":27,"name":28,"github_repo":29,"description_zh":30,"stars":31,"difficulty_score":32,"last_commit_at":33,"category_tags":34,"status":17},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",108322,2,"2026-04-10T11:39:34",[14,15,13],{"id":36,"name":37,"github_repo":38,"description_zh":39,"stars":40,"difficulty_score":32,"last_commit_at":41,"category_tags":42,"status":17},6121,"gemini-cli","google-gemini\u002Fgemini-cli","gemini-cli 是一款由谷歌推出的开源 AI 命令行工具，它将强大的 Gemini 大模型能力直接集成到用户的终端环境中。对于习惯在命令行工作的开发者而言，它提供了一条从输入提示词到获取模型响应的最短路径，无需切换窗口即可享受智能辅助。\n\n这款工具主要解决了开发过程中频繁上下文切换的痛点，让用户能在熟悉的终端界面内直接完成代码理解、生成、调试以及自动化运维任务。无论是查询大型代码库、根据草图生成应用，还是执行复杂的 Git 操作，gemini-cli 都能通过自然语言指令高效处理。\n\n它特别适合广大软件工程师、DevOps 人员及技术研究人员使用。其核心亮点包括支持高达 100 万 token 的超长上下文窗口，具备出色的逻辑推理能力；内置 Google 搜索、文件操作及 Shell 命令执行等实用工具；更独特的是，它支持 MCP（模型上下文协议），允许用户灵活扩展自定义集成，连接如图像生成等外部能力。此外，个人谷歌账号即可享受免费的额度支持，且项目基于 Apache 2.0 协议完全开源，是提升终端工作效率的理想助手。",100752,"2026-04-10T01:20:03",[43,13,15,14],"插件",{"id":45,"name":46,"github_repo":47,"description_zh":48,"stars":49,"difficulty_score":10,"last_commit_at":50,"category_tags":51,"status":17},4487,"LLMs-from-scratch","rasbt\u002FLLMs-from-scratch","LLMs-from-scratch 是一个基于 PyTorch 的开源教育项目，旨在引导用户从零开始一步步构建一个类似 ChatGPT 的大型语言模型（LLM）。它不仅是同名技术著作的官方代码库，更提供了一套完整的实践方案，涵盖模型开发、预训练及微调的全过程。\n\n该项目主要解决了大模型领域“黑盒化”的学习痛点。许多开发者虽能调用现成模型，却难以深入理解其内部架构与训练机制。通过亲手编写每一行核心代码，用户能够透彻掌握 Transformer 
架构、注意力机制等关键原理，从而真正理解大模型是如何“思考”的。此外，项目还包含了加载大型预训练权重进行微调的代码，帮助用户将理论知识延伸至实际应用。\n\nLLMs-from-scratch 特别适合希望深入底层原理的 AI 开发者、研究人员以及计算机专业的学生。对于不满足于仅使用 API，而是渴望探究模型构建细节的技术人员而言，这是极佳的学习资源。其独特的技术亮点在于“循序渐进”的教学设计：将复杂的系统工程拆解为清晰的步骤，配合详细的图表与示例，让构建一个虽小但功能完备的大模型变得触手可及。无论你是想夯实理论基础，还是为未来研发更大规模的模型做准备",90106,"2026-04-06T11:19:32",[52,15,13,14],"语言模型",{"id":54,"name":55,"github_repo":56,"description_zh":57,"stars":58,"difficulty_score":10,"last_commit_at":59,"category_tags":60,"status":17},4292,"Deep-Live-Cam","hacksider\u002FDeep-Live-Cam","Deep-Live-Cam 是一款专注于实时换脸与视频生成的开源工具，用户仅需一张静态照片，即可通过“一键操作”实现摄像头画面的即时变脸或制作深度伪造视频。它有效解决了传统换脸技术流程繁琐、对硬件配置要求极高以及难以实时预览的痛点，让高质量的数字内容创作变得触手可及。\n\n这款工具不仅适合开发者和技术研究人员探索算法边界，更因其极简的操作逻辑（仅需三步：选脸、选摄像头、启动），广泛适用于普通用户、内容创作者、设计师及直播主播。无论是为了动画角色定制、服装展示模特替换，还是制作趣味短视频和直播互动，Deep-Live-Cam 都能提供流畅的支持。\n\n其核心技术亮点在于强大的实时处理能力，支持口型遮罩（Mouth Mask）以保留使用者原始的嘴部动作，确保表情自然精准；同时具备“人脸映射”功能，可同时对画面中的多个主体应用不同面孔。此外，项目内置了严格的内容安全过滤机制，自动拦截涉及裸露、暴力等不当素材，并倡导用户在获得授权及明确标注的前提下合规使用，体现了技术发展与伦理责任的平衡。",88924,"2026-04-06T03:28:53",[14,15,13,61],"视频",{"id":63,"github_repo":64,"name":65,"description_en":66,"description_zh":67,"ai_summary_zh":67,"readme_en":68,"readme_zh":69,"quickstart_zh":70,"use_case_zh":71,"hero_image_url":72,"owner_login":73,"owner_name":74,"owner_avatar_url":75,"owner_bio":76,"owner_company":77,"owner_location":77,"owner_email":78,"owner_twitter":77,"owner_website":79,"owner_url":80,"languages":81,"stars":90,"forks":91,"last_commit_at":92,"license":93,"difficulty_score":94,"env_os":95,"env_gpu":96,"env_ram":95,"env_deps":97,"category_tags":103,"github_topics":105,"view_count":32,"oss_zip_url":77,"oss_zip_packed_at":77,"status":17,"created_at":113,"updated_at":114,"faqs":115,"releases":116},8197,"openspeech-team\u002Fopenspeech","openspeech","Open-Source Toolkit for End-to-End Speech Recognition leveraging PyTorch-Lightning and Hydra.","OpenSpeech 是一个专为端到端自动语音识别（ASR）打造的开源工具包，旨在让复杂的语音识别技术变得触手可及。它汇集了多种主流 ASR 模型的参考实现，并提供了涵盖三种语言的完整训练食谱，帮助用户快速复现论文成果或构建自己的识别系统。\n\n对于深受模型复现难、训练配置繁琐困扰的研究人员和开发者而言，OpenSpeech 提供了一站式的解决方案。它基于强大的 PyTorch-Lightning 和 Hydra 框架构建，不仅支持多 GPU\u002FTPU 分布式训练、混合精度加速等高性能特性，还引入了层级化的配置管理，让用户能灵活调整实验参数而无需深陷代码细节。此外，工具包还集成了智能批处理、Transducer 束搜索以及 ContextNet 等前沿功能，进一步提升了训练效率与识别准确率。\n\n无论是希望快速验证新算法的学术研究者，还是致力于将语音技术落地应用的工程师，OpenSpeech 都能成为得力的助手。它通过标准化的流程降低了技术门槛，让大家能更专注于核心算法的创新与优化，共同推动语音识别领域的开放与进步。","\u003Cdiv align=\"center\">\n\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fopenspeech-team_openspeech_readme_169b4cf11280.png\" width=500>\n\n\n\u003Cp align=\"center\">\n  \u003Ci>\u003Ca href=\"https:\u002F\u002Fgithub.com\u002Fsooftware\u002FOpenSpeech\u002Fblob\u002Fmain\u002FCONTRIBUTING.md\">\u003Ch3> 🤗 Contributing to OpenSpeech 🤗 \u003C\u002Fh3>\u003C\u002Fa>\u003C\u002Fi>\n  \u003C\u002Fp>\n\n\u003C\u002Fdiv>\n\n\n\u003Cp align=\"center\">\n  \u003Ca href=\"https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002FOpenSpeech\u002Fblob\u002Fmain\u002FLICENSE\">\u003Cimg src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Flicense-MIT-informational\">\n  \u003Ca href=\"https:\u002F\u002Fpypi.org\u002Fproject\u002Fopenspeech-core\u002F\">\u003Cimg src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Fpypi-v0.4.0-informational\">\n  \u003Cimg src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Fbuild-passing-33CF57?&logo=GitHub\">\n  \u003Ca href=\"https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002F\">\u003Cimg src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Fdocs-passing-33CF57?&logo=GitHub\">\u003C\u002Fa>\n\u003C\u002Fp>\n\n\u003C\u002Fdiv>\n\n---\n\n\u003Cimg 
src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fopenspeech-team_openspeech_readme_e9cf0c4a6b26.png\" height=20> OpenSpeech provides reference implementations of various ASR modeling papers and three languages recipe to perform tasks on automatic speech recognition. We aim to make ASR technology easier to use for everyone.\n\n\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fopenspeech-team_openspeech_readme_e9cf0c4a6b26.png\" height=20>  OpenSpeech is backed by the two powerful libraries — [PyTorch-Lightning](https:\u002F\u002Fgithub.com\u002FPyTorchLightning\u002Fpytorch-lightning) and [Hydra](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fhydra).\nVarious features are available in the above two libraries, including Multi-GPU and TPU training, Mixed-precision, and hierarchical configuration management.\n\n\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fopenspeech-team_openspeech_readme_e9cf0c4a6b26.png\" height=20>  We appreciate any kind of feedback or contribution. Feel free to proceed with small issues like bug fixes, documentation improvement. For major contributions and new features, please discuss with the collaborators in corresponding issues.\n\n## What's New\n\n- May 2022 [openspeech 0.4.0 released](https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002Fopenspeech\u002Freleases\u002Ftag\u002Fv0.4.0)\n- Aug 2021 [Added Smart Batching](https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002Fopenspeech\u002Fpull\u002F83)\n- Jul 2021 [openspeech 0.3.0 released](https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002Fopenspeech\u002Freleases\u002Ftag\u002Fv0.3.0)\n- Jul 2021 [Added transducer beam search logic](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Fmodules\u002FSearch.html)\n- Jun 2021 [Added ContextNet](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Farchitectures\u002FContextNet.html)\n- Jun 2021 [Added language model training pipeline](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Fmodels\u002FOpenspeech%20Language%20Model.html)\n- Jun 2021 [openspeech 0.1.0 released](https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002Fopenspeech\u002Ftree\u002Fv0.1)\n\n## Contents\n\n- [**What is OpenSpeech?**](https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002FOpenSpeech#what-is-openspeech)\n\n- [**Why should I use OpenSpeech?**](https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002FOpenSpeech#why-should-i-use-openspeech)\n\n- [**Why shouldn't I use OpenSpeech?**](https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002FOpenSpeech#why-should-i-use-openspeech)\n\n- [**Model Architecture**](https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002FOpenSpeech#model-architectures)\n\n- [**Get Started**](https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002FOpenSpeech#get-started)\n\n- [**OpenSpeech's Hydra Configuration**](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Fnotes\u002Fhydra_configs.html)\n\n- [**Installation**](https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002FOpenSpeech#installation)\n\n- [**How to contribute to OpenSpeech?**](https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002FOpenSpeech\u002Fblob\u002Fmain\u002FCONTRIBUTING.md)\n\n- [**Contributors**](https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002FOpenSpeech\u002Fblob\u002Fmain\u002FCONTRIBUTORS.md)\n\n- [**Citation**](https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002FOpenSpeech#citation)\n\n## What is OpenSpeech?\n\nOpenSpeech is a framework for making end-to-end 
speech recognizers. End-to-end (E2E) automatic speech recognition (ASR) is an emerging paradigm in the field of neural network-based speech recognition that offers multiple benefits. Traditional “hybrid” ASR systems, which consist of an acoustic model, a language model, and a pronunciation model, require separate training of these components, each of which can be complex.\n\nFor example, training an acoustic model is a multi-stage process of model training and time alignment between the speech acoustic feature sequence and the output label sequence. In contrast, E2E ASR is a single integrated approach with a much simpler training pipeline, with models that operate at low audio frame rates. This reduces training time and decoding time, and allows joint optimization with downstream processing such as natural language understanding.\n\nBecause of these advantages, many end-to-end speech recognition open-source projects have emerged. However, many of them are based on plain PyTorch or TensorFlow, so it is very difficult to use features such as mixed precision, multi-node training, and TPU training. With frameworks such as PyTorch-Lightning, these features can be used easily. So we created a speech recognition framework that adopts PyTorch-Lightning and Hydra for easy use of these advanced features.\n\n## Why should I use OpenSpeech?\n\n1. PyTorch-Lightning-based framework.\n    - Various functions: mixed precision, multi-node training, TPU training, etc.\n    - Models become hardware-agnostic.\n    - Make fewer mistakes because Lightning handles the tricky engineering.\n    - Lightning has dozens of integrations with popular machine learning tools.\n1. Easy to experiment with famous ASR models.\n    - Supports 20+ models and is continuously updated.\n    - Low barrier to entry for educators and practitioners.\n    - Saves time for researchers who want to conduct various experiments.\n2. Provides recipes for the most widely used languages: English, Chinese, and Korean.\n    - LibriSpeech - 1,000 hours of English speech, the dataset most widely used in ASR tasks.\n    - AISHELL-1 - 170 hours of Mandarin Chinese speech.\n    - KsponSpeech - 1,000 hours of Korean open-domain dialogue speech.\n3. Easily customize a model or a new dataset to your needs:\n    - The default hparams of the supported models are provided but can be easily adjusted.\n    - Easily create a custom model by combining modules that are already provided.\n    - If you want to use a new dataset, you only need to define `pl.LightningDataModule` and `Tokenizer` classes.\n4. Audio processing\n    - Representative audio features such as Spectrogram, Mel-Spectrogram, Filter-Bank, and MFCC can be used easily.\n    - Provides a variety of augmentations, including SpecAugment, Noise Injection, and Audio Joining.\n\n## Why shouldn't I use OpenSpeech?\n\n- This framework provides code for training ASR models, but does not provide inference APIs built on pre-trained models.\n- This framework does not provide pre-trained models.\n\n## Model architectures\n\nWe support all the models below. Note that the important concepts of each model have been implemented to match the original papers, but implementation details may vary.\n\n1. 
[**DeepSpeech2**](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Farchitectures\u002FDeepSpeech2.html) (from Baidu Research) released with paper [Deep Speech 2: End-to-End Speech Recognition in\nEnglish and Mandarin](https:\u002F\u002Farxiv.org\u002Fabs\u002F1512.02595.pdf), by Dario Amodei, Rishita Anubhai, Eric Battenberg, Carl Case, Jared Casper, Bryan Catanzaro, Jingdong Chen, Mike Chrzanowski, Adam Coates, Greg Diamos, Erich Elsen, Jesse Engel, Linxi Fan, Christopher Fougner, Tony Han, Awni Hannun, Billy Jun, Patrick LeGresley, Libby Lin, Sharan Narang, Andrew Ng, Sherjil Ozair, Ryan Prenger, Jonathan Raiman, Sanjeev Satheesh, David Seetapun, Shubho Sengupta, Yi Wang, Zhiqian Wang, Chong Wang, Bo Xiao, Dani Yogatama, Jun Zhan, Zhenyao Zhu.\n2. [**RNN-Transducer**](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Farchitectures\u002FRNN%20Transducer.html) (from University of Toronto) released with paper [Sequence Transduction with Recurrent Neural Networks](https:\u002F\u002Farxiv.org\u002Fabs\u002F1211.3711.pdf), by Alex Graves.\n3. [**LSTM Language Model**](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Farchitectures\u002FLSTM%20LM.html) (from RWTH Aachen University) released with paper [LSTM Neural Networks for Language Modeling](http:\u002F\u002Fwww-i6.informatik.rwth-aachen.de\u002Fpublications\u002Fdownload\u002F820\u002FSundermeyer-2012.pdf), by  Martin Sundermeyer, Ralf Schluter, and Hermann Ney.\n3. [**Listen Attend Spell**](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Farchitectures\u002FListen%20Attend%20Spell.html) (from Carnegie Mellon University and Google Brain) released with paper [Listen, Attend and Spell](https:\u002F\u002Farxiv.org\u002Fabs\u002F1508.01211), by William Chan, Navdeep Jaitly, Quoc V. Le, Oriol Vinyals.\n4. [**Location-aware attention based Listen Attend Spell**](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Farchitectures\u002FListen%20Attend%20Spell.html) (from University of Wrocław and Jacobs University and Universite de Montreal) released with paper [Attention-Based Models for Speech Recognition](https:\u002F\u002Farxiv.org\u002Fabs\u002F1506.07503), by Jan Chorowski, Dzmitry Bahdanau, Dmitriy Serdyuk, Kyunghyun Cho, Yoshua Bengio.\n5. [**Joint CTC-Attention based Listen Attend Spell**](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Farchitectures\u002FListen%20Attend%20Spell.html) (from Mitsubishi Electric Research Laboratories and Carnegie Mellon University) released with paper [Joint CTC-Attention based End-to-End Speech Recognition using Multi-task Learning](https:\u002F\u002Farxiv.org\u002Fabs\u002F1609.06773), by Suyoun Kim, Takaaki Hori, Shinji Watanabe.\n6. [**Deep CNN Encoder with Joint CTC-Attention Listen Attend Spell**](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Farchitectures\u002FListen%20Attend%20Spell.html) (from Mitsubishi Electric Research Laboratories and Massachusetts Institute of Technology and Carnegie Mellon University) released with paper [Advances in Joint CTC-Attention based End-to-End Speech Recognition with a Deep CNN Encoder and RNN-LM](https:\u002F\u002Farxiv.org\u002Fabs\u002F1706.02737), by Takaaki Hori, Shinji Watanabe, Yu Zhang, William Chan.\n7. 
[**Multi-head attention based Listen Attend Spell**](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Farchitectures\u002FListen%20Attend%20Spell.html) (from Google) released with paper [State-of-the-art Speech Recognition With Sequence-to-Sequence Models](https:\u002F\u002Farxiv.org\u002Fabs\u002F1712.01769), by Chung-Cheng Chiu, Tara N. Sainath, Yonghui Wu, Rohit Prabhavalkar, Patrick Nguyen, Zhifeng Chen, Anjuli Kannan, Ron J. Weiss, Kanishka Rao, Ekaterina Gonina, Navdeep Jaitly, Bo Li, Jan Chorowski, Michiel Bacchiani.\n8. [**Speech-Transformer**](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Farchitectures\u002FTransformer.html) (from University of Chinese Academy of Sciences and Institute of Automation and Chinese Academy of Sciences) released with paper [Speech-Transformer: A No-Recurrence Sequence-to-Sequence Model for Speech Recognition](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F8462506), by Linhao Dong; Shuang Xu; Bo Xu.\n9. [**VGG-Transformer**](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Farchitectures\u002FTransformer.html) (from Facebook AI Research) released with paper [Transformers with convolutional context for ASR](https:\u002F\u002Farxiv.org\u002Fabs\u002F1904.11660), by Abdelrahman Mohamed, Dmytro Okhonko, Luke Zettlemoyer.\n10. [**Transformer with CTC**](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Farchitectures\u002FTransformer.html) (from NTT Communication Science Laboratories, Waseda University, Center for Language and Speech Processing, Johns Hopkins University) released with paper [Improving Transformer-based End-to-End Speech Recognition with Connectionist Temporal Classification and Language Model Integration](https:\u002F\u002Fwww.isca-speech.org\u002Farchive\u002FInterspeech_2019\u002Fpdfs\u002F1938.pdf), by Shigeki Karita, Nelson Enrique Yalta Soplin, Shinji Watanabe, Marc Delcroix, Atsunori Ogawa, Tomohiro Nakatani.\n11. [**Joint CTC-Attention based Transformer**](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Farchitectures\u002FTransformer.html) (from NTT Corporation) released with paper [Self-Distillation for Improving CTC-Transformer-based ASR Systems](https:\u002F\u002Fwww.isca-speech.org\u002Farchive\u002FInterspeech_2020\u002Fpdfs\u002F1223.pdf), by Takafumi Moriya, Tsubasa Ochiai, Shigeki Karita, Hiroshi Sato, Tomohiro Tanaka, Takanori Ashihara, Ryo Masumura, Yusuke Shinohara, Marc Delcroix.\n12. [**Transformer Language Model**](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Farchitectures\u002FTransformer%20LM.html) (from Amazon Web Services) released with paper [Language Models with Transformers](https:\u002F\u002Farxiv.org\u002Fabs\u002F1904.09408), by Chenguang Wang, Mu Li, Alexander J. Smola.\n12. [**Jasper**](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Fmodules\u002FEncoders.html#module-openspeech.encoders.jasper) (from NVIDIA and New York University) released with paper [Jasper: An End-to-End Convolutional Neural Acoustic Model](https:\u002F\u002Farxiv.org\u002Fpdf\u002F1904.03288.pdf), by Jason Li, Vitaly Lavrukhin, Boris Ginsburg, Ryan Leary, Oleksii Kuchaiev, Jonathan M. Cohen, Huyen Nguyen, Ravi Teja Gadde.\n13. [**QuartzNet**](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Fmodules\u002FEncoders.html#module-openspeech.encoders.quartznet) (from NVIDIA and Univ. of Illinois and Univ. 
of Saint Petersburg) released with paper [QuartzNet: Deep Automatic Speech Recognition with 1D Time-Channel Separable Convolutions](https:\u002F\u002Farxiv.org\u002Fabs\u002F1910.10261.pdf), by Samuel Kriman, Stanislav Beliaev, Boris Ginsburg, Jocelyn Huang, Oleksii Kuchaiev, Vitaly Lavrukhin, Ryan Leary, Jason Li, Yang Zhang.\n14. [**Transformer Transducer**](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Farchitectures\u002FTransformer%20Transducer.html) (from Facebook AI) released with paper [Transformer-Transducer: End-to-End Speech Recognition with Self-Attention](https:\u002F\u002Farxiv.org\u002Fabs\u002F1910.12977.pdf), by Ching-Feng Yeh, Jay Mahadeokar, Kaustubh Kalgaonkar, Yongqiang Wang, Duc Le, Mahaveer Jain, Kjell Schubert, Christian Fuegen, Michael L. Seltzer.\n15. [**Conformer**](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Farchitectures\u002FConformer.html) (from Google) released with paper [Conformer: Convolution-augmented Transformer for Speech Recognition](https:\u002F\u002Farxiv.org\u002Fabs\u002F2005.08100), by Anmol Gulati, James Qin, Chung-Cheng Chiu, Niki Parmar, Yu Zhang, Jiahui Yu, Wei Han, Shibo Wang, Zhengdong Zhang, Yonghui Wu, Ruoming Pang.\n16. [**Conformer with CTC**](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Farchitectures\u002FConformer.html) (from Northwestern Polytechnical University and University of Bordeaux and Johns Hopkins University and Human Dataware Lab and Kyoto University and NTT Corporation and Shanghai Jiao Tong University and  Chinese Academy of Sciences) released with paper [Recent Developments on ESPNET Toolkit Boosted by Conformer](https:\u002F\u002Farxiv.org\u002Fabs\u002F2010.13956.pdf), by Pengcheng Guo, Florian Boyer, Xuankai Chang, Tomoki Hayashi, Yosuke Higuchi, Hirofumi Inaguma, Naoyuki Kamo, Chenda Li, Daniel Garcia-Romero, Jiatong Shi, Jing Shi, Shinji Watanabe, Kun Wei, Wangyou Zhang, Yuekai Zhang.\n17. [**Conformer with LSTM Decoder**](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Farchitectures\u002FConformer.html) (from IBM Research AI) released with paper [On the limit of English conversational speech recognition](https:\u002F\u002Farxiv.org\u002Fabs\u002F2105.00982.pdf), by Zoltán Tüske, George Saon, Brian Kingsbury.\n18. [**ContextNet**](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Farchitectures\u002FContextNet.html) (from Google) released with paper [ContextNet: Improving Convolutional Neural Networks for Automatic Speech Recognition with Global Context](https:\u002F\u002Farxiv.org\u002Fabs\u002F2005.03191), by Wei Han, Zhengdong Zhang, Yu Zhang, Jiahui Yu, Chung-Cheng Chiu, James Qin, Anmol Gulati, Ruoming Pang, Yonghui Wu.\n19. [**Squeezeformer**](https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002Fopenspeech\u002Fblob\u002Fmain\u002Fopenspeech\u002Fmodels\u002Fsqueezeformer\u002Fmodel.py) (from University of Berkeley) released with paper [Squeezeformer: An Efficient Transformer for Automatic Speech Recognition](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.00888), by Sehoon Kim, Amir Gholami, Albert Shaw, Nicholas Lee, Karttikeya Mangalam, Jitendra Malik, Michael W. 
Mahoney, Kurt Keutzer.\n\n\n## Get Started\n\nWe use [Hydra](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fhydra) to control all the training configurations.\nIf you are not familiar with Hydra we recommend visiting the [Hydra website](https:\u002F\u002Fhydra.cc\u002F).\nGenerally, Hydra is an open-source framework that simplifies the development of research applications by providing the ability to create a hierarchical configuration dynamically.\nIf you want to know how we used Hydra, we recommend you to read [here](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Fnotes\u002Fhydra_configs.html).\n\n### Supported Datasets\n\nWe support [LibriSpeech](https:\u002F\u002Fwww.openslr.org\u002F12), [KsponSpeech](https:\u002F\u002Faihub.or.kr\u002Faidata\u002F105), and [AISHELL-1](https:\u002F\u002Fwww.openslr.org\u002F33\u002F).\n\nLibriSpeech is a corpus of approximately 1,000 hours of 16kHz read English speech, prepared by Vassil Panayotov with the assistance of Daniel Povey. The data was derived from reading audiobooks from the LibriVox project, and has been carefully segmented and aligned.\n\nAishell is an open-source Chinese Mandarin speech corpus published by Beijing Shell Shell Technology Co.,Ltd. 400 people from different accent areas in China were invited to participate in the recording, which was conducted in a quiet indoor environment using high fidelity microphone and downsampled to 16kHz.\n\nKsponSpeech is a large-scale spontaneous speech corpus of Korean. This corpus contains 969 hours of general open-domain dialog utterances, spoken by about 2,000 native Korean speakers in a clean environment. All data were constructed by recording the dialogue of two people freely conversing on a variety of topics and manually transcribing the utterances. To start training, the KsponSpeech dataset must be prepared in advance. 
To download KsponSpeech, you need permission from [AI Hub](https:\u002F\u002Faihub.or.kr\u002F).\n\n\n\n### Manifest File\n\n- Acoustic model manifest file format:\n\n```\nLibriSpeech\u002Ftest-other\u002F8188\u002F269288\u002F8188-269288-0052.flac        ▁ANNIE ' S ▁MANNER ▁WAS ▁VERY ▁MYSTERIOUS       4039 20 5 531 17 84 2352\nLibriSpeech\u002Ftest-other\u002F8188\u002F269288\u002F8188-269288-0053.flac        ▁ANNIE ▁DID ▁NOT ▁MEAN ▁TO ▁CONFIDE ▁IN ▁ANYONE ▁THAT ▁NIGHT ▁AND ▁THE ▁KIND EST ▁THING ▁WAS ▁TO ▁LEAVE ▁HER ▁A LONE    4039 99 35 251 9 4758 11 2454 16 199 6 4 323 200 255 17 9 370 30 10 492\nLibriSpeech\u002Ftest-other\u002F8188\u002F269288\u002F8188-269288-0054.flac        ▁TIRED ▁OUT ▁LESLIE ▁HER SELF ▁DROPP ED ▁A SLEEP        1493 70 4708 30 115 1231 7 10 1706\nLibriSpeech\u002Ftest-other\u002F8188\u002F269288\u002F8188-269288-0055.flac        ▁ANNIE ▁IS ▁THAT ▁YOU ▁SHE ▁CALL ED ▁OUT        4039 34 16 25 37 208 7 70\nLibriSpeech\u002Ftest-other\u002F8188\u002F269288\u002F8188-269288-0056.flac        ▁THERE ▁WAS ▁NO ▁REPLY ▁BUT ▁THE ▁SOUND ▁OF ▁HURRY ING ▁STEPS ▁CAME ▁QUICK ER ▁AND ▁QUICK ER ▁NOW ▁AND ▁THEN ▁THEY ▁WERE ▁INTERRUPTED ▁BY ▁A ▁GROAN     57 17 56 1368 33 4 489 8 1783 14 1381 133 571 49 6 571 49 82 6 76 45 54 2351 44 10 3154\nLibriSpeech\u002Ftest-other\u002F8188\u002F269288\u002F8188-269288-0057.flac        ▁OH ▁THIS ▁WILL ▁KILL ▁ME ▁MY ▁HEART ▁WILL ▁BREAK ▁THIS ▁WILL ▁KILL ▁ME 299 46 71 669 50 41 235 71 977 46 71 669 50\n...\n...\n```\n\n### Training examples\n\nYou can train with the LibriSpeech dataset as shown below:\n\n- Example1: Train the `conformer-lstm` model with `filter-bank` features on GPU.\n\n```\n$ python3 .\u002Fopenspeech_cli\u002Fhydra_train.py \\\n    dataset=librispeech \\\n    dataset.dataset_download=True \\\n    dataset.dataset_path=$DATASET_PATH \\\n    dataset.manifest_file_path=$MANIFEST_FILE_PATH \\\n    tokenizer=libri_subword \\\n    model=conformer_lstm \\\n    audio=fbank \\\n    lr_scheduler=warmup_reduce_lr_on_plateau \\\n    trainer=gpu \\\n    criterion=cross_entropy\n```\n\nYou can train with the KsponSpeech dataset as shown below:\n\n- Example2: Train the `listen-attend-spell` model with `mel-spectrogram` features on TPU:\n\n```\n$ python3 .\u002Fopenspeech_cli\u002Fhydra_train.py \\\n    dataset=ksponspeech \\\n    dataset.dataset_path=$DATASET_PATH \\\n    dataset.manifest_file_path=$MANIFEST_FILE_PATH \\\n    dataset.test_dataset_path=$TEST_DATASET_PATH \\\n    dataset.test_manifest_dir=$TEST_MANIFEST_DIR \\\n    tokenizer=kspon_character \\\n    model=listen_attend_spell \\\n    audio=melspectrogram \\\n    lr_scheduler=warmup_reduce_lr_on_plateau \\\n    trainer=tpu \\\n    criterion=cross_entropy\n```\n\nYou can train with the AISHELL-1 dataset as shown below:\n\n- Example3: Train the `quartznet` model with `mfcc` features on GPU with FP16:\n\n```\n$ python3 .\u002Fopenspeech_cli\u002Fhydra_train.py \\\n    dataset=aishell \\\n    dataset.dataset_path=$DATASET_PATH \\\n    dataset.dataset_download=True \\\n    dataset.manifest_file_path=$MANIFEST_FILE_PATH \\\n    tokenizer=aishell_character \\\n    model=quartznet15x5 \\\n    audio=mfcc \\\n    lr_scheduler=warmup_reduce_lr_on_plateau \\\n    trainer=gpu-fp16 \\\n    criterion=ctc\n```\n\n### Evaluation examples\n\n- Example1: Evaluate the `listen_attend_spell` model:\n\n```\n$ python3 .\u002Fopenspeech_cli\u002Fhydra_eval.py \\\n    audio=melspectrogram \\\n    eval.dataset_path=$DATASET_PATH \\\n    eval.checkpoint_path=$CHECKPOINT_PATH \\\n    
eval.manifest_file_path=$MANIFEST_FILE_PATH \\\n    model=listen_attend_spell \\\n    tokenizer=kspon_character \\\n    tokenizer.vocab_path=$VOCAB_FILE_PATH\n```\n\n- Example2: Evaluate the `listen_attend_spell` and `conformer_lstm` models with ensembling:\n\n```\n$ python3 .\u002Fopenspeech_cli\u002Fhydra_eval.py \\\n    audio=melspectrogram \\\n    eval.model_names=(listen_attend_spell, conformer_lstm) \\\n    eval.dataset_path=$DATASET_PATH \\\n    eval.checkpoint_paths=($CHECKPOINT_PATH1, $CHECKPOINT_PATH2) \\\n    eval.ensemble_weights=(0.3, 0.7) \\\n    eval.ensemble_method=weighted \\\n    eval.manifest_file_path=$MANIFEST_FILE_PATH\n```\n\n#### KsponSpeech Directory Structure\n\n- `dataset.dataset_path`: $BASE_PATH\u002FKsponSpeech\n```\n$BASE_PATH\u002FKsponSpeech\n├── KsponSpeech_01\n├── KsponSpeech_02\n├── KsponSpeech_03\n├── KsponSpeech_04\n└── KsponSpeech_05\n```\n\n- `dataset.test_dataset_path`: $BASE_PATH\u002FKsponSpeech_eval\n```\n$BASE_PATH\u002FKsponSpeech_eval\n├── eval_clean\n└── eval_other\n```\n\n- `dataset.test_manifest_dir`: $BASE_PATH\u002FKsponSpeech_scripts\n```\n$BASE_PATH\u002FKsponSpeech_scripts\n├── eval_clean.trn\n└── eval_other.trn\n```\n\n### Language model training example\n\nLanguage model training requires only data to be prepared in the following format:\n\n```\nopenspeech is a framework for making end-to-end speech recognizers.\nend to end automatic speech recognition is an emerging paradigm in the field of neural network-based speech recognition that offers multiple benefits.\nbecause of these advantages, many end-to-end speech recognition related open sources have emerged.\n...\n...\n```\n\nNote that you need to use the same vocabulary as the acoustic model.\n\n- Example: Train the `lstm_lm` model:\n```\n$ python3 .\u002Fopenspeech_cli\u002Fhydra_lm_train.py \\\n    dataset=lm \\\n    dataset.dataset_path=..\u002F..\u002F..\u002Flm.txt \\\n    tokenizer=kspon_character \\\n    tokenizer.vocab_path=..\u002F..\u002F..\u002Flabels.csv \\\n    model=lstm_lm \\\n    lr_scheduler=tri_stage \\\n    trainer=gpu \\\n    criterion=perplexity\n```\n\n## Installation\n\nThis project recommends Python 3.7 or higher.\nWe recommend creating a new virtual environment for this project (using virtualenv or conda).\n\n\n### Prerequisites\n\n* numpy: `pip install numpy` (Refer [here](https:\u002F\u002Fgithub.com\u002Fnumpy\u002Fnumpy) for problems installing numpy).\n* pytorch: Refer to [PyTorch website](http:\u002F\u002Fpytorch.org\u002F) to install the version w.r.t. 
your environment.\n* librosa: `conda install -c conda-forge librosa` (Refer [here](https:\u002F\u002Fgithub.com\u002Flibrosa\u002Flibrosa) for problems installing librosa)\n* torchaudio: `pip install torchaudio==0.6.0` (Refer [here](https:\u002F\u002Fgithub.com\u002Fpytorch\u002Fpytorch) for problems installing torchaudio)\n* sentencepiece: `pip install sentencepiece` (Refer [here](https:\u002F\u002Fgithub.com\u002Fgoogle\u002Fsentencepiece) for problems installing sentencepiece)\n* pytorch-lightning: `pip install pytorch-lightning` (Refer [here](https:\u002F\u002Fgithub.com\u002FPyTorchLightning\u002Fpytorch-lightning) for problems installing pytorch-lightning)\n* hydra: `pip install hydra-core --upgrade` (Refer [here](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fhydra) for problems installing hydra)\n* warp-rnnt: Refer to [warp-rnnt page](https:\u002F\u002Fgithub.com\u002F1ytic\u002Fwarp-rnnt) to install the library.\n* ctcdecode: Refer to [ctcdecode page](https:\u002F\u002Fgithub.com\u002Fparlance\u002Fctcdecode) to install the library.\n\n### Install from PyPI\n\nYou can install OpenSpeech from PyPI.\n```\npip install openspeech-core\n```\n\n### Install from source\nCurrently we only support installation from source code using setuptools. Check out the source code and run the\nfollowing commands:\n```\n$ .\u002Finstall.sh\n```\n\n### Install Apex (for 16-bit training)\n\nFor faster training, install NVIDIA's apex library:\n\n```\n$ git clone https:\u002F\u002Fgithub.com\u002FNVIDIA\u002Fapex\n$ cd apex\n\n# ------------------------\n# OPTIONAL: on your cluster you might need to load CUDA 10 or 9\n# depending on how you installed PyTorch\n\n# see available modules\nmodule avail\n\n# load correct CUDA before install\nmodule load cuda-10.0\n# ------------------------\n\n# make sure you've loaded a gcc version > 4.0 and \u003C 7.0\nmodule load gcc-6.1.0\n\n$ pip install -v --no-cache-dir --global-option=\"--cpp_ext\" --global-option=\"--cuda_ext\" .\u002F\n```\n\n## Troubleshooting and Contributing\nIf you have any questions, bug reports, or feature requests, please [open an issue](https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002FOpenSpeech\u002Fissues) on GitHub.\n\nWe appreciate any kind of feedback or contribution.  Feel free to proceed with small issues like bug fixes and documentation improvements.  For major contributions and new features, please discuss with the collaborators in the corresponding issues.\n\n### Code Style\nWe follow [PEP-8](https:\u002F\u002Fwww.python.org\u002Fdev\u002Fpeps\u002Fpep-0008\u002F) for code style. 
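In particular, the documentation is generated from docstrings. As a minimal sketch of the flavor used here (the helper below is hypothetical, not part of the OpenSpeech API; check existing modules for the exact conventions):\n\n```python\ndef label_to_string(labels, vocab):\n    r\"\"\"\n    Convert a sequence of label indices into a transcript string.\n\n    Args:\n        labels (list): sequence of integer label indices.\n        vocab (dict): mapping from label index to token string.\n\n    Returns:\n        str: the decoded transcript, with tokens joined by spaces.\n    \"\"\"\n    # Hypothetical example used only to illustrate docstring structure.\n    return \" \".join(vocab[label] for label in labels)\n```\n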
Keeping new docstrings in this style is especially important so that the documentation builds cleanly.\n\n### License\nThis project is licensed under the MIT LICENSE - see the [LICENSE.md](https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002FOpenSpeech\u002Fblob\u002Fmaster\u002FLICENSE) file for details.\n\n## Citation\n\nIf you use the system for academic work, please cite:\n\n```\n@GITHUB{2021-OpenSpeech,\n  author       = {Kim, Soohwan and Ha, Sangchun and Cho, Soyoung},\n  author email = {sh951011@gmail.com, seomk9896@gmail.com, soyoung.cho@kaist.ac.kr},\n  title        = {OpenSpeech: Open-Source Toolkit for End-to-End Speech Recognition},\n  howpublished = {\\url{https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002Fopenspeech}},\n  docs         = {\\url{https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech}},\n  year         = {2021}\n}\n```\n","\u003Cdiv align=\"center\">\n\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fopenspeech-team_openspeech_readme_169b4cf11280.png\" width=500>\n\n\n\u003Cp align=\"center\">\n  \u003Ci>\u003Ca href=\"https:\u002F\u002Fgithub.com\u002Fsooftware\u002FOpenSpeech\u002Fblob\u002Fmain\u002FCONTRIBUTING.md\">\u003Ch3> 🤗 欢迎为 OpenSpeech 做贡献 🤗 \u003C\u002Fh3>\u003C\u002Fa>\u003C\u002Fi>\n  \u003C\u002Fp>\n\n\u003C\u002Fdiv>\n\n\n\u003Cp align=\"center\">\n  \u003Ca href=\"https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002FOpenSpeech\u002Fblob\u002Fmain\u002FLICENSE\">\u003Cimg src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Flicense-MIT-informational\">\n  \u003Ca href=\"https:\u002F\u002Fpypi.org\u002Fproject\u002Fopenspeech-core\u002F\">\u003Cimg src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Fpypi-v0.4.0-informational\">\n  \u003Cimg src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Fbuild-passing-33CF57?&logo=GitHub\">\n  \u003Ca href=\"https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002F\">\u003Cimg src=\"https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Fdocs-passing-33CF57?&logo=GitHub\">\u003C\u002Fa>\n\u003C\u002Fp>\n\n\u003C\u002Fdiv>\n\n---\n\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fopenspeech-team_openspeech_readme_e9cf0c4a6b26.png\" height=20> OpenSpeech 提供了多种 ASR 模型论文的参考实现，并针对自动语音识别任务提供了三种语言的配方。我们的目标是让所有人都能更轻松地使用 ASR 技术。\n\n\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fopenspeech-team_openspeech_readme_e9cf0c4a6b26.png\" height=20> OpenSpeech 依托于两个强大的库——[PyTorch-Lightning](https:\u002F\u002Fgithub.com\u002FPyTorchLightning\u002Fpytorch-lightning) 和 [Hydra](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fhydra)。这两个库提供了丰富的功能，包括多 GPU 和 TPU 训练、混合精度以及分层配置管理等。\n\n\n\u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fopenspeech-team_openspeech_readme_e9cf0c4a6b26.png\" height=20> 我们欢迎任何形式的反馈和贡献。无论是修复 bug 还是改进文档，都可以从小处着手。对于重大贡献或新功能，请先在相关议题中与合作者讨论。\n\n## 最新动态\n\n- 2022年5月 [openspeech 0.4.0 发布](https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002Fopenspeech\u002Freleases\u002Ftag\u002Fv0.4.0)\n- 2021年8月 [新增智能批处理功能](https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002Fopenspeech\u002Fpull\u002F83)\n- 2021年7月 [openspeech 0.3.0 发布](https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002Fopenspeech\u002Freleases\u002Ftag\u002Fv0.3.0)\n- 2021年7月 [添加 Transducer 束搜索逻辑](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Fmodules\u002FSearch.html)\n- 2021年6月 [添加 ContextNet 模型](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Farchitectures\u002FContextNet.html)\n- 2021年6月 
[添加语言模型训练流水线](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Fmodels\u002FOpenspeech%20Language%20Model.html)\n- 2021年6月 [openspeech 0.1.0 发布](https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002Fopenspeech\u002Ftree\u002Fv0.1)\n\n## 目录\n\n- [**什么是 OpenSpeech？**](https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002FOpenSpeech#what-is-openspeech)\n\n- [**为什么应该使用 OpenSpeech？**](https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002FOpenSpeech#why-should-i-use-openspeech)\n\n- [**为什么不应该使用 OpenSpeech？**](https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002FOpenSpeech#why-shouldnt-i-use-openspeech)\n\n- [**模型架构**](https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002FOpenSpeech#model-architectures)\n\n- [**快速入门**](https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002FOpenSpeech#get-started)\n\n- [**OpenSpeech 的 Hydra 配置**](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Fnotes\u002Fhydra_configs.html)\n\n- [**安装指南**](https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002FOpenSpeech#installation)\n\n- [**如何为 OpenSpeech 做贡献？**](https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002FOpenSpeech\u002Fblob\u002Fmain\u002FCONTRIBUTING.md)\n\n- [**贡献者名单**](https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002FOpenSpeech\u002Fblob\u002Fmain\u002FCONTRIBUTORS.md)\n\n- [**引用信息**](https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002FOpenSpeech#citation)\n\n## 什么是 OpenSpeech？\n\nOpenSpeech 是一个用于构建端到端语音识别系统的框架。端到端（E2E）自动语音识别（ASR）是基于神经网络的语音识别领域中的一种新兴范式，具有诸多优势。传统的“混合”ASR 系统由声学模型、语言模型和发音模型组成，这些组件需要分别训练，且每个组件的训练过程都可能非常复杂。\n\n例如，声学模型的训练通常涉及多个阶段，包括模型训练以及语音声学特征序列与输出标签序列之间的对齐。相比之下，E2E ASR 采用单一的集成方法，其训练流程更为简单，模型运行时的音频帧率也较低。这不仅缩短了训练和解码时间，还能够与下游处理（如自然语言理解）进行联合优化。\n\n由于这些优势，许多与端到端语音识别相关的开源项目应运而生。然而，其中许多项目仅基于基础的 PyTorch 或 TensorFlow，难以充分利用混合精度、多节点训练、TPU 训练等功能。而借助 PyTorch-Lightning 等框架，则可以轻松实现这些高级功能。因此，我们创建了 OpenSpeech 框架，引入了 PyTorch-Lightning 和 Hydra，以方便用户使用这些先进特性。\n\n## 为什么应该使用 OpenSpeech？\n\n1. 基于 PyTorch-Lightning 的框架。\n    - 支持多种功能：混合精度、多节点训练、TPU 训练等。\n    - 模型与硬件无关，更具通用性。\n    - Lightning 能够处理复杂的工程细节，从而减少出错几率。\n    - Lightning 与众多主流机器学习工具无缝集成。\n1. 易于尝试流行的 ASR 模型。\n    - 支持 20 多种模型，并持续更新。\n    - 对教育工作者和从业者来说，上手门槛低。\n    - 为希望进行各种实验的研究人员节省时间。\n2. 提供最常用语言的配方，包括英语、中文和韩语。\n    - LibriSpeech：1,000 小时的英语数据集，广泛应用于 ASR 任务。\n    - AISHELL-1：170 小时的汉语普通话语音语料库。\n    - KsponSpeech：1,000 小时的韩语开放域对话语音数据集。\n3. 可根据需求轻松自定义模型或数据集：\n    - 提供支持模型的默认超参数，但也可轻松调整。\n    - 可通过组合现有模块轻松创建自定义模型。\n    - 如果想使用新的数据集，只需定义 `pl.LightningDataModule` 和 `Tokenizer` 类即可。\n4. 音频处理功能。\n    - 可轻松使用频谱图、梅尔频谱图、滤波器组和 MFCC 等代表性音频特征。\n    - 提供多种数据增强技术，包括 SpecAugment、噪声注入和音频拼接等。\n\n## 为什么不应该使用 OpenSpeech？\n\n- 本框架提供的是 ASR 模型的训练代码，但不提供预训练模型的 API。\n- 本框架不包含任何预训练模型。\n\n## 模型架构\n\n我们支持以下所有模型。请注意，模型的核心概念已实现匹配，但具体实现细节可能会有所不同。\n\n1. [**DeepSpeech2**](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Farchitectures\u002FDeepSpeech2.html)（来自百度研究院）随论文《Deep Speech 2: 英语和普通话的端到端语音识别》（Dario Amodei, Rishita Anubhai, Eric Battenberg, Carl Case, Jared Casper, Bryan Catanzaro, Jingdong Chen, Mike Chrzanowski, Adam Coates, Greg Diamos, Erich Elsen, Jesse Engel, Linxi Fan, Christopher Fougner, Tony Han, Awni Hannun, Billy Jun, Patrick LeGresley, Libby Lin, Sharan Narang, Andrew Ng, Sherjil Ozair, Ryan Prenger, Jonathan Raiman, Sanjeev Satheesh, David Seetapun, Shubho Sengupta, Yi Wang, Zhiqian Wang, Chong Wang, Bo Xiao, Dani Yogatama, Jun Zhan, Zhenyao Zhu）发布。\n2. 
[**RNN-Transducer**](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Farchitectures\u002FRNN%20Transducer.html)（来自多伦多大学）随论文《基于循环神经网络的序列转换》（Alex Graves）发布。\n3. [**LSTM语言模型**](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Farchitectures\u002FLSTM%20LM.html)（来自亚琛工业大学）随论文《用于语言建模的LSTM神经网络》（Martin Sundermeyer, Ralf Schluter, Hermann Ney）发布。\n3. [**Listen Attend Spell**](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Farchitectures\u002FListen%20Attend%20Spell.html)（来自卡内基梅隆大学和谷歌大脑）随论文《听、关注与拼写》（William Chan, Navdeep Jaitly, Quoc V. Le, Oriol Vinyals）发布。\n4. [**基于位置感知注意力的Listen Attend Spell**](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Farchitectures\u002FListen%20Attend%20Spell.html)（来自弗罗茨瓦夫大学、雅各布斯大学和蒙特利尔大学）随论文《基于注意力的语音识别模型》（Jan Chorowski, Dzmitry Bahdanau, Dmitriy Serdyuk, Kyunghyun Cho, Yoshua Bengio）发布。\n5. [**联合CTC-注意力的Listen Attend Spell**](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Farchitectures\u002FListen%20Attend%20Spell.html)（来自三菱电机研究实验室和卡内基梅隆大学）随论文《基于联合CTC-注意力的多任务学习端到端语音识别》（Suyoun Kim, Takaaki Hori, Shinji Watanabe）发布。\n6. [**带有深度CNN编码器的联合CTC-注意力Listen Attend Spell**](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Farchitectures\u002FListen%20Attend%20Spell.html)（来自三菱电机研究实验室、麻省理工学院和卡内基梅隆大学）随论文《采用深度CNN编码器和RNN-LM的联合CTC-注意力端到端语音识别进展》（Takaaki Hori, Shinji Watanabe, Yu Zhang, William Chan）发布。\n7. [**基于多头注意力的Listen Attend Spell**](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Farchitectures\u002FListen%20Attend%20Spell.html)（来自谷歌）随论文《基于序列到序列模型的最先进语音识别》（Chung-Cheng Chiu, Tara N. Sainath, Yonghui Wu, Rohit Prabhavalkar, Patrick Nguyen, Zhifeng Chen, Anjuli Kannan, Ron J. Weiss, Kanishka Rao, Ekaterina Gonina, Navdeep Jaitly, Bo Li, Jan Chorowski, Michiel Bacchiani）发布。\n8. [**语音Transformer**](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Farchitectures\u002FTransformer.html)（来自中国科学院大学和中国科学院自动化研究所）随论文《语音Transformer：一种无递归的序列到序列语音识别模型》（Linhao Dong, Shuang Xu, Bo Xu）发布。\n9. [**VGG-Transformer**](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Farchitectures\u002FTransformer.html)（来自Facebook AI研究）随论文《具有卷积上下文的Transformer用于自动语音识别》（Abdelrahman Mohamed, Dmytro Okhonko, Luke Zettlemoyer）发布。\n10. [**带有CTC的Transformer**](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Farchitectures\u002FTransformer.html)（来自NTT通信科学实验室、早稻田大学、约翰霍普金斯大学语言与语音处理中心）随论文《通过连接时序分类和语言模型集成改进基于Transformer的端到端语音识别》（Shigeki Karita, Nelson Enrique Yalta Soplin, Shinji Watanabe, Marc Delcroix, Atsunori Ogawa, Tomohiro Nakatani）发布。\n11. [**联合CTC-注意力的Transformer**](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Farchitectures\u002FTransformer.html)（来自NTT公司）随论文《自蒸馏法提升CTC-Transformer语音识别系统性能》（Takafumi Moriya, Tsubasa Ochiai, Shigeki Karita, Hiroshi Sato, Tomohiro Tanaka, Takanori Ashihara, Ryo Masumura, Yusuke Shinohara, Marc Delcroix）发布。\n12. [**Transformer语言模型**](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Farchitectures\u002FTransformer%20LM.html)（来自亚马逊云服务）随论文《基于Transformer的语言模型》（Chenguang Wang, Mu Li, Alexander J. Smola）发布。\n12. [**Jasper**](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Fmodules\u002FEncoders.html#module-openspeech.encoders.jasper)（来自NVIDIA和纽约大学）随论文《Jasper：一种端到端卷积神经网络声学模型》（Jason Li, Vitaly Lavrukhin, Boris Ginsburg, Ryan Leary, Oleksii Kuchaiev, Jonathan M. Cohen, Huyen Nguyen, Ravi Teja Gadde）发布。\n13. 
[**QuartzNet**](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Fmodules\u002FEncoders.html#module-openspeech.encoders.quartznet)（来自NVIDIA、伊利诺伊大学和圣彼得堡大学）随论文《QuartzNet：使用一维时间-通道可分离卷积的深度自动语音识别》（Samuel Kriman, Stanislav Beliaev, Boris Ginsburg, Jocelyn Huang, Oleksii Kuchaiev, Vitaly Lavrukhin, Ryan Leary, Jason Li, Yang Zhang）发布。\n14. [**Transformer Transducer**](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Farchitectures\u002FTransformer%20Transducer.html)（来自Facebook AI）随论文《Transformer-Transducer：基于自注意力的端到端语音识别》（Ching-Feng Yeh, Jay Mahadeokar, Kaustubh Kalgaonkar, Yongqiang Wang, Duc Le, Mahaveer Jain, Kjell Schubert, Christian Fuegen, Michael L. Seltzer）发布。\n15. [**Conformer**](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Farchitectures\u002FConformer.html)（来自谷歌）随论文《Conformer：用于语音识别的卷积增强型Transformer》（Anmol Gulati, James Qin, Chung-Cheng Chiu, Niki Parmar, Yu Zhang, Jiahui Yu, Wei Han, Shibo Wang, Zhengdong Zhang, Yonghui Wu, Ruoming Pang）发布。\n16. [**带有CTC的Conformer**](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Farchitectures\u002FConformer.html)（来自西北工业大学、波尔多大学、约翰霍普金斯大学、Human Dataware实验室、京都大学、NTT公司、上海交通大学和中国科学院）随论文《受Conformer推动的ESPNET工具包最新进展》（Pengcheng Guo, Florian Boyer, Xuankai Chang, Tomoki Hayashi, Yosuke Higuchi, Hirofumi Inaguma, Naoyuki Kamo, Chenda Li, Daniel Garcia-Romero, Jiatong Shi, Jing Shi, Shinji Watanabe, Kun Wei, Wangyou Zhang, Yuekai Zhang）发布。\n17. [**带有LSTM解码器的Conformer**](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Farchitectures\u002FConformer.html)（来自IBM研究AI）随论文《英语会话语音识别的极限》（Zoltán Tüske, George Saon, Brian Kingsbury）发布。\n18. [**ContextNet**](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Farchitectures\u002FContextNet.html)（来自谷歌）随论文《ContextNet：利用全局上下文改进用于自动语音识别的卷积神经网络》（Wei Han, Zhengdong Zhang, Yu Zhang, Jiahui Yu, Chung-Cheng Chiu, James Qin, Anmol Gulati, Ruoming Pang, Yonghui Wu）发布。\n19. [**Squeezeformer**](https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002Fopenspeech\u002Fblob\u002Fmain\u002Fopenspeech\u002Fmodels\u002Fsqueezeformer\u002Fmodel.py)（来自加州大学伯克利分校）随论文《Squeezeformer：一种高效的自动语音识别Transformer》（Sehoon Kim, Amir Gholami, Albert Shaw, Nicholas Lee, Karttikeya Mangalam, Jitendra Malik, Michael W. 
Mahoney, Kurt Keutzer）发布。\n\n## 开始使用\n\n我们使用 [Hydra](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fhydra) 来管理所有的训练配置。如果您不熟悉 Hydra，建议访问 [Hydra 官网](https:\u002F\u002Fhydra.cc\u002F)。\n\n通常来说，Hydra 是一个开源框架，它通过提供动态创建层次化配置的能力，简化了科研应用的开发。如果您想了解我们是如何使用 Hydra 的，建议阅读 [这里](https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002Fnotes\u002Fhydra_configs.html)。\n\n### 支持的数据集\n\n我们支持 [LibriSpeech](https:\u002F\u002Fwww.openslr.org\u002F12)、[KsponSpeech](https:\u002F\u002Faihub.or.kr\u002Faidata\u002F105) 和 [AISHELL-1](https:\u002F\u002Fwww.openslr.org\u002F33\u002F)。\n\nLibriSpeech 是一个约 1,000 小时的 16kHz 英语朗读语音语料库，由 Vassil Panayotov 在 Daniel Povey 的协助下整理而成。数据来源于 LibriVox 项目的有声读物，并经过仔细的切分和对齐处理。\n\nAishell 是由北京壳壳科技有限公司发布的开源中文普通话语音语料库。邀请了来自中国不同方言区的 400 名参与者，在安静的室内环境中使用高保真麦克风进行录音，并将采样率降为 16kHz。\n\nKsponSpeech 是一个大规模的韩语自发性语音语料库。该语料库包含 969 小时的一般开放域对话片段，由约 2,000 名韩国本地使用者在干净的环境中录制而成。所有数据均通过记录两人自由交谈的各种话题并手动转录而成。要开始训练，必须提前准备好 KsponSpeech 数据集。下载 KsponSpeech 需获得 [AI Hub](https:\u002F\u002Faihub.or.kr\u002F) 的许可。\n\n\n\n### 清单文件\n\n- 声学模型清单文件格式：\n\n```\nLibriSpeech\u002Ftest-other\u002F8188\u002F269288\u002F8188-269288-0052.flac        ▁ANNIE ' S ▁MANNER ▁WAS ▁VERY ▁MYSTERIOUS       4039 20 5 531 17 84 2352\nLibriSpeech\u002Ftest-other\u002F8188\u002F269288\u002F8188-269288-0053.flac        ▁ANNIE ▁DID ▁NOT ▁MEAN ▁TO ▁CONFIDE ▁IN ▁ANYONE ▁THAT ▁NIGHT ▁AND ▁THE ▁KIND EST ▁THING ▁WAS ▁TO ▁LEAVE ▁HER ▁A LONE    4039 99 35 251 9 4758 11 2454 16 199 6 4 323 200 255 17 9 370 30 10 492\nLibriSpeech\u002Ftest-other\u002F8188\u002F269288\u002F8188-269288-0054.flac        ▁TIRED ▁OUT ▁LESLIE ▁HER SELF ▁DROPP ED ▁A SLEEP        1493 70 4708 30 115 1231 7 10 1706\nLibriSpeech\u002Ftest-other\u002F8188\u002F269288\u002F8188-269288-0055.flac        ▁ANNIE ▁IS ▁THAT ▁YOU ▁SHE ▁CALL ED ▁OUT        4039 34 16 25 37 208 7 70\nLibriSpeech\u002Ftest-other\u002F8188\u002F269288\u002F8188-269288-0056.flac        ▁THERE ▁WAS ▁NO ▁REPLY ▁BUT ▁THE ▁SOUND ▁OF ▁HURRY ING ▁STEPS ▁CAME ▁QUICK ER ▁AND ▁QUICK ER ▁NOW ▁AND ▁THEN ▁THEY ▁WERE ▁INTERRUPTED ▁BY ▁A ▁GROAN     57 17 56 1368 33 4 489 8 1783 14 1381 133 571 49 6 571 49 82 6 76 45 54 2351 44 10 3154\nLibriSpeech\u002Ftest-other\u002F8188\u002F269288\u002F8188-269288-0057.flac        ▁OH ▁THIS ▁WILL ▁KILL ▁ME ▁MY ▁HEART ▁WILL ▁BREAK ▁THIS ▁WILL ▁KILL ▁ME 299 46 71 669 50 41 235 71 977 46 71 669 50\n...\n...\n```\n\n### 训练示例\n\n您可以像下面这样简单地使用 LibriSpeech 数据集进行训练：\n\n- 示例1：在 GPU 上使用 `filter-bank` 特征训练 `conformer-lstm` 模型。\n\n```\n$ python3 .\u002Fopenspeech_cli\u002Fhydra_train.py \\\n    dataset=librispeech \\\n    dataset.dataset_download=True \\\n    dataset.dataset_path=$DATASET_PATH \\\n    dataset.manifest_file_path=$MANIFEST_FILE_PATH \\\n    tokenizer=libri_subword \\\n    model=conformer_lstm \\\n    audio=fbank \\\n    lr_scheduler=warmup_reduce_lr_on_plateau \\\n    trainer=gpu \\\n    criterion=cross_entropy\n```\n\n您也可以像下面这样简单地使用 KsponSpeech 数据集进行训练：\n\n- 示例2：在 TPU 上使用 `mel-spectrogram` 特征训练 `listen-attend-spell` 模型。\n\n```\n$ python3 .\u002Fopenspeech_cli\u002Fhydra_train.py \\\n    dataset=ksponspeech \\\n    dataset.dataset_path=$DATASET_PATH \\\n    dataset.manifest_file_path=$MANIFEST_FILE_PATH \\\n    dataset.test_dataset_path=$TEST_DATASET_PATH \\\n    dataset.test_manifest_dir=$TEST_MANIFEST_DIR \\\n    tokenizer=kspon_character \\\n    model=listen_attend_spell \\\n    audio=melspectrogram \\\n    lr_scheduler=warmup_reduce_lr_on_plateau \\\n    trainer=tpu \\\n    criterion=cross_entropy\n```\n\n您还可以像下面这样简单地使用 AISHELL-1 数据集进行训练：\n\n- 示例3：在 GPU 上以 FP16 
精度使用 `mfcc` 特征训练 `quartznet` 模型。\n\n```\n$ python3 .\u002Fopenspeech_cli\u002Fhydra_train.py \\\n    dataset=aishell \\\n    dataset.dataset_path=$DATASET_PATH \\\n    dataset.dataset_download=True \\\n    dataset.manifest_file_path=$MANIFEST_FILE_PATH \\\n    tokenizer=aishell_character \\\n    model=quartznet15x5 \\\n    audio=mfcc \\\n    lr_scheduler=warmup_reduce_lr_on_plateau \\\n    trainer=gpu-fp16 \\\n    criterion=ctc\n```\n\n### 评估示例\n\n- 示例1：评估 `listen_attend_spell` 模型：\n\n```\n$ python3 .\u002Fopenspeech_cli\u002Fhydra_eval.py \\\n    audio=melspectrogram \\\n    eval.dataset_path=$DATASET_PATH \\\n    eval.checkpoint_path=$CHECKPOINT_PATH \\\n    eval.manifest_file_path=$MANIFEST_FILE_PATH \\\n    model=listen_attend_spell \\\n    tokenizer=kspon_character \\\n    tokenizer.vocab_path=$VOCAB_FILE_PATH\n```\n\n- 示例2：使用集成方法评估 `listen_attend_spell` 和 `conformer_lstm` 模型：\n\n```\n$ python3 .\u002Fopenspeech_cli\u002Fhydra_eval.py \\\n    audio=melspectrogram \\\n    eval.model_names=(listen_attend_spell, conformer_lstm) \\\n    eval.dataset_path=$DATASET_PATH \\\n    eval.checkpoint_paths=($CHECKPOINT_PATH1, $CHECKPOINT_PATH2) \\\n    eval.ensemble_weights=(0.3, 0.7) \\\n    eval.ensemble_method=weighted \\\n    eval.manifest_file_path=$MANIFEST_FILE_PATH\n```\n\n#### KsponSpeech 目录结构\n\n- `dataset.dataset_path`: $BASE_PATH\u002FKsponSpeech\n```\n$BASE_PATH\u002FKsponSpeech\n├── KsponSpeech_01\n├── KsponSpeech_02\n├── KsponSpeech_03\n├── KsponSpeech_04\n└── KsponSpeech_05\n```\n\n- `dataset.test_dataset_path`: $BASE_PATH\u002FKsponSpeech_eval\n```\n$BASE_PATH\u002FKsponSpeech_eval\n├── eval_clean\n└── eval_other\n```\n\n- `dataset.test_manifest_dir`: $BASE_PATH\u002FKsponSpeech_scripts\n```\n$BASE_PATH\u002FKsponSpeech_scripts\n├── eval_clean.trn\n└── eval_other.trn\n```\n\n### 语言模型训练示例\n\n语言模型训练只需要准备以下格式的数据：\n\n```\nopenspeech 是一个用于构建端到端语音识别系统的框架。\n端到端自动语音识别是基于神经网络的语音识别领域中一种新兴范式，具有多重优势。\n由于这些优势，许多与端到端语音识别相关的开源项目应运而生。\n...\n...\n```\n\n请注意，您需要使用与声学模型相同的词汇表。\n\n- 示例：训练 `lstm_lm` 模型：\n```\n$ python3 .\u002Fopenspeech_cli\u002Fhydra_lm_train.py \\\n    dataset=lm \\\n    dataset.dataset_path=..\u002F..\u002F..\u002Flm.txt \\\n    tokenizer=kspon_character \\\n    tokenizer.vocab_path=..\u002F..\u002F..\u002Flabels.csv \\\n    model=lstm_lm \\\n    lr_scheduler=tri_stage \\\n    trainer=gpu \\\n    criterion=perplexity\n```\n\n## 安装\n\n本项目推荐使用 Python 3.7 或更高版本。\n我们建议为该项目创建一个新的虚拟环境（使用 virtualenv 或 conda）。\n\n\n### 先决条件\n\n* numpy: `pip install numpy`（安装 numpy 时遇到问题可参考 [这里](https:\u002F\u002Fgithub.com\u002Fnumpy\u002Fnumpy)）。\n* PyTorch: 请参考 [PyTorch 官网](http:\u002F\u002Fpytorch.org\u002F)，根据您的环境安装相应版本。\n* librosa: `conda install -c conda-forge librosa`（安装 librosa 时遇到问题可参考 [这里](https:\u002F\u002Fgithub.com\u002Flibrosa\u002Flibrosa)）。\n* torchaudio: `pip install torchaudio==0.6.0`（安装 torchaudio 时遇到问题可参考 [这里](https:\u002F\u002Fgithub.com\u002Fpytorch\u002Fpytorch)）。\n* sentencepiece: `pip install sentencepiece`（安装 sentencepiece 时遇到问题可参考 [这里](https:\u002F\u002Fgithub.com\u002Fgoogle\u002Fsentencepiece)）。\n* PyTorch Lightning: `pip install pytorch-lightning`（安装 PyTorch Lightning 时遇到问题可参考 [这里](https:\u002F\u002Fgithub.com\u002FPyTorchLightning\u002Fpytorch-lightning)）。\n* hydra: `pip install hydra-core --upgrade`（安装 hydra 时遇到问题可参考 [这里](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Fhydra)）。\n* warp-rnnt: 请参考 [warp-rnnt 页面](https:\u002F\u002Fgithub.com\u002F1ytic\u002Fwarp-rnnt)，按照说明安装该库。\n* ctcdecode: 请参考 [ctcdecode 
页面](https:\u002F\u002Fgithub.com\u002Fparlance\u002Fctcdecode)，按照说明安装该库。\n\n### 通过 PyPI 安装\n\n您可以通过 PyPI 安装 OpenSpeech：\n```\npip install openspeech-core\n```\n\n### 从源码安装\n目前我们仅支持使用 setuptools 从源码进行安装。请检出源代码并运行以下命令：\n```\n$ .\u002Finstall.sh\n```\n\n### 安装 Apex（用于 16 位训练）\n为了加快训练速度，您可以安装 NVIDIA 的 Apex 库：\n\n```\n$ git clone https:\u002F\u002Fgithub.com\u002FNVIDIA\u002Fapex\n$ cd apex\n\n# ------------------------\n# 可选：在集群上，您可能需要加载 CUDA 10 或 9，\n# 具体取决于您安装 PyTorch 的方式。\n\n# 查看可用模块\nmodule avail\n\n# 在安装前加载正确的 CUDA 版本\nmodule load cuda-10.0\n# ------------------------\n\n# 确保已加载的 GCC 版本在 4.0 到 7.0 之间\nmodule load gcc-6.1.0\n\n$ pip install -v --no-cache-dir --global-option=\"--cpp_ext\" --global-option=\"--cuda_ext\" .\u002F\n```\n\n## 故障排除与贡献\n如果您有任何问题、错误报告或功能请求，请在 GitHub 上 [提交一个问题](https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002FOpenSpeech\u002Fissues)。\n\n我们非常欢迎任何形式的反馈和贡献。您可以先从修复小 bug 或改进文档等小事入手。对于重大贡献或新功能，请先在相关 issue 中与合作者讨论。\n\n### 代码风格\n我们遵循 [PEP-8](https:\u002F\u002Fwww.python.org\u002Fdev\u002Fpeps\u002Fpep-0008\u002F) 代码规范。尤其是 docstring 的风格对生成文档非常重要。\n\n### 许可证\n本项目采用 MIT 许可证授权——详情请参阅 [LICENSE.md](https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002FOpenSpeech\u002Fblob\u002Fmaster\u002FLICENSE) 文件。\n\n## 引用\n如果您在学术研究中使用本系统，请引用如下：\n\n```\n@GITHUB{2021-OpenSpeech,\n  author       = {Kim, Soohwan and Ha, Sangchun and Cho, Soyoung},\n  author email = {sh951011@gmail.com, seomk9896@gmail.com, soyoung.cho@kaist.ac.kr},\n  title        = {OpenSpeech: Open-Source Toolkit for End-to-End Speech Recognition},\n  howpublished = {\\url{https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002Fopenspeech}},\n  docs         = {\\url{https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech}},\n  year         = {2021}\n}\n```","# OpenSpeech 快速上手指南\n\nOpenSpeech 是一个基于 **PyTorch-Lightning** 和 **Hydra** 构建的端到端自动语音识别（ASR）框架。它提供了多种主流 ASR 模型（如 DeepSpeech2, Transformer, Conformer 等）的参考实现，并支持英语、中文和韩语的训练配方。该框架旨在简化混合精度训练、多 GPU\u002FTPU 分布式训练等高级功能的使用。\n\n> **注意**：OpenSpeech 仅提供模型训练代码和架构实现，**不提供**预训练模型权重或直接调用的推理 API。用户需自行准备数据并进行模型训练。\n\n## 环境准备\n\n在开始之前，请确保您的开发环境满足以下要求：\n\n*   **操作系统**: Linux (推荐), macOS, Windows\n*   **Python**: 3.7 或更高版本\n*   **深度学习框架**: PyTorch 1.8.0+\n*   **硬件**: 支持 CUDA 的 NVIDIA GPU（可选，用于加速训练），或 CPU\u002FTPU\n\n**前置依赖库**：\nOpenSpeech 强依赖于以下两个核心库，安装时会自动处理：\n*   `pytorch-lightning`: 用于简化训练循环和多设备支持。\n*   `hydra-core`: 用于灵活的层级化配置管理。\n\n## 安装步骤\n\n推荐使用 `pip` 进行安装。国内开发者建议使用清华源或阿里源以加速下载。\n\n### 1. 安装核心包\n\n```bash\n# 使用官方源\npip install openspeech-core\n\n# 【推荐】国内开发者使用清华源加速安装\npip install openspeech-core -i https:\u002F\u002Fpypi.tuna.tsinghua.edu.cn\u002Fsimple\n```\n\n### 2. 验证安装\n\n安装完成后，可通过以下命令检查版本，确认安装成功：\n\n```bash\npython -c \"import openspeech; print(openspeech.__version__)\"\n```\n\n## 基本使用\n\nOpenSpeech 采用配置文件驱动的方式运行。以下以训练一个基础的 **DeepSpeech2** 模型为例，展示最简使用流程。\n\n### 1. 准备数据\nOpenSpeech 内置了 LibriSpeech (英文), AISHELL-1 (中文), KsponSpeech (韩文) 的数据加载逻辑。您需要先下载对应数据集并按照框架要求的格式整理，或在配置文件中指定本地数据路径。\n\n### 2. 
运行训练\n使用仓库自带的 `openspeech_cli\u002Fhydra_train.py` 脚本启动训练。通过 `model` 和 `dataset` 参数指定模型架构和数据集，通过 `trainer` 参数选择训练后端（如 `gpu`、`tpu`、`gpu-fp16`）。模型与数据集的具体配置名称以项目根目录下的 `conf` 目录为准。\n\n**示例：使用 DeepSpeech2 模型在 LibriSpeech 数据集上进行 GPU 训练**\n\n```bash\n$ python3 .\u002Fopenspeech_cli\u002Fhydra_train.py \\\n    dataset=librispeech \\\n    dataset.dataset_download=True \\\n    dataset.dataset_path=$DATASET_PATH \\\n    dataset.manifest_file_path=$MANIFEST_FILE_PATH \\\n    tokenizer=libri_subword \\\n    model=deepspeech2 \\\n    audio=fbank \\\n    lr_scheduler=warmup_reduce_lr_on_plateau \\\n    trainer=gpu \\\n    criterion=ctc\n```\n\n**示例：开启混合精度训练 (Mixed Precision) 以节省显存并加速（使用 `trainer=gpu-fp16`）**\n\n```bash\n$ python3 .\u002Fopenspeech_cli\u002Fhydra_train.py \\\n    dataset=aishell \\\n    dataset.dataset_path=$DATASET_PATH \\\n    dataset.manifest_file_path=$MANIFEST_FILE_PATH \\\n    tokenizer=aishell_character \\\n    model=conformer \\\n    audio=melspectrogram \\\n    lr_scheduler=warmup_reduce_lr_on_plateau \\\n    trainer=gpu-fp16 \\\n    criterion=ctc\n```\n\n### 3. 自定义配置\nOpenSpeech 基于 Hydra，您可以轻松覆盖默认参数。例如，切换音频特征或学习率调度器（学习率、批次大小等其余字段名可在 `conf` 目录中查到）：\n\n```bash\n$ python3 .\u002Fopenspeech_cli\u002Fhydra_train.py \\\n    dataset=librispeech \\\n    dataset.dataset_path=$DATASET_PATH \\\n    dataset.manifest_file_path=$MANIFEST_FILE_PATH \\\n    tokenizer=libri_subword \\\n    model=conformer_lstm \\\n    audio=mfcc \\\n    lr_scheduler=tri_stage \\\n    trainer=gpu \\\n    criterion=cross_entropy\n```\n\n更多详细的模型架构列表和配置项说明，请参考官方文档或查看项目根目录下的 `conf` 文件夹。","某初创团队正致力于开发一款支持中韩英三语的实时会议转录系统，需要在有限算力下快速复现前沿语音识别模型并投入生产。\n\n### 没有 openspeech 时\n- **重复造轮子耗时久**：工程师需手动从零搭建 PyTorch 训练循环，处理多 GPU 同步和混合精度训练，耗费数周时间仅完成基础框架。\n- **模型复现门槛高**：想要尝试最新的 ContextNet 或 Transducer 架构，必须逐行研读论文并自行实现，极易因细节偏差导致效果不达标。\n- **配置管理混乱**：针对不同语言和实验参数，团队依靠修改硬编码脚本或分散的配置文件，版本迭代时经常发生配置冲突。\n- **多语言适配困难**：缺乏统一的预处理和数据加载接口，为每种新语言（如韩语）定制数据管道需要大量定制化开发。\n\n### 使用 openspeech 后\n- **开箱即用的训练引擎**：基于 PyTorch-Lightning 和 Hydra，openspeech 直接提供多卡训练、TPU 支持及混合精度功能，团队当天即可启动模型训练。\n- **前沿架构一键调用**：内置了多种主流 ASR 论文的参考实现，开发人员只需更改配置即可切换 ContextNet 等模型，大幅降低复现成本。\n- **层级化配置管理**：利用 Hydra 的强大特性，通过清晰的 YAML 文件管理实验参数，轻松实现不同语言场景下的配置隔离与快速切换。\n- **标准化多语言食谱**：直接使用官方提供的三语食谱（Recipe），快速构建标准化的数据加载与评估流程，显著缩短新语言上线周期。\n\nopenspeech 将原本需要数月的基础设施搭建与模型验证工作压缩至数天，让团队能专注于核心业务逻辑的优化。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fopenspeech-team_openspeech_8c38ca1e.png","openspeech-team","Openspeech TEAM","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002Fopenspeech-team_78068a2c.png","Open source ecosystem for automatic speech recognition.",null,"openspeech.team@gmail.com","https:\u002F\u002Fopenspeech-team.github.io\u002Fopenspeech\u002F","https:\u002F\u002Fgithub.com\u002Fopenspeech-team",[82,86],{"name":83,"color":84,"percentage":85},"Python","#3572A5",99.8,{"name":87,"color":88,"percentage":89},"Shell","#89e051",0.2,717,115,"2026-03-19T01:22:23","MIT",4,"未说明","支持多 GPU 和 TPU 训练（基于 PyTorch-Lightning），具体型号、显存大小及 CUDA 版本未在文档中明确说明",{"notes":98,"python":95,"dependencies":99},"该工具是一个用于训练端到端自动语音识别（ASR）模型的框架，不提供预训练模型或直接可用的 API。它基于 PyTorch-Lightning 和 Hydra 构建，支持混合精度训练、多节点训练和 TPU 训练等高级功能。用户需自行准备数据集并定义数据模块（LightningDataModule）和分词器（Tokenizer）。",[100,101,102],"PyTorch-Lightning","Hydra","PyTorch",[104,15],"音频",[106,107,108,109,110,111,112],"asr","speech","recognition","speech-recognition","open","end-to-end","e2e","2026-03-27T02:49:30.150509","2026-04-17T08:24:11.736300",[],[117,122,127,132,136,141],{"id":118,"version":119,"summary_zh":120,"released_at":121},289868,"v0.4.0","## 变更内容\n* 由 @upskyy 在 https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002Fopenspeech\u002Fpull\u002F56 中解决了 #38 问题\n* 由 @sooftware 在 https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002Fopenspeech\u002Fpull\u002F57 中添加了 CheckpointEveryNSteps 类，用于每 N 步保存一次检查点\n* 由 @upskyy 在 https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002Fopenspeech\u002Fpull\u002F59 中解决了 #58 问题\n* 由 @upskyy 在 https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002Fopenspeech\u002Fpull\u002F63 中解决了 (#62) 问题\n* 由 @upskyy 在 
https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002Fopenspeech\u002Fpull\u002F72 中解决了 #71 问题\n* 由 @upskyy 在 https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002Fopenspeech\u002Fpull\u002F73 中解决了 #70 问题\n* 由 @upskyy 在 https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002Fopenspeech\u002Fpull\u002F78 中解决了 #76 问题\n* 由 @upskyy 在 https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002Fopenspeech\u002Fpull\u002F80 中解决了 #79 问题\n* 添加统一长度批处理（智能批处理）[已解决 #82] - Soohwan Kim，由 @sooftware 在 https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002Fopenspeech\u002Fpull\u002F83 中实现\n* openspeech\u002Fdatasets\u002Faishell\u002Fpreprocess.py 第 137 行的修改，由 @wuxiuzhi738 在 https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002Fopenspeech\u002Fpull\u002F95 中完成\n* 由 @sooftware 在 https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002Fopenspeech\u002Fpull\u002F107 中修复了拼写错误\n* 由 @YongWookHa 在 https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002Fopenspeech\u002Fpull\u002F114 中修复了 URL 拼写错误\n* 由 @upskyy 在 https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002Fopenspeech\u002Fpull\u002F118 中解决了 #116 问题\n* 由 @upskyy 在 https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002Fopenspeech\u002Fpull\u002F129 中解决了 #128 问题\n* 更新评估代码（修复 #86）- 由 @upskyy 在 https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002Fopenspeech\u002Fpull\u002F145 中完成\n* 版本 0.4.0 - 由 @upskyy 在 https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002Fopenspeech\u002Fpull\u002F165 中发布\n\n## 新贡献者\n* @wuxiuzhi738 在 https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002Fopenspeech\u002Fpull\u002F95 中做出了首次贡献\n* @YongWookHa 在 https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002Fopenspeech\u002Fpull\u002F114 中做出了首次贡献\n\n**完整变更日志**: https:\u002F\u002Fgithub.com\u002Fopenspeech-team\u002Fopenspeech\u002Fcompare\u002Fv0.3.0...v0.4.0","2022-05-22T05:29:44",{"id":123,"version":124,"summary_zh":125,"released_at":126},289869,"v0.3.0","- `Vocabulary` => `Tokenizer` 类\n- 添加 RNN Transducer 束搜索\n- 添加 Transformer Transducer 束搜索\n- 重新编写文档\n- 重构模型目录","2021-07-20T16:49:34",{"id":128,"version":129,"summary_zh":130,"released_at":131},289870,"v0.2.1","## 版本 0.2.1\n- 添加 Transformer Transducer 模型\n- 添加 ContextNet 模型\n- 更新文档\n- 添加语言模型训练流水线\n  - 添加 `lstm_lm` 模型\n  - 添加 `transformer_lm` 模型\n  - 添加 `perplexity` 损失函数\n- 向 `Vocabulary` 类添加 `string_to_label` 方法\n- 修复错误\n  - 问题 #47","2021-07-18T11:25:08",{"id":133,"version":134,"summary_zh":77,"released_at":135},289871,"v0.2.0","2021-06-07T18:59:25",{"id":137,"version":138,"summary_zh":139,"released_at":140},289872,"0.2","- 修复小 bug\n- 添加集成搜索\n- 添加评估流水线","2021-06-07T16:00:04",{"id":142,"version":143,"summary_zh":144,"released_at":145},289873,"v0.1","版本 0.1（首次发布）","2021-06-06T15:11:01"]