[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-justadudewhohacks--face-api.js":3,"tool-justadudewhohacks--face-api.js":61},[4,18,26,36,44,53],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":17},4358,"openclaw","openclaw\u002Fopenclaw","OpenClaw 是一款专为个人打造的本地化 AI 助手，旨在让你在自己的设备上拥有完全可控的智能伙伴。它打破了传统 AI 助手局限于特定网页或应用的束缚，能够直接接入你日常使用的各类通讯渠道，包括微信、WhatsApp、Telegram、Discord、iMessage 等数十种平台。无论你在哪个聊天软件中发送消息，OpenClaw 都能即时响应，甚至支持在 macOS、iOS 和 Android 设备上进行语音交互，并提供实时的画布渲染功能供你操控。\n\n这款工具主要解决了用户对数据隐私、响应速度以及“始终在线”体验的需求。通过将 AI 部署在本地，用户无需依赖云端服务即可享受快速、私密的智能辅助，真正实现了“你的数据，你做主”。其独特的技术亮点在于强大的网关架构，将控制平面与核心助手分离，确保跨平台通信的流畅性与扩展性。\n\nOpenClaw 非常适合希望构建个性化工作流的技术爱好者、开发者，以及注重隐私保护且不愿被单一生态绑定的普通用户。只要具备基础的终端操作能力（支持 macOS、Linux 及 Windows WSL2），即可通过简单的命令行引导完成部署。如果你渴望拥有一个懂你",349277,3,"2026-04-06T06:32:30",[13,14,15,16],"Agent","开发框架","图像","数据工具","ready",{"id":19,"name":20,"github_repo":21,"description_zh":22,"stars":23,"difficulty_score":10,"last_commit_at":24,"category_tags":25,"status":17},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,"2026-04-05T11:01:52",[14,15,13],{"id":27,"name":28,"github_repo":29,"description_zh":30,"stars":31,"difficulty_score":32,"last_commit_at":33,"category_tags":34,"status":17},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 
代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",154349,2,"2026-04-13T23:32:16",[14,13,35],"语言模型",{"id":37,"name":38,"github_repo":39,"description_zh":40,"stars":41,"difficulty_score":32,"last_commit_at":42,"category_tags":43,"status":17},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",108322,"2026-04-10T11:39:34",[14,15,13],{"id":45,"name":46,"github_repo":47,"description_zh":48,"stars":49,"difficulty_score":32,"last_commit_at":50,"category_tags":51,"status":17},6121,"gemini-cli","google-gemini\u002Fgemini-cli","gemini-cli 是一款由谷歌推出的开源 AI 命令行工具，它将强大的 Gemini 大模型能力直接集成到用户的终端环境中。对于习惯在命令行工作的开发者而言，它提供了一条从输入提示词到获取模型响应的最短路径，无需切换窗口即可享受智能辅助。\n\n这款工具主要解决了开发过程中频繁上下文切换的痛点，让用户能在熟悉的终端界面内直接完成代码理解、生成、调试以及自动化运维任务。无论是查询大型代码库、根据草图生成应用，还是执行复杂的 Git 操作，gemini-cli 都能通过自然语言指令高效处理。\n\n它特别适合广大软件工程师、DevOps 人员及技术研究人员使用。其核心亮点包括支持高达 100 万 token 的超长上下文窗口，具备出色的逻辑推理能力；内置 Google 搜索、文件操作及 Shell 命令执行等实用工具；更独特的是，它支持 MCP（模型上下文协议），允许用户灵活扩展自定义集成，连接如图像生成等外部能力。此外，个人谷歌账号即可享受免费的额度支持，且项目基于 Apache 2.0 
协议完全开源，是提升终端工作效率的理想助手。",100752,"2026-04-10T01:20:03",[52,13,15,14],"插件",{"id":54,"name":55,"github_repo":56,"description_zh":57,"stars":58,"difficulty_score":32,"last_commit_at":59,"category_tags":60,"status":17},4721,"markitdown","microsoft\u002Fmarkitdown","MarkItDown 是一款由微软 AutoGen 团队打造的轻量级 Python 工具，专为将各类文件高效转换为 Markdown 格式而设计。它支持 PDF、Word、Excel、PPT、图片（含 OCR）、音频（含语音转录）、HTML 乃至 YouTube 链接等多种格式的解析，能够精准提取文档中的标题、列表、表格和链接等关键结构信息。\n\n在人工智能应用日益普及的今天，大语言模型（LLM）虽擅长处理文本，却难以直接读取复杂的二进制办公文档。MarkItDown 恰好解决了这一痛点，它将非结构化或半结构化的文件转化为模型“原生理解”且 Token 效率极高的 Markdown 格式，成为连接本地文件与 AI 分析 pipeline 的理想桥梁。此外，它还提供了 MCP（模型上下文协议）服务器，可无缝集成到 Claude Desktop 等 LLM 应用中。\n\n这款工具特别适合开发者、数据科学家及 AI 研究人员使用，尤其是那些需要构建文档检索增强生成（RAG）系统、进行批量文本分析或希望让 AI 助手直接“阅读”本地文件的用户。虽然生成的内容也具备一定可读性，但其核心优势在于为机器",93400,"2026-04-06T19:52:38",[52,14],{"id":62,"github_repo":63,"name":64,"description_en":65,"description_zh":66,"ai_summary_zh":66,"readme_en":67,"readme_zh":68,"quickstart_zh":69,"use_case_zh":70,"hero_image_url":71,"owner_login":72,"owner_name":73,"owner_avatar_url":74,"owner_bio":75,"owner_company":76,"owner_location":77,"owner_email":78,"owner_twitter":76,"owner_website":79,"owner_url":80,"languages":81,"stars":90,"forks":91,"last_commit_at":92,"license":93,"difficulty_score":32,"env_os":94,"env_gpu":95,"env_ram":96,"env_deps":97,"category_tags":104,"github_topics":106,"view_count":32,"oss_zip_url":76,"oss_zip_packed_at":76,"status":17,"created_at":119,"updated_at":120,"faqs":121,"releases":151},7324,"justadudewhohacks\u002Fface-api.js","face-api.js","JavaScript API for face detection and face recognition in the browser and nodejs with tensorflow.js","face-api.js 是一款基于 TensorFlow.js 构建的 JavaScript 人脸分析工具，让开发者能够轻松在浏览器或 Node.js 环境中实现人脸识别功能。它主要解决了以往人脸技术依赖复杂后端服务、难以在前端直接运行的痛点，让用户无需配置重型服务器即可在网页端完成实时人脸检测与分析。\n\n这款工具非常适合前端开发者、全栈工程师以及希望快速原型验证的研究人员使用。无论是想制作互动滤镜、安防监控演示，还是进行用户身份验证，face-api.js 
都能提供开箱即用的解决方案。普通用户虽不直接操作代码，但能享受到由它驱动的流畅网页体验，如自动美颜、表情互动游戏等。\n\n其技术亮点在于集成了多种高精度模型，不仅能精准定位人脸关键点，还能识别面部表情、估算年龄与性别，甚至支持多人脸实时追踪。所有计算均在客户端完成，既保护了用户隐私，又降低了延迟。配合丰富的示例代码和清晰的文档，face-api.js 让人脸智能应用的开发门槛大幅降低，成为连接人工智能与 Web 开发的桥梁。","# face-api.js\n\n[![Build Status](https:\u002F\u002Ftravis-ci.org\u002Fjustadudewhohacks\u002Fface-api.js.svg?branch=master)](https:\u002F\u002Ftravis-ci.org\u002Fjustadudewhohacks\u002Fface-api.js)\n[![Slack](https:\u002F\u002Fslack.bri.im\u002Fbadge.svg)](https:\u002F\u002Fslack.bri.im)\n\n**JavaScript face recognition API for the browser and nodejs implemented on top of tensorflow.js core ([tensorflow\u002Ftfjs-core](https:\u002F\u002Fgithub.com\u002Ftensorflow\u002Ftfjs))**\n\n![faceapi](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjustadudewhohacks_face-api.js_readme_4eb37a7890d0.gif)\n\n## **[Click me for Live Demos!](https:\u002F\u002Fjustadudewhohacks.github.io\u002Fface-api.js\u002F)**\n\n## Tutorials\n\n* **[face-api.js — JavaScript API for Face Recognition in the Browser with tensorflow.js](https:\u002F\u002Fitnext.io\u002Fface-api-js-javascript-api-for-face-recognition-in-the-browser-with-tensorflow-js-bcc2a6c4cf07)**\n* **[Realtime JavaScript Face Tracking and Face Recognition using face-api.js’ MTCNN Face Detector](https:\u002F\u002Fitnext.io\u002Frealtime-javascript-face-tracking-and-face-recognition-using-face-api-js-mtcnn-face-detector-d924dd8b5740)**\n* **[Realtime Webcam Face Detection And Emotion Recognition - Video](https:\u002F\u002Fyoutu.be\u002FCVClHLwv-4I)**\n* **[Easy Face Recognition Tutorial With JavaScript - Video](https:\u002F\u002Fyoutu.be\u002FAZ4PdALMqx0)**\n* **[Using face-api.js with Vue.js and Electron](https:\u002F\u002Fmedium.com\u002F@andreas.schallwig\u002Fdo-not-laugh-a-simple-ai-powered-game-3e22ad0f8166)**\n* **[Add Masks to People - Gant Laborde on Learn with Jason](https:\u002F\u002Fwww.learnwithjason.dev\u002Ffun-with-machine-learning-pt-2)**\n\n## Table of Contents\n\n* 
**[Features](#features)**\n* **[Running the Examples](#running-the-examples)**\n* **[face-api.js for the Browser](#face-api.js-for-the-browser)**\n* **[face-api.js for Nodejs](#face-api.js-for-nodejs)**\n* **[Usage](#getting-started)**\n  * **[Loading the Models](#getting-started-loading-models)**\n  * **[High Level API](#high-level-api)**\n  * **[Displaying Detection Results](#getting-started-displaying-detection-results)**\n  * **[Face Detection Options](#getting-started-face-detection-options)**\n  * **[Utility Classes](#getting-started-utility-classes)**\n  * **[Other Useful Utility](#other-useful-utility)**\n* **[Available Models](#models)**\n  * **[Face Detection](#models-face-detection)**\n  * **[Face Landmark Detection](#models-face-landmark-detection)**\n  * **[Face Recognition](#models-face-recognition)**\n  * **[Face Expression Recognition](#models-face-expression-recognition)**\n  * **[Age Estimation and Gender Recognition](#models-age-and-gender-recognition)**\n* **[API Documentation](https:\u002F\u002Fjustadudewhohacks.github.io\u002Fface-api.js\u002Fdocs\u002Fglobals.html)**\n\n# Features\n\n## Face Recognition\n\n![face-recognition](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjustadudewhohacks_face-api.js_readme_42b42171d188.gif)\n\n## Face Landmark Detection\n\n![face_landmark_detection](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjustadudewhohacks_face-api.js_readme_2adac9a101ff.jpg)\n\n## Face Expression Recognition\n\n![preview_face-expression-recognition](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjustadudewhohacks_face-api.js_readme_f7fcb649c9fc.png)\n\n## Age Estimation & Gender Recognition\n\n![age_gender_recognition](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjustadudewhohacks_face-api.js_readme_037949bcc06b.jpg)\n\n\u003Ca name=\"running-the-examples\">\u003C\u002Fa>\n\n# Running the Examples\n\nClone the repository:\n\n``` bash\ngit clone 
https:\u002F\u002Fgithub.com\u002Fjustadudewhohacks\u002Fface-api.js.git\n```\n\n## Running the Browser Examples\n\n``` bash\ncd face-api.js\u002Fexamples\u002Fexamples-browser\nnpm i\nnpm start\n```\n\nBrowse to http:\u002F\u002Flocalhost:3000\u002F.\n\n## Running the Nodejs Examples\n\n``` bash\ncd face-api.js\u002Fexamples\u002Fexamples-nodejs\nnpm i\n```\n\nNow run one of the examples using ts-node:\n\n``` bash\nts-node faceDetection.ts\n```\n\nOr simply compile and run them with node:\n\n``` bash\ntsc faceDetection.ts\nnode faceDetection.js\n```\n\n\u003Ca name=\"face-api.js-for-the-browser\">\u003C\u002Fa>\n\n# face-api.js for the Browser\n\nSimply include the latest script from [dist\u002Fface-api.js](https:\u002F\u002Fgithub.com\u002Fjustadudewhohacks\u002Fface-api.js\u002Ftree\u002Fmaster\u002Fdist).\n\nOr install it via npm:\n\n``` bash\nnpm i face-api.js\n```\n\n\u003Ca name=\"face-api.js-for-nodejs\">\u003C\u002Fa>\n\n# face-api.js for Nodejs\n\nWe can use the equivalent API in a nodejs environment by polyfilling some browser specifics, such as HTMLImageElement, HTMLCanvasElement and ImageData. 
The easiest way to do so is by installing the node-canvas package.\n\nAlternatively you can simply construct your own tensors from image data and pass tensors as inputs to the API.\n\nFurthermore you want to install @tensorflow\u002Ftfjs-node (not required, but highly recommended), which speeds things up drastically by compiling and binding to the native Tensorflow C++ library:\n\n``` bash\nnpm i face-api.js canvas @tensorflow\u002Ftfjs-node\n```\n\nNow we simply monkey patch the environment to use the polyfills:\n\n``` javascript\n\u002F\u002F import nodejs bindings to native tensorflow,\n\u002F\u002F not required, but will speed up things drastically (python required)\nimport '@tensorflow\u002Ftfjs-node';\n\n\u002F\u002F implements nodejs wrappers for HTMLCanvasElement, HTMLImageElement, ImageData\nimport * as canvas from 'canvas';\n\nimport * as faceapi from 'face-api.js';\n\n\u002F\u002F patch nodejs environment, we need to provide an implementation of\n\u002F\u002F HTMLCanvasElement and HTMLImageElement\nconst { Canvas, Image, ImageData } = canvas\nfaceapi.env.monkeyPatch({ Canvas, Image, ImageData })\n```\n\n\u003Ca name=\"getting-started\">\u003C\u002Fa>\n\n# Getting Started\n\n\u003Ca name=\"getting-started-loading-models\">\u003C\u002Fa>\n\n## Loading the Models\n\nAll global neural network instances are exported via faceapi.nets:\n\n``` javascript\nconsole.log(faceapi.nets)\n\u002F\u002F ageGenderNet\n\u002F\u002F faceExpressionNet\n\u002F\u002F faceLandmark68Net\n\u002F\u002F faceLandmark68TinyNet\n\u002F\u002F faceRecognitionNet\n\u002F\u002F ssdMobilenetv1\n\u002F\u002F tinyFaceDetector\n\u002F\u002F tinyYolov2\n```\n\nTo load a model, you have to provide the corresponding manifest.json file as well as the model weight files (shards) as assets. Simply copy them to your public or assets folder. 
The manifest.json and shard files of a model have to be located in the same directory \u002F accessible under the same route.\n\nAssuming the models reside in **public\u002Fmodels**:\n\n``` javascript\nawait faceapi.nets.ssdMobilenetv1.loadFromUri('\u002Fmodels')\n\u002F\u002F accordingly for the other models:\n\u002F\u002F await faceapi.nets.faceLandmark68Net.loadFromUri('\u002Fmodels')\n\u002F\u002F await faceapi.nets.faceRecognitionNet.loadFromUri('\u002Fmodels')\n\u002F\u002F ...\n```\n\nIn a nodejs environment you can furthermore load the models directly from disk:\n\n``` javascript\nawait faceapi.nets.ssdMobilenetv1.loadFromDisk('.\u002Fmodels')\n```\n\nYou can also load the model from a tf.NamedTensorMap:\n\n``` javascript\nawait faceapi.nets.ssdMobilenetv1.loadFromWeightMap(weightMap)\n```\n\nAlternatively, you can also create own instances of the neural nets:\n\n``` javascript\nconst net = new faceapi.SsdMobilenetv1()\nawait net.loadFromUri('\u002Fmodels')\n```\n\nYou can also load the weights as a Float32Array (in case you want to use the uncompressed models):\n\n``` javascript\n\u002F\u002F using fetch\nnet.load(await faceapi.fetchNetWeights('\u002Fmodels\u002Fface_detection_model.weights'))\n\n\u002F\u002F using axios\nconst res = await axios.get('\u002Fmodels\u002Fface_detection_model.weights', { responseType: 'arraybuffer' })\nconst weights = new Float32Array(res.data)\nnet.load(weights)\n```\n\n\u003Ca name=\"getting-high-level-api\">\u003C\u002Fa>\n\n## High Level API\n\nIn the following **input** can be an HTML img, video or canvas element or the id of that element.\n\n``` html\n\u003Cimg id=\"myImg\" src=\"images\u002Fexample.png\" \u002F>\n\u003Cvideo id=\"myVideo\" src=\"media\u002Fexample.mp4\" \u002F>\n\u003Ccanvas id=\"myCanvas\" \u002F>\n```\n\n``` javascript\nconst input = document.getElementById('myImg')\n\u002F\u002F const input = document.getElementById('myVideo')\n\u002F\u002F const input = 
document.getElementById('myCanvas')\n\u002F\u002F or simply:\n\u002F\u002F const input = 'myImg'\n```\n\n### Detecting Faces\n\nDetect all faces in an image. Returns **Array\u003C[FaceDetection](#interface-face-detection)>**:\n\n``` javascript\nconst detections = await faceapi.detectAllFaces(input)\n```\n\nDetect the face with the highest confidence score in an image. Returns **[FaceDetection](#interface-face-detection) | undefined**:\n\n``` javascript\nconst detection = await faceapi.detectSingleFace(input)\n```\n\nBy default **detectAllFaces** and **detectSingleFace** utilize the SSD Mobilenet V1 Face Detector. You can specify the face detector by passing the corresponding options object:\n\n``` javascript\nconst detections1 = await faceapi.detectAllFaces(input, new faceapi.SsdMobilenetv1Options())\nconst detections2 = await faceapi.detectAllFaces(input, new faceapi.TinyFaceDetectorOptions())\n```\n\nYou can tune the options of each face detector as shown [here](#getting-started-face-detection-options).\n\n### Detecting 68 Face Landmark Points\n\n**After face detection, we can furthermore predict the facial landmarks for each detected face as follows:**\n\nDetect all faces in an image + computes 68 Point Face Landmarks for each detected face. Returns **Array\u003C[WithFaceLandmarks\u003CWithFaceDetection\u003C{}>>](#getting-started-utility-classes)>**:\n\n``` javascript\nconst detectionsWithLandmarks = await faceapi.detectAllFaces(input).withFaceLandmarks()\n```\n\nDetect the face with the highest confidence score in an image + computes 68 Point Face Landmarks for that face. 
Returns **[WithFaceLandmarks\u003CWithFaceDetection\u003C{}>>](#getting-started-utility-classes) | undefined**:\n\n``` javascript\nconst detectionWithLandmarks = await faceapi.detectSingleFace(input).withFaceLandmarks()\n```\n\nYou can also specify to use the tiny model instead of the default model:\n\n``` javascript\nconst useTinyModel = true\nconst detectionsWithLandmarks = await faceapi.detectAllFaces(input).withFaceLandmarks(useTinyModel)\n```\n\n### Computing Face Descriptors\n\n**After face detection and facial landmark prediction the face descriptors for each face can be computed as follows:**\n\nDetect all faces in an image + compute 68 Point Face Landmarks for each detected face. Returns **Array\u003C[WithFaceDescriptor\u003CWithFaceLandmarks\u003CWithFaceDetection\u003C{}>>>](#getting-started-utility-classes)>**:\n\n``` javascript\nconst results = await faceapi.detectAllFaces(input).withFaceLandmarks().withFaceDescriptors()\n```\n\nDetect the face with the highest confidence score in an image + compute 68 Point Face Landmarks and face descriptor for that face. Returns **[WithFaceDescriptor\u003CWithFaceLandmarks\u003CWithFaceDetection\u003C{}>>>](#getting-started-utility-classes) | undefined**:\n\n``` javascript\nconst result = await faceapi.detectSingleFace(input).withFaceLandmarks().withFaceDescriptor()\n```\n\n### Recognizing Face Expressions\n\n**Face expression recognition can be performed for detected faces as follows:**\n\nDetect all faces in an image + recognize face expressions of each face. Returns **Array\u003C[WithFaceExpressions\u003CWithFaceLandmarks\u003CWithFaceDetection\u003C{}>>>](#getting-started-utility-classes)>**:\n\n``` javascript\nconst detectionsWithExpressions = await faceapi.detectAllFaces(input).withFaceLandmarks().withFaceExpressions()\n```\n\nDetect the face with the highest confidence score in an image + recognize the face expressions for that face. 
Returns **[WithFaceExpressions\u003CWithFaceLandmarks\u003CWithFaceDetection\u003C{}>>>](#getting-started-utility-classes) | undefined**:\n\n``` javascript\nconst detectionWithExpressions = await faceapi.detectSingleFace(input).withFaceLandmarks().withFaceExpressions()\n```\n\n**You can also skip .withFaceLandmarks(), which will skip the face alignment step (less stable accuracy):**\n\nDetect all faces without face alignment + recognize face expressions of each face. Returns **Array\u003C[WithFaceExpressions\u003CWithFaceDetection\u003C{}>>](#getting-started-utility-classes)>**:\n\n``` javascript\nconst detectionsWithExpressions = await faceapi.detectAllFaces(input).withFaceExpressions()\n```\n\nDetect the face with the highest confidence score without face alignment + recognize the face expression for that face. Returns **[WithFaceExpressions\u003CWithFaceDetection\u003C{}>>](#getting-started-utility-classes) | undefined**:\n\n``` javascript\nconst detectionWithExpressions = await faceapi.detectSingleFace(input).withFaceExpressions()\n```\n\n### Age Estimation and Gender Recognition\n\n**Age estimation and gender recognition from detected faces can be done as follows:**\n\nDetect all faces in an image + estimate age and recognize gender of each face. Returns **Array\u003C[WithAge\u003CWithGender\u003CWithFaceLandmarks\u003CWithFaceDetection\u003C{}>>>>](#getting-started-utility-classes)>**:\n\n``` javascript\nconst detectionsWithAgeAndGender = await faceapi.detectAllFaces(input).withFaceLandmarks().withAgeAndGender()\n```\n\nDetect the face with the highest confidence score in an image  + estimate age and recognize gender for that face. 
Returns **[WithAge\u003CWithGender\u003CWithFaceLandmarks\u003CWithFaceDetection\u003C{}>>>>](#getting-started-utility-classes) | undefined**:\n\n``` javascript\nconst detectionWithAgeAndGender = await faceapi.detectSingleFace(input).withFaceLandmarks().withAgeAndGender()\n```\n\n**You can also skip .withFaceLandmarks(), which will skip the face alignment step (less stable accuracy):**\n\nDetect all faces without face alignment + estimate age and recognize gender of each face. Returns **Array\u003C[WithAge\u003CWithGender\u003CWithFaceDetection\u003C{}>>>](#getting-started-utility-classes)>**:\n\n``` javascript\nconst detectionsWithAgeAndGender = await faceapi.detectAllFaces(input).withAgeAndGender()\n```\n\nDetect the face with the highest confidence score without face alignment + estimate age and recognize gender for that face. Returns **[WithAge\u003CWithGender\u003CWithFaceDetection\u003C{}>>>](#getting-started-utility-classes) | undefined**:\n\n``` javascript\nconst detectionWithAgeAndGender = await faceapi.detectSingleFace(input).withAgeAndGender()\n```\n\n### Composition of Tasks\n\n**Tasks can be composed as follows:**\n\n``` javascript\n\u002F\u002F all faces\nawait faceapi.detectAllFaces(input)\nawait faceapi.detectAllFaces(input).withFaceExpressions()\nawait faceapi.detectAllFaces(input).withFaceLandmarks()\nawait faceapi.detectAllFaces(input).withFaceLandmarks().withFaceExpressions()\nawait faceapi.detectAllFaces(input).withFaceLandmarks().withFaceExpressions().withFaceDescriptors()\nawait faceapi.detectAllFaces(input).withFaceLandmarks().withAgeAndGender().withFaceDescriptors()\nawait faceapi.detectAllFaces(input).withFaceLandmarks().withFaceExpressions().withAgeAndGender().withFaceDescriptors()\n\n\u002F\u002F single face\nawait faceapi.detectSingleFace(input)\nawait faceapi.detectSingleFace(input).withFaceExpressions()\nawait faceapi.detectSingleFace(input).withFaceLandmarks()\nawait 
faceapi.detectSingleFace(input).withFaceLandmarks().withFaceExpressions()\nawait faceapi.detectSingleFace(input).withFaceLandmarks().withFaceExpressions().withFaceDescriptor()\nawait faceapi.detectSingleFace(input).withFaceLandmarks().withAgeAndGender().withFaceDescriptor()\nawait faceapi.detectSingleFace(input).withFaceLandmarks().withFaceExpressions().withAgeAndGender().withFaceDescriptor()\n```\n\n### Face Recognition by Matching Descriptors\n\nTo perform face recognition, one can use faceapi.FaceMatcher to compare reference face descriptors to query face descriptors.\n\nFirst, we initialize the FaceMatcher with the reference data, for example we can simply detect faces in a **referenceImage** and match the descriptors of the detected faces to faces of subsequent images:\n\n``` javascript\nconst results = await faceapi\n  .detectAllFaces(referenceImage)\n  .withFaceLandmarks()\n  .withFaceDescriptors()\n\nif (!results.length) {\n  return\n}\n\n\u002F\u002F create FaceMatcher with automatically assigned labels\n\u002F\u002F from the detection results for the reference image\nconst faceMatcher = new faceapi.FaceMatcher(results)\n```\n\nNow we can recognize a persons face shown in **queryImage1**:\n\n``` javascript\nconst singleResult = await faceapi\n  .detectSingleFace(queryImage1)\n  .withFaceLandmarks()\n  .withFaceDescriptor()\n\nif (singleResult) {\n  const bestMatch = faceMatcher.findBestMatch(singleResult.descriptor)\n  console.log(bestMatch.toString())\n}\n```\n\nOr we can recognize all faces shown in **queryImage2**:\n\n``` javascript\nconst results = await faceapi\n  .detectAllFaces(queryImage2)\n  .withFaceLandmarks()\n  .withFaceDescriptors()\n\nresults.forEach(fd => {\n  const bestMatch = faceMatcher.findBestMatch(fd.descriptor)\n  console.log(bestMatch.toString())\n})\n```\n\nYou can also create labeled reference descriptors as follows:\n\n``` javascript\nconst labeledDescriptors = [\n  new faceapi.LabeledFaceDescriptors(\n    'obama',\n    
[descriptorObama1, descriptorObama2]\n  ),\n  new faceapi.LabeledFaceDescriptors(\n    'trump',\n    [descriptorTrump]\n  )\n]\n\nconst faceMatcher = new faceapi.FaceMatcher(labeledDescriptors)\n```\n\n\u003Ca name=\"getting-started-displaying-detection-results\">\u003C\u002Fa>\n\n## Displaying Detection Results\n\nPreparing the overlay canvas:\n\n``` javascript\nconst displaySize = { width: input.width, height: input.height }\n\u002F\u002F resize the overlay canvas to the input dimensions\nconst canvas = document.getElementById('overlay')\nfaceapi.matchDimensions(canvas, displaySize)\n```\n\nface-api.js predefines some highlevel drawing functions, which you can utilize:\n\n``` javascript\n\u002F* Display detected face bounding boxes *\u002F\nconst detections = await faceapi.detectAllFaces(input)\n\u002F\u002F resize the detected boxes in case your displayed image has a different size than the original\nconst resizedDetections = faceapi.resizeResults(detections, displaySize)\n\u002F\u002F draw detections into the canvas\nfaceapi.draw.drawDetections(canvas, resizedDetections)\n\n\u002F* Display face landmarks *\u002F\nconst detectionsWithLandmarks = await faceapi\n  .detectAllFaces(input)\n  .withFaceLandmarks()\n\u002F\u002F resize the detected boxes and landmarks in case your displayed image has a different size than the original\nconst resizedResults = faceapi.resizeResults(detectionsWithLandmarks, displaySize)\n\u002F\u002F draw detections into the canvas\nfaceapi.draw.drawDetections(canvas, resizedResults)\n\u002F\u002F draw the landmarks into the canvas\nfaceapi.draw.drawFaceLandmarks(canvas, resizedResults)\n\n\n\u002F* Display face expression results *\u002F\nconst detectionsWithExpressions = await faceapi\n  .detectAllFaces(input)\n  .withFaceLandmarks()\n  .withFaceExpressions()\n\u002F\u002F resize the detected boxes and landmarks in case your displayed image has a different size than the original\nconst resizedResults = 
faceapi.resizeResults(detectionsWithExpressions, displaySize)\n\u002F\u002F draw detections into the canvas\nfaceapi.draw.drawDetections(canvas, resizedResults)\n\u002F\u002F draw a textbox displaying the face expressions with minimum probability into the canvas\nconst minProbability = 0.05\nfaceapi.draw.drawFaceExpressions(canvas, resizedResults, minProbability)\n```\n\nYou can also draw boxes with custom text ([DrawBox](https:\u002F\u002Fgithub.com\u002Fjustadudewhohacks\u002Ftfjs-image-recognition-base\u002Fblob\u002Fmaster\u002Fsrc\u002Fdraw\u002FDrawBox.ts)):\n\n``` javascript\nconst box = { x: 50, y: 50, width: 100, height: 100 }\n\u002F\u002F see DrawBoxOptions below\nconst drawOptions = {\n  label: 'Hello I am a box!',\n  lineWidth: 2\n}\nconst drawBox = new faceapi.draw.DrawBox(box, drawOptions)\ndrawBox.draw(document.getElementById('myCanvas'))\n```\n\nDrawBox drawing options:\n\n``` javascript\nexport interface IDrawBoxOptions {\n  boxColor?: string\n  lineWidth?: number\n  drawLabelOptions?: IDrawTextFieldOptions\n  label?: string\n}\n```\n\nFinally you can draw custom text fields ([DrawTextField](https:\u002F\u002Fgithub.com\u002Fjustadudewhohacks\u002Ftfjs-image-recognition-base\u002Fblob\u002Fmaster\u002Fsrc\u002Fdraw\u002FDrawTextField.ts)):\n\n``` javascript\nconst text = [\n  'This is a textline!',\n  'This is another textline!'\n]\nconst anchor = { x: 200, y: 200 }\n\u002F\u002F see DrawTextField below\nconst drawOptions = {\n  anchorPosition: 'TOP_LEFT',\n  backgroundColor: 'rgba(0, 0, 0, 0.5)'\n}\nconst drawBox = new faceapi.draw.DrawTextField(text, anchor, drawOptions)\ndrawBox.draw(document.getElementById('myCanvas'))\n```\n\nDrawTextField drawing options:\n\n``` javascript\nexport interface IDrawTextFieldOptions {\n  anchorPosition?: AnchorPosition\n  backgroundColor?: string\n  fontColor?: string\n  fontSize?: number\n  fontStyle?: string\n  padding?: number\n}\n\nexport enum AnchorPosition {\n  TOP_LEFT = 'TOP_LEFT',\n  TOP_RIGHT = 
'TOP_RIGHT',\n  BOTTOM_LEFT = 'BOTTOM_LEFT',\n  BOTTOM_RIGHT = 'BOTTOM_RIGHT'\n}\n```\n\n\u003Ca name=\"getting-started-face-detection-options\">\u003C\u002Fa>\n\n## Face Detection Options\n\n### SsdMobilenetv1Options\n\n``` javascript\nexport interface ISsdMobilenetv1Options {\n  \u002F\u002F minimum confidence threshold\n  \u002F\u002F default: 0.5\n  minConfidence?: number\n\n  \u002F\u002F maximum number of faces to return\n  \u002F\u002F default: 100\n  maxResults?: number\n}\n\n\u002F\u002F example\nconst options = new faceapi.SsdMobilenetv1Options({ minConfidence: 0.8 })\n```\n\n### TinyFaceDetectorOptions\n\n``` javascript\nexport interface ITinyFaceDetectorOptions {\n  \u002F\u002F size at which image is processed, the smaller the faster,\n  \u002F\u002F but less precise in detecting smaller faces, must be divisible\n  \u002F\u002F by 32, common sizes are 128, 160, 224, 320, 416, 512, 608,\n  \u002F\u002F for face tracking via webcam I would recommend using smaller sizes,\n  \u002F\u002F e.g. 128, 160, for detecting smaller faces use larger sizes, e.g. 
512, 608\n  \u002F\u002F default: 416\n  inputSize?: number\n\n  \u002F\u002F minimum confidence threshold\n  \u002F\u002F default: 0.5\n  scoreThreshold?: number\n}\n\n\u002F\u002F example\nconst options = new faceapi.TinyFaceDetectorOptions({ inputSize: 320 })\n```\n\n\u003Ca name=\"getting-started-utility-classes\">\u003C\u002Fa>\n\n## Utility Classes\n\n### IBox\n\n``` javascript\nexport interface IBox {\n  x: number\n  y: number\n  width: number\n  height: number\n}\n```\n\n### IFaceDetection\n\n``` javascript\nexport interface IFaceDetection {\n  score: number\n  box: Box\n}\n```\n\n### IFaceLandmarks\n\n``` javascript\nexport interface IFaceLandmarks {\n  positions: Point[]\n  shift: Point\n}\n```\n\n### WithFaceDetection\n\n``` javascript\nexport type WithFaceDetection\u003CTSource> = TSource & {\n  detection: FaceDetection\n}\n```\n\n### WithFaceLandmarks\n\n``` javascript\nexport type WithFaceLandmarks\u003CTSource> = TSource & {\n  unshiftedLandmarks: FaceLandmarks\n  landmarks: FaceLandmarks\n  alignedRect: FaceDetection\n}\n```\n\n### WithFaceDescriptor\n\n``` javascript\nexport type WithFaceDescriptor\u003CTSource> = TSource & {\n  descriptor: Float32Array\n}\n```\n\n### WithFaceExpressions\n\n``` javascript\nexport type WithFaceExpressions\u003CTSource> = TSource & {\n  expressions: FaceExpressions\n}\n```\n\n### WithAge\n\n``` javascript\nexport type WithAge\u003CTSource> = TSource & {\n  age: number\n}\n```\n\n### WithGender\n\n``` javascript\nexport type WithGender\u003CTSource> = TSource & {\n  gender: Gender\n  genderProbability: number\n}\n\nexport enum Gender {\n  FEMALE = 'female',\n  MALE = 'male'\n}\n```\n\n\u003Ca name=\"getting-started-other-useful-utility\">\u003C\u002Fa>\n\n## Other Useful Utility\n\n### Using the Low Level API\n\nInstead of using the high level API, you can directly use the forward methods of each neural network:\n\n``` javascript\nconst detections1 = await faceapi.ssdMobilenetv1(input, options)\nconst detections2 = 
await faceapi.tinyFaceDetector(input, options)\nconst landmarks1 = await faceapi.detectFaceLandmarks(faceImage)\nconst landmarks2 = await faceapi.detectFaceLandmarksTiny(faceImage)\nconst descriptor = await faceapi.computeFaceDescriptor(alignedFaceImage)\n```\n\n### Extracting a Canvas for an Image Region\n\n``` javascript\nconst regionsToExtract = [\n  new faceapi.Rect(0, 0, 100, 100)\n]\n\u002F\u002F actually extractFaces is meant to extract face regions from bounding boxes\n\u002F\u002F but you can also use it to extract any other region\nconst canvases = await faceapi.extractFaces(input, regionsToExtract)\n```\n\n### Euclidean Distance\n\n``` javascript\n\u002F\u002F ment to be used for computing the euclidean distance between two face descriptors\nconst dist = faceapi.euclideanDistance([0, 0], [0, 10])\nconsole.log(dist) \u002F\u002F 10\n```\n\n### Retrieve the Face Landmark Points and Contours\n\n``` javascript\nconst landmarkPositions = landmarks.positions\n\n\u002F\u002F or get the positions of individual contours,\n\u002F\u002F only available for 68 point face ladnamrks (FaceLandmarks68)\nconst jawOutline = landmarks.getJawOutline()\nconst nose = landmarks.getNose()\nconst mouth = landmarks.getMouth()\nconst leftEye = landmarks.getLeftEye()\nconst rightEye = landmarks.getRightEye()\nconst leftEyeBbrow = landmarks.getLeftEyeBrow()\nconst rightEyeBrow = landmarks.getRightEyeBrow()\n```\n\n### Fetch and Display Images from an URL\n\n``` html\n\u003Cimg id=\"myImg\" src=\"\">\n```\n\n``` javascript\nconst image = await faceapi.fetchImage('\u002Fimages\u002Fexample.png')\n\nconsole.log(image instanceof HTMLImageElement) \u002F\u002F true\n\n\u002F\u002F displaying the fetched image content\nconst myImg = document.getElementById('myImg')\nmyImg.src = image.src\n```\n\n### Fetching JSON\n\n``` javascript\nconst json = await faceapi.fetchJson('\u002Ffiles\u002Fexample.json')\n```\n\n### Creating an Image Picker\n\n``` html\n\u003Cimg id=\"myImg\" 
src=\"\">\n\u003Cinput id=\"myFileUpload\" type=\"file\" onchange=\"uploadImage()\" accept=\".jpg, .jpeg, .png\">\n```\n\n``` javascript\nasync function uploadImage() {\n  const imgFile = document.getElementById('myFileUpload').files[0]\n  \u002F\u002F create an HTMLImageElement from a Blob\n  const img = await faceapi.bufferToImage(imgFile)\n  document.getElementById('myImg').src = img.src\n}\n```\n\n### Creating a Canvas Element from an Image or Video Element\n\n``` html\n\u003Cimg id=\"myImg\" src=\"images\u002Fexample.png\" \u002F>\n\u003Cvideo id=\"myVideo\" src=\"media\u002Fexample.mp4\" \u002F>\n```\n\n``` javascript\nconst canvas1 = faceapi.createCanvasFromMedia(document.getElementById('myImg'))\nconst canvas2 = faceapi.createCanvasFromMedia(document.getElementById('myVideo'))\n```\n\n\u003Ca name=\"models\">\u003C\u002Fa>\n\n# Available Models\n\n\u003Ca name=\"models-face-detection\">\u003C\u002Fa>\n\n## Face Detection Models\n\n### SSD Mobilenet V1\n\nFor face detection, this project implements an SSD (Single Shot Multibox Detector) based on MobileNetV1. The neural net will compute the locations of each face in an image and will return the bounding boxes together with its probability for each face. This face detector is aiming towards obtaining high accuracy in detecting face bounding boxes instead of low inference time. 
The size of the quantized model is about 5.4 MB (**ssd_mobilenetv1_model**).\n\nThe face detection model has been trained on the [WIDERFACE dataset](http:\u002F\u002Fmmlab.ie.cuhk.edu.hk\u002Fprojects\u002FWIDERFace\u002F) and the weights are provided by [yeephycho](https:\u002F\u002Fgithub.com\u002Fyeephycho) in [this](https:\u002F\u002Fgithub.com\u002Fyeephycho\u002Ftensorflow-face-detection) repo.\n\n### Tiny Face Detector\n\nThe Tiny Face Detector is a very performant, realtime face detector, which is much faster, smaller and less resource consuming compared to the SSD Mobilenet V1 face detector, in return it performs slightly less well on detecting small faces. This model is extremely mobile and web friendly, thus it should be your GO-TO face detector on mobile devices and resource limited clients. The size of the quantized model is only 190 KB (**tiny_face_detector_model**).\n\nThe face detector has been trained on a custom dataset of ~14K images labeled with bounding boxes. Furthermore the model has been trained to predict bounding boxes, which entirely cover facial feature points, thus it in general produces better results in combination with subsequent face landmark detection than SSD Mobilenet V1.\n\nThis model is basically an even tinier version of Tiny Yolo V2, replacing the regular convolutions of Yolo with depthwise separable convolutions. Yolo is fully convolutional, thus can easily adapt to different input image sizes to trade off accuracy for performance (inference time).\n\n\u003Ca name=\"models-face-landmark-detection\">\u003C\u002Fa>\n\n## 68 Point Face Landmark Detection Models\n\nThis package implements a very lightweight and fast, yet accurate 68 point face landmark detector. The default model has a size of only 350kb (**face_landmark_68_model**) and the tiny model is only 80kb (**face_landmark_68_tiny_model**). Both models employ the ideas of depthwise separable convolutions as well as densely connected blocks. 
The models have been trained on a dataset of ~35k face images labeled with 68 face landmark points.\n\n\u003Ca name=\"models-face-recognition\">\u003C\u002Fa>\n\n## Face Recognition Model\n\nFor face recognition, a ResNet-34 like architecture is implemented to compute a face descriptor (a feature vector with 128 values) from any given face image, which is used to describe the characteristics of a persons face. The model is **not** limited to the set of faces used for training, meaning you can use it for face recognition of any person, for example yourself. You can determine the similarity of two arbitrary faces by comparing their face descriptors, for example by computing the euclidean distance or using any other classifier of your choice.\n\nThe neural net is equivalent to the **FaceRecognizerNet** used in [face-recognition.js](https:\u002F\u002Fgithub.com\u002Fjustadudewhohacks\u002Fface-recognition.js) and the net used in the [dlib](https:\u002F\u002Fgithub.com\u002Fdavisking\u002Fdlib\u002Fblob\u002Fmaster\u002Fexamples\u002Fdnn_face_recognition_ex.cpp) face recognition example. The weights have been trained by [davisking](https:\u002F\u002Fgithub.com\u002Fdavisking) and the model achieves a prediction accuracy of 99.38% on the LFW (Labeled Faces in the Wild) benchmark for face recognition.\n\nThe size of the quantized model is roughly 6.2 MB (**face_recognition_model**).\n\n\u003Ca name=\"models-face-expression-recognition\">\u003C\u002Fa>\n\n## Face Expression Recognition Model\n\nThe face expression recognition model is lightweight, fast and provides reasonable accuracy. The model has a size of roughly 310kb and it employs depthwise separable convolutions and densely connected blocks. It has been trained on a variety of images from publicly available datasets as well as images scraped from the web. 
Note, that wearing glasses might decrease the accuracy of the prediction results.\n\n\u003Ca name=\"models-age-and-gender-recognition\">\u003C\u002Fa>\n\n## Age and Gender Recognition Model\n\nThe age and gender recognition model is a multitask network, which employs a feature extraction layer, an age regression layer and a gender classifier. The model has a size of roughly 420kb and the feature extractor employs a tinier but very similar architecture to Xception.\n\nThis model has been trained and tested on the following databases with an 80\u002F20 train\u002Ftest split each: UTK, FGNET, Chalearn, Wiki, IMDB*, CACD*, MegaAge, MegaAge-Asian. The `*` indicates, that these databases have been algorithmically cleaned up, since the initial databases are very noisy.\n\n### Total Test Results\n\nTotal MAE (Mean Age Error): **4.54**\n\nTotal Gender Accuracy: **95%**\n\n### Test results for each database\n\nThe `-` indicates, that there are no gender labels available for these databases.\n\nDatabase        | UTK    | FGNET | Chalearn | Wiki | IMDB* | CACD* | MegaAge | MegaAge-Asian |\n----------------|-------:|------:|---------:|-----:|------:|------:|--------:|--------------:|\nMAE             | 5.25   | 4.23  | 6.24     | 6.54 | 3.63  | 3.20  | 6.23    | 4.21          |\nGender Accuracy | 0.93   | -     | 0.94     | 0.95 | -     | 0.97  | -       | -             |\n\n### Test results for different age category groups\n\nAge Range       | 0 - 3  | 4 - 8 | 9 - 18 | 19 - 28 | 29 - 40 | 41 - 60 | 60 - 80 | 80+     |\n----------------|-------:|------:|-------:|--------:|--------:|--------:|--------:|--------:|\nMAE             | 1.52   | 3.06  | 4.82   | 4.99    | 5.43    | 4.94    | 6.17    | 9.91    |\nGender Accuracy | 0.69   | 0.80  | 0.88   | 0.96    | 0.97    | 0.97    | 0.96    | 0.9     |\n","# 
face-api.js\n\n[![构建状态](https:\u002F\u002Ftravis-ci.org\u002Fjustadudewhohacks\u002Fface-api.js.svg?branch=master)](https:\u002F\u002Ftravis-ci.org\u002Fjustadudewhohacks\u002Fface-api.js)\n[![Slack](https:\u002F\u002Fslack.bri.im\u002Fbadge.svg)](https:\u002F\u002Fslack.bri.im)\n\n**基于 tensorflow.js core（[tensorflow\u002Ftfjs-core](https:\u002F\u002Fgithub.com\u002Ftensorflow\u002Ftfjs)）实现的浏览器和 Node.js 的 JavaScript 人脸识别 API**\n\n![faceapi](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjustadudewhohacks_face-api.js_readme_4eb37a7890d0.gif)\n\n## **[点击这里查看实时演示！](https:\u002F\u002Fjustadudewhohacks.github.io\u002Fface-api.js\u002F)**\n\n## 教程\n\n* **[face-api.js —— 使用 tensorflow.js 在浏览器中进行人脸识别的 JavaScript API](https:\u002F\u002Fitnext.io\u002Fface-api-js-javascript-api-for-face-recognition-in-the-browser-with-tensorflow-js-bcc2a6c4cf07)**\n* **[使用 face-api.js 的 MTCNN 人脸检测器进行实时 JavaScript 人脸跟踪和人脸识别](https:\u002F\u002Fitnext.io\u002Frealtime-javascript-face-tracking-and-face-recognition-using-face-api-js-mtcnn-face-detector-d924dd8b5740)**\n* **[实时网络摄像头人脸检测与情绪识别 - 视频](https:\u002F\u002Fyoutu.be\u002FCVClHLwv-4I)**\n* **[使用 JavaScript 进行简单的人脸识别教程 - 视频](https:\u002F\u002Fyoutu.be\u002FAZ4PdALMqx0)**\n* **[在 Vue.js 和 Electron 中使用 face-api.js](https:\u002F\u002Fmedium.com\u002F@andreas.schallwig\u002Fdo-not-laugh-a-simple-ai-powered-game-3e22ad0f8166)**\n* **[为人物添加面具 - Gant Laborde 在 Learn with Jason 上的分享](https:\u002F\u002Fwww.learnwithjason.dev\u002Ffun-with-machine-learning-pt-2)**\n\n## 目录\n\n* **[功能特性](#features)**\n* **[运行示例](#running-the-examples)**\n* **[适用于浏览器的 face-api.js](#face-api.js-for-the-browser)**\n* **[适用于 Node.js 的 face-api.js](#face-api.js-for-nodejs)**\n* **[使用方法](#getting-started)**\n  * **[加载模型](#getting-started-loading-models)**\n  * **[高级 API](#high-level-api)**\n  * **[显示检测结果](#getting-started-displaying-detection-results)**\n  * **[人脸检测选项](#getting-started-face-detection-options)**\n  * **[工具类](#getting-started-utility-classes)**\n  * 
**[其他实用工具](#other-useful-utility)**\n* **[可用模型](#models)**\n  * **[人脸检测](#models-face-detection)**\n  * **[人脸关键点检测](#models-face-landmark-detection)**\n  * **[人脸识别](#models-face-recognition)**\n  * **[面部表情识别](#models-face-expression-recognition)**\n  * **[年龄估计与性别识别](#models-age-and-gender-recognition)**\n* **[API 文档](https:\u002F\u002Fjustadudewhohacks.github.io\u002Fface-api.js\u002Fdocs\u002Fglobals.html)**\n\n# 功能特性\n\n## 人脸识别\n\n![face-recognition](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjustadudewhohacks_face-api.js_readme_42b42171d188.gif)\n\n## 人脸关键点检测\n\n![face_landmark_detection](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjustadudewhohacks_face-api.js_readme_2adac9a101ff.jpg)\n\n## 面部表情识别\n\n![preview_face-expression-recognition](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjustadudewhohacks_face-api.js_readme_f7fcb649c9fc.png)\n\n## 年龄估计与性别识别\n\n![age_gender_recognition](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjustadudewhohacks_face-api.js_readme_037949bcc06b.jpg)\n\n\u003Ca name=\"running-the-examples\">\u003C\u002Fa>\n\n# 运行示例\n\n克隆仓库：\n\n``` bash\ngit clone https:\u002F\u002Fgithub.com\u002Fjustadudewhohacks\u002Fface-api.js.git\n```\n\n## 运行浏览器示例\n\n``` bash\ncd face-api.js\u002Fexamples\u002Fexamples-browser\nnpm i\nnpm start\n```\n\n打开 http:\u002F\u002Flocalhost:3000\u002F。\n\n## 运行 Node.js 示例\n\n``` bash\ncd face-api.js\u002Fexamples\u002Fexamples-nodejs\nnpm i\n```\n\n现在可以使用 ts-node 运行其中一个示例：\n\n``` bash\nts-node faceDetection.ts\n```\n\n或者直接编译并用 node 运行：\n\n``` bash\ntsc faceDetection.ts\nnode faceDetection.js\n```\n\n\u003Ca name=\"face-api.js-for-the-browser\">\u003C\u002Fa>\n\n# 适用于浏览器的 face-api.js\n\n只需从 [dist\u002Fface-api.js](https:\u002F\u002Fgithub.com\u002Fjustadudewhohacks\u002Fface-api.js\u002Ftree\u002Fmaster\u002Fdist) 引入最新脚本即可。\n\n或者通过 npm 安装：\n\n``` bash\nnpm i face-api.js\n```\n\n\u003Ca name=\"face-api.js-for-nodejs\">\u003C\u002Fa>\n\n# 适用于 Node.js 的 face-api.js\n\n我们可以通过 polyfill 
一些浏览器特有的对象，如 HTMLImageElement、HTMLCanvasElement 和 ImageData，在 Node.js 环境中使用等效的 API。最简单的方法是安装 node-canvas 包。\n\n另外，你也可以直接从图像数据构造张量，并将张量作为输入传递给 API。\n\n此外，建议安装 @tensorflow\u002Ftfjs-node（非必需，但强烈推荐），它通过编译并与原生 TensorFlow C++ 库绑定，可显著提升性能：\n\n``` bash\nnpm i face-api.js canvas @tensorflow\u002Ftfjs-node\n```\n\n接下来，我们只需对环境进行 monkey patch，以使用这些 polyfill：\n\n``` javascript\n\u002F\u002F 导入 Node.js 绑定到原生 TensorFlow，\n\u002F\u002F 虽然不是必需的，但会极大地加速处理（需要 Python）\nimport '@tensorflow\u002Ftfjs-node';\n\n\u002F\u002F 实现 Node.js 对 HTMLCanvasElement、HTMLImageElement 和 ImageData 的封装\nimport * as canvas from 'canvas';\n\nimport * as faceapi from 'face-api.js';\n\n\u002F\u002F 对 Node.js 环境进行补丁，我们需要提供 HTMLCanvasElement 和 HTMLImageElement 的实现\nconst { Canvas, Image, ImageData } = canvas\nfaceapi.env.monkeyPatch({ Canvas, Image, ImageData })\n```\n\n\u003Ca name=\"getting-started\">\u003C\u002Fa>\n\n# 开始使用\n\n\u003Ca name=\"getting-started-loading-models\">\u003C\u002Fa>\n\n## 加载模型\n\n所有全局神经网络实例都通过 `faceapi.nets` 导出：\n\n``` javascript\nconsole.log(faceapi.nets)\n\u002F\u002F ageGenderNet\n\u002F\u002F faceExpressionNet\n\u002F\u002F faceLandmark68Net\n\u002F\u002F faceLandmark68TinyNet\n\u002F\u002F faceRecognitionNet\n\u002F\u002F ssdMobilenetv1\n\u002F\u002F tinyFaceDetector\n\u002F\u002F tinyYolov2\n```\n\n要加载一个模型，你需要提供对应的 `manifest.json` 文件以及模型权重文件（分片）作为资源。只需将它们复制到你的 `public` 或 `assets` 文件夹中。模型的 `manifest.json` 和分片文件必须位于同一目录下，或可通过相同的路径访问。\n\n假设模型存放在 **public\u002Fmodels** 目录中：\n\n``` javascript\nawait faceapi.nets.ssdMobilenetv1.loadFromUri('\u002Fmodels')\n\u002F\u002F 对其他模型同样操作：\n\u002F\u002F await faceapi.nets.faceLandmark68Net.loadFromUri('\u002Fmodels')\n\u002F\u002F await faceapi.nets.faceRecognitionNet.loadFromUri('\u002Fmodels')\n\u002F\u002F ...\n```\n\n在 Node.js 环境中，你还可以直接从磁盘加载模型：\n\n``` javascript\nawait faceapi.nets.ssdMobilenetv1.loadFromDisk('.\u002Fmodels')\n```\n\n你也可以从 `tf.NamedTensorMap` 加载模型：\n\n``` javascript\nawait 
faceapi.nets.ssdMobilenetv1.loadFromWeightMap(weightMap)\n```\n\n此外，你还可以创建自己的神经网络实例：\n\n``` javascript\nconst net = new faceapi.SsdMobilenetv1()\nawait net.loadFromUri('\u002Fmodels')\n```\n\n你还可以将权重作为 `Float32Array` 加载（如果你希望使用未压缩的模型）：\n\n``` javascript\n\u002F\u002F 使用 fetch\nnet.load(await faceapi.fetchNetWeights('\u002Fmodels\u002Fface_detection_model.weights'))\n\n\u002F\u002F 使用 axios\nconst res = await axios.get('\u002Fmodels\u002Fface_detection_model.weights', { responseType: 'arraybuffer' })\nconst weights = new Float32Array(res.data)\nnet.load(weights)\n```\n\n\u003Ca name=\"getting-high-level-api\">\u003C\u002Fa>\n\n## 高级 API\n\n以下示例中，**input** 可以是 HTML 的 `\u003Cimg>`、`\u003Cvideo>` 或 `\u003Ccanvas>` 元素，也可以是这些元素的 ID。\n\n``` html\n\u003Cimg id=\"myImg\" src=\"images\u002Fexample.png\" \u002F>\n\u003Cvideo id=\"myVideo\" src=\"media\u002Fexample.mp4\" \u002F>\n\u003Ccanvas id=\"myCanvas\" \u002F>\n```\n\n``` javascript\nconst input = document.getElementById('myImg')\n\u002F\u002F const input = document.getElementById('myVideo')\n\u002F\u002F const input = document.getElementById('myCanvas')\n\u002F\u002F 或者直接：\n\u002F\u002F const input = 'myImg'\n```\n\n### 检测人脸\n\n检测图像中的所有人脸。返回 **Array\u003C[FaceDetection](#interface-face-detection)>**：\n\n``` javascript\nconst detections = await faceapi.detectAllFaces(input)\n```\n\n检测图像中置信度最高的单张人脸。返回 **[FaceDetection](#interface-face-detection) | undefined**：\n\n``` javascript\nconst detection = await faceapi.detectSingleFace(input)\n```\n\n默认情况下，`detectAllFaces` 和 `detectSingleFace` 使用 SSD MobileNet V1 人脸检测器。你可以通过传递相应的选项对象来指定使用的检测器：\n\n``` javascript\nconst detections1 = await faceapi.detectAllFaces(input, new faceapi.SsdMobilenetv1Options())\nconst detections2 = await faceapi.detectAllFaces(input, new faceapi.TinyFaceDetectorOptions())\n```\n\n你可以根据 [这里](#getting-started-face-detection-options) 所示调整每个检测器的选项。\n\n### 检测 68 个面部关键点\n\n**在完成人脸检测后，我们还可以为每个检测到的人脸预测面部关键点，具体如下：**\n\n检测图像中的所有人脸，并为每个检测到的人脸计算 68 个关键点。返回 
**Array\u003C[WithFaceLandmarks\u003CWithFaceDetection\u003C{}>>](#getting-started-utility-classes)>**：\n\n``` javascript\nconst detectionsWithLandmarks = await faceapi.detectAllFaces(input).withFaceLandmarks()\n```\n\n检测图像中置信度最高的单张人脸，并为其计算 68 个面部关键点。返回 **[WithFaceLandmarks\u003CWithFaceDetection\u003C{}>>](#getting-started-utility-classes) | undefined**：\n\n``` javascript\nconst detectionWithLandmarks = await faceapi.detectSingleFace(input).withFaceLandmarks()\n```\n\n你也可以选择使用小型模型而不是默认模型：\n\n``` javascript\nconst useTinyModel = true\nconst detectionsWithLandmarks = await faceapi.detectAllFaces(input).withFaceLandmarks(useTinyModel)\n```\n\n### 计算人脸描述符\n\n**在完成人脸检测和面部关键点预测后，可以为每个人脸计算描述符，具体如下：**\n\n检测图像中的所有人脸，并为每个检测到的人脸计算 68 个关键点。返回 **Array\u003C[WithFaceDescriptor\u003CWithFaceLandmarks\u003CWithFaceDetection\u003C{}>>>](#getting-started-utility-classes)>**：\n\n``` javascript\nconst results = await faceapi.detectAllFaces(input).withFaceLandmarks().withFaceDescriptors()\n```\n\n检测图像中置信度最高的单张人脸，并为其计算 68 个关键点和人脸描述符。返回 **[WithFaceDescriptor\u003CWithFaceLandmarks\u003CWithFaceDetection\u003C{}>>>](#getting-started-utility-classes) | undefined**：\n\n``` javascript\nconst result = await faceapi.detectSingleFace(input).withFaceLandmarks().withFaceDescriptor()\n```\n\n### 识别面部表情\n\n**可以对检测到的人脸进行面部表情识别，具体如下：**\n\n检测图像中的所有人脸，并识别每个人脸的面部表情。返回 **Array\u003C[WithFaceExpressions\u003CWithFaceLandmarks\u003CWithFaceDetection\u003C{}>>>](#getting-started-utility-classes)>**：\n\n``` javascript\nconst detectionsWithExpressions = await faceapi.detectAllFaces(input).withFaceLandmarks().withFaceExpressions()\n```\n\n检测图像中置信度最高的单张人脸，并识别该人脸的面部表情。返回 **[WithFaceExpressions\u003CWithFaceLandmarks\u003CWithFaceDetection\u003C{}>>>](#getting-started-utility-classes) | undefined**：\n\n``` javascript\nconst detectionWithExpressions = await faceapi.detectSingleFace(input).withFaceLandmarks().withFaceExpressions()\n```\n\n**你也可以跳过 
`.withFaceLandmarks()`，这样会跳过人脸对齐步骤（准确性可能会降低）：**\n\n检测所有人脸而不进行人脸对齐，并识别每个人脸的面部表情。返回 **Array\u003C[WithFaceExpressions\u003CWithFaceDetection\u003C{}>>](#getting-started-utility-classes)>**：\n\n``` javascript\nconst detectionsWithExpressions = await faceapi.detectAllFaces(input).withFaceExpressions()\n```\n\n检测置信度最高的单张人脸，不进行人脸对齐，并识别该人脸的面部表情。返回 **[WithFaceExpressions\u003CWithFaceDetection\u003C{}>>](#getting-started-utility-classes) | undefined**：\n\n``` javascript\nconst detectionWithExpressions = await faceapi.detectSingleFace(input).withFaceExpressions()\n```\n\n### 年龄估计与性别识别\n\n**可以从检测到的人脸中进行年龄估计和性别识别，具体方法如下：**\n\n检测图像中的所有人脸 + 估计每个脸的年龄并识别性别。返回 **Array\u003C[WithAge\u003CWithGender\u003CWithFaceLandmarks\u003CWithFaceDetection\u003C{}>>>>](#getting-started-utility-classes)>**：\n\n``` javascript\nconst detectionsWithAgeAndGender = await faceapi.detectAllFaces(input).withFaceLandmarks().withAgeAndGender()\n```\n\n检测图像中置信度最高的那张人脸 + 估计该脸的年龄并识别性别。返回 **[WithAge\u003CWithGender\u003CWithFaceLandmarks\u003CWithFaceDetection\u003C{}>>>>](#getting-started-utility-classes) | undefined**：\n\n``` javascript\nconst detectionWithAgeAndGender = await faceapi.detectSingleFace(input).withFaceLandmarks().withAgeAndGender()\n```\n\n**你也可以跳过 .withFaceLandmarks()，这样就会跳过人脸对齐步骤（准确性会稍差）：**\n\n检测所有未进行人脸对齐的人脸 + 估计每个脸的年龄并识别性别。返回 **Array\u003C[WithAge\u003CWithGender\u003CWithFaceDetection\u003C{}>>>](#getting-started-utility-classes)>**：\n\n``` javascript\nconst detectionsWithAgeAndGender = await faceapi.detectAllFaces(input).withAgeAndGender()\n```\n\n检测未进行人脸对齐的置信度最高的那张人脸 + 估计该脸的年龄并识别性别。返回 **[WithAge\u003CWithGender\u003CWithFaceDetection\u003C{}>>>](#getting-started-utility-classes) | undefined**：\n\n``` javascript\nconst detectionWithAgeAndGender = await faceapi.detectSingleFace(input).withAgeAndGender()\n```\n\n### 任务组合\n\n**任务可以按以下方式组合：**\n\n``` javascript\n\u002F\u002F 所有人脸\nawait faceapi.detectAllFaces(input)\nawait faceapi.detectAllFaces(input).withFaceExpressions()\nawait 
faceapi.detectAllFaces(input).withFaceLandmarks()\nawait faceapi.detectAllFaces(input).withFaceLandmarks().withFaceExpressions()\nawait faceapi.detectAllFaces(input).withFaceLandmarks().withFaceExpressions().withFaceDescriptors()\nawait faceapi.detectAllFaces(input).withFaceLandmarks().withAgeAndGender().withFaceDescriptors()\nawait faceapi.detectAllFaces(input).withFaceLandmarks().withFaceExpressions().withAgeAndGender().withFaceDescriptors()\n\n\u002F\u002F 单张人脸\nawait faceapi.detectSingleFace(input)\nawait faceapi.detectSingleFace(input).withFaceExpressions()\nawait faceapi.detectSingleFace(input).withFaceLandmarks()\nawait faceapi.detectSingleFace(input).withFaceLandmarks().withFaceExpressions()\nawait faceapi.detectSingleFace(input).withFaceLandmarks().withFaceExpressions().withFaceDescriptor()\nawait faceapi.detectSingleFace(input).withFaceLandmarks().withAgeAndGender().withFaceDescriptor()\nawait faceapi.detectSingleFace(input).withFaceLandmarks().withFaceExpressions().withAgeAndGender().withFaceDescriptor()\n```\n\n### 通过描述符匹配进行人脸识别\n\n要进行人脸识别，可以使用 faceapi.FaceMatcher 将参考人脸描述符与查询人脸描述符进行比较。\n\n首先，我们用参考数据初始化 FaceMatcher。例如，我们可以简单地在 **referenceImage** 中检测人脸，并将检测到的人脸描述符与后续图像中的人脸进行匹配：\n\n``` javascript\nconst results = await faceapi\n  .detectAllFaces(referenceImage)\n  .withFaceLandmarks()\n  .withFaceDescriptors()\n\nif (!results.length) {\n  return\n}\n\n\u002F\u002F 使用参考图像的检测结果自动分配标签，创建 FaceMatcher\nconst faceMatcher = new faceapi.FaceMatcher(results)\n```\n\n现在我们可以识别 **queryImage1** 中出现的人脸：\n\n``` javascript\nconst singleResult = await faceapi\n  .detectSingleFace(queryImage1)\n  .withFaceLandmarks()\n  .withFaceDescriptor()\n\nif (singleResult) {\n  const bestMatch = faceMatcher.findBestMatch(singleResult.descriptor)\n  console.log(bestMatch.toString())\n}\n```\n\n或者我们可以识别 **queryImage2** 中出现的所有人脸：\n\n``` javascript\nconst results = await faceapi\n  .detectAllFaces(queryImage2)\n  .withFaceLandmarks()\n  .withFaceDescriptors()\n\nresults.forEach(fd => {\n  
const bestMatch = faceMatcher.findBestMatch(fd.descriptor)\n  console.log(bestMatch.toString())\n})\n```\n\n你也可以按照以下方式创建带标签的参考描述符：\n\n``` javascript\nconst labeledDescriptors = [\n  new faceapi.LabeledFaceDescriptors(\n    'obama',\n    [descriptorObama1, descriptorObama2]\n  ),\n  new faceapi.LabeledFaceDescriptors(\n    'trump',\n    [descriptorTrump]\n  )\n]\n\nconst faceMatcher = new faceapi.FaceMatcher(labeledDescriptors)\n```\n\n\u003Ca name=\"getting-started-displaying-detection-results\">\u003C\u002Fa>\n\n## 显示检测结果\n\n准备叠加画布：\n\n``` javascript\nconst displaySize = { width: input.width, height: input.height }\n\u002F\u002F 将叠加画布调整为输入图像的尺寸\nconst canvas = document.getElementById('overlay')\nfaceapi.matchDimensions(canvas, displaySize)\n```\n\nface-api.js 预定义了一些高级绘图函数，你可以直接使用：\n\n``` javascript\n\u002F* 显示检测到的人脸边界框 *\u002F\nconst detections = await faceapi.detectAllFaces(input)\n\u002F\u002F 如果显示的图像尺寸与原始图像不同，则调整检测框的大小\nconst resizedDetections = faceapi.resizeResults(detections, displaySize)\n\u002F\u002F 将检测结果绘制到画布上\nfaceapi.draw.drawDetections(canvas, resizedDetections)\n\n\u002F* 显示人脸关键点 *\u002F\nconst detectionsWithLandmarks = await faceapi\n  .detectAllFaces(input)\n  .withFaceLandmarks()\n\u002F\u002F 如果显示的图像尺寸与原始图像不同，则调整检测框和关键点的大小\nconst resizedResults = faceapi.resizeResults(detectionsWithLandmarks, displaySize)\n\u002F\u002F 将检测结果绘制到画布上\nfaceapi.draw.drawDetections(canvas, resizedResults)\n\u002F\u002F 将关键点绘制到画布上\nfaceapi.draw.drawFaceLandmarks(canvas, resizedResults)\n\n\n\u002F* 显示面部表情结果 *\u002F\nconst detectionsWithExpressions = await faceapi\n  .detectAllFaces(input)\n  .withFaceLandmarks()\n  .withFaceExpressions()\n\u002F\u002F 如果显示的图像尺寸与原始图像不同，则调整检测框和关键点的大小\nconst resizedResults = faceapi.resizeResults(detectionsWithExpressions, displaySize)\n\u002F\u002F 将检测结果绘制到画布上\nfaceapi.draw.drawDetections(canvas, resizedResults)\n\u002F\u002F 在画布上绘制一个显示最低置信度面部表情的文本框\nconst minProbability = 0.05\nfaceapi.draw.drawFaceExpressions(canvas, resizedResults, 
minProbability)\n```\n\n你还可以绘制带有自定义文本的矩形框（[DrawBox](https:\u002F\u002Fgithub.com\u002Fjustadudewhohacks\u002Ftfjs-image-recognition-base\u002Fblob\u002Fmaster\u002Fsrc\u002Fdraw\u002FDrawBox.ts)）：\n\n``` javascript\nconst box = { x: 50, y: 50, width: 100, height: 100 }\n\u002F\u002F 参见下方的 DrawBoxOptions\nconst drawOptions = {\n  label: '你好，我是一个矩形框！',\n  lineWidth: 2\n}\nconst drawBox = new faceapi.draw.DrawBox(box, drawOptions)\ndrawBox.draw(document.getElementById('myCanvas'))\n```\n\nDrawBox 绘图选项：\n\n``` javascript\nexport interface IDrawBoxOptions {\n  boxColor?: string\n  lineWidth?: number\n  drawLabelOptions?: IDrawTextFieldOptions\n  label?: string\n}\n```\n\n最后，你还可以绘制自定义文本框（[DrawTextField](https:\u002F\u002Fgithub.com\u002Fjustadudewhohacks\u002Ftfjs-image-recognition-base\u002Fblob\u002Fmaster\u002Fsrc\u002Fdraw\u002FDrawTextField.ts)）：\n\n``` javascript\nconst text = [\n  '这是一行文字！',\n  '这是另一行文字！'\n]\nconst anchor = { x: 200, y: 200 }\n\u002F\u002F 参见下方的 DrawTextField\nconst drawOptions = {\n  anchorPosition: 'TOP_LEFT',\n  backgroundColor: 'rgba(0, 0, 0, 0.5)'\n}\nconst drawBox = new faceapi.draw.DrawTextField(text, anchor, drawOptions)\ndrawBox.draw(document.getElementById('myCanvas'))\n```\n\nDrawTextField 绘图选项：\n\n``` javascript\nexport interface IDrawTextFieldOptions {\n  anchorPosition?: AnchorPosition\n  backgroundColor?: string\n  fontColor?: string\n  fontSize?: number\n  fontStyle?: string\n  padding?: number\n}\n\nexport enum AnchorPosition {\n  TOP_LEFT = 'TOP_LEFT',\n  TOP_RIGHT = 'TOP_RIGHT',\n  BOTTOM_LEFT = 'BOTTOM_LEFT',\n  BOTTOM_RIGHT = 'BOTTOM_RIGHT'\n}\n```\n\n\u003Ca name=\"getting-started-face-detection-options\">\u003C\u002Fa>\n\n## 人脸检测选项\n\n### SsdMobilenetv1Options\n\n``` javascript\nexport interface ISsdMobilenetv1Options {\n  \u002F\u002F 最小置信度阈值\n  \u002F\u002F 默认：0.5\n  minConfidence?: number\n\n  \u002F\u002F 返回的最大人脸数量\n  \u002F\u002F 默认：100\n  maxResults?: number\n}\n\n\u002F\u002F 示例\nconst options = new 
faceapi.SsdMobilenetv1Options({ minConfidence: 0.8 })\n```\n\n### TinyFaceDetectorOptions\n\n``` javascript\nexport interface ITinyFaceDetectorOptions {\n  \u002F\u002F 图像处理时的尺寸，越小速度越快，\n  \u002F\u002F 但对较小人脸的检测精度较低，必须能被32整除，\n  \u002F\u002F 常用尺寸有128、160、224、320、416、512、608等。\n  \u002F\u002F 对于通过摄像头进行人脸跟踪，建议使用较小尺寸，\n  \u002F\u002F 比如128或160；而检测较小人脸时则应使用较大尺寸，\n  \u002F\u002F 比如512或608。\n  \u002F\u002F 默认：416\n  inputSize?: number\n\n  \u002F\u002F 最小置信度阈值\n  \u002F\u002F 默认：0.5\n  scoreThreshold?: number\n}\n\n\u002F\u002F 示例\nconst options = new faceapi.TinyFaceDetectorOptions({ inputSize: 320 })\n```\n\n\u003Ca name=\"getting-started-utility-classes\">\u003C\u002Fa>\n\n## 工具类\n\n### IBox\n\n``` javascript\nexport interface IBox {\n  x: number\n  y: number\n  width: number\n  height: number\n}\n```\n\n### IFaceDetection\n\n``` javascript\nexport interface IFaceDetection {\n  score: number\n  box: Box\n}\n```\n\n### IFaceLandmarks\n\n``` javascript\nexport interface IFaceLandmarks {\n  positions: Point[]\n  shift: Point\n}\n```\n\n### WithFaceDetection\n\n``` javascript\nexport type WithFaceDetection\u003CTSource> = TSource & {\n  detection: FaceDetection\n}\n```\n\n### WithFaceLandmarks\n\n``` javascript\nexport type WithFaceLandmarks\u003CTSource> = TSource & {\n  unshiftedLandmarks: FaceLandmarks\n  landmarks: FaceLandmarks\n  alignedRect: FaceDetection\n}\n```\n\n### WithFaceDescriptor\n\n``` javascript\nexport type WithFaceDescriptor\u003CTSource> = TSource & {\n  descriptor: Float32Array\n}\n```\n\n### WithFaceExpressions\n\n``` javascript\nexport type WithFaceExpressions\u003CTSource> = TSource & {\n  expressions: FaceExpressions\n}\n```\n\n### WithAge\n\n``` javascript\nexport type WithAge\u003CTSource> = TSource & {\n  age: number\n}\n```\n\n### WithGender\n\n``` javascript\nexport type WithGender\u003CTSource> = TSource & {\n  gender: Gender\n  genderProbability: number\n}\n\nexport enum Gender {\n  FEMALE = 'female',\n  MALE = 'male'\n}\n```\n\n\u003Ca 
name=\"getting-started-other-useful-utility\">\u003C\u002Fa>\n\n## 其他实用工具\n\n### 使用低级 API\n\n除了使用高级 API 外，你还可以直接调用各个神经网络的前向方法：\n\n``` javascript\nconst detections1 = await faceapi.ssdMobilenetv1(input, options)\nconst detections2 = await faceapi.tinyFaceDetector(input, options)\nconst landmarks1 = await faceapi.detectFaceLandmarks(faceImage)\nconst landmarks2 = await faceapi.detectFaceLandmarksTiny(faceImage)\nconst descriptor = await faceapi.computeFaceDescriptor(alignedFaceImage)\n```\n\n### 提取图像区域的画布\n\n``` javascript\nconst regionsToExtract = [\n  new faceapi.Rect(0, 0, 100, 100)\n]\n\u002F\u002F extractFaces 实际上用于从边界框中提取人脸区域，\n\u002F\u002F 但你也可以用它来提取任何其他区域。\nconst canvases = await faceapi.extractFaces(input, regionsToExtract)\n```\n\n### 欧几里得距离\n\n``` javascript\n\u002F\u002F 用于计算两个面部描述符之间的欧几里得距离\nconst dist = faceapi.euclideanDistance([0, 0], [0, 10])\nconsole.log(dist) \u002F\u002F 10\n```\n\n### 获取人脸关键点和轮廓\n\n``` javascript\nconst landmarkPositions = landmarks.positions\n\n\u002F\u002F 或者获取单个轮廓的关键点位置，\n\u002F\u002F 仅适用于68点人脸关键点（FaceLandmarks68）\nconst 下颌轮廓 = landmarks.getJawOutline()\nconst 鼻子 = landmarks.getNose()\nconst 嘴巴 = landmarks.getMouth()\nconst 左眼 = landmarks.getLeftEye()\nconst 右眼 = landmarks.getRightEye()\nconst 左眉 = landmarks.getLeftEyeBrow()\nconst 右眉 = landmarks.getRightEyeBrow()\n```\n\n### 从 URL 获取并显示图片\n\n``` html\n\u003Cimg id=\"myImg\" src=\"\">\n```\n\n``` javascript\nconst image = await faceapi.fetchImage('\u002Fimages\u002Fexample.png')\n\nconsole.log(image instanceof HTMLImageElement) \u002F\u002F 输出: true\n\n\u002F\u002F 显示获取到的图片内容\nconst myImg = document.getElementById('myImg')\nmyImg.src = image.src\n```\n\n### 获取 JSON 数据\n\n``` javascript\nconst json = await faceapi.fetchJson('\u002Ffiles\u002Fexample.json')\n```\n\n### 创建图片选择器\n\n``` html\n\u003Cimg id=\"myImg\" src=\"\">\n\u003Cinput id=\"myFileUpload\" type=\"file\" onchange=\"uploadImage()\" accept=\".jpg, .jpeg, .png\">\n```\n\n``` javascript\nasync function uploadImage() 
{\n  const imgFile = document.getElementById('myFileUpload').files[0]\n  \u002F\u002F 从 Blob 对象创建 HTMLImageElement\n  const img = await faceapi.bufferToImage(imgFile)\n  document.getElementById('myImg').src = img.src\n}\n```\n\n### 从图片或视频元素创建 Canvas 元素\n\n``` html\n\u003Cimg id=\"myImg\" src=\"images\u002Fexample.png\" \u002F>\n\u003Cvideo id=\"myVideo\" src=\"media\u002Fexample.mp4\" \u002F>\n```\n\n``` javascript\nconst canvas1 = faceapi.createCanvasFromMedia(document.getElementById('myImg'))\nconst canvas2 = faceapi.createCanvasFromMedia(document.getElementById('myVideo'))\n```\n\n\u003Ca name=\"models\">\u003C\u002Fa>\n\n# 可用模型\n\n\u003Ca name=\"models-face-detection\">\u003C\u002Fa>\n\n## 人脸检测模型\n\n### SSD MobileNet V1\n\n本项目实现了一个基于 MobileNetV1 的 SSD（单次多框检测器）用于人脸检测。该神经网络会计算图像中每张人脸的位置，并返回包含置信度分数的边界框。此检测器旨在高精度地定位人脸边界框，而非追求极低的推理时间。量化后的模型大小约为 5.4 MB（**ssd_mobilenetv1_model**）。\n\n该人脸检测模型在 [WIDERFACE 数据集](http:\u002F\u002Fmmlab.ie.cuhk.edu.hk\u002Fprojects\u002FWIDERFace\u002F) 上训练，权重由 [yeephycho](https:\u002F\u002Fgithub.com\u002Fyeephycho) 在 [此仓库](https:\u002F\u002Fgithub.com\u002Fyeephycho\u002Ftensorflow-face-detection) 中提供。\n\n### Tiny 人脸检测器\n\nTiny 人脸检测器是一种性能优异、实时性很强的人脸检测器，相比 SSD MobileNet V1 检测器速度更快、体积更小、资源消耗更低，但对小人脸的检测效果稍逊一筹。该模型非常适合移动设备和资源受限的客户端使用，因此在这些场景下应优先选择它。量化后的模型大小仅为 190 KB（**tiny_face_detector_model**）。\n\n该检测器基于一个约 1.4 万张带边界框标注的自定义数据集进行训练。此外，该模型被优化为预测能够完全覆盖面部特征点的边界框，因此与后续的人脸关键点检测结合使用时，通常能获得比 SSD MobileNet V1 更好的结果。\n\n该模型本质上是 Tiny YOLO V2 的更小型版本，用深度可分离卷积替换了 YOLO 中的常规卷积。YOLO 是全卷积网络，因此可以轻松适应不同输入图像尺寸，在准确性和性能（推理时间）之间进行权衡。\n\n\u003Ca name=\"models-face-landmark-detection\">\u003C\u002Fa>\n\n## 68 点人脸关键点检测模型\n\n本包实现了一种轻量级、快速且准确的 68 点人脸关键点检测器。默认模型大小仅为 350 KB（**face_landmark_68_model**），而小型模型仅为 80 KB（**face_landmark_68_tiny_model**）。这两个模型均采用了深度可分离卷积和密集连接块的设计思想。它们是在包含约 3.5 万张标注了 68 个关键点的人脸图像的数据集上训练而成的。\n\n\u003Ca name=\"models-face-recognition\">\u003C\u002Fa>\n\n## 人脸识别模型\n\n对于人脸识别任务，本项目实现了一个类似 ResNet-34 的架构，可以从任意一张人脸图像中计算出一个包含 128 
个值的特征向量（即人脸描述符），用于表征一个人的面部特征。该模型并不局限于训练数据集中的人脸，因此可用于识别任何人，例如您自己。通过比较两张人脸的描述符，例如计算欧几里得距离或其他分类器，即可判断两人的相似程度。\n\n该神经网络等同于 [face-recognition.js](https:\u002F\u002Fgithub.com\u002Fjustadudewhohacks\u002Fface-recognition.js) 中使用的 **FaceRecognizerNet**，以及 [dlib](https:\u002F\u002Fgithub.com\u002Fdavisking\u002Fdlib\u002Fblob\u002Fmaster\u002Fexamples\u002Fdnn_face_recognition_ex.cpp) 人脸识别示例中所用的网络。权重由 [davisking](https:\u002F\u002Fgithub.com\u002Fdavisking) 训练，该模型在 LFW（Labeled Faces in the Wild）人脸识别基准测试中的预测准确率达到 99.38%。\n\n量化后的模型大小约为 6.2 MB（**face_recognition_model**）。\n\n\u003Ca name=\"models-face-expression-recognition\">\u003C\u002Fa>\n\n## 人脸表情识别模型\n\n该表情识别模型轻量、快速，且具有合理的准确性。模型大小约为 310 KB，采用深度可分离卷积和密集连接块结构。它基于公开数据集中的多种图像以及从网上抓取的图像进行训练。需要注意的是，佩戴眼镜可能会影响预测结果的准确性。\n\n\u003Ca name=\"models-age-and-gender-recognition\">\u003C\u002Fa>\n\n## 年龄和性别识别模型\n\n年龄和性别识别模型是一个多任务网络，包含特征提取层、年龄回归层和性别分类器。该模型大小约为 420 KB，其特征提取部分采用了与 Xception 极为相似的小型架构。\n\n该模型分别在以下数据库上进行了训练和测试，每次采用 80\u002F20 的训练\u002F测试划分：UTK、FGNET、Chalearn、Wiki、IMDB*、CACD*、MegaAge、MegaAge-Asian。其中“*”表示这些数据库经过算法清理，因为原始数据集噪声较大。\n\n### 总体测试结果\n\n总平均年龄误差（MAE）：**4.54**\n\n性别识别准确率：**95%**\n\n### 各数据库的测试结果\n\n“-”表示这些数据库中没有性别标签。\n\n数据库        | UTK    | FGNET | Chalearn | Wiki | IMDB* | CACD* | MegaAge | MegaAge-Asian |\n----------------|-------:|------:|---------:|-----:|------:|------:|--------:|--------------:|\nMAE             | 5.25   | 4.23  | 6.24     | 6.54 | 3.63  | 3.20  | 6.23    | 4.21          |\n性别准确率    | 0.93   | -     | 0.94     | 0.95 | -     | 0.97  | -       | -             |\n\n### 不同年龄类别组别的测试结果\n\n年龄范围       | 0 - 3  | 4 - 8 | 9 - 18 | 19 - 28 | 29 - 40 | 41 - 60 | 60 - 80 | 80+     |\n----------------|-------:|------:|-------:|--------:|--------:|--------:|--------:|--------:|\nMAE             | 1.52   | 3.06  | 4.82   | 4.99    | 5.43    | 4.94    | 6.17    | 9.91    |\n性别准确率    | 0.69   | 0.80  | 0.88   | 0.96    | 0.97    | 0.97    | 0.96    | 0.9     |","# face-api.js 快速上手指南\n\nface-api.js 是一个基于 
tensorflow.js 构建的 JavaScript 人脸识别 API，适用于浏览器和 Node.js 环境。它支持人脸检测、关键点定位、人脸识别、表情识别以及年龄和性别预测。\n\n## 环境准备\n\n### 系统要求\n- **浏览器端**：现代浏览器（Chrome, Firefox, Edge 等），需支持 WebGL。\n- **Node.js 端**：Node.js v12+，推荐 v14 或更高版本。\n- **依赖项**：\n  - 浏览器端：无需额外系统依赖。\n  - Node.js 端：需安装 `canvas` 包以模拟浏览器图像对象；强烈建议安装 `@tensorflow\u002Ftfjs-node` 以提升性能（需要 Python 环境用于编译原生模块）。\n\n### 模型文件\n使用前需下载预训练模型文件（包含 `.json` 清单文件和 `.weights` 权重分片文件）。你可以从官方示例仓库的 `models` 目录获取，或自行训练。\n\n## 安装步骤\n\n### 1. 初始化项目\n```bash\nnpm init -y\n```\n\n### 2. 安装核心依赖\n```bash\nnpm i face-api.js\n```\n\n### 3. Node.js 环境额外依赖（仅服务端使用）\n为了在 Node.js 中运行，需要 polyfill 浏览器对象并加速计算：\n```bash\n# 推荐使用淘宝镜像加速安装\nnpm i canvas @tensorflow\u002Ftfjs-node --registry=https:\u002F\u002Fregistry.npmmirror.com\n```\n\n## 基本使用\n\n以下示例展示如何在浏览器中最简单地加载模型并检测人脸。\n\n### 1. 引入库\n如果你通过 npm 安装并使用打包工具（如 Webpack\u002FVite）：\n```javascript\nimport * as faceapi from 'face-api.js';\n```\n或者直接在 HTML 中通过 CDN 引入：\n```html\n\u003Cscript src=\"https:\u002F\u002Fcdn.jsdelivr.net\u002Fnpm\u002Fface-api.js@0.22.2\u002Fdist\u002Fface-api.min.js\">\u003C\u002Fscript>\n```\n\n### 2. 加载模型\n将下载的模型文件放置在项目的 `public\u002Fmodels` 目录下。\n\n```javascript\n\u002F\u002F 加载人脸检测模型（其他模型同理）\nawait faceapi.nets.ssdMobilenetv1.loadFromUri('\u002Fmodels');\n\u002F\u002F await faceapi.nets.faceLandmark68Net.loadFromUri('\u002Fmodels');\n\u002F\u002F await faceapi.nets.faceRecognitionNet.loadFromUri('\u002Fmodels');\n```\n\n*注：在 Node.js 环境中，请使用 `loadFromDisk('.\u002Fmodels')`。*\n\n### 3. 执行检测\n假设你有一个 ID 为 `myImg` 的图片元素：\n\n```html\n\u003Cimg id=\"myImg\" src=\"images\u002Fexample.png\" \u002F>\n```\n\n执行以下代码进行检测：\n\n```javascript\nconst input = document.getElementById('myImg');\n\n\u002F\u002F 检测所有人脸\nconst detections = await faceapi.detectAllFaces(input);\n\n\u002F\u002F 检测置信度最高的一张人脸\nconst detection = await faceapi.detectSingleFace(input);\n\nconsole.log(detections);\n```\n\n### 4. 
进阶：链式调用（检测 + 关键点 + 描述子）\nface-api.js 支持流畅的链式 API，可一次性获取人脸位置、68 个关键点坐标及人脸特征向量：\n\n```javascript\nconst results = await faceapi\n  .detectAllFaces(input)\n  .withFaceLandmarks()\n  .withFaceDescriptors();\n\nconsole.log(results);\n```\n\n### Node.js 特殊配置\n如果在 Node.js 环境下运行，必须在代码开头进行环境补丁（Monkey Patch）：\n\n```javascript\nimport '@tensorflow\u002Ftfjs-node';\nimport * as canvas from 'canvas';\nimport * as faceapi from 'face-api.js';\n\n\u002F\u002F 补丁环境，提供 HTMLCanvasElement 等实现\nconst { Canvas, Image, ImageData } = canvas;\nfaceapi.env.monkeyPatch({ Canvas, Image, ImageData });\n\n\u002F\u002F 之后即可正常使用 loadFromDisk 和检测 API\n```","某在线教育平台希望在其直播课堂中实时分析学生的专注度与情绪状态，以便教师及时调整授课节奏。\n\n### 没有 face-api.js 时\n- 开发者必须自行搭建复杂的 Python 后端服务来处理视频流，导致前端架构臃肿且延迟高。\n- 引入重型深度学习框架（如 TensorFlow 或 PyTorch）需要昂贵的 GPU 服务器支持，大幅推高运营成本。\n- 实现人脸关键点定位和表情识别需从零训练模型或集成多个独立库，开发周期长达数周。\n- 用户隐私数据需上传至云端处理，面临严格的数据合规风险及用户信任危机。\n- 跨浏览器兼容性差，移动端设备往往因算力不足而无法流畅运行检测功能。\n\n### 使用 face-api.js 后\n- 直接在浏览器端利用 WebGL 加速运行模型，无需后端转发视频流，实现了毫秒级实时反馈。\n- 基于轻量级的 tensorflow.js 核心，普通用户的笔记本电脑或手机即可流畅运行，彻底省去 GPU 服务器开支。\n- 调用封装好的人脸检测、 landmarks 定位及表情识别 API，仅需几十行代码即可在两天内完成功能上线。\n- 所有图像数据仅在本地设备处理，不离开用户浏览器，完美解决隐私泄露顾虑并符合 GDPR 规范。\n- 依托成熟的 JavaScript 生态，天然兼容 Chrome、Firefox 及各类移动浏览器，无需额外适配工作。\n\nface-api.js 将原本依赖昂贵后端算力的人脸分析能力下沉至浏览器端，以极低的成本实现了高性能、高隐私保护的实时交互体验。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjustadudewhohacks_face-api.js_4eb37a78.gif","justadudewhohacks","Vincent Mühler","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002Fjustadudewhohacks_20de9d46.jpg","Just hacking stuff.",null,"Berlin, Germany","muehler.v@gmail.com","https:\u002F\u002Fmedium.com\u002F@muehler.v","https:\u002F\u002Fgithub.com\u002Fjustadudewhohacks",[82,86],{"name":83,"color":84,"percentage":85},"TypeScript","#3178c6",99.1,{"name":87,"color":88,"percentage":89},"JavaScript","#f1e05a",0.9,17821,3896,"2026-04-13T15:38:17","MIT","Linux, macOS, Windows","非必需。在 Node.js 环境下可选安装 @tensorflow\u002Ftfjs-node 以利用本地 TensorFlow C++ 库加速（隐含支持 CPU 及可能的 GPU 后端，但 
README 未明确指定 CUDA 版本或特定显卡型号要求）；浏览器端依赖 WebGL。","未说明",{"notes":98,"python":99,"dependencies":100},"该工具主要面向浏览器和 Node.js 环境。在 Node.js 中运行时，必须安装 'canvas' 包来模拟浏览器图像元素（HTMLImageElement, HTMLCanvasElement）。若需高性能，建议安装 '@tensorflow\u002Ftfjs-node'（安装此包可能需要 Python 环境来编译原生绑定，但运行时本身是 JS）。模型文件（manifest.json 和权重分片）需单独下载并放置在可访问的目录中。","不需要 Python (基于 JavaScript\u002FNode.js)",[64,101,102,103],"@tensorflow\u002Ftfjs-core","canvas (Node.js 环境必选)","@tensorflow\u002Ftfjs-node (Node.js 环境推荐)",[15,105,14],"视频",[107,108,109,110,111,112,113,114,115,116,117,118],"face-recognition","javascript","tensorflow","tfjs","face-detection","face-landmarks","tensorflowjs","js","nodejs","age-estimation","gender-recognition","emotion-recognition","2026-03-27T02:49:30.150509","2026-04-14T12:30:14.115627",[122,127,132,137,142,147],{"id":123,"question_zh":124,"answer_zh":125,"source_url":126},32894,"加载模型时出现张量形状错误（例如：tensor should have 512 values but has 93）如何解决？","这通常是因为权重文件缺少扩展名导致加载失败。解决方法是：\n1. 给没有扩展名的分片文件（如 age_gender_model-shard1）添加 '.shard' 或 '.bin' 扩展名。\n2. 
打开对应的 manifest.json 文件（如 age_gender_model-weights_manifest.json），将 \"paths\" 数组中的文件名修改为包含新扩展名的名称。\n例如：将 \"age_gender_model-shard1\" 改为 \"age_gender_model-shard1.shard\"。注意部分 manifest 文件可能包含多个路径，需全部修改。","https:\u002F\u002Fgithub.com\u002Fjustadudewhohacks\u002Fface-api.js\u002Fissues\u002F131",{"id":128,"question_zh":129,"answer_zh":130,"source_url":131},32895,"遇到 'Nt.makeTensor is not a function' 或 'Lt.makeTensor is not a function' 错误怎么办？","这是因为 TensorFlow.js 未正确初始化。需要在导入 face-api.js 之前或同时，显式导入 TensorFlow.js 的后端包。\n如果是 Node.js 环境，请添加：\nimport '@tensorflow\u002Ftfjs-node';\n如果是浏览器环境，确保已正确引入 @tensorflow\u002Ftfjs 库。检查 package.json 中 @tensorflow\u002Ftfjs 的版本是否与 face-api.js 兼容。","https:\u002F\u002Fgithub.com\u002Fjustadudewhohacks\u002Fface-api.js\u002Fissues\u002F455",{"id":133,"question_zh":134,"answer_zh":135,"source_url":136},32896,"在 Node.js 服务器启动时加载大量图片（如 10000 张）速度太慢，如何优化？","face-api.js 基于较旧的 ML 模型，单张图片处理耗时较长是其固有局限。对于大规模图像处理需求，建议考虑迁移到性能更优的新库，例如 'human' (https:\u002F\u002Fgithub.com\u002Fvladmandic\u002Fhuman)，该库在速度和功能上已超越 face-api.js。若必须使用当前库，可尝试并发处理（代码中已使用 Promise.all），但受限于模型本身，提升空间有限。","https:\u002F\u002Fgithub.com\u002Fjustadudewhohacks\u002Fface-api.js\u002Fissues\u002F817",{"id":138,"question_zh":139,"answer_zh":140,"source_url":141},32897,"如何在 AWS Lambda 等无头环境中运行人脸检测（不使用 node-canvas）？","在无法使用 node-canvas 的环境（如旧版 Node 的 AWS Lambda）中，可以直接将图像数据转换为 Tensor4D 传入检测器。\n步骤如下：\n1. 使用 axios 等工具获取图像的 ArrayBuffer。\n2. 将 buffer 转换为 Uint8Array 并进一步处理为符合模型输入的 Float32Array（通常需要解码 JPEG\u002FPNG 并归一化像素值到 0-1 之间，这一步在无 canvas 环境下较复杂，可能需要额外的纯 JS 图像解码库）。\n3. 
使用 tf.tensor4d 创建张量，形状通常为 [1, height, width, 3]。\n注意：直接传递原始 buffer 会导致形状不匹配错误，必须先解码为像素数据。如果可能，建议使用支持 Canvas 的 Lambda 容器镜像部署。","https:\u002F\u002Fgithub.com\u002Fjustadudewhohacks\u002Fface-api.js\u002Fissues\u002F144",{"id":143,"question_zh":144,"answer_zh":145,"source_url":146},32898,"为什么调用 faceapi.draw.drawAgeAndGender 时报错 'is not a function'？","face-api.js 的核心库中可能未直接导出 drawAgeAndGender 绘图函数，或者该功能需要额外引入特定的模块\u002F版本。确认你使用的 face-api.js 版本是否支持年龄和性别检测的绘图功能。如果只引入了基础包，可能需要手动绘制文本标签，或者检查是否需要单独导入包含绘图工具的扩展模块。部分用户反馈需确保加载了 age_gender_model 权重后才能进行相关检测，但绘图函数缺失通常是 API 使用不当或版本差异导致。","https:\u002F\u002Fgithub.com\u002Fjustadudewhohacks\u002Fface-api.js\u002Fissues\u002F106",{"id":148,"question_zh":149,"answer_zh":150,"source_url":126},32899,"生产环境与本地环境表现不一致，模型加载报错如何处理？","生产环境报错而本地正常，通常是因为资源文件路径或文件完整性问题。\n1. 检查生产服务器上的模型文件（.json, .bin\u002F.shard）是否完整上传，特别是大文件分片是否丢失。\n2. 确认 manifest.json 中的路径配置与实际文件结构一致（参考张量形状错误的修复方法，检查文件扩展名）。\n3. 如果使用 CDN 加载模型，检查网络请求是否被拦截或跨域限制，建议在生产环境改用本地托管模型文件。\n4. 确保生产环境的 TensorFlow.js 后端（CPU\u002FWebGL\u002FWASM）已正确初始化。",[152,157,162,167,172,177,182,187,192,197,202,207,212,217,222,227,232,237,242,247],{"id":153,"version":154,"summary_zh":155,"released_at":156},247583,"0.22.2","- 将 tfjs-core 升级到 1.7.0 版本","2020-03-22T14:01:55",{"id":158,"version":159,"summary_zh":160,"released_at":161},247584,"0.22.1","- 将 tfjs-core 升级到 1.5.2 版本","2020-02-07T12:28:02",{"id":163,"version":164,"summary_zh":165,"released_at":166},247585,"0.22.0","- 将 tfjs-core 升级到 1.4.0 版本\n- 将 tfjs-image-recognition-base 合并到 face-api.js 中\n\n\n弃用内容：\n- 为以下内容添加了弃用警告：\n  - allFaces*\n  - mtcnn\n\n重大变更：\n- 将 [utils](https:\u002F\u002Fgithub.com\u002Fjustadudewhohacks\u002Fface-api.js\u002Fblob\u002Fmaster\u002Fsrc\u002Futils\u002Findex.ts) 移至 faceapi.utils\n","2019-12-15T11:44:05",{"id":168,"version":169,"summary_zh":170,"released_at":171},247586,"0.21.0","- 将 tfjs-core 升级至 1.2.9 版本\n- 添加了 WithAge\u002FWithGender 的缺失导出 #339\n- 为 FaceMatcher 和 LabeledFaceDescriptors 实现 JSON 序列化\u002F反序列化 
#397","2019-09-15T19:06:20",{"id":173,"version":174,"summary_zh":175,"released_at":176},247587,"0.20.1","- 将 tfjs-core 升级到 1.2.2 版本","2019-06-28T17:50:47",{"id":178,"version":179,"summary_zh":180,"released_at":181},247588,"0.20.0","特性：\n- 年龄与性别识别（AgeGenderNet）\n- 改进且更灵活的绘图 API，可通过 faceapi.draw 访问（请参阅示例）\n- 允许在进行人脸分类（表情、年龄和性别预测）之前，通过 withFaceLandmarks 进行人脸对齐，以提高准确性\n- faceapi.matchDimensions 辅助函数，用于调整媒体元素的大小\n\nAPI 的重大变更：\n\n1. FaceExpressionNet.predictExpressions 现在返回 FaceExpressions 实例，而非数组。\n\n2. 在 detectAllFaces 和 detectSingleFace 之后，必须首先调用 withFaceLandmarks，因为现在可以使用人脸对齐技术来进行人脸分类，从而获得更稳定的人脸分类结果（包括表情、年龄和性别预测）：\n\n```javascript\nawait faceapi.detectAllFaces(input).withFaceExpressions().withFaceLandmarks()\n-> await faceapi.detectAllFaces(input).withFaceLandmarks().withFaceExpressions()\n\nawait faceapi.detectAllFaces(input).withFaceLandmarks().withFaceExpressions().withFaceDescriptors()\n-> await faceapi.detectAllFaces(input).withFaceLandmarks().withFaceExpressions().withFaceDescriptors()\n\nawait faceapi.detectSingleFace(input).withFaceExpressions().withFaceLandmarks()\n-> await faceapi.detectSingleFace(input).withFaceLandmarks().withFaceExpressions()\n\nawait faceapi.detectSingleFace(input).withFaceExpressions().withFaceLandmarks().withFaceDescriptor()\n-> await faceapi.detectSingleFace(input).withFaceLandmarks().withFaceExpressions().withFaceDescriptor()\n```\n\n3. 
移除了已导出的内部实现和旧版绘图 API：\n- BoxWithText\n- getDefaultDrawOptions\n- drawBox（请改用 faceapi.draw.DrawBox(box).draw(canvas)）\n- drawDetection（请改用 faceapi.draw.drawDetections）\n- drawFaceLandmarks（请改用 faceapi.draw.drawFaceLandmarks）\n- drawFaceExpressions（请改用 faceapi.draw.drawFaceExpressions）\n- drawText（请改用 faceapi.draw.DrawTextField(text).draw(canvas)）","2019-05-07T19:58:38",{"id":183,"version":184,"summary_zh":185,"released_at":186},247589,"0.19.0","- 将 tfjs-core 更新至 1.0.3","2019-03-27T14:04:20",{"id":188,"version":189,"summary_zh":190,"released_at":191},247590,"0.18.0","- 使用 TypeScript 3.2.4 重新编译了包\r\n- 移除了对 tfjs-tiny-yolov2 的依赖","2019-01-28T17:08:26",{"id":193,"version":194,"summary_zh":195,"released_at":196},247591,"0.17.1","修复：\n- 移除了导致构建错误的绝对导入","2019-01-04T13:02:32",{"id":198,"version":199,"summary_zh":200,"released_at":201},247592,"0.17.0","功能：\n- 面部表情识别\n\n重大变更：\n\n以下两个工具类已被替换：FaceDetectionWithLandmarks 和 FullFaceDescription。现在，相应的函数调用会返回普通对象，而不是上述类的实例。因此，需要使用 `faceapi.resizeResults(results, { width: \u003Cwidth>, height: \u003Cheight> })` 来调整结果的尺寸，而不再使用 `results.map(res => res.forSize(width, height))`：\n\n``` javascript\nexport function resizeResults\u003CT>(results: T, { width, height }: IDimensions): T\n```","2019-01-02T21:35:09",{"id":203,"version":204,"summary_zh":205,"released_at":206},247593,"0.16.2","fixes:\r\n- fixed issue of incorrectly initializing nodejs environment in electron renderer thread #157\r\n\r\nother:\r\n- bumped tfjs-core version to 0.14.2","2018-12-13T11:19:14",{"id":208,"version":209,"summary_zh":210,"released_at":211},247594,"0.16.1","fixes:\r\n- detectSingleFace is now correctly returning face with highest score","2018-11-18T16:37:57",{"id":213,"version":214,"summary_zh":215,"released_at":216},247595,"0.16.0","- nodejs support\r\n\r\nother:\r\n- bumped tfjs-core version to 0.13.8","2018-11-12T20:47:49",{"id":218,"version":219,"summary_zh":220,"released_at":221},247596,"0.15.1","fixes:\r\n- resolved issues with missing exports\r\n- 
cleaned unused files from build folder","2018-10-30T13:00:36",{"id":223,"version":224,"summary_zh":225,"released_at":226},247597,"0.15.0","- new tiny face detection model for realtime face detection\r\n- simplified and easier to use API + more utility (Composable Tasks API, FaceMatcher)","2018-10-23T19:41:57",{"id":228,"version":229,"summary_zh":230,"released_at":231},247598,"0.14.3","fixes:\r\n- resolved broken dependency tree in package-lock.json, which caused tfjs-core to be bundled 3 times leading to ~3x bundle size + published fixed dist","2018-10-03T08:33:44",{"id":233,"version":234,"summary_zh":235,"released_at":236},247599,"0.14.2","- bumped tfjs-core version to 0.13.2, which comes with various performance improvements (speedup of ~2x for ssd mobilenetv1 face detector)","2018-10-02T14:04:57",{"id":238,"version":239,"summary_zh":240,"released_at":241},247600,"0.14.1","- published commonjs and es6 builds","2018-09-30T17:40:00",{"id":243,"version":244,"summary_zh":245,"released_at":246},247601,"0.14.0","- trained two 68 point face landmark detection models from scratch, which have higher accuracy and are much faster then previous model\r\n- furthermore the model sizes are much smaller (350kb and 80kb tiny model) compared to the old model (7MB)","2018-09-26T19:07:22",{"id":248,"version":249,"summary_zh":250,"released_at":251},247602,"0.13.0","- major performance improvements by resizing net input canvases instead of tensors, which is much more performant and circumvents issue mentioned [here](https:\u002F\u002Fgithub.com\u002Ftensorflow\u002Ftfjs\u002Fissues\u002F604)\r\n\r\nfixes:\r\n- fixed post processing of 68 point face landmarks, which caused distortion of points at axes of minor dimension\r\n\r\nbreaking changes:\r\n- removed managed flag and method from NetInput and related flag in toNetInput\r\n- NetInput inputs are now left untouched, thus NetInput.inputs has been removed, NetInput.getInput(batchIdx) should be used instead\r\n- NetInput and toNetInput 
do not accept tf.Tensor4D input with batchSize > 1 anymore, unstack batches instead and pass individual tensors as an array to create an equivalent batch input\r\n","2018-09-16T19:28:52"]