[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-jdh-algo--JoyVASA":3,"tool-jdh-algo--JoyVASA":62},[4,18,26,35,44,53],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":17},4358,"openclaw","openclaw\u002Fopenclaw","OpenClaw 是一款专为个人打造的本地化 AI 助手，旨在让你在自己的设备上拥有完全可控的智能伙伴。它打破了传统 AI 助手局限于特定网页或应用的束缚，能够直接接入你日常使用的各类通讯渠道，包括微信、WhatsApp、Telegram、Discord、iMessage 等数十种平台。无论你在哪个聊天软件中发送消息，OpenClaw 都能即时响应，甚至支持在 macOS、iOS 和 Android 设备上进行语音交互，并提供实时的画布渲染功能供你操控。\n\n这款工具主要解决了用户对数据隐私、响应速度以及“始终在线”体验的需求。通过将 AI 部署在本地，用户无需依赖云端服务即可享受快速、私密的智能辅助，真正实现了“你的数据，你做主”。其独特的技术亮点在于强大的网关架构，将控制平面与核心助手分离，确保跨平台通信的流畅性与扩展性。\n\nOpenClaw 非常适合希望构建个性化工作流的技术爱好者、开发者，以及注重隐私保护且不愿被单一生态绑定的普通用户。只要具备基础的终端操作能力（支持 macOS、Linux 及 Windows WSL2），即可通过简单的命令行引导完成部署。如果你渴望拥有一个懂你",349277,3,"2026-04-06T06:32:30",[13,14,15,16],"Agent","开发框架","图像","数据工具","ready",{"id":19,"name":20,"github_repo":21,"description_zh":22,"stars":23,"difficulty_score":10,"last_commit_at":24,"category_tags":25,"status":17},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,"2026-04-05T11:01:52",[14,15,13],{"id":27,"name":28,"github_repo":29,"description_zh":30,"stars":31,"difficulty_score":32,"last_commit_at":33,"category_tags":34,"status":17},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",108322,2,"2026-04-10T11:39:34",[14,15,13],{"id":36,"name":37,"github_repo":38,"description_zh":39,"stars":40,"difficulty_score":32,"last_commit_at":41,"category_tags":42,"status":17},6121,"gemini-cli","google-gemini\u002Fgemini-cli","gemini-cli 是一款由谷歌推出的开源 AI 命令行工具，它将强大的 Gemini 大模型能力直接集成到用户的终端环境中。对于习惯在命令行工作的开发者而言，它提供了一条从输入提示词到获取模型响应的最短路径，无需切换窗口即可享受智能辅助。\n\n这款工具主要解决了开发过程中频繁上下文切换的痛点，让用户能在熟悉的终端界面内直接完成代码理解、生成、调试以及自动化运维任务。无论是查询大型代码库、根据草图生成应用，还是执行复杂的 Git 操作，gemini-cli 都能通过自然语言指令高效处理。\n\n它特别适合广大软件工程师、DevOps 人员及技术研究人员使用。其核心亮点包括支持高达 100 万 token 的超长上下文窗口，具备出色的逻辑推理能力；内置 Google 搜索、文件操作及 Shell 命令执行等实用工具；更独特的是，它支持 MCP（模型上下文协议），允许用户灵活扩展自定义集成，连接如图像生成等外部能力。此外，个人谷歌账号即可享受免费的额度支持，且项目基于 Apache 2.0 协议完全开源，是提升终端工作效率的理想助手。",100752,"2026-04-10T01:20:03",[43,13,15,14],"插件",{"id":45,"name":46,"github_repo":47,"description_zh":48,"stars":49,"difficulty_score":10,"last_commit_at":50,"category_tags":51,"status":17},4487,"LLMs-from-scratch","rasbt\u002FLLMs-from-scratch","LLMs-from-scratch 是一个基于 PyTorch 的开源教育项目，旨在引导用户从零开始一步步构建一个类似 ChatGPT 的大型语言模型（LLM）。它不仅是同名技术著作的官方代码库，更提供了一套完整的实践方案，涵盖模型开发、预训练及微调的全过程。\n\n该项目主要解决了大模型领域“黑盒化”的学习痛点。许多开发者虽能调用现成模型，却难以深入理解其内部架构与训练机制。通过亲手编写每一行核心代码，用户能够透彻掌握 Transformer 架构、注意力机制等关键原理，从而真正理解大模型是如何“思考”的。此外，项目还包含了加载大型预训练权重进行微调的代码，帮助用户将理论知识延伸至实际应用。\n\nLLMs-from-scratch 特别适合希望深入底层原理的 AI 
开发者、研究人员以及计算机专业的学生。对于不满足于仅使用 API，而是渴望探究模型构建细节的技术人员而言，这是极佳的学习资源。其独特的技术亮点在于“循序渐进”的教学设计：将复杂的系统工程拆解为清晰的步骤，配合详细的图表与示例，让构建一个虽小但功能完备的大模型变得触手可及。无论你是想夯实理论基础，还是为未来研发更大规模的模型做准备",90106,"2026-04-06T11:19:32",[52,15,13,14],"语言模型",{"id":54,"name":55,"github_repo":56,"description_zh":57,"stars":58,"difficulty_score":10,"last_commit_at":59,"category_tags":60,"status":17},4292,"Deep-Live-Cam","hacksider\u002FDeep-Live-Cam","Deep-Live-Cam 是一款专注于实时换脸与视频生成的开源工具，用户仅需一张静态照片，即可通过“一键操作”实现摄像头画面的即时变脸或制作深度伪造视频。它有效解决了传统换脸技术流程繁琐、对硬件配置要求极高以及难以实时预览的痛点，让高质量的数字内容创作变得触手可及。\n\n这款工具不仅适合开发者和技术研究人员探索算法边界，更因其极简的操作逻辑（仅需三步：选脸、选摄像头、启动），广泛适用于普通用户、内容创作者、设计师及直播主播。无论是为了动画角色定制、服装展示模特替换，还是制作趣味短视频和直播互动，Deep-Live-Cam 都能提供流畅的支持。\n\n其核心技术亮点在于强大的实时处理能力，支持口型遮罩（Mouth Mask）以保留使用者原始的嘴部动作，确保表情自然精准；同时具备“人脸映射”功能，可同时对画面中的多个主体应用不同面孔。此外，项目内置了严格的内容安全过滤机制，自动拦截涉及裸露、暴力等不当素材，并倡导用户在获得授权及明确标注的前提下合规使用，体现了技术发展与伦理责任的平衡。",88924,"2026-04-06T03:28:53",[14,15,13,61],"视频",{"id":63,"github_repo":64,"name":65,"description_en":66,"description_zh":67,"ai_summary_zh":67,"readme_en":68,"readme_zh":69,"quickstart_zh":70,"use_case_zh":71,"hero_image_url":72,"owner_login":73,"owner_name":73,"owner_avatar_url":74,"owner_bio":75,"owner_company":75,"owner_location":75,"owner_email":75,"owner_twitter":75,"owner_website":75,"owner_url":76,"languages":77,"stars":90,"forks":91,"last_commit_at":92,"license":93,"difficulty_score":94,"env_os":95,"env_gpu":96,"env_ram":97,"env_deps":98,"category_tags":111,"github_topics":113,"view_count":32,"oss_zip_url":75,"oss_zip_packed_at":75,"status":17,"created_at":120,"updated_at":121,"faqs":122,"releases":123},6920,"jdh-algo\u002FJoyVASA","JoyVASA","Diffusion-based Portrait and Animal Animation","JoyVASA 是一款基于扩散模型的开源工具，专为生成高质量的真人及动物面部动画而设计。只需提供一张静态参考图片和一段音频，它就能让画面中的人物或动物随着声音自然说话，展现出逼真的表情变化和头部动作。\n\n针对现有音频驱动动画技术存在的训练效率低、推理速度慢以及长视频生成时画面不连贯等痛点，JoyVASA 提出了一种创新的“解耦”方案。它将静态的面部特征与动态的表情动作分离处理：先通过扩散模型根据音频生成独立的运动序列，再将其融合到任意静态形象上。这种架构不仅显著提升了视频的流畅度和时长上限，还打破了身份限制，使其能无缝应用于动物面孔的动画生成。此外，该模型在混合了中英文数据的数据集上训练，天然支持多语言场景。\n\nJoyVASA 非常适合 AI 研究人员、开发者以及数字内容创作者使用。研究人员可借鉴其独特的解耦表征框架和身份无关的运动生成机制；开发者和设计师则能利用它快速制作多语言虚拟人视频或赋予动物角色生命力。目前项目已开放代码、模型权重及技术论文，便于社区进一步探索与实时化优化。","\u003Ch1 align='center'>JoyVASA: Portrait and Animal Image Animation with Diffusion-Based Audio-Driven Facial Dynamics and Head Motion Generation\u003C\u002Fh1>\n\n\u003Cdiv align='center'>\n    \u003Ca href='https:\u002F\u002Fgithub.com\u002Fxuyangcao' target='_blank'>Xuyang Cao\u003C\u002Fa>\u003Csup>1*\u003C\u002Fsup>&emsp;\n    Guoxin Wang\u003Csup>12*\u003C\u002Fsup>&emsp;\n    \u003Ca href='https:\u002F\u002Fgithub.com\u002FDBDXSS' target='_blank'>Sheng Shi\u003C\u002Fa>\u003Csup>1*\u003C\u002Fsup>&emsp;\n    \u003Ca href='https:\u002F\u002Fgithub.com\u002Fzhaojun060708' target='_blank'>Jun Zhao\u003C\u002Fa>\u003Csup>1\u003C\u002Fsup>&emsp;\n    Yang Yao\u003Csup>1\u003C\u002Fsup>\n\u003C\u002Fdiv>\n\u003Cdiv align='center'>\n    Jintao Fei\u003Csup>1\u003C\u002Fsup>&emsp;\n    Minyu Gao\u003Csup>1\u003C\u002Fsup>\n\u003C\u002Fdiv>\n\u003Cdiv align='center'>\n    \u003Csup>1\u003C\u002Fsup>JD Health International Inc.  
\u003Csup>2\u003C\u002Fsup>Zhejiang University\n\u003C\u002Fdiv>\n\n\u003Cbr>\n\u003Cdiv align='center'>\n    \u003Ca href='https:\u002F\u002Fgithub.com\u002Fjdh-algo\u002FJoyVASA'>\u003Cimg src='https:\u002F\u002Fimg.shields.io\u002Fgithub\u002Fstars\u002Fjdh-algo\u002FJoyVASA?style=social'>\u003C\u002Fa>\n    \u003Ca href='https:\u002F\u002Fjdh-algo.github.io\u002FJoyVASA'>\u003Cimg src='https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FProject-HomePage-Green'>\u003C\u002Fa>\n    \u003Ca href='https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.09209'>\u003Cimg src='https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FPaper-Arxiv-red'>\u003C\u002Fa>\n    \u003Ca href='https:\u002F\u002Fhuggingface.co\u002Fjdh-algo\u002FJoyVASA'>\u003Cimg src='https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F%F0%9F%A4%97%20HuggingFace-Model-yellow'>\u003C\u002Fa>\n    \u003C!-- \u003Ca href='https:\u002F\u002Fhuggingface.co\u002Fspaces\u002Fjdh-algo\u002FJoyHallo'>\u003Cimg src='https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F%F0%9F%A4%97%20HuggingFace-Demo-yellow'>\u003C\u002Fa> -->\n\u003C\u002Fdiv>\n\u003Cbr>\n\n## 📖 Introduction\n\nAudio-driven portrait animation has made significant advances with diffusion-based models, improving video quality and lipsync accuracy. However, the increasing complexity of these models has led to inefficiencies in training and inference, as well as constraints on video length and inter-frame continuity. In this paper, we propose JoyVASA, a diffusion-based method for generating facial dynamics and head motion in audio-driven facial animation. Specifically, in the first stage, we introduce a decoupled facial representation framework that separates dynamic facial expressions from static 3D facial representations. This decoupling allows the system to generate longer videos by combining any static 3D facial representation with dynamic motion sequences. Then, in the second stage, a diffusion transformer is trained to generate motion sequences directly from audio cues, independent of character identity. Finally, a generator trained in the first stage uses the 3D facial representation and the generated motion sequences as inputs to render high-quality animations. With the decoupled facial representation and the identity-independent motion generation process, JoyVASA extends beyond human portraits to animate animal faces seamlessly. The model is trained on a hybrid dataset of private Chinese and public English data, enabling multilingual support. Experimental results validate the effectiveness of our approach. Future work will focus on improving real-time performance and refining expression control, further expanding the framework’s applications in portrait animation.\n\n## 🧳 Framework\n\n![Inference Pipeline](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjdh-algo_JoyVASA_readme_41f8fea79c81.png)\n\n**Inference Pipeline of the proposed JoyVASA.** Given a reference image, we first extract the 3D facial appearance feature using the appearance encoder in LivePortrait, and also a series of learned 3D keypoints using the motion encoder. For the input speech, the audio features are initially extracted using the wav2vec2 encoder. The audio-driven motion sequences are then sampled in a sliding-window fashion using the diffusion model trained in the second stage. Using the 3D keypoints of the reference image and the sampled target motion sequences, the target keypoints are computed. Finally, the 3D facial appearance feature is warped based on the source and target keypoints and rendered by a generator to produce the final output video.
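\n\nTo make the data flow concrete, here is a small runnable sketch of that two-stage loop. It is illustrative only: none of these function names exist in the repository, and the arrays stand in for the real LivePortrait features, keypoints, and diffusion outputs. What it does mirror is the structure described above, sliding-window sampling over audio features producing an arbitrarily long motion sequence that is applied to a single static identity.\n\n```python\nimport numpy as np\n\n# All helpers below are illustrative stand-ins, not the repository's API.\ndef encode_audio(num_frames: int, dim: int = 768) -> np.ndarray:\n    # Stand-in for wav2vec2 or chinese-hubert features, one vector per frame.\n    return np.random.randn(num_frames, dim)\n\ndef sample_motion(audio_window: np.ndarray, cfg_scale: float) -> np.ndarray:\n    # Stand-in for the stage-two diffusion transformer: audio to motion codes.\n    return cfg_scale * np.tanh(audio_window.mean(axis=1, keepdims=True))\n\ndef render(identity: np.ndarray, motion_code: np.ndarray) -> np.ndarray:\n    # Stand-in for warping the appearance feature and running the generator.\n    return identity + motion_code\n\nidentity = np.zeros(1)                # one static face: human or animal\naudio = encode_audio(num_frames=250)  # roughly 10 s of speech at 25 fps\nwindow, stride = 100, 80              # overlapping windows keep frames continuous\n\nframes = []\nfor start in range(0, len(audio), stride):\n    motion = sample_motion(audio[start:start + window], cfg_scale=2.0)\n    frames.extend(render(identity, m) for m in motion[:stride])\n\nprint(len(frames), 'frames generated from a single reference identity')\n```\n\nBecause identity enters only at the final render step, swapping the reference image for an animal face changes nothing in the motion-generation loop, which is the property the paper relies on to animate animal faces.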
\n\n## ⚙️ Installation\n\n**System requirements:**\n\nUbuntu:\n\n- Tested on Ubuntu 20.04, CUDA 12.1\n- Tested GPUs: A100\n\nWindows:\n\n- Tested on Windows 11, CUDA 12.1\n- Tested GPU: RTX 4060 Laptop (8GB VRAM)\n\n**Create environment:**\n\n```bash\n# 1. Create base environment\nconda create -n joyvasa python=3.10 -y\nconda activate joyvasa\n\n# 2. Install requirements\npip install -r requirements.txt\n\n# 3. Install ffmpeg\nsudo apt-get update\nsudo apt-get install ffmpeg -y\n\n# 4. Optional: Install MultiScaleDeformableAttention for animal image animation\ncd src\u002Futils\u002Fdependencies\u002FXPose\u002Fmodels\u002FUniPose\u002Fops\npython setup.py build install\ncd - # equivalent to cd ..\u002F..\u002F..\u002F..\u002F..\u002F..\u002F..\u002F\n```\n\n## 🎒 Prepare model checkpoints\n\nMake sure you have [git-lfs](https:\u002F\u002Fgit-lfs.com) installed and download all the following checkpoints to `pretrained_weights`:\n\n### 1. Download JoyVASA motion generator checkpoints\n\n```bash\ngit lfs install\ngit clone https:\u002F\u002Fhuggingface.co\u002Fjdh-algo\u002FJoyVASA\n```\n\n### 2. Download audio encoder checkpoints\n\nWe support two audio encoders: [wav2vec2-base](https:\u002F\u002Fhuggingface.co\u002Ffacebook\u002Fwav2vec2-base-960h) and [hubert-chinese](https:\u002F\u002Fhuggingface.co\u002FTencentGameMate\u002Fchinese-hubert-base).\n\nRun the following commands to download the [hubert-chinese](https:\u002F\u002Fhuggingface.co\u002FTencentGameMate\u002Fchinese-hubert-base) pretrained weights:\n\n```bash\ngit lfs install\ngit clone https:\u002F\u002Fhuggingface.co\u002FTencentGameMate\u002Fchinese-hubert-base\n```\n\nTo get the [wav2vec2-base](https:\u002F\u002Fhuggingface.co\u002Ffacebook\u002Fwav2vec2-base-960h) pretrained weights, run the following commands:\n\n```bash\ngit lfs install\ngit clone https:\u002F\u002Fhuggingface.co\u002Ffacebook\u002Fwav2vec2-base-960h\n```\n\n> [!NOTE]\n> The motion generation model with the wav2vec2 encoder will be supported later.\n\n### 3. Download LivePortrait checkpoints\n\n```bash\n# !pip install -U \"huggingface_hub[cli]\"\nhuggingface-cli download KwaiVGI\u002FLivePortrait --local-dir pretrained_weights --exclude \"*.git*\" \"README.md\" \"docs\"\n```\n\nRefer to [LivePortrait](https:\u002F\u002Fgithub.com\u002FKwaiVGI\u002FLivePortrait\u002Ftree\u002Fmain) for more download methods.
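\n\nAfter these clones finish, they sit wherever you ran `git clone`, not yet under `pretrained_weights`. The snippet below is one possible way to arrange them; it is an illustration rather than a step from the repository docs, it assumes you cloned from the repository root, and the target names simply mirror the tree in the next section:\n\n```python\n# Hypothetical tidy-up: move the cloned checkpoint folders under\n# pretrained_weights\u002F so they match the expected layout shown below.\nfrom pathlib import Path\nimport shutil\n\ndst = Path('pretrained_weights')\ndst.mkdir(exist_ok=True)\nshutil.move('JoyVASA', dst \u002F 'JoyVASA')\nshutil.move('wav2vec2-base-960h', dst \u002F 'wav2vec2-base-960h')\n# The expected tree keeps the org prefix on Linux; on Windows the folder\n# must stay plain chinese-hubert-base (see the NOTE after the tree).\nshutil.move('chinese-hubert-base', dst \u002F 'TencentGameMate:chinese-hubert-base')\nprint(sorted(p.name for p in dst.iterdir()))\n```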
\n\n### 4. `pretrained_weights` contents\n\nThe final `pretrained_weights` directory should look like this:\n\n```text\n.\u002Fpretrained_weights\u002F\n├── insightface\n│   └── models\n│       └── buffalo_l\n│           ├── 2d106det.onnx\n│           └── det_10g.onnx\n├── JoyVASA\n│   ├── motion_generator\n│   │   └── iter_0020000.pt\n│   └── motion_template\n│       └── motion_template.pkl\n├── liveportrait\n│   ├── base_models\n│   │   ├── appearance_feature_extractor.pth\n│   │   ├── motion_extractor.pth\n│   │   ├── spade_generator.pth\n│   │   └── warping_module.pth\n│   ├── landmark.onnx\n│   └── retargeting_models\n│       └── stitching_retargeting_module.pth\n├── liveportrait_animals\n│   ├── base_models\n│   │   ├── appearance_feature_extractor.pth\n│   │   ├── motion_extractor.pth\n│   │   ├── spade_generator.pth\n│   │   └── warping_module.pth\n│   ├── retargeting_models\n│   │   └── stitching_retargeting_module.pth\n│   └── xpose.pth\n├── TencentGameMate:chinese-hubert-base\n│   ├── chinese-hubert-base-fairseq-ckpt.pt\n│   ├── config.json\n│   ├── gitattributes\n│   ├── preprocessor_config.json\n│   ├── pytorch_model.bin\n│   └── README.md\n└── wav2vec2-base-960h\n    ├── config.json\n    ├── feature_extractor_config.json\n    ├── model.safetensors\n    ├── preprocessor_config.json\n    ├── pytorch_model.bin\n    ├── README.md\n    ├── special_tokens_map.json\n    ├── tf_model.h5\n    ├── tokenizer_config.json\n    └── vocab.json\n```\n\n> [!NOTE]\n> On Windows, the folder `TencentGameMate:chinese-hubert-base` should be renamed to `chinese-hubert-base`.\n\n## 🚀 Inference\n\n### 1. Inference with command line\n\nAnimal:\n\n```bash\npython inference.py -r assets\u002Fexamples\u002Fimgs\u002Fjoyvasa_001.png -a assets\u002Fexamples\u002Faudios\u002Fjoyvasa_001.wav --animation_mode animal --cfg_scale 2.0\n```\n\nHuman:\n\n```bash\npython inference.py -r assets\u002Fexamples\u002Fimgs\u002Fjoyvasa_003.png -a assets\u002Fexamples\u002Faudios\u002Fjoyvasa_003.wav --animation_mode human --cfg_scale 2.0\n```\n\nYou can change `cfg_scale` to get results with different expressions and poses; see the sweep example at the end of this section.\n\n> [!NOTE]\n> A mismatch between the animation mode and the reference image may produce incorrect results.\n\n### 2. Inference with web demo\n\nUse the following command to start the web demo:\n\n```bash\npython app.py\n```\n\nThe demo will be created at http:\u002F\u002F127.0.0.1:7862.
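\n\nIf you want to compare settings side by side, a small unofficial driver can sweep `cfg_scale` over the command-line interface from section 1; it uses only the flags documented above:\n\n```python\n# Unofficial helper: render the same reference image and audio at several\n# cfg_scale values to compare expression and pose intensity side by side.\nimport subprocess\n\nfor scale in ('1.0', '1.5', '2.0', '2.5'):\n    subprocess.run(\n        ['python', 'inference.py',\n         '-r', 'assets\u002Fexamples\u002Fimgs\u002Fjoyvasa_003.png',\n         '-a', 'assets\u002Fexamples\u002Faudios\u002Fjoyvasa_003.wav',\n         '--animation_mode', 'human', '--cfg_scale', scale],\n        check=True,\n    )\n```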
\n\n## ⚓️ Train Motion Generator with Your Own Data\n\nThe motion generator should be trained using videos of human talking faces.\n\n### 1. Prepare train and validation data\n\nChange the `root_dir` in `01_extract_motions.py` to your own dataset path, then run the following commands to generate the training and validation data:\n\n```bash\ncd src\u002Fprepare_data\npython 01_extract_motions.py\npython 05_extract_audio.py\npython 02_gen_labels.py\npython 03_merge_motions.py\npython 04_gen_template.py\n\nmv motion_templete.pkl motions.pkl train.json test.json ..\u002F..\u002Fdata\ncd ..\u002F..\n```\n\n### 2. Train\n\n```bash\npython train.py\n```\n\nThe experimental results are located in `experiments\u002F`.\n\n## 📝 Citations\n\nIf you find our work helpful, please consider citing us:\n\n```\n@misc{cao2024joyvasaportraitanimalimage,\n      title={JoyVASA: Portrait and Animal Image Animation with Diffusion-Based Audio-Driven Facial Dynamics and Head Motion Generation}, \n      author={Xuyang Cao and Guoxin Wang and Sheng Shi and Jun Zhao and Yang Yao and Jintao Fei and Minyu Gao},\n      year={2024},\n      eprint={2411.09209},\n      archivePrefix={arXiv},\n      primaryClass={cs.CV},\n      url={https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.09209}, \n}\n```\n\n## 🤝 Acknowledgments\n\nWe would like to thank the contributors to the [LivePortrait](https:\u002F\u002Fgithub.com\u002FKwaiVGI\u002FLivePortrait), [Open Facevid2vid](https:\u002F\u002Fgithub.com\u002Fzhanglonghao1992\u002FOne-Shot_Free-View_Neural_Talking_Head_Synthesis), [InsightFace](https:\u002F\u002Fgithub.com\u002Fdeepinsight\u002Finsightface), [X-Pose](https:\u002F\u002Fgithub.com\u002FIDEA-Research\u002FX-Pose), [DiffPoseTalk](https:\u002F\u002Fgithub.com\u002FDiffPoseTalk\u002FDiffPoseTalk), [Hallo](https:\u002F\u002Fgithub.com\u002Ffudan-generative-vision\u002Fhallo), [wav2vec 2.0](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Ffairseq\u002Ftree\u002Fmain\u002Fexamples\u002Fwav2vec), [Chinese Speech Pretrain](https:\u002F\u002Fgithub.com\u002FTencentGameMate\u002Fchinese_speech_pretrain), [Q-Align](https:\u002F\u002Fgithub.com\u002FQ-Future\u002FQ-Align), [Syncnet](https:\u002F\u002Fgithub.com\u002Fjoonson\u002Fsyncnet_python), and [VBench](https:\u002F\u002Fgithub.com\u002FVchitect\u002FVBench) repositories for their open research and extraordinary work.\n","\u003Ch1 align='center'>JoyVASA：基于扩散模型的音频驱动面部动态与头部运动生成，实现人像与动物图像动画\u003C\u002Fh1>\n\n\u003Cdiv align='center'>\n    \u003Ca href='https:\u002F\u002Fgithub.com\u002Fxuyangcao' target='_blank'>Xuyang Cao\u003C\u002Fa>\u003Csup>1*\u003C\u002Fsup>&emsp;\n    Guoxin Wang\u003Csup>12*\u003C\u002Fsup>&emsp;\n    \u003Ca href='https:\u002F\u002Fgithub.com\u002FDBDXSS' target='_blank'>Sheng Shi\u003C\u002Fa>\u003Csup>1*\u003C\u002Fsup>&emsp;\n    \u003Ca href='https:\u002F\u002Fgithub.com\u002Fzhaojun060708' target='_blank'>Jun Zhao\u003C\u002Fa>\u003Csup>1\u003C\u002Fsup>&emsp;\n    Yang Yao\u003Csup>1\u003C\u002Fsup>\n\u003C\u002Fdiv>\n\u003Cdiv align='center'>\n    Jintao Fei\u003Csup>1\u003C\u002Fsup>&emsp;\n    Minyu Gao\u003Csup>1\u003C\u002Fsup>\n\u003C\u002Fdiv>\n\u003Cdiv align='center'>\n    \u003Csup>1\u003C\u002Fsup>JD Health International Inc.  
\u003Csup>2\u003C\u002Fsup>浙江大学\n\u003C\u002Fdiv>\n\n\u003Cbr>\n\u003Cdiv align='center'>\n    \u003Ca href='https:\u002F\u002Fgithub.com\u002Fjdh-algo\u002FJoyVASA'>\u003Cimg src='https:\u002F\u002Fimg.shields.io\u002Fgithub\u002Fstars\u002Fjdh-algo\u002FJoyVASA?style=social'>\u003C\u002Fa>\n    \u003Ca href='https:\u002F\u002Fjdh-algo.github.io\u002FJoyVASA'>\u003Cimg src='https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FProject-HomePage-Green'>\u003C\u002Fa>\n    \u003Ca href='https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.09209'>\u003Cimg src='https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FPaper-Arxiv-red'>\u003C\u002Fa>\n    \u003Ca href='https:\u002F\u002Fhuggingface.co\u002Fjdh-algo\u002FJoyVASA'>\u003Cimg src='https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F%F0%9F%A4%97%20HuggingFace-Model-yellow'>\u003C\u002Fa>\n    \u003C!-- \u003Ca href='https:\u002F\u002Fhuggingface.co\u002Fspaces\u002Fjdh-algo\u002FJoyHallo'>\u003Cimg src='https:\u002F\u002Fimg.shields.io\u002Fbadge\u002F%F0%9F%A4%97%20HuggingFace-Demo-yellow'>\u003C\u002Fa> -->\n\u003C\u002Fdiv>\n\u003Cbr>\n\n## 📖 引言\n\n基于扩散模型的音频驱动人像动画近年来取得了显著进展，视频质量和口型同步精度大幅提升。然而，随着模型复杂度的增加，训练与推理效率降低，同时视频长度和帧间连续性也受到限制。本文提出了一种名为JoyVASA的扩散模型方法，用于在音频驱动的人脸动画中生成面部动态与头部运动。具体而言，在第一阶段，我们引入了一个解耦的面部表征框架，将动态面部表情与静态3D面部表征分离。这种解耦使得系统能够通过任意静态3D面部表征与动态运动序列的组合来生成更长的视频。随后，在第二阶段，我们训练了一个扩散Transformer模型，使其能够直接从音频线索中生成与角色身份无关的运动序列。最后，第一阶段训练好的生成器会以3D面部表征和生成的运动序列作为输入，渲染出高质量的动画。借助解耦的面部表征和与身份无关的运动生成过程，JoyVASA不仅适用于人类肖像，还能无缝地为动物面部生成动画。该模型使用混合数据集进行训练，包含私有的中文数据和公开的英文数据，从而支持多语言。实验结果验证了我们方法的有效性。未来的工作将集中在提升实时性能和优化表情控制上，进一步拓展该框架在人像动画领域的应用。\n\n## 🧳 框架\n\n![推理流程](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjdh-algo_JoyVASA_readme_41f8fea79c81.png)\n\n**所提出的JoyVASA的推理流程。** 给定一张参考图像，我们首先使用LivePortrait中的外观编码器提取3D面部外观特征，并利用运动编码器提取一系列学习到的3D关键点。对于输入语音，先使用wav2vec2编码器提取音频特征。然后，通过第二阶段训练的扩散模型，以滑动窗口的方式采样得到音频驱动的运动序列。结合参考图像的3D关键点和采样的目标运动序列，计算出目标关键点。最后，根据源关键点和目标关键点对3D面部外观特征进行变形，并由生成器渲染出最终的输出视频。\n\n## ⚙️ 安装\n\n**系统要求：**\n\nUbuntu：\n\n- 已在Ubuntu 20.04、CUDA 12.1上测试通过\n- 测试GPU：A100\n\nWindows：\n\n- 已在Windows 11、CUDA 12.1上测试通过\n- 测试GPU：RTX 4060笔记本电脑，8GB显存\n\n**创建环境：**\n\n```bash\n# 1. 创建基础环境\nconda create -n joyvasa python=3.10 -y\nconda activate joyvasa \n\n# 2. 安装依赖\npip install -r requirements.txt\n\n# 3. 安装ffmpeg\nsudo apt-get update  \nsudo apt-get install ffmpeg -y\n\n# 4. 可选：安装MultiScaleDeformableAttention以支持动物图像动画\ncd src\u002Futils\u002Fdependencies\u002FXPose\u002Fmodels\u002FUniPose\u002Fops\npython setup.py build install\ncd - # 等同于cd ..\u002F..\u002F..\u002F..\u002F..\u002F..\u002F..\u002F\n```\n\n## 🎒 准备模型检查点\n\n请确保已安装[git-lfs](https:\u002F\u002Fgit-lfs.com)，并将以下所有检查点下载至`pretrained_weights`目录：\n\n### 1. 下载JoyVASA运动生成器检查点\n\n```bash\ngit lfs install\ngit clone https:\u002F\u002Fhuggingface.co\u002Fjdh-algo\u002FJoyVASA\n```\n\n### 2. 下载音频编码器检查点\n\n我们支持两种类型的音频编码器，包括[wav2vec2-base](https:\u002F\u002Fhuggingface.co\u002Ffacebook\u002Fwav2vec2-base-960h)和[hubert-chinese](https:\u002F\u002Fhuggingface.co\u002FTencentGameMate\u002Fchinese-hubert-base)。运行以下命令下载[hubert-chinese](https:\u002F\u002Fhuggingface.co\u002FTencentGameMate\u002Fchinese-hubert-base)预训练权重：\n\n```bash\ngit lfs install\ngit clone https:\u002F\u002Fhuggingface.co\u002FTencentGameMate\u002Fchinese-hubert-base\n```\n\n要获取[wav2vec2-base](https:\u002F\u002Fhuggingface.co\u002Ffacebook\u002Fwav2vec2-base-960h)的预训练权重，请运行以下命令：\n\n```bash\ngit lfs install\ngit clone https:\u002F\u002Fhuggingface.co\u002Ffacebook\u002Fwav2vec2-base-960h\n```\n\n> [!NOTE]\n> 基于wav2vec2编码器的运动生成模型将在后续支持。\n\n### 3. 
下载LivePortrait检查点\n\n```bash\n# !pip install -U \"huggingface_hub[cli]\"\nhuggingface-cli download KwaiVGI\u002FLivePortrait --local-dir pretrained_weights --exclude \"*.git*\" \"README.md\" \"docs\"\n```\n\n更多下载方式可参考 [LivePortrait](https:\u002F\u002Fgithub.com\u002FKwaiVGI\u002FLivePortrait\u002Ftree\u002Fmain)。\n\n### 4. `pretrained_weights` 目录内容\n\n最终的 `pretrained_weights` 目录应如下所示：\n\n```text\n.\u002Fpretrained_weights\u002F\n├── insightface\n│   └── models\n│       └── buffalo_l\n│           ├── 2d106det.onnx\n│           └── det_10g.onnx\n├── JoyVASA\n│   ├── motion_generator\n│   │   └── iter_0020000.pt\n│   └── motion_template\n│       └── motion_template.pkl\n├── liveportrait\n│   ├── base_models\n│   │   ├── appearance_feature_extractor.pth\n│   │   ├── motion_extractor.pth\n│   │   ├── spade_generator.pth\n│   │   └── warping_module.pth\n│   ├── landmark.onnx\n│   └── retargeting_models\n│       └── stitching_retargeting_module.pth\n├── liveportrait_animals\n│   ├── base_models\n│   │   ├── appearance_feature_extractor.pth\n│   │   ├── motion_extractor.pth\n│   │   ├── spade_generator.pth\n│   │   └── warping_module.pth\n│   ├── retargeting_models\n│   │   └── stitching_retargeting_module.pth\n│   └── xpose.pth\n├── TencentGameMate:chinese-hubert-base\n│   ├── chinese-hubert-base-fairseq-ckpt.pt\n│   ├── config.json\n│   ├── gitattributes\n│   ├── preprocessor_config.json\n│   ├── pytorch_model.bin\n│   └── README.md\n└── wav2vec2-base-960h\n    ├── config.json\n    ├── feature_extractor_config.json\n    ├── model.safetensors\n    ├── preprocessor_config.json\n    ├── pytorch_model.bin\n    ├── README.md\n    ├── special_tokens_map.json\n    ├── tf_model.h5\n    ├── tokenizer_config.json\n    └── vocab.json\n```\n\n> [!NOTE]\n> Windows 系统中，文件夹 `TencentGameMate:chinese-hubert-base` 应重命名为 `chinese-hubert-base`。\n\n## 🚀 推理\n\n### 1. 命令行推理\n\n动物：\n\n```bash\npython inference.py -r assets\u002Fexamples\u002Fimgs\u002Fjoyvasa_001.png -a assets\u002Fexamples\u002Faudios\u002Fjoyvasa_001.wav --animation_mode animal --cfg_scale 2.0\n```\n\n人类：\n\n```bash\npython inference.py -r assets\u002Fexamples\u002Fimgs\u002Fjoyvasa_003.png -a assets\u002Fexamples\u002Faudios\u002Fjoyvasa_003.wav --animation_mode human --cfg_scale 2.0\n```\n\n您可以调整 `cfg_scale` 参数以获得不同表情和姿态的结果。\n\n> [!NOTE]\n> 动画模式与参考图像不匹配可能导致结果错误。\n\n### 2. Web 演示推理\n\n使用以下命令启动 Web 演示：\n\n```bash\npython app.py\n```\n\n演示将在 http:\u002F\u002F127.0.0.1:7862 启动。\n\n## ⚓️ 使用您自己的数据训练运动生成器\n\n运动生成器应使用人类说话的面部视频进行训练。\n\n### 1. 准备训练和验证数据\n\n将 `01_extract_motions.py` 中的 `root_dir` 替换为您自己的数据集路径，然后运行以下命令以生成训练和验证数据：\n\n```bash\ncd src\u002Fprepare_data\npython 01_extract_motions.py\npython 05_extract_audio.py\npython 02_gen_labels.py\npython 03_merge_motions.py\npython 04_gen_template.py\n\nmv motion_templete.pkl motions.pkl train.json test.json ..\u002F..\u002Fdata\ncd ..\u002F..\n```\n\n### 2. 
训练\n\n```bash\npython train.py\n```\n\n实验结果位于 `experiments\u002F` 目录中。\n\n## 📝 引用\n\n如果您觉得我们的工作有所帮助，请考虑引用我们：\n\n```\n@misc{cao2024joyvasaportraitanimalimage,\n      title={JoyVASA: Portrait and Animal Image Animation with Diffusion-Based Audio-Driven Facial Dynamics and Head Motion Generation}, \n      author={Xuyang Cao and Guoxin Wang and Sheng Shi and Jun Zhao and Yang Yao and Jintao Fei and Minyu Gao},\n      year={2024},\n      eprint={2411.09209},\n      archivePrefix={arXiv},\n      primaryClass={cs.CV},\n      url={https:\u002F\u002Farxiv.org\u002Fabs\u002F2411.09209}, \n}\n```\n\n## 🤝 致谢\n\n我们衷心感谢以下开源项目及其贡献者：[LivePortrait](https:\u002F\u002Fgithub.com\u002FKwaiVGI\u002FLivePortrait)、[Open Facevid2vid](https:\u002F\u002Fgithub.com\u002Fzhanglonghao1992\u002FOne-Shot_Free-View_Neural_Talking_Head_Synthesis)、[InsightFace](https:\u002F\u002Fgithub.com\u002Fdeepinsight\u002Finsightface)、[X-Pose](https:\u002F\u002Fgithub.com\u002FIDEA-Research\u002FX-Pose)、[DiffPoseTalk](https:\u002F\u002Fgithub.com\u002FDiffPoseTalk\u002FDiffPoseTalk)、[Hallo](https:\u002F\u002Fgithub.com\u002Ffudan-generative-vision\u002Fhallo)、[wav2vec 2.0](https:\u002F\u002Fgithub.com\u002Ffacebookresearch\u002Ffairseq\u002Ftree\u002Fmain\u002Fexamples\u002Fwav2vec)、[Chinese Speech Pretrain](https:\u002F\u002Fgithub.com\u002FTencentGameMate\u002Fchinese_speech_pretrain)、[Q-Align](https:\u002F\u002Fgithub.com\u002FQ-Future\u002FQ-Align)、[Syncnet](https:\u002F\u002Fgithub.com\u002Fjoonson\u002Fsyncnet_python)以及 [VBench](https:\u002F\u002Fgithub.com\u002FVchitect\u002FVBench)，感谢他们开放的研究成果和卓越的工作。","# JoyVASA 快速上手指南\n\nJoyVASA 是一款基于扩散模型的音频驱动面部动画生成工具，支持人像和动物图像的生动演绎，具备多语言（中英文）支持能力。\n\n## 1. 环境准备\n\n### 系统要求\n- **操作系统**: Ubuntu 20.04 或 Windows 11\n- **CUDA 版本**: 12.1\n- **推荐显卡**:\n  - Linux: NVIDIA A100\n  - Windows: RTX 4060 Laptop (8GB VRAM) 及以上\n- **Python 版本**: 3.10\n\n### 前置依赖\n确保系统已安装 `git`、`git-lfs` 和 `ffmpeg`。\n- Ubuntu 用户请执行：\n  ```bash\n  sudo apt-get update\n  sudo apt-get install ffmpeg -y\n  ```\n- Windows 用户请自行下载并配置 ffmpeg 环境变量。\n\n## 2. 安装步骤\n\n### 2.1 创建虚拟环境并安装依赖\n```bash\n# 1. 创建基础环境\nconda create -n joyvasa python=3.10 -y\nconda activate joyvasa \n\n# 2. 安装 Python 依赖\npip install -r requirements.txt\n\n# 3. (可选) 编译安装动物动画所需的 MultiScaleDeformableAttention 模块\n# 如果仅需做人像动画可跳过此步\ncd src\u002Futils\u002Fdependencies\u002FXPose\u002Fmodels\u002FUniPose\u002Fops\npython setup.py build install\ncd -\n```\n\n### 2.2 下载模型权重\n请确保已安装 `git-lfs` (`git lfs install`)。在项目根目录下创建 `pretrained_weights` 文件夹，并按以下方式下载权重：\n\n**1. 下载 JoyVASA 主模型**\n```bash\ngit clone https:\u002F\u002Fhuggingface.co\u002Fjdh-algo\u002FJoyVASA\n# 将克隆后的内容移动至 pretrained_weights\u002FJoyVASA (根据实际目录结构调整)\n```\n\n**2. 下载音频编码器 (二选一)**\n*推荐国内用户使用中文 Hubert 模型以获得更佳效果：*\n```bash\ngit clone https:\u002F\u002Fhuggingface.co\u002FTencentGameMate\u002Fchinese-hubert-base\n# Windows 用户需注意：克隆后的文件夹名若包含冒号，请重命名为 chinese-hubert-base\n```\n*或使用英文 wav2vec2 模型：*\n```bash\ngit clone https:\u002F\u002Fhuggingface.co\u002Ffacebook\u002Fwav2vec2-base-960h\n```\n\n**3. 下载 LivePortrait 基础模型**\n```bash\n# 需先安装 huggingface_hub: pip install -U \"huggingface_hub[cli]\"\nhuggingface-cli download KwaiVGI\u002FLivePortrait --local-dir pretrained_weights --exclude \"*.git*\" \"README.md\" \"docs\"\n```\n\n**最终目录结构参考：**\n确保 `pretrained_weights` 目录下包含 `JoyVASA`, `liveportrait`, `liveportrait_animals` 以及音频编码器文件夹。
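\n\n下载完成后，可以用下面这个非官方的小脚本做一次粗略自检（假设在项目根目录执行；目录名仅依据上文 README 的结构说明，如有出入请以实际为准）：\n\n```python\n# 非官方自检：确认 pretrained_weights 下的关键目录已就位\nfrom pathlib import Path\n\nfor name in ('JoyVASA', 'liveportrait', 'liveportrait_animals'):\n    folder = Path('pretrained_weights') \u002F name\n    print(f'OK: {name}' if folder.is_dir() else f'缺失: {name}，请检查对应的下载步骤')\n```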
\n\n## 3. 基本使用\n\n### 3.1 命令行推理\n支持人像 (`human`) 和动物 (`animal`) 两种模式。请确保参考图片与模式匹配。\n\n**人像动画示例：**\n```bash\npython inference.py -r assets\u002Fexamples\u002Fimgs\u002Fjoyvasa_003.png -a assets\u002Fexamples\u002Faudios\u002Fjoyvasa_003.wav --animation_mode human --cfg_scale 2.0\n```\n\n**动物动画示例：**\n```bash\npython inference.py -r assets\u002Fexamples\u002Fimgs\u002Fjoyvasa_001.png -a assets\u002Fexamples\u002Faudios\u002Fjoyvasa_001.wav --animation_mode animal --cfg_scale 2.0\n```\n> **提示**: 调整 `--cfg_scale` 参数可以改变生成表情的幅度和头部姿态的活跃度。\n\n### 3.2 Web 界面演示\n启动本地 Web UI 进行可视化操作：\n```bash\npython app.py\n```\n启动后在浏览器访问 `http:\u002F\u002F127.0.0.1:7862` 即可使用。","某在线教育团队正致力于将静态的绘本插图转化为生动的双语教学视频，以增强儿童的沉浸式学习体验。\n\n### 没有 JoyVASA 时\n- **角色受限严重**：传统音频驱动动画工具仅支持人类面部，无法直接让绘本中的狐狸、小熊等动物角色开口说话，需额外寻找替代方案或放弃动画化。\n- **长视频连贯性差**：生成超过几秒的视频时，画面容易出现闪烁、表情僵硬或口型不同步，导致教学内容断裂，后期修复耗时耗力。\n- **多语言适配困难**：模型多在单一语言数据集上训练，处理中英混合的教学音频时，唇形同步率大幅下降，影响发音示范的准确性。\n- **制作流程繁琐**：为了解决上述问题，团队往往需要结合多个工具分步处理（如先做人脸绑定再迁移），极大地拉长了内容生产周期。\n\n### 使用 JoyVASA 后\n- **跨物种无缝动画**：利用其解耦的面部表示框架，团队可直接上传动物插画，JoyVASA 能精准生成与人类无异的面部动态，让动物角色自然演绎课程。\n- **长片段稳定生成**：得益于扩散 Transformer 生成的独立运动序列，JoyVASA 能输出长时间且帧间连续流畅的视频，彻底消除了画面闪烁和动作卡顿。\n- **原生多语言支持**：基于中英文混合数据训练的特性，JoyVASA 在处理双语教学音频时，唇形同步依然精准，完美匹配复杂的发音口型。\n- **端到端高效产出**：只需一张参考图和一段音频，JoyVASA 即可一键生成高质量动画，将原本数小时的制作流程压缩至分钟级，大幅提升课件迭代速度。\n\nJoyVASA 通过解耦身份与动作的创新架构，打破了角色类型与视频长度的限制，让静态插图到多语言生动视频的转化变得简单而高效。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fjdh-algo_JoyVASA_7a14c86b.png","jdh-algo","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002Fjdh-algo_cecb645d.png",null,"https:\u002F\u002Fgithub.com\u002Fjdh-algo",[78,82,86],{"name":79,"color":80,"percentage":81},"Python","#3572A5",90.7,{"name":83,"color":84,"percentage":85},"Cuda","#3A4E3A",8.4,{"name":87,"color":88,"percentage":89},"C++","#f34b7d",0.8,861,85,"2026-04-09T23:20:28","MIT",4,"Linux, Windows","需要 NVIDIA GPU。Linux 测试环境：A100；Windows 测试环境：RTX 4060 Laptop (8GB VRAM)。需支持 CUDA 12.1。","未说明",{"notes":99,"python":100,"dependencies":101},"1. 必须安装 ffmpeg。2. 若需运行动物图像动画，需额外编译安装 MultiScaleDeformableAttention 组件。3. 首次运行前需下载多个模型权重（包括 JoyVASA、LivePortrait、音频编码器及 InsightFace 模型），建议使用 git-lfs 和 huggingface-cli 下载至 pretrained_weights 目录。4. Windows 下下载的中文语音模型文件夹需重命名为 'chinese-hubert-base'。5. 推理时需注意动画模式（human\u002Fanimal）与参考图片类型匹配。","3.10",[102,103,104,105,106,107,108,109,110],"torch","transformers","diffusers","insightface","onnxruntime","ffmpeg","gradio","wav2vec2","hubert",[112,15,61],"音频",[114,115,116,117,118,119],"audio-driven-talking-face","generative-ai","lip-sync","talking-head","image-animation","portrait-anination","2026-03-27T02:49:30.150509","2026-04-13T04:24:36.267005",[],[]]