[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-dorarad--gansformer":3,"tool-dorarad--gansformer":61},[4,18,26,36,44,53],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":17},4358,"openclaw","openclaw\u002Fopenclaw","OpenClaw 是一款专为个人打造的本地化 AI 助手，旨在让你在自己的设备上拥有完全可控的智能伙伴。它打破了传统 AI 助手局限于特定网页或应用的束缚，能够直接接入你日常使用的各类通讯渠道，包括微信、WhatsApp、Telegram、Discord、iMessage 等数十种平台。无论你在哪个聊天软件中发送消息，OpenClaw 都能即时响应，甚至支持在 macOS、iOS 和 Android 设备上进行语音交互，并提供实时的画布渲染功能供你操控。\n\n这款工具主要解决了用户对数据隐私、响应速度以及“始终在线”体验的需求。通过将 AI 部署在本地，用户无需依赖云端服务即可享受快速、私密的智能辅助，真正实现了“你的数据，你做主”。其独特的技术亮点在于强大的网关架构，将控制平面与核心助手分离，确保跨平台通信的流畅性与扩展性。\n\nOpenClaw 非常适合希望构建个性化工作流的技术爱好者、开发者，以及注重隐私保护且不愿被单一生态绑定的普通用户。只要具备基础的终端操作能力（支持 macOS、Linux 及 Windows WSL2），即可通过简单的命令行引导完成部署。如果你渴望拥有一个懂你",349277,3,"2026-04-06T06:32:30",[13,14,15,16],"Agent","开发框架","图像","数据工具","ready",{"id":19,"name":20,"github_repo":21,"description_zh":22,"stars":23,"difficulty_score":10,"last_commit_at":24,"category_tags":25,"status":17},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,"2026-04-05T11:01:52",[14,15,13],{"id":27,"name":28,"github_repo":29,"description_zh":30,"stars":31,"difficulty_score":32,"last_commit_at":33,"category_tags":34,"status":17},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",150720,2,"2026-04-11T11:33:10",[14,13,35],"语言模型",{"id":37,"name":38,"github_repo":39,"description_zh":40,"stars":41,"difficulty_score":32,"last_commit_at":42,"category_tags":43,"status":17},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",108322,"2026-04-10T11:39:34",[14,15,13],{"id":45,"name":46,"github_repo":47,"description_zh":48,"stars":49,"difficulty_score":32,"last_commit_at":50,"category_tags":51,"status":17},6121,"gemini-cli","google-gemini\u002Fgemini-cli","gemini-cli 是一款由谷歌推出的开源 AI 命令行工具，它将强大的 Gemini 大模型能力直接集成到用户的终端环境中。对于习惯在命令行工作的开发者而言，它提供了一条从输入提示词到获取模型响应的最短路径，无需切换窗口即可享受智能辅助。\n\n这款工具主要解决了开发过程中频繁上下文切换的痛点，让用户能在熟悉的终端界面内直接完成代码理解、生成、调试以及自动化运维任务。无论是查询大型代码库、根据草图生成应用，还是执行复杂的 Git 操作，gemini-cli 都能通过自然语言指令高效处理。\n\n它特别适合广大软件工程师、DevOps 
人员及技术研究人员使用。其核心亮点包括支持高达 100 万 token 的超长上下文窗口，具备出色的逻辑推理能力；内置 Google 搜索、文件操作及 Shell 命令执行等实用工具；更独特的是，它支持 MCP（模型上下文协议），允许用户灵活扩展自定义集成，连接如图像生成等外部能力。此外，个人谷歌账号即可享受免费的额度支持，且项目基于 Apache 2.0 协议完全开源，是提升终端工作效率的理想助手。",100752,"2026-04-10T01:20:03",[52,13,15,14],"插件",{"id":54,"name":55,"github_repo":56,"description_zh":57,"stars":58,"difficulty_score":10,"last_commit_at":59,"category_tags":60,"status":17},4487,"LLMs-from-scratch","rasbt\u002FLLMs-from-scratch","LLMs-from-scratch 是一个基于 PyTorch 的开源教育项目，旨在引导用户从零开始一步步构建一个类似 ChatGPT 的大型语言模型（LLM）。它不仅是同名技术著作的官方代码库，更提供了一套完整的实践方案，涵盖模型开发、预训练及微调的全过程。\n\n该项目主要解决了大模型领域“黑盒化”的学习痛点。许多开发者虽能调用现成模型，却难以深入理解其内部架构与训练机制。通过亲手编写每一行核心代码，用户能够透彻掌握 Transformer 架构、注意力机制等关键原理，从而真正理解大模型是如何“思考”的。此外，项目还包含了加载大型预训练权重进行微调的代码，帮助用户将理论知识延伸至实际应用。\n\nLLMs-from-scratch 特别适合希望深入底层原理的 AI 开发者、研究人员以及计算机专业的学生。对于不满足于仅使用 API，而是渴望探究模型构建细节的技术人员而言，这是极佳的学习资源。其独特的技术亮点在于“循序渐进”的教学设计：将复杂的系统工程拆解为清晰的步骤，配合详细的图表与示例，让构建一个虽小但功能完备的大模型变得触手可及。无论你是想夯实理论基础，还是为未来研发更大规模的模型做准备",90106,"2026-04-06T11:19:32",[35,15,13,14],{"id":62,"github_repo":63,"name":64,"description_en":65,"description_zh":66,"ai_summary_zh":66,"readme_en":67,"readme_zh":68,"quickstart_zh":69,"use_case_zh":70,"hero_image_url":71,"owner_login":72,"owner_name":73,"owner_avatar_url":74,"owner_bio":75,"owner_company":76,"owner_location":77,"owner_email":78,"owner_twitter":79,"owner_website":80,"owner_url":81,"languages":82,"stars":95,"forks":96,"last_commit_at":97,"license":98,"difficulty_score":10,"env_os":99,"env_gpu":100,"env_ram":99,"env_deps":101,"category_tags":109,"github_topics":110,"view_count":32,"oss_zip_url":75,"oss_zip_packed_at":75,"status":17,"created_at":118,"updated_at":119,"faqs":120,"releases":151},6652,"dorarad\u002Fgansformer","gansformer","Generative Adversarial Transformers","gansformer 是一款专为图像生成任务设计的开源深度学习模型，巧妙融合了生成对抗网络（GAN）与 Transformer 架构的优势。它主要解决了传统方法在生成高分辨率图像时，难以兼顾长距离依赖捕捉与计算效率的难题。通过独特的“二分结构”，gansformer 能在保持线性计算复杂度的同时，实现图像全局信息的有效交互，从而轻松扩展至 1024x1024 甚至更高分辨率的图像合成。\n\n该工具特别适合人工智能研究人员、深度学习开发者以及对高质量图像生成感兴趣的技术探索者使用。其核心亮点在于采用了乘法积分机制进行基于区域的灵活调制，这不仅是对经典 Transformer 的创新改进，也可视为对著名 StyleGAN 网络的泛化与升级。模型通过潜变量与视觉特征之间的迭代信息传播，促进了物体和场景组合化表示的自然涌现。此外，gansformer 训练效率极高，达到同等效果所需的训练步数仅为 StyleGAN2 的五分之一到七分之一。目前项目已提供完善的 PyTorch 和 TensorFlow 版本，包含预训练模型及可视化脚本，帮助用户快速上手并复现前沿研究成果。","![Python 3.7](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Fpython-3.7-b0071e.svg?style=plastic)\n![PyTorch 1.8](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Fpytorch-1.8-%239e008e.svg?style=plastic)\n![TensorFlow 1.14](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Ftensorflow-1.14-blueviolet.svg?style=plastic)\n![cuDNN 7.3.1](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Fcuda-10.0-2545e6.svg?style=plastic)\n![License CC BY-NC](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Flicense-MIT-108a00.svg?style=plastic)\n\n# GANformer: Generative Adversarial Transformers\n\u003Cp align=\"center\">\n  \u003Cb>\u003Ca href=\"https:\u002F\u002Fcs.stanford.edu\u002F~dorarad\u002F\">Drew A. Hudson\u003C\u002Fa> & \u003Ca href=\"http:\u002F\u002Flarryzitnick.org\u002F\">C. 
Lawrence Zitnick\u003C\u002Fa>\u003C\u002Fb>\n\u003C\u002Fp>\n\n## Check out our new [PyTorch](pytorch_version) version and the [GANformer2 paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.08960)!\n\n\u003Cdiv align=\"center\">\n  \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fdorarad_gansformer_readme_d9570fe6311e.png\" style=\"float:left\" width=\"340px\">\n  \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fdorarad_gansformer_readme_30d6cf03d227.png\" style=\"float:right\" width=\"440px\">\n\u003C\u002Fdiv>\n\u003Cp>\u003C\u002Fp>\n\n***Update (Feb 21, 2022):*** *We updated the weight initialization of the PyTorch version to the intended scale, leading to a substantial improvement in the model's learning speed!*\n\nThis is an implementation of the [GANformer](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2103.01209.pdf) model, a novel and efficient type of transformer, explored for the task of image generation. The network employs a _bipartite structure_ that enables long-range interactions across the image while maintaining linear computational efficiency, and can readily scale to high-resolution synthesis. \nThe model iteratively propagates information from a set of latent variables to the evolving visual features and vice versa, to support the refinement of each in light of the other and encourage the emergence of compositional representations of objects and scenes. \nIn contrast to the classic transformer architecture, it utilizes multiplicative integration that allows flexible region-based modulation, and can thus be seen as a generalization of the successful StyleGAN network.\n\n\u003Cimg align=\"right\" src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fdorarad_gansformer_readme_9473d71eb80c.png\" width=\"270px\">\n\n**1st Paper**: [https:\u002F\u002Farxiv.org\u002Fpdf\u002F2103.01209](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2103.01209)  \n**2nd Paper**: [https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.08960](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.08960)  \n**Contact**: dorarad@cs.stanford.edu  \n**Implementation**: [`network.py`](training\u002Fnetwork.py) ([TF](network.py) \u002F [Pytorch](pytorch_version\u002Ftraining\u002Fnetworks.py))\n\n### We now support both [`PyTorch`](pytorch_version) and TF!\n:white_check_mark: Uploading initial code and readme  \n:white_check_mark: Image sampling and visualization script  \n:white_check_mark: Code clean-up and refactoring, adding documentation  \n:white_check_mark: Training and data-preparation instructions  \n:white_check_mark: Pretrained networks for all datasets  \n:white_check_mark: Extra visualizations and evaluations  \n:white_check_mark: Providing models trained for longer  \n:white_check_mark: Releasing the PyTorch version  \n:white_check_mark: Releasing pre-trained models for high-resolutions (up to 1024 x 1024)  \n⬜️ Releasing the GANformer2 model (supporting layout generation and conditional layout2image generation)\n\nIf you experience any issues or have suggestions for improvements or extensions, feel free to contact me either through the issues page or at dorarad@stanford.edu. \n\n## Bibtex\n```bibtex\n@article{hudson2021ganformer,\n  title={Generative Adversarial Transformers},\n  author={Hudson, Drew A and Zitnick, C. 
Lawrence},\n  journal={Proceedings of the 38th International Conference on Machine Learning, {ICML} 2021},\n  year={2021}\n}\n\n@article{hudson2021ganformer2,\n  title={Compositional Transformers for Scene Generation},\n  author={Hudson, Drew A and Zitnick, C. Lawrence},\n  journal={Advances in Neural Information Processing Systems {NeurIPS} 2021},\n  year={2021}\n}\n```\n\n## Sample Images\nUsing the pre-trained models (generated after training for ***5-7x*** fewer steps than StyleGAN2 models! Training our models for longer will improve the image quality further):\n\u003Cdiv align=\"center\">\n  \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fdorarad_gansformer_readme_f6350f25ebca.png\" width=\"700px\">\n\u003C\u002Fdiv>\n\n## Requirements\n\u003Cimg align=\"right\" src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fdorarad_gansformer_readme_b339643b3e46.png\" width=\"190px\">\n\n- Python 3.6 and 3.7 are supported.\n- For the TF version: We recommend TensorFlow 1.14, which was used for development, but TensorFlow 1.15 is also supported.\n- For the Pytorch version: We support Pytorch >= 1.8.\n- The code was tested with the CUDA 10.0 toolkit and cuDNN 7.5.\n- We have performed experiments on a Titan V GPU. We assume 12GB of GPU memory (more memory can expedite training).\n- See [`requirements.txt`](requirements.txt) ([TF](requirements.py) \u002F [Pytorch](pytorch_version\u002Frequirements.py)) for the required python packages and run `pip install -r requirements.txt` to install them.\n\n## Quickstart & Overview\nOur repository supports both **Tensorflow** (at the main directory) and **Pytorch** (at [`pytorch_version`](pytorch_version)). The two implementations follow a similar code and file structure and share the same interface. To switch from TF to Pytorch, simply enter [`pytorch_version`](pytorch_version) and install the [requirements](pytorch_version\u002Frequirements.txt).\nPlease feel free to open an issue or [contact](dorarad@cs.stanford.edu) me for any questions or suggestions about the new implementation!\n\nA minimal example of using a pre-trained GANformer can be found at [`generate.py`](generate.py) ([TF](generate.py) \u002F [Pytorch](pytorch_version\u002Fgenerate.py)). When executed, the 10-line program downloads a pre-trained model and uses it to generate some images:\n```python\npython generate.py --gpus 0 --model gdrive:bedrooms-snapshot.pkl --output-dir images --images-num 32\n```\n**You can use `--truncation-psi` to control the generated images' quality\u002Fdiversity trade-off.  \nWe recommend trying out different values in the range of `0.6-1.0`.**
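\n\nOne quick way to see this trade-off in practice is to sweep over a few `--truncation-psi` values and compare the resulting folders. The following is a minimal illustrative sketch (not part of the repository); the snapshot name and image count are placeholders only:\n```python\nimport subprocess\n\n# Sweep a few documented --truncation-psi values; each batch goes to its own folder\n# so the quality\u002Fdiversity trade-off can be inspected side by side.\nfor psi in [0.6, 0.7, 0.8, 1.0]:\n    subprocess.run([\n        \"python\", \"generate.py\",\n        \"--gpus\", \"0\",\n        \"--model\", \"gdrive:bedrooms-snapshot.pkl\",\n        \"--output-dir\", f\"images_psi_{psi}\",\n        \"--images-num\", \"8\",\n        \"--truncation-psi\", str(psi),\n    ], check=True)\n```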
\n\n### Pretrained models and High resolutions\nWe provide pretrained models at resolution 256&times;256 for all datasets, as well as 1024&times;1024 for FFHQ and 1024&times;2048 for Cityscapes.\n\nTo generate images with the high-resolution models, run the following commands:\n(We reduce their batch-size to 1 so that they can load onto a single GPU)\n\n```python\npython generate.py --gpus 0 --model gdrive:ffhq-snapshot-1024.pkl --output-dir ffhq_images --images-num 32 --batch-size 1\npython generate.py --gpus 0 --model gdrive:cityscapes-snapshot-2048.pkl --output-dir cityscapes_images --images-num 32 --batch-size 1 --ratio 0.5 # 1024 x 2048 cityscapes currently supported in the TF version only\n```\n\nWe can train and evaluate new or pretrained models, both quantitatively and qualitatively, with [`run_network.py`](run_network.py) ([TF](run_network.py) \u002F [Pytorch](pytorch_version\u002Frun_network.py)).  \nThe model architecture can be found at [`network.py`](training\u002Fnetwork.py) ([TF](training\u002Fnetwork.py) \u002F [Pytorch](pytorch_version\u002Ftraining\u002Fnetwork.py)). The training procedure is implemented at [`training_loop.py`](training\u002Ftraining_loop.py) ([TF](training\u002Ftraining_loop.py) \u002F [Pytorch](pytorch_version\u002Ftraining\u002Ftraining_loop.py)).\n\n## Data preparation\nWe explored the GANformer model on 4 datasets for images and scenes: [CLEVR](https:\u002F\u002Fcs.stanford.edu\u002Fpeople\u002Fjcjohns\u002Fclevr\u002F), [LSUN-Bedrooms](https:\u002F\u002Fwww.yf.io\u002Fp\u002Flsun), [Cityscapes](https:\u002F\u002Fwww.cityscapes-dataset.com\u002F) and [FFHQ](https:\u002F\u002Fgithub.com\u002FNVlabs\u002Fffhq-dataset). The model can be trained on other datasets as well.\nWe trained the model at `256x256` resolution. Higher resolutions are supported too. The model will automatically adapt to the resolution of the images in the dataset.\n\nThe [`prepare_data.py`](prepare_data.py) ([TF](prepare_data.py) \u002F [Pytorch](pytorch_version\u002Fprepare_data.py)) script can either prepare the datasets from our catalog or create new datasets.\n\n### Default Datasets \nTo prepare the datasets from the catalog, run the following command:\n```python\npython prepare_data.py --ffhq --cityscapes --clevr --bedrooms --max-images 100000\n```\n\nSee the table below for details about the datasets in the catalog.\n\n**Useful options**:  \n* `--data-dir` the output data directory (default: `datasets`)  \n* `--shards-num` to select the number of shards for the data (default: adapted to each dataset)  \n* `--max-images` to store only a subset of the dataset, in order to reduce the size of the stored `tfrecord`\u002Fimage files (default: _max_).  \nThis can be particularly useful to save space in the case of large datasets, such as LSUN-Bedrooms (originally contains 3M images).\n\n### Custom Datasets\nYou can also use the script to create new custom datasets. 
For instance:\n```python\npython prepare_data.py --task \u003Cdataset-name> --images-dir \u003Csource-dir> --format png --ratio 0.7 --shards-num 5\n```\nThe script supports several formats: `png`, `jpg`, `npy`, `hdf5`, `tfds` and `lmdb`.\n\n### Dataset Catalog\n| Dataset           | # Images  | Resolution    | Download Size | TFrecords Size   | Gamma | \n| :---------------: | :-------: | :-----------: | :-----------: | :--------------: | :---: |\n| **FFHQ**          | 70,000    | 256&times;256 | 13GB          | 13GB             | 10    |\n| **CLEVR**         | 100,015   | 256&times;256 | 18GB          | 15.5GB           | 40    |\n| **Cityscapes**    | 24,998    | 256&times;256 | 1.8GB         | 8GB              | 20    |\n| **LSUN-Bedrooms** | 3,033,042 | 256&times;256 | 42.8GB        | Up to 480GB      | 100   |\n\nUse `--max-images` to reduce the size of the `tfrecord` files.\n\n## Training\nModels are trained by using the `--train` option. To fine-tune a pretrained GANformer model:\n```python\npython run_network.py --train --gpus 0 --ganformer-default --expname clevr-pretrained --dataset clevr \\\n  --pretrained-pkl gdrive:clevr-snapshot.pkl\n```\nWe provide pretrained models for `bedrooms`, `cityscapes`, `clevr` and `ffhq`.\n\nTo train a GANformer in its default configuration from scratch:\n```python\npython run_network.py --train --gpus 0 --ganformer-default --expname clevr-scratch --dataset clevr --eval-images-num 10000\n```\n\nBy default, model training is resumed from the latest snapshot. Use `--restart` to start a new experiment, or `--pretrained-pkl` to select a particular snapshot to load.\n\nFor comparison to the state of the art, we compute metric scores using 50,000 sample images. To expedite training though, we recommend setting `--eval-images-num` to a lower number. Note that this can impact the precision of the metrics, so we recommend using a lower value during training and increasing it back up for the final evaluation.\n\nWe support a large variety of command-line options to adjust the model, training, and evaluation. Run `python run_network.py -h` for the full list of options!\n\nWe recommend exploring different values for `--gamma` when training on new datasets. If you train at resolution >= 512 and observe OOM issues, consider reducing `--batch-gpu` to a lower value.\n\n### Logging\n* During training, sample images and attention maps will be generated and stored at `results\u002F\u003Cexpname>-\u003Crun-id>` (`--keep-samples`).\n* Metrics will also be regularly computed and reported in a `metric-\u003Cname>.txt` file. `--metrics` can be set to `fid` for FID, `is` for Inception Score and `pr` for Precision\u002FRecall.\n* Tensorboard logs are also created (`--summarize`) that track the metrics, loss values for the generator and discriminator, and other useful statistics over the course of training.\n\n### Baseline models\nThe codebase supports multiple baselines in addition to the GANformer. 
For instance, to run a vanilla GAN model:\n```python\npython run_network.py --train --gpus 0 --baseline GAN --expname clevr-gan --dataset clevr \n```\n* **[Vanilla GAN](https:\u002F\u002Farxiv.org\u002Fabs\u002F1406.2661)**: `--baseline GAN`, a standard GAN without style modulation.\n* **[StyleGAN2](https:\u002F\u002Farxiv.org\u002Fabs\u002F1912.04958)**: `--baseline StyleGAN2`, with one global latent that modulates the image features.\n* **[k-GAN](https:\u002F\u002Farxiv.org\u002Fabs\u002F1810.10340)**: `--baseline kGAN`, which generates multiple image layers independently and then merges them into one shared image (supported only in the TF version).\n* **[SAGAN]()**: `--baseline SAGAN`, which performs self-attention between all image features in a low-resolution layer (e.g. `32x32`) (supported only in the TF version).\n\n## Evaluation\nTo evaluate a model, use the `--eval` option:\n```python\npython run_network.py --eval --gpus 0 --expname clevr-exp --dataset clevr\n```\nAdd `--pretrained-pkl gdrive:\u003Cdataset>-snapshot.pkl` to evaluate a pretrained model.\n\nBelow we provide the FID-50k scores for the GANformer (_using the pretrained checkpoints above_) as well as baseline models.  \nNote that these scores are different from the scores reported in the StyleGAN2 paper, since they run experiments for up to 7x more training steps (5k-15k kimg-steps in our experiments over all models, which takes about 3-4 days with 4 GPUs, vs 50-70k kimg-steps in their experiments, which take over 90 GPU-days).\n\n**Note regarding Generator\u002FDiscriminator**: Following ablation experiments, we observed that incorporating the simplex and duplex attention into the generator (rather than into both the generator and discriminator) improves the models' performance. Accordingly, we are releasing pretrained models that incorporate attention in the generator only, and we have updated the paper to reflect that!\n\n| Model          | CLEVR        | LSUN-Bedroom | FFHQ       | Cityscapes |\n| :------------: | :----------: | :----------: | :--------: | :--------: |\n| **GAN**        | 25.02        | 12.16        | 13.18      | 11.57      |\n| **kGAN**       | 28.28        | 69.9         | 61.14      | 51.08      |\n| **SAGAN**      | 26.04        | 14.06        | 16.21      | 12.81      |\n| **StyleGAN2**  | 16.05        | 11.53        | 16.21      | 8.35       |\n| **GANformer** | ***9.24***   | ***6.15***   | ***7.42*** | ***5.23*** |\n\n\u003Cdiv>\n  \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fdorarad_gansformer_readme_646485e6c45f.png\" width=\"350px\">\n  \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fdorarad_gansformer_readme_42d2998b27d0.png\" width=\"350px\">\n\u003C\u002Fdiv>\n\n### Model Change-log\nCompared to the original GANformer depicted in the paper, this repository makes several additional improvements that contributed to the performance:\n* Use `--mapping_ltnt2ltnt` so that the latents communicate with each other directly through self-attention inside the mapping network before starting to generate the image.\n* Add an additional global latent (`--style`) to the `k` latent components, such that first the global latent modulates all the image features uniformly, and then the `k` latents modulate different regions based on the bipartite transformer's attention.  \nThe global latent is useful for coordinating holistic aspects of the image such as global lighting conditions, global style properties for e.g. 
faces, etc.\n* After making these changes, we observed no additional benefit from adding the transformer to the discriminator, and therefore for simplicity we disabled that.\n\n## Visualization\nThe code supports producing qualitative results and visualizations. For instance, to create attention maps for each layer:\n```python\npython run_network.py --gpus 0 --vis --expname clevr-exp --dataset clevr --vis-layer-maps\n```\n\nBelow you can see sample images and attention maps produced by the GANformer:\n\n\u003Cdiv align=\"center\">\n  \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fdorarad_gansformer_readme_3bbc9d8bbec7.png\" style=\"float:left\" width=\"831px\">\n\u003C\u002Fdiv>\n\n## Command-line Options\nIn the following we list some of the most useful model options. \n\n### Training\n* `--gamma`: We recommend exploring different values for the chosen dataset (default: `10`)\n* `--truncation-psi`: Controls the image quality\u002Fdiversity trade-off. (default: `0.7`)\n* `--eval-images-num`: Number of images to compute metrics over. We recommend selecting a lower number to expedite training (default: `50,000`)\n* `--restart`: To restart training from scratch instead of resuming from the latest snapshot\n* `--pretrained-pkl`: To load a pretrained model, either a local one or from drive `gdrive:\u003Cdataset>-snapshot.pkl` for the datasets in the catalog.\n* `--data-dir` and `--result-dir`: Directory names for the datasets (`tfrecords`) and logging\u002Fresults.\n\n### Model (most useful)\n* `--transformer`: To add transformer layers to the generator (GANformer)\n* `--components-num`: Number of latent components, which will attend to the image. We recommend values in the range of `8-16` (default: `1`)\n* `--latent-size`: Overall latent size (default: `512`). The size of each latent component will then be `latent_size\u002Fcomponents_num`\n* `--num-heads`: Number of attention heads (default: `1`)\n* `--integration`: Integration of information in the transformer layer, e.g. `add` or `mul` (default: `mul`)
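\n\nTo make the relation between `--components-num` and `--latent-size` concrete, here is a small illustrative PyTorch sketch (not taken from the codebase) of how an overall latent of size 512 splits into `k = 16` components:\n```python\nimport torch\n\n# Illustrative only: the overall latent of size --latent-size is divided across\n# --components-num components; each component later modulates a different image region.\nbatch_size, components_num, latent_size = 4, 16, 512\ncomponent_dim = latent_size \u002F\u002F components_num        # 512 \u002F 16 = 32 dimensions per component\nz = torch.randn(batch_size, components_num, component_dim)\nz_components = z.unbind(dim=1)                        # z_1, ..., z_k\nprint(z.shape, len(z_components), z_components[0].shape)\n```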
\n\n### Model (others)\n* `--g-start-res` and `--g-end-res`: Start and end resolution for the transformer layers (default: all layers up to resolution 2\u003Csup>8\u003C\u002Fsup>) \n* `--kmeans`: Track and update image-to-latents assignment centroids, used in the duplex attention\n* `--mapping-ltnt2ltnt`: Perform self-attention over latents in the mapping network\n* `--use-pos`: Use trainable positional encodings for the latents.\n* `--style False`: To turn off one-vector global style modulation (StyleGAN2).\n\n### Visualization\n* **Sample images**\n  * `--vis-images`: Generate image samples \n  * `--vis-latents`: Save source latent vectors\n* **Attention maps**\n  * `--vis-maps`: Visualize attention maps of the last layer and first head\n  * `--vis-layer-maps`: Visualize attention maps of all layers and heads\n  * `--blending-alpha`: Alpha weight when visualizing a blending of images and attention maps\n* **Image interpolations**\n  * `--vis-interpolations`: Generate interpolations between pairs of source latents\n  * `--interpolation-density`: Number of samples in between two end points of an interpolation (default: `8`)\n* **Others**\n  * `--vis-noise-var`: Create noise variation visualization\n  * `--vis-style-mix`: Create style mixing visualization\n\nRun `python run_network.py -h` for the full options list.\n\n## Sample images (more examples)\n\u003Cdiv align=\"center\">\n  \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fdorarad_gansformer_readme_9082fe6c32f1.png\" style=\"float:left\" width=\"750px\">\n  \u003Cbr>\n  \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fdorarad_gansformer_readme_c82416410f01.png\" style=\"float:left\" width=\"750px\">\n  \u003Cbr>\n  \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fdorarad_gansformer_readme_3a77abf159c4.png\" style=\"float:left\" width=\"750px\">\n  \u003Cbr>\n  \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fdorarad_gansformer_readme_3224bbad4960.png\" style=\"float:left\" width=\"750px\">\n\u003C\u002Fdiv>\n\n## CUDA \u002F Installation\nThe model relies on custom TensorFlow\u002FPytorch ops that are compiled on the fly using [NVCC](https:\u002F\u002Fdocs.nvidia.com\u002Fcuda\u002Fcuda-compiler-driver-nvcc\u002Findex.html). \n\nTo set up the environment e.g. for cuda-10.0:\n```python\nexport PATH=\u002Fusr\u002Flocal\u002Fcuda-10.0\u002Fbin${PATH:+:${PATH}}\nexport LD_LIBRARY_PATH=\u002Fusr\u002Flocal\u002Fcuda-10.0\u002Flib64${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}\n```\n\nTo test that your NVCC installation is working correctly, run:\n```python\nnvcc test_nvcc.cu -o test_nvcc -run\n| CPU says hello.\n| GPU says hello.\n```\n\nIn the PyTorch version, if you get the following repeating message:  \n\"Failed to build CUDA kernels for upfirdn2d. Falling back to slow reference implementation\"  \nmake sure your cuda and pytorch versions match. If you have multiple CUDA versions installed, consider setting ``CUDA_HOME`` to the matching one. E.g. \n```python\nexport CUDA_HOME=\u002Fusr\u002Flocal\u002Fcuda-10.1\n```
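\n\nA quick way to check that the installed PyTorch build and CUDA toolkit actually line up (standard PyTorch calls only, nothing specific to this repository):\n```python\nimport torch\n\n# Print the CUDA version PyTorch was built against and whether a GPU is visible;\n# a mismatch with the installed toolkit is the usual cause of the upfirdn2d fallback message.\nprint(\"torch:\", torch.__version__)\nprint(\"built for CUDA:\", torch.version.cuda)\nprint(\"CUDA available:\", torch.cuda.is_available())\nif torch.cuda.is_available():\n    print(\"device:\", torch.cuda.get_device_name(0))\n```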
\n\n## Architecture Overview\nThe GANformer consists of two networks:\n\n**Generator**: produces the images (`x`) given randomly sampled latents (`z`). The latent `z` has a shape `[batch_size, component_num, latent_dim]`, where `component_num = 1` by default (Vanilla GAN, StyleGAN) but is > 1 for the GANformer model. We can define the latent components by splitting `z` along the second dimension to obtain `z_1,...,z_k` latent components. The generator likewise consists of two parts:\n* **Mapping network**: converts sampled latents from a normal distribution (`z`) to the intermediate space (`w`). It is a series of feed-forward layers. The k latent components are either mapped independently from the `z` space to the `w` space or interact with each other through self-attention (optional flag).\n* **Synthesis network**: the intermediate latents `w` are used to guide the generation of new images. Image features begin from a small constant\u002Fsampled grid of `4x4`, and then go through multiple layers of convolution and up-sampling until reaching the desired resolution (e.g. `256x256`). After each convolution, the image features are modulated (meaning that their variance and bias are controlled) by the intermediate latent vectors `w`. While in the StyleGAN model there is one global `w` vector that controls all the features equally, the GANformer uses attention so that the k latent components specialize to control different regions in the image and create it cooperatively, and therefore performs better especially in generating images depicting multi-object scenes.\n* **Attention** can be used in several ways:\n  * **Simplex Attention**: when attention is applied in one direction only, from the latents to the image features (**top-down**).\n  * **Duplex Attention**: when attention is applied in the two directions: latents to image features (**top-down**) and then image features back to latents (**bottom-up**), so that each representation informs the other iteratively.\n  * **Self Attention between latents**: can also be used, to enable direct interactions between the latents.\n  * **Self Attention between image features** (SAGAN model): prior approaches used attention directly between the image features, but this method does not scale well due to the quadratic number of features, which becomes very high at high resolutions.\n     \n**Discriminator**: Receives an image and has to predict whether it is real or fake – originating from the dataset or the generator. The model performs multiple layers of convolution and downsampling on the image, reducing the representation's resolution gradually until making a final prediction. Optionally, attention can be incorporated into the discriminator as well, where it has multiple (k) aggregator variables that use attention to adaptively collect information from the image while it is being processed. We observe small improvements in model performance when attention is used in the discriminator, although note that, based on our observations, most of the gain from using attention arises in the generator.\n\n## Codebase\nThis codebase builds on top of and extends the great [StyleGAN2](https:\u002F\u002Fgithub.com\u002FNVlabs\u002Fstylegan2) and [StyleGAN2-ADA](https:\u002F\u002Fgithub.com\u002FNVlabs\u002Fstylegan2-ada-pytorch\u002F) repositories by Karras et al.  \n\nThe GANformer model can also be seen as a generalization of StyleGAN: while StyleGAN has one global latent vector that controls the style of all image features globally, the GANformer has *k* latent vectors that cooperate through attention to control regions within the image, thereby better modeling images of multi-object and compositional scenes.\n\n## Acknowledgement\n_I wish to thank Christopher D. 
Manning for the fruitful discussions and constructive feedback in developing the Bipartite Transformer, especially when explored within the language representation area, as well as for providing the kind financial support that allowed this work to happen!_ :sunflower:\n\nIf you have questions, comments or feedback, please feel free to contact me at dorarad@cs.stanford.edu, Thank you! :)\n","![Python 3.7](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Fpython-3.7-b0071e.svg?style=plastic)\n![PyTorch 1.8](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Fpytorch-1.8-%239e008e.svg?style=plastic)\n![TensorFlow 1.14](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Ftensorflow-1.14-blueviolet.svg?style=plastic)\n![cuDNN 7.3.1](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Fcuda-10.0-2545e6.svg?style=plastic)\n![许可证 CC BY-NC](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002Flicense-MIT-108a00.svg?style=plastic)\n\n# GANformer：生成对抗变换器\n\u003Cp align=\"center\">\n  \u003Cb>\u003Ca href=\"https:\u002F\u002Fcs.stanford.edu\u002F~dorarad\u002F\">德鲁·A·哈德森\u003C\u002Fa> & \u003Ca href=\"http:\u002F\u002Flarryzitnick.org\u002F\">C·劳伦斯·齐特尼克\u003C\u002Fa>\u003C\u002Fb>\n\u003C\u002Fp>\n\n## 欢迎查看我们的全新[PyTorch](pytorch_version)版本以及[GANformer2论文](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.08960)！\n\n\u003Cdiv align=\"center\">\n  \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fdorarad_gansformer_readme_d9570fe6311e.png\" style=\"float:left\" width=\"340px\">\n  \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fdorarad_gansformer_readme_30d6cf03d227.png\" style=\"float:right\" width=\"440px\">\n\u003C\u002Fdiv>\n\u003Cp>\u003C\u002Fp>\n\n***更新（2022年2月21日）：*** *我们已将PyTorch版本的权重初始化调整为预期规模，从而显著提升了模型的学习速度！*\n\n这是[GANformer](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2103.01209.pdf)模型的实现，该模型是一种新颖且高效的变换器架构，专为图像生成任务而设计。网络采用一种_二分结构_，能够在保持线性计算效率的同时，实现跨图像的长距离交互，并轻松扩展到高分辨率合成。\n\n该模型通过迭代地在潜在变量集合与不断演化的视觉特征之间传递信息，使两者相互补充、相互促进，从而鼓励对象和场景的组合式表征的出现。\n与经典的变换器架构不同，它使用乘法融合机制，允许灵活的区域调制，因此可以被视为对成功StyleGAN网络的一种推广。\n\n\u003Cimg align=\"right\" src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fdorarad_gansformer_readme_9473d71eb80c.png\" width=\"270px\">\n\n**第一篇论文**：[https:\u002F\u002Farxiv.org\u002Fpdf\u002F2103.01209](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2103.01209)  \n**第二篇论文**：[https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.08960](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.08960)  \n**联系方式**：dorarad@cs.stanford.edu  \n**实现代码**：[`network.py`](training\u002Fnetwork.py) ([TF](network.py) \u002F [Pytorch](pytorch_version\u002Ftraining\u002Fnetworks.py))\n\n### 我们现在同时支持[`PyTorch`](pytorch_version)和TF！\n:white_check_mark: 上传初始代码和README文件  \n:white_check_mark: 图像采样与可视化脚本  \n:white_check_mark: 代码清理与重构，添加文档  \n:white_check_mark: 训练与数据预处理说明  \n:white_check_mark: 所有数据集的预训练网络  \n:white_check_mark: 额外的可视化与评估  \n:white_check_mark: 提供训练时间更长的模型  \n:white_check_mark: 发布PyTorch版本  \n:white_check_mark: 发布高分辨率（最高1024×1024）的预训练模型  \n⬜️ 发布GANformer2模型（支持布局生成及条件布局到图像生成）\n\n如果您遇到任何问题，或有任何改进建议及功能扩展需求，请随时通过Issues页面或发送邮件至dorarad@stanford.edu与我联系。\n\n## Bibtex\n```bibtex\n@article{hudson2021ganformer,\n  title={Generative Adversarial Transformers},\n  author={Hudson, Drew A and Zitnick, C. Lawrence},\n  journal={Proceedings of the 38th International Conference on Machine Learning, {ICML} 2021},\n  year={2021}\n}\n\n@article{hudson2021ganformer2,\n  title={Compositional Transformers for Scene Generation},\n  author={Hudson, Drew A and Zitnick, C. 
Lawrence},\n  journal={Advances in Neural Information Processing Systems {NeurIPS} 2021},\n  year={2021}\n}\n```\n\n## 样例图片\n使用预训练模型（这些模型仅需比StyleGAN2少***5-7倍***的训练步数即可生成！进一步延长训练时间将进一步提升图像质量）：\n\u003Cdiv align=\"center\">\n  \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fdorarad_gansformer_readme_f6350f25ebca.png\" width=\"700px\">\n\u003C\u002Fdiv>\n\n## 环境要求\n\u003Cimg align=\"right\" src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fdorarad_gansformer_readme_b339643b3e46.png\" width=\"190px\">\n\n- 支持Python 3.6或3.7。\n- 对于TF版本：我们推荐使用开发时所用的TensorFlow 1.14，但TensorFlow 1.15也同样适用。\n- 对于PyTorch版本：我们支持PyTorch >= 1.8。\n- 代码已在CUDA 10.0工具包和cuDNN 7.5环境下测试通过。\n- 我们曾在Titan V GPU上进行过实验。假设拥有12GB显存（更多显存可加速训练）。\n- 请参阅[`requirements.txt`](requirements.txt)（[TF](requirements.py) \u002F [PyTorch](pytorch_version\u002Frequirements.py)），其中列出了所需的Python包，运行`pip install -r requirements.txt`即可安装。\n\n## 快速入门与概览\n我们的仓库同时支持**TensorFlow**（主目录下）和**PyTorch**（位于[`pytorch_version`](pytorch_version)）。两种实现的代码和文件结构非常接近，接口也完全一致。如需从TF切换至PyTorch，只需进入[`pytorch_version`](pytorch_version)，并安装其中的[依赖项](pytorch_version\u002Frequirements.txt)。\n如有任何关于新实现的问题或建议，欢迎随时提交Issue或直接联系[dorarad@cs.stanford.edu]！\n\n一个使用预训练GANformer的最小示例可在[`generate.py`](generate.py)中找到（[TF](generate.py) \u002F [PyTorch](pytorch_version\u002Fgenerate.py)）。执行该程序时，这段仅有10行的代码会下载一个预训练模型，并用其生成一些图像：\n```python\npython generate.py --gpus 0 --model gdrive:bedrooms-snapshot.pkl --output-dir images --images-num 32\n```\n**您可以通过`--truncation-psi`来控制生成图像的质量与多样性之间的权衡。  \n我们建议尝试`0.6-1.0`范围内的不同值。**\n\n### 预训练模型与高分辨率\n我们为所有数据集提供了256×256分辨率的预训练模型，同时为FFHQ提供了1024×1024分辨率的模型，为Cityscapes提供了1024×2048分辨率的模型。\n\n要生成高分辨率模型的图像，请运行以下命令：\n（我们将批量大小降低到1，以便它们可以加载到单个GPU上）\n\n```python\npython generate.py --gpus 0 --model gdrive:ffhq-snapshot-1024.pkl --output-dir ffhq_images --images-num 32 --batch-size 1\npython generate.py --gpus 0 --model gdrive:cityscapes-snapshot-2048.pkl --output-dir cityscapes_images --images-num 32 --batch-size 1 --ratio 0.5 # 目前仅TF版本支持1024 x 2048的Cityscapes\n```\n\n我们可以使用[`run_network.py`](run_network.py)（[TF](run_network.py) \u002F [Pytorch](pytorch_version\u002Frun_network.py)）对新模型或预训练模型进行定量和定性的训练与评估。  \n模型架构可以在[`network.py`](training\u002Fnetwork.py)（[TF](training\u002Fnetwork.py) \u002F [Pytorch](pytorch_version\u002Ftraining\u002Fnetwork.py)）中找到。训练流程则在[`training_loop.py`](training\u002Ftraining_loop.py)（[TF](training\u002Ftraining_loop.py) \u002F [Pytorch](pytorch_version\u002Ftraining\u002Ftraining_loop.py)）中实现。\n\n## 数据准备\n我们在四个图像和场景数据集上探索了GANformer模型：[CLEVR](https:\u002F\u002Fcs.stanford.edu\u002Fpeople\u002Fjcjohns\u002Fclevr\u002F)、[LSUN-Bedrooms](https:\u002F\u002Fwww.yf.io\u002Fp\u002Flsun)、[Cityscapes](https:\u002F\u002Fwww.cityscapes-dataset.com\u002F)和[FFHQ](https:\u002F\u002Fgithub.com\u002FNVlabs\u002Fffhq-dataset)。该模型也可以在其他数据集上进行训练。\n我们以256×256分辨率训练了模型。更高分辨率同样受支持。模型会自动适应数据集中图像的分辨率。\n\n[`prepare_data.py`](prepare_data.py)（[TF](prepare_data.py) \u002F [Pytorch](pytorch_version\u002Fprepare_data.py)）既可以从我们的目录中准备数据集，也可以创建新的数据集。\n\n### 默认数据集\n要从目录中准备数据集，请运行以下命令：\n```python\npython prepare_data.py --ffhq --cityscapes --clevr --bedrooms --max-images 100000\n```\n\n下表详细列出了目录中的数据集信息。\n\n**常用选项**：  \n* `--data-dir` 输出数据目录（默认：`datasets`）  \n* `--shards-num` 选择数据分片的数量（默认：根据每个数据集调整）  \n* `--max-images` 只存储数据集的一部分，以减少存储的`tfrecord`\u002F图像文件大小（默认：_最大_）。  \n这对于节省大型数据集的空间特别有用，例如LSUN-bedrooms（原始包含300万张图像）。\n\n### 自定义数据集\n您也可以使用该脚本创建新的自定义数据集。例如：\n```python\npython prepare_data.py --task \u003Cdataset-name> --images-dir 
\u003Csource-dir> --format png --ratio 0.7 --shards-num 5\n```\n该脚本支持多种格式：`png`、`jpg`、`npy`、`hdf5`、`tfds`和`lmdb`。\n\n### 数据集目录\n| 数据集           | 图像数量  | 分辨率    | 下载大小 | TFrecords大小   | Gamma | \n| :---------------: | :-------: | :-----------: | :-----------: | :--------------: | :---: |\n| **FFHQ**          | 70,000    | 256×256 | 13GB          | 13GB             | 10    |\n| **CLEVR**         | 100,015   | 256×256 | 18GB          | 15.5GB           | 40    |\n| **Cityscapes**    | 24,998    | 256×256 | 1.8GB         | 8GB              | 20    |\n| **LSUN-Bedrooms** | 3,033,042 | 256×256 | 42.8GB        | 最高可达480GB      | 100   |\n\n使用`--max-images`可以减小`tfrecord`文件的大小。\n\n## 训练\n模型通过使用`--train`选项进行训练。要微调一个预训练的GANformer模型：\n```python\npython run_network.py --train --gpus 0 --ganformer-default --expname clevr-pretrained --dataset clevr \\\n  --pretrained-pkl gdrive:clevr-snapshot.pkl\n```\n我们为`bedrooms`、`cityscapes`、`clevr`和`ffhq`提供了预训练模型。\n\n要从头开始以默认配置训练GANformer模型：\n```python\npython run_network.py --train --gpus 0 --ganformer-default --expname clevr-scratch --dataset clevr --eval-images-num 10000\n```\n\n默认情况下，模型训练会从最新的快照恢复。使用`--restart`可开始一个新的实验，或使用`--pretrained-pkl`选择特定的快照进行加载。\n\n为了与当前最先进的方法进行比较，我们使用50,000个样本图像计算指标分数。然而，为了加快训练速度，我们建议将`--eval-images-num`设置为较低的数值。请注意，这可能会影响指标的精确性，因此我们建议在训练过程中使用较低的值，并在最终评估时将其提高。\n\n我们支持多种命令行选项来调整模型、训练和评估。运行`python run_network.py -h`即可查看完整的选项列表！\n\n我们建议在训练新数据集时尝试不同的`--gamma`值。如果您以≥512分辨率进行训练并遇到OOM问题，请考虑将`--batch-gpu`降低到较低的值。\n\n### 日志记录\n* 在训练过程中，样本图像和注意力图会被生成并存储在`results\u002F\u003Cexpname>-\u003Crun-id>`目录下（`--keep-samples`）。\n* 指标也会定期计算并记录在`metric-\u003Cname>.txt`文件中。`--metrics`可设置为`fid`用于FID、`is`用于Inception Score以及`pr`用于Precision\u002FRecall。\n* 还会创建TensorBoard日志（`--summarize`），跟踪指标、生成器和判别器的损失值以及其他有用的统计信息。\n\n### 基线模型\n除了GANformer之外，代码库还支持多种基线模型。例如，要运行一个普通的GAN模型：\n```python\npython run_network.py --train --gpus 0 --baseline GAN --expname clevr-gan --dataset clevr \n```\n* **[Vanilla GAN](https:\u002F\u002Farxiv.org\u002Fabs\u002F1406.2661)**：`--baseline GAN`，一种没有风格调制的标准GAN。\n* **[StyleGAN2](https:\u002F\u002Farxiv.org\u002Fabs\u002F1912.04958)**：`--baseline StyleGAN2`，使用一个全局潜在变量来调制图像特征。\n* **[k-GAN](https:\u002F\u002Farxiv.org\u002Fabs\u002F1810.10340)**：`--baseline kGAN`，独立生成多个图像层，然后将它们合并为一张共享图像（仅TF版本支持）。\n* **[SAGAN]()**：`--baseline SAGAN`，在低分辨率层（如32x32）中对所有图像特征进行自注意力操作（仅TF版本支持）。\n\n## 评估\n要评估模型，请使用 `--eval` 选项：\n```python\npython run_network.py --eval --gpus 0 --expname clevr-exp --dataset clevr\n```\n添加 `--pretrained-pkl gdrive:\u003Cdataset>-snapshot.pkl` 可以评估预训练模型。\n\n下面我们提供了 GANformer（使用上述预训练检查点）以及基线模型的 FID-50k 分数。  \n请注意，这些分数与 StyleGAN2 论文中报告的分数不同，因为我们的实验进行了多达 7 倍的训练步数（所有模型的实验中为 5k–15k kimg 步，使用 4 张 GPU 卡大约需要 3–4 天），而他们的实验则进行了 50k–70k kimg 步，耗时超过 90 个 GPU 天。\n\n**关于生成器\u002F判别器的说明**：经过消融实验，我们发现将单纯形和双工注意力机制仅应用于生成器（而非同时应用于生成器和判别器）能够提升模型性能。因此，我们发布了仅在生成器中加入注意力机制的预训练模型，并已更新论文以反映这一改进！\n\n| 模型          | CLEVR        | LSUN-Bedroom | FFHQ       | Cityscapes |\n| :------------: | :----------: | :----------: | :--------: | :--------: |\n| **GAN**        | 25.02        | 12.16        | 13.18      | 11.57      |\n| **kGAN**       | 28.28        | 69.9         | 61.14      | 51.08      |\n| **SAGAN**      | 26.04        | 14.06        | 16.21      | 12.81      |\n| **StyleGAN2**  | 16.05        | 11.53        | 16.21      | 8.35       |\n| **GANformer** | ***9.24***   | ***6.15***   | ***7.42*** | ***5.23*** |\n\n\u003Cdiv>\n  \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fdorarad_gansformer_readme_646485e6c45f.png\" 
width=\"350px\">\n  \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fdorarad_gansformer_readme_42d2998b27d0.png\" width=\"350px\">\n\u003C\u002Fdiv>\n\n### 模型变更日志\n与论文中描述的原始 GANformer 相比，本仓库进行了多项额外改进，从而提升了性能：\n* 使用 `--mapping_ltnt2ltnt` 选项，使潜在变量在映射网络内部通过自注意力机制直接相互交流，然后再开始生成图像。\n* 在 `k` 个潜在组件之外增加一个全局潜在变量 (`--style`)。首先由全局潜在变量统一调节图像的所有特征，然后由 `k` 个潜在变量根据二分图变换器的注意力机制分别调节不同区域。  \n全局潜在变量有助于协调图像的整体方面，例如全局光照条件、人脸等的整体风格属性等。\n* 经过这些调整后，我们发现向判别器添加变换器层并无额外益处，因此为了简化，我们将其禁用了。\n\n## 可视化\n该代码支持生成定性结果和可视化效果。例如，要创建每一层的注意力图：\n```python\npython run_network.py --gpus 0 --vis --expname clevr-exp --dataset clevr --vis-layer-maps\n```\n\n下面可以看到 GANformer 生成的示例图像和注意力图：\n\n\u003Cdiv align=\"center\">\n  \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fdorarad_gansformer_readme_3bbc9d8bbec7.png\" style=\"float:left\" width=\"831px\">\n\u003C\u002Fdiv>\n\n## 命令行选项\n以下列出了一些最常用的模型选项。\n\n### 训练\n* `--gamma`：建议针对所选数据集尝试不同的值（默认值为 `10`）。\n* `--truncation-psi`：控制图像质量与多样性之间的权衡。（默认值为 `0.7`）\n* `--eval-images-num`：用于计算指标的图像数量。建议选择较小的数值以加快训练速度（默认值为 `50,000`）。\n* `--restart`：从头开始重新训练，而不是从最新快照恢复。\n* `--pretrained-pkl`：加载预训练模型，可以是本地文件，也可以是从云端加载 `gdrive:\u003Cdataset>-snapshot.pkl` 格式的文件，适用于目录中的各类数据集。\n* `--data-dir` 和 `--result-dir`：分别指定数据集（tfrecords）和日志\u002F结果的存储目录。\n\n### 模型（最常用）\n* `--transformer`：在生成器中添加变换器层（GANformer）。\n* `--components-num`：潜在组件的数量，这些组件将对图像进行注意力操作。建议取值范围为 `8–16`（默认值为 `1`）。\n* `--latent-size`：整体潜在空间大小（默认值为 `512`）。每个潜在组件的大小则为 `latent_size\u002Fcomponents_num`。\n* `--num-heads`：注意力头的数量（默认值为 `1`）。\n* `--integration`：变换器层中信息的整合方式，例如 `add` 或 `mul`（默认值为 `mul`）。\n\n### 模型（其他）\n* `--g-start-res` 和 `--g-end-res`：变换器层的起始和结束分辨率（默认值为所有层直至分辨率为 2\u003Csup>8\u003C\u002Fsup>）。\n* `--kmeans`：跟踪并更新图像到潜在变量的分配中心点，用于双工注意力机制。\n* `--mapping-ltnt2ltnt`：在映射网络中对潜在变量执行自注意力操作。\n* `--use-pos`：为潜在变量使用可训练的位置编码。\n* `--style False`：关闭单向量全局风格调节功能（StyleGAN2）。\n\n### 可视化\n* **样本图像**\n  * `--vis-images`：生成图像样本。\n  * `--vis-latents`：保存源潜在向量。\n* **注意力图**\n  * `--vis-maps`：可视化最后一层和第一个注意力头的注意力图。\n  * `--vis-layer-maps`：可视化所有层和所有注意力头的注意力图。\n  * `--blending-alpha`：在可视化图像与注意力图的混合时设置透明度权重。\n* **图像插值**\n  * `--vis-interpolations`：生成源潜在变量之间的一系列插值图像。\n  * `--interpolation-density`：插值过程中两个端点之间的样本数量（默认值为 `8`）。\n* **其他**\n  * `--vis-noise-var`：创建噪声变化的可视化效果。\n  * `--vis-style-mix`：创建风格混合的可视化效果。\n\n运行 `python run_network.py -h` 可查看完整的选项列表。\n\n## 示例图像（更多案例）\n\u003Cdiv align=\"center\">\n  \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fdorarad_gansformer_readme_9082fe6c32f1.png\" style=\"float:left\" width=\"750px\">\n  \u003Cbr>\n  \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fdorarad_gansformer_readme_c82416410f01.png\" style=\"float:left\" width=\"750px\">\n  \u003Cbr>\n  \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fdorarad_gansformer_readme_3a77abf159c4.png\" style=\"float:left\" width=\"750px\">\n  \u003Cbr>\n  \u003Cimg src=\"https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fdorarad_gansformer_readme_3224bbad4960.png\" style=\"float:left\" width=\"750px\">\n\u003C\u002Fdiv>\n\n## CUDA \u002F 安装\n该模型依赖于自定义的 TensorFlow\u002FPyTorch 操作，这些操作使用 [NVCC](https:\u002F\u002Fdocs.nvidia.com\u002Fcuda\u002Fcuda-compiler-driver-nvcc\u002Findex.html) 动态编译。\n\n要设置环境，例如针对 CUDA 10.0：\n```python\nexport PATH=\u002Fusr\u002Flocal\u002Fcuda-10.0\u002Fbin${PATH:+:${PATH}}\nexport LD_LIBRARY_PATH=\u002Fusr\u002Flocal\u002Fcuda10.0\u002Flib64${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}\n```\n\n要测试 NVCC 安装是否正常工作，请运行：\n```python\nnvcc test_nvcc.cu -o test_nvcc -run\n| 
CPU 说你好。\n| GPU 说你好。\n```\n\n在 PyTorch 版本中，如果出现以下重复信息：\n“无法构建 upfirdn2d 的 CUDA 内核。正在回退到低效的参考实现。”\n请确保你的 CUDA 和 PyTorch 版本匹配。如果你安装了多个 CUDA 版本，可以考虑将 `CUDA_HOME` 设置为对应的版本。例如：\n```python\nexport CUDA_HOME=\u002Fusr\u002Flocal\u002Fcuda-10.1\n```\n\n## 架构概述\nGANformer 由两个网络组成：\n\n**生成器**：根据随机采样的潜在变量 (`z`) 生成图像 (`x`)。潜在变量 `z` 的形状为 `[batch_size, component_num, latent_dim]`，其中 `component_num` 默认为 1（Vanilla GAN、StyleGAN），但在 GANformer 模型中大于 1。我们可以通过沿第二个维度拆分 `z` 来获得 `z_1,...,z_k` 个潜在组件。生成器同样分为两部分：\n* **映射网络**：将来自正态分布的采样潜在变量 (`z`) 转换为中间空间 (`w`)。它由一系列前馈层组成。这 `k` 个潜在组件可以从 `z` 空间独立映射到 `w` 空间，也可以通过自注意力机制相互作用（可选标志）。\n* **合成网络**：中间潜在变量 `w` 用于指导新图像的生成。图像特征从一个小型的常量或采样网格（`4x4`）开始，然后经过多层卷积和上采样，直到达到所需的分辨率（如 `256x256`）。每次卷积后，图像特征都会被中间潜在向量 `w` 调制（即控制其方差和偏置）。而在 StyleGAN 模型中，只有一个全局的 `w` 向量来平等地控制所有特征。GANformer 使用注意力机制，使 `k` 个潜在组件分别专注于控制图像中的不同区域，从而协同生成图像，因此在生成包含多物体场景的图像时表现更佳。\n* **注意力机制**可以以多种方式应用：\n  * **单向注意力**：当注意力仅从潜在变量单向作用于图像特征时（自上而下）。\n  * **双向注意力**：当注意力同时在两个方向上作用——从潜在变量到图像特征（自上而下），再从图像特征反馈回潜在变量（自下而上），从而使两种表示相互迭代地提供信息。\n  * **潜在变量之间的自注意力**：也可用于直接促进潜在变量之间的交互。\n  * **图像特征之间的自注意力**（SAGAN 模型）：早期方法直接在图像特征之间使用注意力，但这种方法扩展性较差，因为特征数量与分辨率成平方关系，在高分辨率下会变得非常高。\n\n**判别器**：接收一张图像，并预测它是真实的还是伪造的——即来源于数据集还是生成器。该模型会对图像进行多层卷积和下采样，逐步降低图像表示的分辨率，直至做出最终预测。可选地，判别器中也可以加入注意力机制，配备多个（`k`）聚合变量，这些变量利用注意力机制在图像处理过程中自适应地收集信息。我们观察到，在判别器中使用注意力机制时，模型性能会有小幅提升，不过需要注意的是，根据我们的观察，使用注意力机制所带来的主要收益来自于生成器。\n\n## 代码库\n该代码库基于并扩展了 Karras 等人优秀的 [StyleGAN2](https:\u002F\u002Fgithub.com\u002FNVlabs\u002Fstylegan2) 和 [StyleGAN2-ADA](https:\u002F\u002Fgithub.com\u002FNVlabs\u002Fstylegan2-ada-pytorch\u002F) 仓库。\n\nGANformer 模型也可以被视为 StyleGAN 的一种泛化：StyleGAN 只有一个全局潜在向量来全局控制所有图像特征的风格，而 GANformer 则拥有 *k* 个潜在向量，它们通过注意力机制协作控制图像中的不同区域，从而更好地建模多物体和组合场景的图像。\n\n## 致谢\n我谨向 Christopher D. Manning 表示感谢，感谢他在开发二部图 Transformer 时给予的富有成效的讨论和建设性意见，尤其是在语言表示领域内的探索；同时也感谢他提供的慷慨资金支持，使得这项工作得以顺利开展！ :sunflower:\n\n如果您有任何问题、评论或反馈，请随时通过 dorarad@cs.stanford.edu 与我联系。谢谢！ :)","# GANformer 快速上手指南\n\nGANformer 是一种用于图像生成的新型高效 Transformer 模型。它采用二分结构，支持长距离交互，同时保持线性计算效率，可扩展至高分辨率合成。本项目同时支持 **TensorFlow** 和 **PyTorch** 版本。\n\n## 1. 环境准备\n\n### 系统要求\n- **操作系统**: Linux (推荐)\n- **GPU**: 建议显存 ≥ 12GB (如 Titan V)，支持 CUDA 10.0+\n- **Python**: 3.6 或 3.7\n\n### 框架依赖\n根据你选择的版本安装对应框架：\n- **TensorFlow 版本**: TensorFlow 1.14 (推荐) 或 1.15\n- **PyTorch 版本**: PyTorch ≥ 1.8\n- **其他**: cuDNN 7.5+\n\n> **国内加速提示**: 安装 Python 包时建议使用清华或阿里镜像源以加快下载速度。\n> ```bash\n> pip install -r requirements.txt -i https:\u002F\u002Fpypi.tuna.tsinghua.edu.cn\u002Fsimple\n> ```\n\n## 2. 安装步骤\n\n本项目主目录为 TensorFlow 版本，`pytorch_version` 目录为 PyTorch 版本。以下以主目录（TF）为例，PyTorch 用户请进入 `pytorch_version` 目录操作。\n\n1. **克隆代码库**\n   ```bash\n   git clone \u003Crepository_url>\n   cd gansformer\n   # 如果使用 PyTorch 版本，请执行: cd pytorch_version\n   ```\n\n2. **安装依赖包**\n   ```bash\n   pip install -r requirements.txt\n   ```\n   *(注：PyTorch 用户请运行 `pip install -r pytorch_version\u002Frequirements.txt`)*\n\n3. **验证安装**\n   确保 `python` 命令可用，且 GPU 驱动正常识别。\n\n## 3. 
基本使用\n\n### 快速生成图像 (使用预训练模型)\n最简单的用法是运行 `generate.py` 脚本。该脚本会自动下载预训练模型并生成图像。\n\n**命令示例：**\n```bash\npython generate.py --gpus 0 --model gdrive:bedrooms-snapshot.pkl --output-dir images --images-num 32\n```\n\n**参数说明：**\n- `--gpus`: 指定使用的 GPU ID (例如 `0`)。\n- `--model`: 预训练模型路径。支持从 Google Drive 自动下载 (格式 `gdrive:\u003Cname>.pkl`) 或本地路径。\n  - 可用模型包括：`bedrooms-snapshot.pkl`, `ffhq-snapshot.pkl`, `clevr-snapshot.pkl`, `cityscapes-snapshot.pkl`。\n- `--output-dir`: 生成图像的保存目录。\n- `--images-num`: 生成图像的数量。\n\n**调整生成质量与多样性：**\n使用 `--truncation-psi` 参数控制权衡，推荐范围 `0.6` 到 `1.0`。\n```bash\npython generate.py --gpus 0 --model gdrive:ffhq-snapshot.pkl --output-dir ffhq_images --images-num 32 --truncation-psi 0.7\n```\n\n### 生成高分辨率图像\n对于 1024x1024 (FFHQ) 或 1024x2048 (Cityscapes) 的模型，需将批大小设为 1 以适应显存：\n\n```bash\n# FFHQ 1024x1024\npython generate.py --gpus 0 --model gdrive:ffhq-snapshot-1024.pkl --output-dir ffhq_images --images-num 32 --batch-size 1\n\n# Cityscapes 1024x2048 (目前仅 TF 版本支持)\npython generate.py --gpus 0 --model gdrive:cityscapes-snapshot-2048.pkl --output-dir cityscapes_images --images-num 32 --batch-size 1 --ratio 0.5\n```\n\n### 数据准备 (可选)\n如需从头训练，可使用 `prepare_data.py` 准备数据集（支持 CLEVR, LSUN-Bedrooms, Cityscapes, FFHQ）。\n\n**下载并准备默认数据集：**\n```bash\npython prepare_data.py --ffhq --cityscapes --clevr --bedrooms --max-images 100000\n```\n*提示：使用 `--max-images` 可限制下载数量以节省空间。*\n\n### 开始训练 (可选)\n使用 `run_network.py` 进行训练或微调。\n\n**微调预训练模型：**\n```bash\npython run_network.py --train --gpus 0 --ganformer-default --expname clevr-finetune --dataset clevr --pretrained-pkl gdrive:clevr-snapshot.pkl\n```\n\n**从头训练：**\n```bash\npython run_network.py --train --gpus 0 --ganformer-default --expname clevr-scratch --dataset clevr --eval-images-num 10000\n```\n\n训练日志、样本图像及 Tensorboard 记录将保存在 `results\u002F\u003Cexpname>-\u003Crun-id>` 目录下。","某游戏工作室的美术团队正急需为开放世界项目批量生成高分辨率（1024x1024）的多样化场景概念图，以加速前期视觉探索。\n\n### 没有 gansformer 时\n- **训练成本高昂**：依赖传统 StyleGAN2 架构，需要极多的训练步数才能收敛，消耗大量 GPU 算力和时间。\n- **全局控制力弱**：难以通过潜在变量有效调节图像的整体布局或长距离结构，导致生成的场景构图混乱。\n- **缺乏组合性表达**：模型倾向于记忆纹理而非理解物体关系，无法灵活生成具有清晰物体层级和逻辑的场景。\n- **扩展性受限**：随着分辨率提升，计算复杂度急剧增加，难以高效扩展到高清合成任务。\n\n### 使用 gansformer 后\n- **训练效率飞跃**：凭借线性计算效率的双部结构，仅需传统模型 5-7 倍的更少步数即可完成高质量训练，大幅节省算力。\n- **长程交互增强**：利用 Transformer 机制实现像素间的长距离互动，能精准控制场景的全局结构与布局一致性。\n- **组合式生成能力**：通过潜变量与视觉特征的双向迭代传播，自然涌现出具有明确物体边界和逻辑关系的组合式场景。\n- **高清合成轻松应对**：架构设计原生支持高分辨率扩展，可稳定生成细节丰富的 1024x1024 图像而无须牺牲效率。\n\ngansformer 通过引入高效的生成式 Transformer 架构，将场景生成的训练速度、结构可控性与 compositional 表达能力提升到了全新高度。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Fdorarad_gansformer_d9570fe6.png","dorarad","Drew Arad Hudson","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002Fdorarad_deecaed0.jpg",null,"Stanford University","Palo Alto, California","dorarad@stanford.edu","drewAhudson","https:\u002F\u002Fcs.stanford.edu\u002F~dorarad","https:\u002F\u002Fgithub.com\u002Fdorarad",[83,87,91],{"name":84,"color":85,"percentage":86},"Python","#3572A5",93.6,{"name":88,"color":89,"percentage":90},"Cuda","#3A4E3A",5.3,{"name":92,"color":93,"percentage":94},"C++","#f34b7d",1.1,1346,151,"2026-04-08T16:36:50","MIT","未说明","需要 NVIDIA GPU，推荐 Titan V，显存至少 12GB（更大显存可加速训练），CUDA 10.0，cuDNN 7.5",{"notes":102,"python":103,"dependencies":104},"该项目同时支持 TensorFlow 和 PyTorch 版本，代码结构相似。训练高分辨率模型（>=512）时若出现显存溢出（OOM），建议减小 batch-size。预训练模型支持最高 1024x1024 分辨率。","3.6 或 3.7",[105,106,107,108],"TensorFlow 1.14 (推荐) 或 1.15","PyTorch >= 1.8","CUDA Toolkit 10.0","cuDNN 
7.5",[15,35],[111,112,113,114,115,116,117],"transformers","gans","generative-adversarial-networks","image-generation","scene-generation","compositionality","attention","2026-03-27T02:49:30.150509","2026-04-11T23:22:53.556824",[121,126,131,136,141,146],{"id":122,"question_zh":123,"answer_zh":124,"source_url":125},30036,"项目是否提供 PyTorch 版本的实现？","是的，维护者已经发布了新的 PyTorch 实现版本。该版本与原始的 TensorFlow 版本具有匹配的接口。用户可以直接使用新的 PyTorch 代码进行模型训练和推理。","https:\u002F\u002Fgithub.com\u002Fdorarad\u002Fgansformer\u002Fissues\u002F17",{"id":127,"question_zh":128,"answer_zh":129,"source_url":130},30037,"运行 CLEVR 预训练模型时，为什么得到的 FID 分数（约 22）与论文报告（9.2）不符？","这是由于数据预处理中的裁剪设置不一致导致的。本地处理的数据图像高宽比为 2\u002F3，而预训练模型是在高宽比为 3\u002F4 的图像上训练的。解决方法是重新运行数据准备命令：`python prepare_data.py --clevr --max-images 100000`，这将下载正确比例的数据，然后再运行评估命令即可获得预期的 FID 分数。","https:\u002F\u002Fgithub.com\u002Fdorarad\u002Fgansformer\u002Fissues\u002F16",{"id":132,"question_zh":133,"answer_zh":134,"source_url":135},30038,"运行 generate.py 时遇到 'FusedBiasAct' OpKernel 未注册或 cuDNN 版本错误怎么办？","这通常是由于预训练模型配置或截断值（truncation psi）不匹配引起的。维护者已更新代码以使用正确的 FFHQ 预训练模型，并将 `truncation-psi` 值从 0.5 调整为开发时使用的 0.7。请确保拉取最新代码并使用更新后的参数运行生成命令。此外，模型默认分辨率为 256x256 以减少计算需求，但代码支持更高分辨率的训练。","https:\u002F\u002Fgithub.com\u002Fdorarad\u002Fgansformer\u002Fissues\u002F5",{"id":137,"question_zh":138,"answer_zh":139,"source_url":140},30039,"训练代码报错提示变量未定义（如 maps_in, gen_mod）或函数参数错误，如何解决？","这是早期版本代码中的 Bug。维护者已修复了所有未定义变量的问题以及 `get_positional_embeddings` 函数的参数错误。请确保您使用的是仓库中的最新代码，所有相关问题已在最近的提交中解决，代码现在应该可以正常运行。","https:\u002F\u002Fgithub.com\u002Fdorarad\u002Fgansformer\u002Fissues\u002F1",{"id":142,"question_zh":143,"answer_zh":144,"source_url":145},30040,"如何进行条件训练（Conditional Training）或使用 Duplex 模式？","Duplex 模式包含两个方面：`--kmeans` 和 `--g-img2ltnt`。但在重构后的新代码中，`--g-img2ltnt` 仍存在一些训练问题。因此，维护者建议暂时使用 `--gansformer-default` 标志，该标志提供 simplex + kmeans 版本的模型，这是目前最稳定的配置。关于具体的 one-hot 编码条件代码，需关注后续的更新发布。","https:\u002F\u002Fgithub.com\u002Fdorarad\u002Fgansformer\u002Fissues\u002F12",{"id":147,"question_zh":148,"answer_zh":149,"source_url":150},30041,"在 Duplex Attention 机制中，P 的数量如何设置？K 和 V 是如何计算的？","在 Duplex Attention 中，V (Values) 存储的是内容变量（例如 GAN 中随机采样的潜变量），而 K (Keys) 跟踪从 X 到 Y 的基于注意力的分配质心，计算公式为 `K=a_b(Y, X)`。这意味着 K 是通过自注意力模块计算的，但输入为 (Y, X)。具体的 P 值设置取决于代码实现中的潜变量分割策略，通常对应于对象或区域的数量。","https:\u002F\u002Fgithub.com\u002Fdorarad\u002Fgansformer\u002Fissues\u002F10",[152,157],{"id":153,"version":154,"summary_zh":155,"released_at":156},206633,"v1.5.2","生成对抗变换器论文的官方实现，同时支持 PyTorch 和 TensorFlow，用于图像及组合场景生成。该代码库支持训练、评估、图像采样以及多种可视化功能。\n\n**1.5.2 版本更新（2022年2月22日）**：我们已将 PyTorch 版本的权重初始化调整为预期尺度，从而显著提升了模型的学习速度。","2022-02-02T23:39:05",{"id":158,"version":159,"summary_zh":160,"released_at":161},206634,"v1.0","生成对抗Transformer论文的官方实现，用于图像和组合场景生成。该代码库支持训练、评估、图像采样以及多种可视化功能。","2021-03-17T14:11:44"]