[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-3dlg-hcvc--plan2scene":3,"tool-3dlg-hcvc--plan2scene":64},[4,17,27,35,43,56],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":16},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,3,"2026-04-05T11:01:52",[13,14,15],"开发框架","图像","Agent","ready",{"id":18,"name":19,"github_repo":20,"description_zh":21,"stars":22,"difficulty_score":23,"last_commit_at":24,"category_tags":25,"status":16},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",138956,2,"2026-04-05T11:33:21",[13,15,26],"语言模型",{"id":28,"name":29,"github_repo":30,"description_zh":31,"stars":32,"difficulty_score":23,"last_commit_at":33,"category_tags":34,"status":16},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",107662,"2026-04-03T11:11:01",[13,14,15],{"id":36,"name":37,"github_repo":38,"description_zh":39,"stars":40,"difficulty_score":23,"last_commit_at":41,"category_tags":42,"status":16},3704,"NextChat","ChatGPTNextWeb\u002FNextChat","NextChat 是一款轻量且极速的 AI 助手，旨在为用户提供流畅、跨平台的大模型交互体验。它完美解决了用户在多设备间切换时难以保持对话连续性，以及面对众多 AI 模型不知如何统一管理的痛点。无论是日常办公、学习辅助还是创意激发，NextChat 都能让用户随时随地通过网页、iOS、Android、Windows、MacOS 或 Linux 端无缝接入智能服务。\n\n这款工具非常适合普通用户、学生、职场人士以及需要私有化部署的企业团队使用。对于开发者而言，它也提供了便捷的自托管方案，支持一键部署到 Vercel 或 Zeabur 等平台。\n\nNextChat 的核心亮点在于其广泛的模型兼容性，原生支持 Claude、DeepSeek、GPT-4 及 Gemini Pro 等主流大模型，让用户在一个界面即可自由切换不同 AI 能力。此外，它还率先支持 MCP（Model Context Protocol）协议，增强了上下文处理能力。针对企业用户，NextChat 提供专业版解决方案，具备品牌定制、细粒度权限控制、内部知识库整合及安全审计等功能，满足公司对数据隐私和个性化管理的高标准要求。",87618,"2026-04-05T07:20:52",[13,26],{"id":44,"name":45,"github_repo":46,"description_zh":47,"stars":48,"difficulty_score":23,"last_commit_at":49,"category_tags":50,"status":16},2268,"ML-For-Beginners","microsoft\u002FML-For-Beginners","ML-For-Beginners 是由微软推出的一套系统化机器学习入门课程，旨在帮助零基础用户轻松掌握经典机器学习知识。这套课程将学习路径规划为 12 周，包含 26 节精炼课程和 52 道配套测验，内容涵盖从基础概念到实际应用的完整流程，有效解决了初学者面对庞大知识体系时无从下手、缺乏结构化指导的痛点。\n\n无论是希望转型的开发者、需要补充算法背景的研究人员，还是对人工智能充满好奇的普通爱好者，都能从中受益。课程不仅提供了清晰的理论讲解，还强调动手实践，让用户在循序渐进中建立扎实的技能基础。其独特的亮点在于强大的多语言支持，通过自动化机制提供了包括简体中文在内的 
50 多种语言版本，极大地降低了全球不同背景用户的学习门槛。此外，项目采用开源协作模式，社区活跃且内容持续更新，确保学习者能获取前沿且准确的技术资讯。如果你正寻找一条清晰、友好且专业的机器学习入门之路，ML-For-Beginners 将是理想的起点。",84991,"2026-04-05T10:45:23",[14,51,52,53,15,54,26,13,55],"数据工具","视频","插件","其他","音频",{"id":57,"name":58,"github_repo":59,"description_zh":60,"stars":61,"difficulty_score":10,"last_commit_at":62,"category_tags":63,"status":16},3128,"ragflow","infiniflow\u002Fragflow","RAGFlow 是一款领先的开源检索增强生成（RAG）引擎，旨在为大语言模型构建更精准、可靠的上下文层。它巧妙地将前沿的 RAG 技术与智能体（Agent）能力相结合，不仅支持从各类文档中高效提取知识，还能让模型基于这些知识进行逻辑推理和任务执行。\n\n在大模型应用中，幻觉问题和知识滞后是常见痛点。RAGFlow 通过深度解析复杂文档结构（如表格、图表及混合排版），显著提升了信息检索的准确度，从而有效减少模型“胡编乱造”的现象，确保回答既有据可依又具备时效性。其内置的智能体机制更进一步，使系统不仅能回答问题，还能自主规划步骤解决复杂问题。\n\n这款工具特别适合开发者、企业技术团队以及 AI 研究人员使用。无论是希望快速搭建私有知识库问答系统，还是致力于探索大模型在垂直领域落地的创新者，都能从中受益。RAGFlow 提供了可视化的工作流编排界面和灵活的 API 接口，既降低了非算法背景用户的上手门槛，也满足了专业开发者对系统深度定制的需求。作为基于 Apache 2.0 协议开源的项目，它正成为连接通用大模型与行业专有知识之间的重要桥梁。",77062,"2026-04-04T04:44:48",[15,14,13,26,54],{"id":65,"github_repo":66,"name":67,"description_en":68,"description_zh":69,"ai_summary_zh":69,"readme_en":70,"readme_zh":71,"quickstart_zh":72,"use_case_zh":73,"hero_image_url":74,"owner_login":75,"owner_name":75,"owner_avatar_url":76,"owner_bio":77,"owner_company":78,"owner_location":78,"owner_email":78,"owner_twitter":78,"owner_website":78,"owner_url":79,"languages":80,"stars":97,"forks":98,"last_commit_at":99,"license":100,"difficulty_score":10,"env_os":101,"env_gpu":102,"env_ram":103,"env_deps":104,"category_tags":111,"github_topics":112,"view_count":118,"oss_zip_url":78,"oss_zip_packed_at":78,"status":16,"created_at":119,"updated_at":120,"faqs":121,"releases":152},215,"3dlg-hcvc\u002Fplan2scene","plan2scene","Official implementation of the paper Plan2Scene.","plan2scene 是一个基于学术研究的开源项目，旨在将二维建筑平面图和少量室内照片自动转化为带纹理的三维住宅模型。它主要解决了室内场景重建中数据稀缺和手动建模成本高的问题，让用户无需专业建模技能即可生成合理的 3D 场景。\n\n这个项目非常适合计算机视觉领域的研究人员、3D 内容开发者以及希望快速原型化的室内设计师使用。plan2scene 的核心技术亮点在于其强大的纹理合成能力与物体放置算法。即使在实际拍摄照片缺失或部分不可见的情况下，它也能通过推理补全室内细节，生成连贯的网格模型。\n\n作为论文的官方实现，plan2scene 提供了完整的代码库，支持 Rent3D++ 数据集，并包含了数据预处理、模型训练及评估的全流程脚本。此外，项目还集成了 Embark Studios 的纹理合成工具，确保生成的表面纹理自然真实。无论是用于学术研究基准测试，还是探索自动化 3D 内容生成，plan2scene 都是一个值得尝试的实用工具。","# Plan2Scene\n\nOfficial repository of the paper:\n\n__Plan2Scene: Converting floorplans to 3D scenes__\n\n[Madhawa Vidanapathirana](https:\u002F\u002Fgithub.com\u002Fmadhawav), [Qirui Wu](), [Yasutaka Furukawa](), [Angel X. Chang](https:\u002F\u002Fgithub.com\u002Fangelxuanchang)\n, [Manolis Savva](https:\u002F\u002Fgithub.com\u002Fmsavva)\n\n[[Paper](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.05375), [Project Page](https:\u002F\u002F3dlg-hcvc.github.io\u002Fplan2scene\u002F), [Google Colab Demo](https:\u002F\u002Fcolab.research.google.com\u002Fdrive\u002F1lDkbfIV0drR1o9D0WYzoWeRskB91nXHq?usp=sharing)]\n\n![Task Overview](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002F3dlg-hcvc_plan2scene_readme_e4f268b52e40.png)\nIn the Plan2Scene task, we produce a textured 3D mesh of a residence from a floorplan and set of photos.\n\n## Dependencies\n1) We use a conda environment initialized as [described here](.\u002Fdocs\u002Fmd\u002Fconda_env_setup.md).\n2) Setup the `command line library` of [Embark Studios texture-synthesis](https:\u002F\u002Fgithub.com\u002FEmbarkStudios\u002Ftexture-synthesis#command-line-binary) project. \n    1) You can download a pre-built binary [available here](https:\u002F\u002Fgithub.com\u002FEmbarkStudios\u002Ftexture-synthesis\u002Freleases). 
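For example, on Linux you might unpack the downloaded archive and make the binary executable (the archive name below is an assumption; pick the build matching your platform from the releases page, and point `seam_correct.json` at wherever you place the binary):\n       ```bash\n       # Extract the release archive and mark the binary executable (illustrative filename).\n       tar -xzf texture-synthesis-x86_64-unknown-linux-musl.tar.gz\n       chmod +x .\u002Ftexture-synthesis\n       ```\n       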
Alternatively, you may build from the source.\n    2) Download the seam mask [available here](https:\u002F\u002Fgithub.com\u002FEmbarkStudios\u002Ftexture-synthesis\u002Fblob\u002Fmain\u002Fimgs\u002Fmasks\u002F1_tile.jpg).   \n    3) Rename `.\u002Fconf\u002Fplan2scene\u002Fseam_correct-example.json` to `seam_correct.json` and update the paths to the texture synthesis command line library binary and the seam mask.\n\nUse `code\u002Fsrc` as the source root when running Python scripts.\n```bash\nexport PYTHONPATH=.\u002Fcode\u002Fsrc\n```\n\n## Data\n1) Rent3D++ dataset\n    1. Download and copy the [Rent3D++ dataset](https:\u002F\u002Fforms.gle\u002FmKAmnrzAm3LCK9ua6) to the `[PROJECT_ROOT]\u002Fdata` directory. The data organization is [described here](docs\u002Fmd\u002Frent3dpp_data_organization.md).\n    2. [Optional] We have provided 3D scenes pre-populated with CAD models of objects. \n       If you wish to re-populate these scenes using the _Object Placement_ approach we use, [follow the instructions here](docs\u002Fmd\u002Fplace_cad_models.md).    \n    3. To replicate our results, you should use the pre-extracted crops we provide. \n       These crops are provided with the Rent3D++ dataset and are copied to the `.\u002Fdata\u002Fprocessed\u002Fsurface_crops` directory.\n       \n       [Optional] If you wish to extract new crops instead of using these provided crops, follow [these instructions](.\u002Fdocs\u002Fmd\u002Fextract_crops.md).\n    4. Select ground truth reference crops and populate photo room assignment lists.\n       ```bash\n       # Select ground truth reference crops.\n       python code\u002Fscripts\u002Fplan2scene\u002Fpreprocessing\u002Fgenerate_reference_crops.py .\u002Fdata\u002Fprocessed\u002Fgt_reference\u002Ftrain .\u002Fdata\u002Finput\u002Fphoto_assignments\u002Ftrain train\n       python code\u002Fscripts\u002Fplan2scene\u002Fpreprocessing\u002Fgenerate_reference_crops.py .\u002Fdata\u002Fprocessed\u002Fgt_reference\u002Fval .\u002Fdata\u002Finput\u002Fphoto_assignments\u002Fval val\n       python code\u002Fscripts\u002Fplan2scene\u002Fpreprocessing\u002Fgenerate_reference_crops.py .\u002Fdata\u002Fprocessed\u002Fgt_reference\u002Ftest .\u002Fdata\u002Finput\u002Fphoto_assignments\u002Ftest test \n       \n       # We evaluate Plan2Scene by simulating photo un-observations.\n       # Generate photoroom.csv files considering different photo un-observation ratios.\n       python code\u002Fscripts\u002Fplan2scene\u002Fpreprocessing\u002Fgenerate_unobserved_photo_assignments.py .\u002Fdata\u002Fprocessed\u002Fphoto_assignments\u002Ftrain .\u002Fdata\u002Finput\u002Fphoto_assignments\u002Ftrain .\u002Fdata\u002Finput\u002Funobserved_photos.json train\n       python code\u002Fscripts\u002Fplan2scene\u002Fpreprocessing\u002Fgenerate_unobserved_photo_assignments.py .\u002Fdata\u002Fprocessed\u002Fphoto_assignments\u002Fval .\u002Fdata\u002Finput\u002Fphoto_assignments\u002Fval .\u002Fdata\u002Finput\u002Funobserved_photos.json val\n       python code\u002Fscripts\u002Fplan2scene\u002Fpreprocessing\u002Fgenerate_unobserved_photo_assignments.py .\u002Fdata\u002Fprocessed\u002Fphoto_assignments\u002Ftest .\u002Fdata\u002Finput\u002Fphoto_assignments\u002Ftest .\u002Fdata\u002Finput\u002Funobserved_photos.json test  \n       ```\n2) [Optional] Stationary Textures Dataset - We use one of the following datasets to train the texture synthesis model. \n   _Not required if you are using pre-trained models._\n    - __Version 1__: We use this dataset in our CVPR paper. 
Details are available [here](.\u002Fdocs\u002Fmd\u002Fstationary_textures_dataset_v1.md).\n    - __Version 2__: Updated textures dataset which provides improved results on the Rent3D++ dataset. Details are available [here](.\u002Fdocs\u002Fmd\u002Fstationary_textures_dataset_v2.md).\n   \n3) [Optional] [Substance Mapped Textures dataset](.\u002Fdocs\u002Fmd\u002Fsmt_dataset.md). _Only used by the retrieve baseline._\n\n## Pretrained models\nPretrained models are available [here](.\u002Fdocs\u002Fmd\u002Fpretrained_models.md).\n\n## Inference on Rent3D++ dataset\n1) Download and pre-process the Rent3D++ dataset as described in the data section.\n2) Set up a [pretrained model](.\u002Fdocs\u002Fmd\u002Fpretrained_models.md) or train a new Plan2Scene network.\n3) Synthesize textures for observed surfaces using the VGG textureness score.\n   ```bash\n   # For test data without simulating photo unobservations. (drop = 0.0)\n   python code\u002Fscripts\u002Fplan2scene\u002Fpreprocessing\u002Ffill_room_embeddings.py .\u002Fdata\u002Fprocessed\u002Ftexture_gen\u002Ftest\u002Fdrop_0.0 test --drop 0.0\n   python code\u002Fscripts\u002Fplan2scene\u002Fcrop_select\u002Fvgg_crop_selector.py .\u002Fdata\u002Fprocessed\u002Fvgg_crop_select\u002Ftest\u002Fdrop_0.0 .\u002Fdata\u002Fprocessed\u002Ftexture_gen\u002Ftest\u002Fdrop_0.0 test --drop 0.0\n   # Results are stored at .\u002Fdata\u002Fprocessed\u002Fvgg_crop_select\u002Ftest\u002Fdrop_0.0\n   ```\n\n4) Propagate textures to unobserved surfaces using our texture propagation network.\n   ```bash\n   python code\u002Fscripts\u002Fplan2scene\u002Ftexture_prop\u002Fgnn_texture_prop.py .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Ftest\u002Fdrop_0.0 .\u002Fdata\u002Fprocessed\u002Fvgg_crop_select\u002Ftest\u002Fdrop_0.0 test GNN_PROP_CONF_PATH GNN_PROP_CHECKPOINT_PATH --keep-existing-predictions --drop 0.0\n   ```\n   To preview results, follow the instructions below.\n\n## Previewing outputs\n1) Complete inference steps.\n2) Correct seams of predicted textures and make them tileable.\n   ```bash\n   # For test data without simulating photo unobservations.\n   python code\u002Fscripts\u002Fplan2scene\u002Fpostprocessing\u002Fseam_correct_textures.py .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Ftest\u002Fdrop_0.0\u002Ftileable_texture_crops .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Ftest\u002Fdrop_0.0\u002Ftexture_crops test --drop 0.0\n   ```\n3) Generate .scene.json files with embedded textures using [embed_textures.py](code\u002Fscripts\u002Fplan2scene\u002Fpostprocessing\u002Fembed_textures.py).\n   A scene.json file describes the 3D geometry of a house. 
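For a quick sanity check, you can peek at the head of one generated file (`HOUSE_ID` below is a placeholder for an actual house id in the output directory):\n   ```bash\n   # List the generated scene files, then inspect the start of one of them.\n   ls .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Ftest\u002Fdrop_0.0\u002Farchs\n   head -c 400 .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Ftest\u002Fdrop_0.0\u002Farchs\u002FHOUSE_ID.scene.json\n   ```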
\n   It can be previewed via a browser using the 'scene-viewer' of [SmartScenesToolkit](https:\u002F\u002Fgithub.com\u002Fsmartscenes\u002Fsstk) (You will have to clone and build the SmartScenesToolkit).\n   ```bash\n   # For test data without simulating photo unobservations.\n   python code\u002Fscripts\u002Fplan2scene\u002Fpostprocessing\u002Fembed_textures.py .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Ftest\u002Fdrop_0.0\u002Farchs .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Ftest\u002Fdrop_0.0\u002Ftileable_texture_crops test --drop 0.0\n   # scene.json files are created in the .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Ftest\u002Fdrop_0.0\u002Farchs directory.\n   ```\n4) Render .scene.json files as .pngs using [render_house_jsons.py](code\u002Fscripts\u002Fplan2scene\u002Frender_house_jsons.py).\n    - Download and build the [SmartScenesToolkit](https:\u002F\u002Fgithub.com\u002Fsmartscenes\u002Fsstk).\n    - Rename `.\u002Fconf\u002Frender-example.json` to `.\u002Fconf\u002Frender.json` and update its fields to point to scene-toolkit.\n    - Run the following command to generate previews.\n       ```bash\n       CUDA_VISIBLE_DEVICES=0 python code\u002Fscripts\u002Fplan2scene\u002Frender_house_jsons.py .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Ftest\u002Fdrop_0.0\u002Farchs --scene-json\n       # A .png file is created for each .scene.json file in the .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Ftest\u002Fdrop_0.0\u002Farchs directory.\n       ```\n5) Generate qualitative result pages with previews using [preview_houses.py](code\u002Fscripts\u002Fplan2scene\u002Fpreview_houses.py).\n   ```bash\n   python code\u002Fscripts\u002Fplan2scene\u002Fpreview_houses.py .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Ftest\u002Fdrop_0.0\u002Fpreviews .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Ftest\u002Fdrop_0.0\u002Farchs .\u002Fdata\u002Finput\u002Fphotos test --textures-path .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Ftest\u002Fdrop_0.0\u002Ftileable_texture_crops 0.0\n   # Open .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Ftest\u002Fdrop_0.0\u002Fpreviews\u002Fpreview.html\n   ```\n## Test on Rent3D++ dataset\n1) [Optional] Download a pre-trained model or train the substance classifier used by the Subs metric. \n   Training instructions are available [here](.\u002Fdocs\u002Fmd\u002Ftrain_substance_classifier.md).\n   Pre-trained weights are available [here](.\u002Fdocs\u002Fmd\u002Fpretrained_models.md).\n   Skip this step to omit the Subs metric.\n2) Generate overall evaluation report at 60% photo unobservations. We used this setting in paper evaluations.\n   ```bash\n   # Synthesize textures for observed surfaces using the VGG textureness score.\n   # For the case: 60% (i.e. 0.6) of the photos unobserved. \n   python code\u002Fscripts\u002Fplan2scene\u002Fpreprocessing\u002Ffill_room_embeddings.py .\u002Fdata\u002Fprocessed\u002Ftexture_gen\u002Ftest\u002Fdrop_0.6 test --drop 0.6\n   python code\u002Fscripts\u002Fplan2scene\u002Fcrop_select\u002Fvgg_crop_selector.py .\u002Fdata\u002Fprocessed\u002Fvgg_crop_select\u002Ftest\u002Fdrop_0.6 .\u002Fdata\u002Fprocessed\u002Ftexture_gen\u002Ftest\u002Fdrop_0.6 test --drop 0.6\n   \n   # Propagate textures to un-observed surfaces using our GNN.\n   # For the case: 60% (i.e. 
0.6) of the photos unobserved.\n   python code\u002Fscripts\u002Fplan2scene\u002Ftexture_prop\u002Fgnn_texture_prop.py .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Ftest\u002Fdrop_0.6 .\u002Fdata\u002Fprocessed\u002Fvgg_crop_select\u002Ftest\u002Fdrop_0.6 test GNN_PROP_CONF_PATH GNN_PROP_CHECKPOINT_PATH --keep-existing-predictions --drop 0.6\n   \n   # Correct seams of texture crops and make them tileable.\n   # For test data where 60% of photos are unobserved.\n   python code\u002Fscripts\u002Fplan2scene\u002Fpostprocessing\u002Fseam_correct_textures.py .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Ftest\u002Fdrop_0.6\u002Ftileable_texture_crops .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Ftest\u002Fdrop_0.6\u002Ftexture_crops test --drop 0.6\n   \n   # Generate overall results at 60% simulated photo unobservations.\n   python code\u002Fscripts\u002Fplan2scene\u002Ftest.py .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Ftest\u002Fdrop_0.6\u002Ftileable_texture_crops .\u002Fdata\u002Fprocessed\u002Fgt_reference\u002Ftest\u002Ftexture_crops test\n   ```\n3) Generate evaluation report for observed surfaces, with no simulated photo unobservations. We used this setting in paper evaluations.\n   ```bash\n   # Run inference using drop=0.0.\n   python code\u002Fscripts\u002Fplan2scene\u002Fpreprocessing\u002Ffill_room_embeddings.py .\u002Fdata\u002Fprocessed\u002Ftexture_gen\u002Ftest\u002Fdrop_0.0 test --drop 0.0\n   python code\u002Fscripts\u002Fplan2scene\u002Fcrop_select\u002Fvgg_crop_selector.py .\u002Fdata\u002Fprocessed\u002Fvgg_crop_select\u002Ftest\u002Fdrop_0.0 .\u002Fdata\u002Fprocessed\u002Ftexture_gen\u002Ftest\u002Fdrop_0.0 test --drop 0.0\n   \n   # Correct seams of texture crops and make them tileable by running seam_correct_textures.py.\n   python code\u002Fscripts\u002Fplan2scene\u002Fpostprocessing\u002Fseam_correct_textures.py .\u002Fdata\u002Fprocessed\u002Fvgg_crop_select\u002Ftest\u002Fdrop_0.0\u002Ftileable_texture_crops .\u002Fdata\u002Fprocessed\u002Fvgg_crop_select\u002Ftest\u002Fdrop_0.0\u002Ftexture_crops test --drop 0.0\n   \n   # Generate evaluation results for observed surfaces.\n   python code\u002Fscripts\u002Fplan2scene\u002Ftest.py .\u002Fdata\u002Fprocessed\u002Fvgg_crop_select\u002Ftest\u002Fdrop_0.0\u002Ftileable_texture_crops .\u002Fdata\u002Fprocessed\u002Fgt_reference\u002Ftest\u002Ftexture_crops test\n   ```\n4) Generate evaluation report for unobserved surfaces at 60% photo unobservations. We used this setting in the paper evaluations.\n   ```bash\n   # It is assumed that the user has already generated the overall report at 0.6 drop fraction.\n   \n   # Generate results on unobserved surfaces at 60% simulated photo unobservations.\n   python code\u002Fscripts\u002Fplan2scene\u002Ftest.py .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Ftest\u002Fdrop_0.6\u002Ftileable_texture_crops .\u002Fdata\u002Fprocessed\u002Fgt_reference\u002Ftest\u002Ftexture_crops test --exclude-prior-predictions .\u002Fdata\u002Fprocessed\u002Fvgg_crop_select\u002Ftest\u002Fdrop_0.6\u002Ftexture_crops\n   ```\n\n5) Generate evaluation report on the FID metric as described [here](.\u002Fdocs\u002Fmd\u002Fcompute_fid_metric.md).\n\n## Inference on custom data\nIf you have scanned images of floorplans, you can use [raster-to-vector](https:\u002F\u002Fgithub.com\u002Fart-programmer\u002FFloorplanTransformation) to convert those floorplan images to a vector format. Then, follow the [instructions here](.\u002Fdocs\u002Fmd\u002Fplan2scene_on_r2v.md) to create textured 3D meshes of houses. 
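\n\nAs a rough sketch, the custom-data flow chains the same pipeline scripts shown above on the converted floorplans (the `custom` split name and output paths below are illustrative assumptions; the linked instructions give the exact arguments):\n```bash\nexport PYTHONPATH=.\u002Fcode\u002Fsrc\n# Texture embeddings and crop selection for observed surfaces of the custom split.\npython code\u002Fscripts\u002Fplan2scene\u002Fpreprocessing\u002Ffill_room_embeddings.py .\u002Fdata\u002Fprocessed\u002Ftexture_gen\u002Fcustom\u002Fdrop_0.0 custom --drop 0.0\npython code\u002Fscripts\u002Fplan2scene\u002Fcrop_select\u002Fvgg_crop_selector.py .\u002Fdata\u002Fprocessed\u002Fvgg_crop_select\u002Fcustom\u002Fdrop_0.0 .\u002Fdata\u002Fprocessed\u002Ftexture_gen\u002Fcustom\u002Fdrop_0.0 custom --drop 0.0\n# Propagate textures to unobserved surfaces, then seam-correct and embed them.\npython code\u002Fscripts\u002Fplan2scene\u002Ftexture_prop\u002Fgnn_texture_prop.py .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Fcustom\u002Fdrop_0.0 .\u002Fdata\u002Fprocessed\u002Fvgg_crop_select\u002Fcustom\u002Fdrop_0.0 custom GNN_PROP_CONF_PATH GNN_PROP_CHECKPOINT_PATH --keep-existing-predictions --drop 0.0\npython code\u002Fscripts\u002Fplan2scene\u002Fpostprocessing\u002Fseam_correct_textures.py .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Fcustom\u002Fdrop_0.0\u002Ftileable_texture_crops .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Fcustom\u002Fdrop_0.0\u002Ftexture_crops custom --drop 0.0\npython code\u002Fscripts\u002Fplan2scene\u002Fpostprocessing\u002Fembed_textures.py .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Fcustom\u002Fdrop_0.0\u002Farchs .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Fcustom\u002Fdrop_0.0\u002Ftileable_texture_crops custom --drop 0.0\n```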
\n\nIf you have floorplan vectors in another format, you can convert them to the raster-to-vector __annotation format__. \nThen, follow the same instructions as before to create textured 3D meshes of houses. \nThe R2V annotation format is explained with examples in the [data section of the raster-to-vector repository](https:\u002F\u002Fgithub.com\u002Fart-programmer\u002FFloorplanTransformation#data).\n\n## Training a new Plan2Scene network\nPlan2Scene consists of two trainable components, 1) the texture synthesis stage and 2) the texture propagation stage. Each stage is trained separately. The training procedure is as follows.\n1) Train the texture synthesis stage as described [here](.\u002Fdocs\u002Fmd\u002Ftrain_texture_synth.md).\n2) Train the texture propagation stage as described [here](.\u002Fdocs\u002Fmd\u002Ftrain_texture_prop.md).\n\n## Baseline Models\nThe baseline models are [available here](.\u002Fdocs\u002Fmd\u002Fbaselines.md).\n","# Plan2Scene\n\n论文的官方仓库：\n\n__Plan2Scene: Converting floorplans (平面图) to 3D scenes (3D 场景)__\n\n[Madhawa Vidanapathirana](https:\u002F\u002Fgithub.com\u002Fmadhawav), [Qirui Wu](), [Yasutaka Furukawa](), [Angel X. Chang](https:\u002F\u002Fgithub.com\u002Fangelxuanchang)\n, [Manolis Savva](https:\u002F\u002Fgithub.com\u002Fmsavva)\n\n[[Paper (论文)](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.05375), [Project Page (项目页面)](https:\u002F\u002F3dlg-hcvc.github.io\u002Fplan2scene\u002F), [Google Colab Demo (Google Colab 演示)](https:\u002F\u002Fcolab.research.google.com\u002Fdrive\u002F1lDkbfIV0drR1o9D0WYzoWeRskB91nXHq?usp=sharing)]\n\n![Task Overview](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002F3dlg-hcvc_plan2scene_readme_e4f268b52e40.png)\n在 Plan2Scene 任务中，我们根据 floorplan (平面图) 和一组照片生成住宅的 textured 3D mesh (纹理 3D 网格)。\n\n## Dependencies (依赖项)\n1) 我们使用一个 conda environment (conda 环境)，初始化方法 [见此](.\u002Fdocs\u002Fmd\u002Fconda_env_setup.md)。\n2) 设置 [Embark Studios texture-synthesis (纹理合成)](https:\u002F\u002Fgithub.com\u002FEmbarkStudios\u002Ftexture-synthesis#command-line-binary) 项目的 `command line library (命令行库)`。 \n    1) 你可以下载 [此处可用](https:\u002F\u002Fgithub.com\u002FEmbarkStudios\u002Ftexture-synthesis\u002Freleases) 的 pre-built binary (预构建二进制文件)。或者，你也可以从源代码构建。\n    2) 下载 [此处可用](https:\u002F\u002Fgithub.com\u002FEmbarkStudios\u002Ftexture-synthesis\u002Fblob\u002Fmain\u002Fimgs\u002Fmasks\u002F1_tile.jpg) 的 seam mask (接缝掩膜)。   \n    3) 将 `.\u002Fconf\u002Fplan2scene\u002Fseam_correct-example.json` 重命名为 'seam_correct.json' 并更新 texture synthesis (纹理合成) command line library (命令行库) binary (二进制文件) 和 seam mask (接缝掩膜) 的路径。\n\n运行 python 脚本时，使用 'code\u002Fsrc' 作为 source root (源代码根目录)。\n```bash\nexport PYTHONPATH=.\u002Fcode\u002Fsrc\n```\n\n## Data (数据)\n1) Rent3D++ dataset (数据集)\n    1. 下载并复制 [Rent3D++ dataset (数据集)](https:\u002F\u002Fforms.gle\u002FmKAmnrzAm3LCK9ua6) 到 `[PROJECT_ROOT]\u002Fdata` 目录。数据组织方式 [见此](docs\u002Fmd\u002Frent3dpp_data_organization.md)。\n    2. [可选] 我们提供了预填充了对象 CAD models (CAD 模型) 的 3D 场景。 \n       如果你希望使用我们使用的 _Object Placement (对象放置)_ 方法重新填充这些场景，[请遵循此处的说明](docs\u002Fmd\u002Fplace_cad_models.md)。    \n    3. 要复现我们的结果，你应该使用我们提供的 pre-extracted crops (预提取裁剪图)。 \n       这些 crops (裁剪图) 随 Rent3D++ dataset (数据集) 提供，并复制到 `.\u002Fdata\u002Fprocessed\u002Fsurface_crops` 目录。\n       \n       [可选] 如果你希望提取新的 crops (裁剪图) 而不是使用这些提供的 crops (裁剪图)，请遵循 [这些说明](.\u002Fdocs\u002Fmd\u002Fextract_crops.md)。\n    4. 
选择 ground truth (真实值) reference crops (参考裁剪图) 并填充 photo room assignment lists (照片房间分配列表)。\n       ```bash\n       # Select ground truth reference crops.\n       python code\u002Fscripts\u002Fplan2scene\u002Fpreprocessing\u002Fgenerate_reference_crops.py .\u002Fdata\u002Fprocessed\u002Fgt_reference\u002Ftrain .\u002Fdata\u002Finput\u002Fphoto_assignments\u002Ftrain train\n       python code\u002Fscripts\u002Fplan2scene\u002Fpreprocessing\u002Fgenerate_reference_crops.py .\u002Fdata\u002Fprocessed\u002Fgt_reference\u002Fval .\u002Fdata\u002Finput\u002Fphoto_assignments\u002Fval val\n       python code\u002Fscripts\u002Fplan2scene\u002Fpreprocessing\u002Fgenerate_reference_crops.py .\u002Fdata\u002Fprocessed\u002Fgt_reference\u002Ftest .\u002Fdata\u002Finput\u002Fphoto_assignments\u002Ftest test \n       \n       # We evaluate Plan2Scene by simulating photo un-observations.\n       # Generate photoroom.csv files considering different photo un-observation ratios.\n       python code\u002Fscripts\u002Fplan2scene\u002Fpreprocessing\u002Fgenerate_unobserved_photo_assignments.py .\u002Fdata\u002Fprocessed\u002Fphoto_assignments\u002Ftrain .\u002Fdata\u002Finput\u002Fphoto_assignments\u002Ftrain .\u002Fdata\u002Finput\u002Funobserved_photos.json train\n       python code\u002Fscripts\u002Fplan2scene\u002Fpreprocessing\u002Fgenerate_unobserved_photo_assignments.py .\u002Fdata\u002Fprocessed\u002Fphoto_assignments\u002Fval .\u002Fdata\u002Finput\u002Fphoto_assignments\u002Fval .\u002Fdata\u002Finput\u002Funobserved_photos.json val\n       python code\u002Fscripts\u002Fplan2scene\u002Fpreprocessing\u002Fgenerate_unobserved_photo_assignments.py .\u002Fdata\u002Fprocessed\u002Fphoto_assignments\u002Ftest .\u002Fdata\u002Finput\u002Fphoto_assignments\u002Ftest .\u002Fdata\u002Finput\u002Funobserved_photos.json test  \n       ```\n2) [可选] Stationary Textures Dataset (静态纹理数据集) - 我们使用以下数据集之一来训练 texture synthesis model (纹理合成模型)。 \n   _如果你使用 pre-trained models (预训练模型)，则不需要此项。_\n    - __Version 1__：我们在 CVPR 论文中使用此数据集。详情 [见此](.\u002Fdocs\u002Fmd\u002Fstationary_textures_dataset_v1.md)。\n    - __Version 2__：更新的 textures (纹理) 数据集，在 Rent3D++ dataset (数据集) 上提供改进的结果。详情 [见此](.\u002Fdocs\u002Fmd\u002Fstationary_textures_dataset_v2.md)。\n   \n3) [可选] [Substance Mapped Textures dataset (Substance Mapped Textures 数据集)](.\u002Fdocs\u002Fmd\u002Fsmt_dataset.md)。_仅由 retrieve baseline (检索基线) 使用。_\n\n## Pretrained models (预训练模型)\nPretrained models (预训练模型) 可在[此处](.\u002Fdocs\u002Fmd\u002Fpretrained_models.md)获取。\n\n## Inference on Rent3D++ dataset (在 Rent3D++ 数据集上进行推理)\n1) 按照数据部分所述下载并预处理 Rent3D++ dataset (数据集)。\n2) 设置 [pretrained model (预训练模型)](.\u002Fdocs\u002Fmd\u002Fpretrained_models.md) 或训练一个新的 Plan2Scene network (网络)。\n3) 使用 VGG textureness score (VGG 纹理度分数) 为 observed surfaces (观测表面) 合成 textures (纹理)。\n   ```bash\n   # For test data without simulating photo unobservations. 
(drop = 0.0)\n   python code\u002Fscripts\u002Fplan2scene\u002Fpreprocessing\u002Ffill_room_embeddings.py .\u002Fdata\u002Fprocessed\u002Ftexture_gen\u002Ftest\u002Fdrop_0.0 test --drop 0.0\n   python code\u002Fscripts\u002Fplan2scene\u002Fcrop_select\u002Fvgg_crop_selector.py .\u002Fdata\u002Fprocessed\u002Fvgg_crop_select\u002Ftest\u002Fdrop_0.0 .\u002Fdata\u002Fprocessed\u002Ftexture_gen\u002Ftest\u002Fdrop_0.0 test --drop 0.0\n   # Results are stored at .\u002Fdata\u002Fprocessed\u002Fvgg_crop_select\u002Ftest\u002Fdrop_0.0\n   ```\n\n4) 使用我们的 texture propagation network (纹理传播网络) 将 textures (纹理) 传播到 unobserved surfaces (未观测表面)。\n   ```bash\n   python code\u002Fscripts\u002Fplan2scene\u002Ftexture_prop\u002Fgnn_texture_prop.py .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Ftest\u002Fdrop_0.0 .\u002Fdata\u002Fprocessed\u002Fvgg_crop_select\u002Ftest\u002Fdrop_0.0 test GNN_PROP_CONF_PATH GNN_PROP_CHECKPOINT_PATH --keep-existing-predictions --drop 0.0\n   ```\n   要预览结果，请遵循以下说明。\n\n## 预览输出\n1) 完成推理 (inference) 步骤。\n2) 修正预测纹理的接缝 (seams) 并使其可平铺 (tileable)。\n   ```bash\n   # For test data without simulating photo unobservations.\n   python code\u002Fscripts\u002Fplan2scene\u002Fpostprocessing\u002Fseam_correct_textures.py .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Ftest\u002Fdrop_0.0\u002Ftileable_texture_crops .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Ftest\u002Fdrop_0.0\u002Ftexture_crops test --drop 0.0\n   ```\n3) 使用 [embed_textures.py](code\u002Fscripts\u002Fplan2scene\u002Fpostprocessing\u002Fembed_textures.py) 生成嵌入纹理的 .scene.json 文件。\n   一个 scene.json 文件描述房屋的 3D 几何结构。\n   可以使用 [SmartScenesToolkit](https:\u002F\u002Fgithub.com\u002Fsmartscenes\u002Fsstk) 的 'scene-viewer' 通过浏览器预览（您需要克隆并构建 SmartScenesToolkit）。\n   ```bash\n   # For test data without simulating photo unobservations.\n   python code\u002Fscripts\u002Fplan2scene\u002Fpostprocessing\u002Fembed_textures.py .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Ftest\u002Fdrop_0.0\u002Farchs .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Ftest\u002Fdrop_0.0\u002Ftileable_texture_crops test --drop 0.0\n   # scene.json files are created in the .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Ftest\u002Fdrop_0.0\u002Farchs directory.\n   ```\n4) 使用 [render_house_jsons.py](code\u002Fscripts\u002Fplan2scene\u002Frender_house_jsons.py) 将 .scene.json 文件渲染为 .png 图片。\n    - 下载并构建 [SmartScenesToolkit](https:\u002F\u002Fgithub.com\u002Fsmartscenes\u002Fsstk)。\n    - 将 `.\u002Fconf\u002Frender-example.json` 重命名为 `.\u002Fconf\u002Frender.json` 并更新其字段以指向 scene-toolkit。\n    - 运行以下命令生成预览。\n       ```bash\n       CUDA_VISIBLE_DEVICES=0 python code\u002Fscripts\u002Fplan2scene\u002Frender_house_jsons.py .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Ftest\u002Fdrop_0.0\u002Farchs --scene-json\n       # A .png file is created for each .scene.json file in the .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Ftest\u002Fdrop_0.0\u002Farchs directory.\n       ```\n5) 使用 [preview_houses.py](code\u002Fscripts\u002Fplan2scene\u002Fpreview_houses.py) 生成带有预览的定性结果页面。\n   ```bash\n   python code\u002Fscripts\u002Fplan2scene\u002Fpreview_houses.py .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Ftest\u002Fdrop_0.0\u002Fpreviews .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Ftest\u002Fdrop_0.0\u002Farchs .\u002Fdata\u002Finput\u002Fphotos test --textures-path .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Ftest\u002Fdrop_0.0\u002Ftileable_texture_crops 0.0\n   # Open .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Ftest\u002Fdrop_0.0\u002Fpreviews\u002Fpreview.html\n   ```\n## 在 
Rent3D++ 数据集上测试\n1) [可选] 下载预训练模型或训练 Subs 指标 (Substance) 所使用的物质分类器。\n   训练说明可在 [此处](.\u002Fdocs\u002Fmd\u002Ftrain_substance_classifier.md) 找到。\n   预训练权重可在 [此处](.\u002Fdocs\u002Fmd\u002Fpretrained_models.md) 找到。\n   跳过此步骤将省略 Subs 指标。\n2) 生成 60% 照片未观测 (photo unobservations) 情况下的整体评估报告。我们在论文评估中使用了此设置。\n   ```bash\n   # Synthesize textures for observed surfaces using the VGG textureness score.\n   # For the case: 60% (i.e. 0.6) of the photos unobserved. \n   python code\u002Fscripts\u002Fplan2scene\u002Fpreprocessing\u002Ffill_room_embeddings.py .\u002Fdata\u002Fprocessed\u002Ftexture_gen\u002Ftest\u002Fdrop_0.6 test --drop 0.6\n   python code\u002Fscripts\u002Fplan2scene\u002Fcrop_select\u002Fvgg_crop_selector.py .\u002Fdata\u002Fprocessed\u002Fvgg_crop_select\u002Ftest\u002Fdrop_0.6 .\u002Fdata\u002Fprocessed\u002Ftexture_gen\u002Ftest\u002Fdrop_0.6 test --drop 0.6\n   \n   # Propagate textures to un-observed surfaces using our GNN.\n   # For the case: 60% (i.e. 0.6) of the photos unobserved.\n   python code\u002Fscripts\u002Fplan2scene\u002Ftexture_prop\u002Fgnn_texture_prop.py .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Ftest\u002Fdrop_0.6 .\u002Fdata\u002Fprocessed\u002Fvgg_crop_select\u002Ftest\u002Fdrop_0.6 test GNN_PROP_CONF_PATH GNN_PROP_CHECKPOINT_PATH --keep-existing-predictions --drop 0.6\n   \n   # Correct seams of texture crops and make them tileable.\n   # For test data where 60% of photos are unobserved.\n   python code\u002Fscripts\u002Fplan2scene\u002Fpostprocessing\u002Fseam_correct_textures.py .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Ftest\u002Fdrop_0.6\u002Ftileable_texture_crops .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Ftest\u002Fdrop_0.6\u002Ftexture_crops test --drop 0.6\n   \n   # Generate overall results at 60% simulated photo unobservations.\n   python code\u002Fscripts\u002Fplan2scene\u002Ftest.py .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Ftest\u002Fdrop_0.6\u002Ftileable_texture_crops .\u002Fdata\u002Fprocessed\u002Fgt_reference\u002Ftest\u002Ftexture_crops test\n   ```\n3) 生成已观测表面 (observed surfaces) 的评估报告，不模拟照片未观测。我们在论文评估中使用了此设置。\n   ```bash\n   # Run inference using drop=0.0.\n   python code\u002Fscripts\u002Fplan2scene\u002Fpreprocessing\u002Ffill_room_embeddings.py .\u002Fdata\u002Fprocessed\u002Ftexture_gen\u002Ftest\u002Fdrop_0.0 test --drop 0.0\n   python code\u002Fscripts\u002Fplan2scene\u002Fcrop_select\u002Fvgg_crop_selector.py .\u002Fdata\u002Fprocessed\u002Fvgg_crop_select\u002Ftest\u002Fdrop_0.0 .\u002Fdata\u002Fprocessed\u002Ftexture_gen\u002Ftest\u002Fdrop_0.0 test --drop 0.0\n   \n   # Correct seams of texture crops and make them tileable by running seam_correct_textures.py.\n   python code\u002Fscripts\u002Fplan2scene\u002Fpostprocessing\u002Fseam_correct_textures.py .\u002Fdata\u002Fprocessed\u002Fvgg_crop_select\u002Ftest\u002Fdrop_0.0\u002Ftileable_texture_crops .\u002Fdata\u002Fprocessed\u002Fvgg_crop_select\u002Ftest\u002Fdrop_0.0\u002Ftexture_crops test --drop 0.0\n   \n   # Generate evaluation results for observed surfaces.\n   python code\u002Fscripts\u002Fplan2scene\u002Ftest.py .\u002Fdata\u002Fprocessed\u002Fvgg_crop_select\u002Ftest\u002Fdrop_0.0\u002Ftileable_texture_crops .\u002Fdata\u002Fprocessed\u002Fgt_reference\u002Ftest\u002Ftexture_crops test\n   ```\n\n4) 生成 60% 照片未观测情况下未观测表面 (unobserved surfaces) 的评估报告。我们在论文评估中使用了此设置。\n   ```bash\n   # It is assumed that the user has already generated the overall report at 0.6 drop fraction.\n   \n   # Generate results on unobserved surfaces at 60% simulated photo 
unobservations.\n   python code\u002Fscripts\u002Fplan2scene\u002Ftest.py .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Ftest\u002Fdrop_0.6\u002Ftileable_texture_crops .\u002Fdata\u002Fprocessed\u002Fgt_reference\u002Ftest\u002Ftexture_crops test --exclude-prior-predictions .\u002Fdata\u002Fprocessed\u002Fvgg_crop_select\u002Ftest\u002Fdrop_0.6\u002Ftexture_crops\n   ```\n\n5) 生成 [此处](.\u002Fdocs\u002Fmd\u002Fcompute_fid_metric.md) 所述的 FID 指标 (Fréchet Inception Distance) 评估报告。\n\n## 在自定义数据上推理\n如果您有扫描的平面图 (floorplans) 图像，可以使用 [raster-to-vector](https:\u002F\u002Fgithub.com\u002Fart-programmer\u002FFloorplanTransformation) 将这些平面图图像转换为矢量格式。然后，遵循 [此处说明](.\u002Fdocs\u002Fmd\u002Fplan2scene_on_r2v.md) 创建房屋的纹理 3D 网格 (meshes)。\n\n如果您有其他格式的平面图矢量，可以将它们转换为 raster-to-vector __标注格式__。\n然后，遵循与之前相同的说明创建房屋的纹理 3D 网格。\nR2V 标注格式及其示例在 [raster-to-vector 仓库的数据部分](https:\u002F\u002Fgithub.com\u002Fart-programmer\u002FFloorplanTransformation#data) 中进行了说明。\n\n## 训练新的 Plan2Scene 网络\nPlan2Scene 由两个可训练组件组成：1) 纹理合成阶段 (texture synthesis stage) 和 2) 纹理传播阶段 (texture propagation stage)。每个阶段分别训练。训练过程如下。\n1) 按照 [此处](.\u002Fdocs\u002Fmd\u002Ftrain_texture_synth.md) 所述训练纹理合成阶段。\n2) 按照 [此处](.\u002Fdocs\u002Fmd\u002Ftrain_texture_prop.md) 所述训练纹理传播阶段。\n\n## 基线模型 (Baseline Models)\n基线模型可在[此处](.\u002Fdocs\u002Fmd\u002Fbaselines.md)查看。","# Plan2Scene 快速上手指南\n\nPlan2Scene 是一个将平面图（Floorplan）和照片集转换为带纹理的 3D 住宅网格模型的工具。本指南帮助开发者快速完成环境配置并运行推理示例。\n\n## 环境准备\n\n### 系统要求\n- 操作系统：Linux \u002F Unix 环境（基于提供的脚本命令）\n- Python 环境：需使用 Conda 管理\n- 硬件：推荐具备 CUDA 支持的 GPU 用于纹理合成与渲染\n\n### 前置依赖\n1. **Conda 环境**：需按照项目文档初始化环境。\n2. **Texture-Synthesis CLI**：需配置 Embark Studios 的 texture-synthesis 命令行工具。\n3. **SmartScenesToolkit**：用于预览和渲染 3D 场景（需克隆并构建）。\n\n## 安装步骤\n\n### 1. 配置 Conda 环境\n按照项目提供的文档初始化 Conda 环境：\n- 参考文档：`.\u002Fdocs\u002Fmd\u002Fconda_env_setup.md`\n\n### 2. 配置 Texture-Synthesis\n下载预构建的二进制文件及 seam mask，并修改配置文件。\n\n```bash\n# 1. 下载预构建二进制文件 (从 GitHub Releases 下载)\n# https:\u002F\u002Fgithub.com\u002FEmbarkStudios\u002Ftexture-synthesis\u002Freleases\n\n# 2. 下载 seam mask\n# https:\u002F\u002Fgithub.com\u002FEmbarkStudios\u002Ftexture-synthesis\u002Fblob\u002Fmain\u002Fimgs\u002Fmasks\u002F1_tile.jpg\n\n# 3. 重命名并更新配置文件\n# 将 .\u002Fconf\u002Fplan2scene\u002Fseam_correct-example.json 重命名为 'seam_correct.json'\n# 在文件中更新 texture synthesis 二进制文件路径和 seam mask 路径\n```\n\n### 3. 设置 Python 路径\n运行 Python 脚本时，需将 `code\u002Fsrc` 设置为源码根目录：\n\n```bash\nexport PYTHONPATH=.\u002Fcode\u002Fsrc\n```\n\n### 4. 准备数据与模型\n1. **数据集**：下载 [Rent3D++ dataset](https:\u002F\u002Fforms.gle\u002FmKAmnrzAm3LCK9ua6) 并复制到 `[PROJECT_ROOT]\u002Fdata` 目录。\n   - 数据组织方式参考：`docs\u002Fmd\u002Frent3dpp_data_organization.md`\n   - 确保预提取的 crops 位于 `.\u002Fdata\u002Fprocessed\u002Fsurface_crops`。\n2. **预处理数据**：运行以下脚本生成参考 crops 和照片分配列表：\n   ```bash\n   python code\u002Fscripts\u002Fplan2scene\u002Fpreprocessing\u002Fgenerate_reference_crops.py .\u002Fdata\u002Fprocessed\u002Fgt_reference\u002Ftrain .\u002Fdata\u002Finput\u002Fphoto_assignments\u002Ftrain train\n   python code\u002Fscripts\u002Fplan2scene\u002Fpreprocessing\u002Fgenerate_reference_crops.py .\u002Fdata\u002Fprocessed\u002Fgt_reference\u002Fval .\u002Fdata\u002Finput\u002Fphoto_assignments\u002Fval val\n   python code\u002Fscripts\u002Fplan2scene\u002Fpreprocessing\u002Fgenerate_reference_crops.py .\u002Fdata\u002Fprocessed\u002Fgt_reference\u002Ftest .\u002Fdata\u002Finput\u002Fphoto_assignments\u002Ftest test \n   ```\n3. 
**预训练模型**：下载预训练模型，参考文档：`.\u002Fdocs\u002Fmd\u002Fpretrained_models.md`\n\n## 基本使用\n\n以下示例演示如何在 Rent3D++ 测试集上运行推理并预览结果（无模拟照片缺失，`drop=0.0`）。\n\n### 1. 纹理合成与选择\n使用 VGG 纹理评分为观察到的表面合成纹理。\n\n```bash\npython code\u002Fscripts\u002Fplan2scene\u002Fpreprocessing\u002Ffill_room_embeddings.py .\u002Fdata\u002Fprocessed\u002Ftexture_gen\u002Ftest\u002Fdrop_0.0 test --drop 0.0\npython code\u002Fscripts\u002Fplan2scene\u002Fcrop_select\u002Fvgg_crop_selector.py .\u002Fdata\u002Fprocessed\u002Fvgg_crop_select\u002Ftest\u002Fdrop_0.0 .\u002Fdata\u002Fprocessed\u002Ftexture_gen\u002Ftest\u002Fdrop_0.0 test --drop 0.0\n```\n\n### 2. 纹理传播\n使用纹理传播网络将纹理扩展到未观察到的表面。\n*注意：需替换 `GNN_PROP_CONF_PATH` 和 `GNN_PROP_CHECKPOINT_PATH` 为实际路径。*\n\n```bash\npython code\u002Fscripts\u002Fplan2scene\u002Ftexture_prop\u002Fgnn_texture_prop.py .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Ftest\u002Fdrop_0.0 .\u002Fdata\u002Fprocessed\u002Fvgg_crop_select\u002Ftest\u002Fdrop_0.0 test GNN_PROP_CONF_PATH GNN_PROP_CHECKPOINT_PATH --keep-existing-predictions --drop 0.0\n```\n\n### 3. 后处理与嵌入\n修正纹理接缝并生成可平铺纹理，随后嵌入到场景文件中。\n\n```bash\n# 修正接缝\npython code\u002Fscripts\u002Fplan2scene\u002Fpostprocessing\u002Fseam_correct_textures.py .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Ftest\u002Fdrop_0.0\u002Ftileable_texture_crops .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Ftest\u002Fdrop_0.0\u002Ftexture_crops test --drop 0.0\n\n# 生成 .scene.json 文件\npython code\u002Fscripts\u002Fplan2scene\u002Fpostprocessing\u002Fembed_textures.py .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Ftest\u002Fdrop_0.0\u002Farchs .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Ftest\u002Fdrop_0.0\u002Ftileable_texture_crops test --drop 0.0\n```\n\n### 4. 预览结果\n生成预览页面并在浏览器中查看。\n\n```bash\npython code\u002Fscripts\u002Fplan2scene\u002Fpreview_houses.py .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Ftest\u002Fdrop_0.0\u002Fpreviews .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Ftest\u002Fdrop_0.0\u002Farchs .\u002Fdata\u002Finput\u002Fphotos test --textures-path .\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Ftest\u002Fdrop_0.0\u002Ftileable_texture_crops 0.0\n```\n完成后，打开 `.\u002Fdata\u002Fprocessed\u002Fgnn_prop\u002Ftest\u002Fdrop_0.0\u002Fpreviews\u002Fpreview.html` 查看结果。\n\n### 自定义数据推理\n如果您有自己的平面图图像，可先使用 [raster-to-vector](https:\u002F\u002Fgithub.com\u002Fart-programmer\u002FFloorplanTransformation) 将其转换为矢量格式，然后参考文档 `.\u002Fdocs\u002Fmd\u002Fplan2scene_on_r2v.md` 生成 3D 网格。","室内设计工作室的3D建模师小林，正在为客户的新公寓项目赶制可视化方案，手头只有开发商提供的平面图和几张家装参考照片。\n\n### 没有 plan2scene 时\n- 必须手动在3D软件中逐墙建模、贴材质，光基础结构就要花掉一整天，效率极低。\n- 参考照片中的地板、墙面纹理无法自动提取复用，只能靠肉眼比对后手动裁剪+拉伸，常出现比例失真或接缝明显的问题。\n- 不同房间若想保持风格统一，需反复切换素材库、手动调整色调与质感，一致性难以保障。\n- 客户临时要求“把客厅瓷砖换成卧室木地板”，修改成本高，几乎等于重做整个空间。\n- 最终交付物缺乏真实感，客户常抱怨“效果图和实景差太远”，沟通成本陡增。\n\n### 使用 plan2scene 后\n- 输入平面图和参考照片后，10分钟内自动生成带纹理的完整3D场景网格，省去80%的手工建模时间。\n- 工具智能提取照片中的表面材质（如木纹、瓷砖），并无缝合成到对应墙面\u002F地面，纹理自然无拉伸。\n- 自动识别不同房间的材质偏好，在整套户型中保持视觉连贯性，无需人工调色匹配。\n- 修改材质只需替换输入照片，系统重新生成场景，5分钟完成全局风格切换，响应客户需求零延迟。\n- 输出的3D模型具备照片级真实感，客户一眼就能想象入住效果，方案通过率显著提升。\n\nplan2scene 把“平面图+照片”直接变成可交互的真实感3D空间，让设计师从重复劳动中解放，专注创意本身。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002F3dlg-hcvc_plan2scene_d9a0dbf9.png","3dlg-hcvc","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002F3dlg-hcvc_ebe92c56.png","3DLG: 3D Language & Generation Research 
Group",null,"https:\u002F\u002Fgithub.com\u002F3dlg-hcvc",[81,85,89,93],{"name":82,"color":83,"percentage":84},"Python","#3572A5",96.9,{"name":86,"color":87,"percentage":88},"Cuda","#3A4E3A",1.8,{"name":90,"color":91,"percentage":92},"Shell","#89e051",1,{"name":94,"color":95,"percentage":96},"C++","#f34b7d",0.3,590,78,"2026-04-04T15:50:04","MIT","Linux","需要 NVIDIA GPU (通过 CUDA_VISIBLE_DEVICES 推断)，具体型号、显存及 CUDA 版本未说明","未说明",{"notes":105,"python":106,"dependencies":107},"1. 需按照文档初始化 conda 环境。2. 需下载 Embark Studios texture-synthesis 二进制文件及 seam mask 并配置 JSON 路径。3. 运行 Python 脚本前需设置环境变量 PYTHONPATH=.\u002Fcode\u002Fsrc。4. 预览功能需克隆并编译 SmartScenesToolkit。5. 需下载 Rent3D++ 数据集并按说明组织目录结构。","未说明 (需参考 docs\u002Fmd\u002Fconda_env_setup.md)",[108,109,110],"conda","Embark Studios texture-synthesis (command line binary)","SmartScenesToolkit (用于预览和渲染)",[14,54,13],[113,114,115,116,117],"texture-synthesis","indoor-reconstruction","3d-reconstruction","computer-vision","machine-learning",7,"2026-03-27T02:49:30.150509","2026-04-06T07:14:24.998061",[122,127,132,137,142,147],{"id":123,"question_zh":124,"answer_zh":125,"source_url":126},597,"如何在自定义数据上运行预训练模型？","您可以参考官方文档了解如何将光栅到矢量（raster-to-vector）的输出与 plan2scene 结合使用：https:\u002F\u002Fgithub.com\u002F3dlg-hcvc\u002Fplan2scene\u002Fblob\u002Fmain\u002Fdocs\u002Fmd\u002Fplan2scene_on_r2v.md。README 文件中也更新了关于如何使用自定义数据进行推理的说明。如果您已有矢量化的平面图数据及对应的房间照片，可按照指南配置数据路径后进行推理。","https:\u002F\u002Fgithub.com\u002F3dlg-hcvc\u002Fplan2scene\u002Fissues\u002F23",{"id":128,"question_zh":129,"answer_zh":130,"source_url":131},598,"Rent3D 数据集无法下载，训练模型必须使用该数据集吗？","训练模型不需要原始的 Rent3D 数据集。对于纹理合成阶段，可以使用 stationary textures v2 数据集（详见：https:\u002F\u002Fgithub.com\u002F3dlg-hcvc\u002Fplan2scene\u002Fblob\u002Fmain\u002Fdocs\u002Fmd\u002Fstationary_textures_dataset_v2.md）。对于纹理传播阶段，可以使用 Rent3D++ 数据集中提供的校正表面裁剪（rectified surface crops）。","https:\u002F\u002Fgithub.com\u002F3dlg-hcvc\u002Fplan2scene\u002Fissues\u002F33",{"id":133,"question_zh":134,"answer_zh":135,"source_url":136},599,"代码中是否包含平面图矢量化过程？支持 CAD 文件吗？","该仓库不包含矢量化过程，假设输入已经是矢量化的平面图（格式为 scene.json），解析代码位于 `code\u002Fsrc\u002Farch_parser\u002Fparser.py`。如果您需要将光栅图像转换为矢量平面图，建议使用其他专门的项目，例如：https:\u002F\u002Fgithub.com\u002Fart-programmer\u002FFloorplanTransformation。","https:\u002F\u002Fgithub.com\u002F3dlg-hcvc\u002Fplan2scene\u002Fissues\u002F21",{"id":138,"question_zh":139,"answer_zh":140,"source_url":141},600,"Smart Scene Toolkit (SSTK) 安装困难，有 Docker 配置或安装命令吗？","维护者提供了在 Docker 中运行 SSTK 的命令序列。建议基于 ubuntu:16.04 镜像，安装必要依赖后克隆仓库。关键步骤包括：\n1. `docker run -it -p 8010:8010 ubuntu:16.04`\n2. `apt-get install -y curl git build-essential libxi-dev libglu1-mesa-dev libglew-dev libvips`\n3. 安装 nvm 并设置 node 版本：`nvm install v10.23.2`\n4. 
克隆仓库：`git clone https:\u002F\u002Fgithub.com\u002Fsmartscenes\u002Fsstk.git`","https:\u002F\u002Fgithub.com\u002F3dlg-hcvc\u002Fplan2scene\u002Fissues\u002F27",{"id":143,"question_zh":144,"answer_zh":145,"source_url":146},601,"运行 Colab 或本地环境时遇到 torch\u002Ftorchvision 版本错误或导入失败怎么办？","版本兼容性至关重要。用户反馈以下配置可能存在问题：python==3.7.13 + torch==1.7.1 或 python==3.6.9 + torch==1.8.0。建议尝试配置：python==3.7.13, torch==1.11.0+cu113, torchvision==0.12.0+cu113。注意：torch 1.11.0 中 `torchvision.models.utils` 已弃用，若报错需通过 torch.hub 修复或调整相关代码导入方式。","https:\u002F\u002Fgithub.com\u002F3dlg-hcvc\u002Fplan2scene\u002Fissues\u002F28",{"id":148,"question_zh":149,"answer_zh":150,"source_url":151},602,"有没有用于快速体验推理的 Google Colab 笔记本？","维护者已准备了用于推理的 Colab Notebook。您可以访问以下链接尝试：https:\u002F\u002Fcolab.research.google.com\u002Fdrive\u002F1lDkbfIV0drR1o9D0WYzoWeRskB91nXHq?usp=sharing。建议同时查看项目 README 文件以获取最新更新的笔记本链接。","https:\u002F\u002Fgithub.com\u002F3dlg-hcvc\u002Fplan2scene\u002Fissues\u002F22",[]]