[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"similar-fahadshamshad--awesome-transformers-in-medical-imaging":3,"tool-fahadshamshad--awesome-transformers-in-medical-imaging":64},[4,17,27,35,43,56],{"id":5,"name":6,"github_repo":7,"description_zh":8,"stars":9,"difficulty_score":10,"last_commit_at":11,"category_tags":12,"status":16},3808,"stable-diffusion-webui","AUTOMATIC1111\u002Fstable-diffusion-webui","stable-diffusion-webui 是一个基于 Gradio 构建的网页版操作界面，旨在让用户能够轻松地在本地运行和使用强大的 Stable Diffusion 图像生成模型。它解决了原始模型依赖命令行、操作门槛高且功能分散的痛点，将复杂的 AI 绘图流程整合进一个直观易用的图形化平台。\n\n无论是希望快速上手的普通创作者、需要精细控制画面细节的设计师，还是想要深入探索模型潜力的开发者与研究人员，都能从中获益。其核心亮点在于极高的功能丰富度：不仅支持文生图、图生图、局部重绘（Inpainting）和外绘（Outpainting）等基础模式，还独创了注意力机制调整、提示词矩阵、负向提示词以及“高清修复”等高级功能。此外，它内置了 GFPGAN 和 CodeFormer 等人脸修复工具，支持多种神经网络放大算法，并允许用户通过插件系统无限扩展能力。即使是显存有限的设备，stable-diffusion-webui 也提供了相应的优化选项，让高质量的 AI 艺术创作变得触手可及。",162132,3,"2026-04-05T11:01:52",[13,14,15],"开发框架","图像","Agent","ready",{"id":18,"name":19,"github_repo":20,"description_zh":21,"stars":22,"difficulty_score":23,"last_commit_at":24,"category_tags":25,"status":16},1381,"everything-claude-code","affaan-m\u002Feverything-claude-code","everything-claude-code 是一套专为 AI 编程助手（如 Claude Code、Codex、Cursor 等）打造的高性能优化系统。它不仅仅是一组配置文件，而是一个经过长期实战打磨的完整框架，旨在解决 AI 代理在实际开发中面临的效率低下、记忆丢失、安全隐患及缺乏持续学习能力等核心痛点。\n\n通过引入技能模块化、直觉增强、记忆持久化机制以及内置的安全扫描功能，everything-claude-code 能显著提升 AI 在复杂任务中的表现，帮助开发者构建更稳定、更智能的生产级 AI 代理。其独特的“研究优先”开发理念和针对 Token 消耗的优化策略，使得模型响应更快、成本更低，同时有效防御潜在的攻击向量。\n\n这套工具特别适合软件开发者、AI 研究人员以及希望深度定制 AI 工作流的技术团队使用。无论您是在构建大型代码库，还是需要 AI 协助进行安全审计与自动化测试，everything-claude-code 都能提供强大的底层支持。作为一个曾荣获 Anthropic 黑客大奖的开源项目，它融合了多语言支持与丰富的实战钩子（hooks），让 AI 真正成长为懂上",140436,2,"2026-04-05T23:32:43",[13,15,26],"语言模型",{"id":28,"name":29,"github_repo":30,"description_zh":31,"stars":32,"difficulty_score":23,"last_commit_at":33,"category_tags":34,"status":16},2271,"ComfyUI","Comfy-Org\u002FComfyUI","ComfyUI 是一款功能强大且高度模块化的视觉 AI 引擎，专为设计和执行复杂的 Stable Diffusion 图像生成流程而打造。它摒弃了传统的代码编写模式，采用直观的节点式流程图界面，让用户通过连接不同的功能模块即可构建个性化的生成管线。\n\n这一设计巧妙解决了高级 AI 绘图工作流配置复杂、灵活性不足的痛点。用户无需具备编程背景，也能自由组合模型、调整参数并实时预览效果，轻松实现从基础文生图到多步骤高清修复等各类复杂任务。ComfyUI 拥有极佳的兼容性，不仅支持 Windows、macOS 和 Linux 全平台，还广泛适配 NVIDIA、AMD、Intel 及苹果 Silicon 等多种硬件架构，并率先支持 SDXL、Flux、SD3 等前沿模型。\n\n无论是希望深入探索算法潜力的研究人员和开发者，还是追求极致创作自由度的设计师与资深 AI 绘画爱好者，ComfyUI 都能提供强大的支持。其独特的模块化架构允许社区不断扩展新功能，使其成为当前最灵活、生态最丰富的开源扩散模型工具之一，帮助用户将创意高效转化为现实。",107662,"2026-04-03T11:11:01",[13,14,15],{"id":36,"name":37,"github_repo":38,"description_zh":39,"stars":40,"difficulty_score":23,"last_commit_at":41,"category_tags":42,"status":16},3704,"NextChat","ChatGPTNextWeb\u002FNextChat","NextChat 是一款轻量且极速的 AI 助手，旨在为用户提供流畅、跨平台的大模型交互体验。它完美解决了用户在多设备间切换时难以保持对话连续性，以及面对众多 AI 模型不知如何统一管理的痛点。无论是日常办公、学习辅助还是创意激发，NextChat 都能让用户随时随地通过网页、iOS、Android、Windows、MacOS 或 Linux 端无缝接入智能服务。\n\n这款工具非常适合普通用户、学生、职场人士以及需要私有化部署的企业团队使用。对于开发者而言，它也提供了便捷的自托管方案，支持一键部署到 Vercel 或 Zeabur 等平台。\n\nNextChat 的核心亮点在于其广泛的模型兼容性，原生支持 Claude、DeepSeek、GPT-4 及 Gemini Pro 等主流大模型，让用户在一个界面即可自由切换不同 AI 能力。此外，它还率先支持 MCP（Model Context Protocol）协议，增强了上下文处理能力。针对企业用户，NextChat 提供专业版解决方案，具备品牌定制、细粒度权限控制、内部知识库整合及安全审计等功能，满足公司对数据隐私和个性化管理的高标准要求。",87618,"2026-04-05T07:20:52",[13,26],{"id":44,"name":45,"github_repo":46,"description_zh":47,"stars":48,"difficulty_score":23,"last_commit_at":49,"category_tags":50,"status":16},2268,"ML-For-Beginners","microsoft\u002FML-For-Beginners","ML-For-Beginners 是由微软推出的一套系统化机器学习入门课程，旨在帮助零基础用户轻松掌握经典机器学习知识。这套课程将学习路径规划为 12 周，包含 26 节精炼课程和 52 
道配套测验，内容涵盖从基础概念到实际应用的完整流程，有效解决了初学者面对庞大知识体系时无从下手、缺乏结构化指导的痛点。\n\n无论是希望转型的开发者、需要补充算法背景的研究人员，还是对人工智能充满好奇的普通爱好者，都能从中受益。课程不仅提供了清晰的理论讲解，还强调动手实践，让用户在循序渐进中建立扎实的技能基础。其独特的亮点在于强大的多语言支持，通过自动化机制提供了包括简体中文在内的 50 多种语言版本，极大地降低了全球不同背景用户的学习门槛。此外，项目采用开源协作模式，社区活跃且内容持续更新，确保学习者能获取前沿且准确的技术资讯。如果你正寻找一条清晰、友好且专业的机器学习入门之路，ML-For-Beginners 将是理想的起点。",84991,"2026-04-05T10:45:23",[14,51,52,53,15,54,26,13,55],"数据工具","视频","插件","其他","音频",{"id":57,"name":58,"github_repo":59,"description_zh":60,"stars":61,"difficulty_score":10,"last_commit_at":62,"category_tags":63,"status":16},3128,"ragflow","infiniflow\u002Fragflow","RAGFlow 是一款领先的开源检索增强生成（RAG）引擎，旨在为大语言模型构建更精准、可靠的上下文层。它巧妙地将前沿的 RAG 技术与智能体（Agent）能力相结合，不仅支持从各类文档中高效提取知识，还能让模型基于这些知识进行逻辑推理和任务执行。\n\n在大模型应用中，幻觉问题和知识滞后是常见痛点。RAGFlow 通过深度解析复杂文档结构（如表格、图表及混合排版），显著提升了信息检索的准确度，从而有效减少模型“胡编乱造”的现象，确保回答既有据可依又具备时效性。其内置的智能体机制更进一步，使系统不仅能回答问题，还能自主规划步骤解决复杂问题。\n\n这款工具特别适合开发者、企业技术团队以及 AI 研究人员使用。无论是希望快速搭建私有知识库问答系统，还是致力于探索大模型在垂直领域落地的创新者，都能从中受益。RAGFlow 提供了可视化的工作流编排界面和灵活的 API 接口，既降低了非算法背景用户的上手门槛，也满足了专业开发者对系统深度定制的需求。作为基于 Apache 2.0 协议开源的项目，它正成为连接通用大模型与行业专有知识之间的重要桥梁。",77062,"2026-04-04T04:44:48",[15,14,13,26,54],{"id":65,"github_repo":66,"name":67,"description_en":68,"description_zh":69,"ai_summary_zh":70,"readme_en":71,"readme_zh":72,"quickstart_zh":73,"use_case_zh":74,"hero_image_url":75,"owner_login":76,"owner_name":77,"owner_avatar_url":78,"owner_bio":79,"owner_company":80,"owner_location":81,"owner_email":81,"owner_twitter":82,"owner_website":83,"owner_url":84,"languages":81,"stars":85,"forks":86,"last_commit_at":87,"license":81,"difficulty_score":88,"env_os":89,"env_gpu":89,"env_ram":89,"env_deps":90,"category_tags":93,"github_topics":94,"view_count":10,"oss_zip_url":81,"oss_zip_packed_at":81,"status":16,"created_at":112,"updated_at":113,"faqs":114,"releases":135},650,"fahadshamshad\u002Fawesome-transformers-in-medical-imaging","awesome-transformers-in-medical-imaging","A collection of resources on applications of Transformers in Medical Imaging.","awesome-transformers-in-medical-imaging 是一个精心整理的资源库，专门收录医学影像领域中基于 Transformer 架构的最新研究成果与开源代码。面对人工智能技术在医疗诊断中的飞速发展，研究者往往难以追踪分散的文献与实现细节，awesome-transformers-in-medical-imaging 有效解决了信息碎片化的问题。\n\n内容按任务类型分类，涵盖图像分割、分类、重建、配准及临床报告生成等核心方向，并严格遵循时间顺序排列，方便用户快速把握技术演进脉络。作为一篇发表于《Medical Image Analysis》期刊的高影响力综述论文的补充资源，其权威性得到了广泛认可，甚至成为该期刊下载量前三的文章之一。\n\n无论是从事计算机视觉、医学影像分析的科研人员，还是希望复现算法的开发者，都能从中获益。社区鼓励贡献者通过 Pull Request 更新最新论文或代码，保持了资源的时效性。对于想要入门或深入探索 Transformer 在医疗场景应用的技术人员来说，awesome-transformers-in-medical-imaging 是","awesome-transformers-in-medical-imaging 是一个精心整理的资源库，专门收录医学影像领域中基于 Transformer 架构的最新研究成果与开源代码。面对人工智能技术在医疗诊断中的飞速发展，研究者往往难以追踪分散的文献与实现细节，awesome-transformers-in-medical-imaging 有效解决了信息碎片化的问题。\n\n内容按任务类型分类，涵盖图像分割、分类、重建、配准及临床报告生成等核心方向，并严格遵循时间顺序排列，方便用户快速把握技术演进脉络。作为一篇发表于《Medical Image Analysis》期刊的高影响力综述论文的补充资源，其权威性得到了广泛认可，甚至成为该期刊下载量前三的文章之一。\n\n无论是从事计算机视觉、医学影像分析的科研人员，还是希望复现算法的开发者，都能从中获益。社区鼓励贡献者通过 Pull Request 更新最新论文或代码，保持了资源的时效性。对于想要入门或深入探索 Transformer 在医疗场景应用的技术人员来说，awesome-transformers-in-medical-imaging 是一个不可或缺的导航站。","[![Maintenance](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMaintained%3F-yes-green.svg)](https:\u002F\u002FGitHub.com\u002FNaereen\u002FStrapDown.js\u002Fgraphs\u002Fcommit-activity)\n[![PR's Welcome](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FPRs-welcome-brightgreen.svg?style=flat)](http:\u002F\u002Fmakeapullrequest.com) \n[![Awesome](https:\u002F\u002Fcdn.rawgit.com\u002Fsindresorhus\u002Fawesome\u002Fd7305f38d29fed78fa85652e3a63e154dd8e8829\u002Fmedia\u002Fbadge.svg)](https:\u002F\u002Fgithub.com\u002Fsindresorhus\u002Fawesome)\n\n# 
\u003Cp align=center> This repository complements our survey paper [Transformers in Medical Imaging: A Survey](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS1361841523000634), published in Medical Image Analysis.\n\n#  📢📢🏆🏆🏆 Spotlight: Our article is now among the Top-3 Most Downloaded Articles of the Medical Image Analysis Journal! 🏆🏆🏆\n\n\n\nAuthors: [Fahad Shamshad](https:\u002F\u002Fscholar.google.com.pk\u002Fcitations?user=d7QL4wkAAAAJ&hl=en), [Salman Khan](https:\u002F\u002Fsalman-h-khan.github.io\u002F), [Syed Waqas Zamir](https:\u002F\u002Fscholar.google.ae\u002Fcitations?hl=en&user=POoai-QAAAAJ), [Muhammad Haris Khan](https:\u002F\u002Fscholar.google.com\u002Fcitations?user=ZgERfFwAAAAJ&hl=en), [Munawar Hayat](https:\u002F\u002Fscholar.google.com\u002Fcitations?user=Mx8MbWYAAAAJ&hl=en), [Fahad Shahbaz Khan](https:\u002F\u002Fscholar.google.es\u002Fcitations?user=zvaeYnUAAAAJ&hl=en), and [Huazhu Fu](https:\u002F\u002Fhzfu.github.io\u002F)\n\u003C\u002Fp>\n\n\n\n![](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Ffahadshamshad_awesome-transformers-in-medical-imaging_readme_5e3281930d32.gif)\n\n\u003Chr \u002F>\n\n# \u003Cp align=center>`Awesome Transformers in Medical Imaging`\u003C\u002Fp>\n\nA curated list of awesome Transformer resources in medical imaging (**in chronological order**), inspired by other awesome initiatives. We intend to regularly update the latest relevant papers and their open-source implementations on this page.\n\nWe strongly encourage researchers who want to promote their work to the community to make a pull request to update their paper's information!\n\n## Overview\n- [Survey papers](#survey)\n- [Medical Image Segmentation](#segmentation)\n- [Medical Image Classification](#classification)\n- [Medical Image Reconstruction](#reconstruction)\n- [Medical Image Registration](#registration)\n- [Medical Image Synthesis](#synthesis)\n- [Medical Image Detection](#detection)\n- [Clinical Report Generation](#clinical-report-generation)\n- [Others](#others)\n\n# Survey\n\n[**Transformers in Medical Imaging: A Survey.**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2201.09873) [25th Jan., 2022] \u003Cbr>\n*[Fahad Shamshad](https:\u002F\u002Fscholar.google.com.pk\u002Fcitations?user=d7QL4wkAAAAJ&hl=en), [Salman Khan](https:\u002F\u002Fsalman-h-khan.github.io\u002F), [Syed Waqas Zamir](https:\u002F\u002Fscholar.google.es\u002Fcitations?user=WNGPkVQAAAAJ&hl=en), [Muhammad Haris Khan](https:\u002F\u002Fscholar.google.com\u002Fcitations?user=ZgERfFwAAAAJ&hl=en), [Munawar Hayat](https:\u002F\u002Fscholar.google.com\u002Fcitations?user=Mx8MbWYAAAAJ&hl=en), [Fahad Shahbaz Khan](https:\u002F\u002Fscholar.google.es\u002Fcitations?user=zvaeYnUAAAAJ&hl=en), and [Huazhu Fu](https:\u002F\u002Fhzfu.github.io\u002F).*\u003Cbr>\n[[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2201.09873)] \n\n**Advances in Medical Image Analysis with Vision Transformers: A Comprehensive Review.** [9th Jan., 2023].\u003Cbr>\n*Reza Azad, Amirhossein Kazerouni, Moein Heidari, Ehsan Khodapanah Aghdam, Amirali Molaei, Yiwei Jia, Abin Jose, Rijo Roy, Dorit Merhof.*\u003Cbr>\n[[PDF](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2301.03505.pdf)]\n\n**Medical image analysis based on transformer: A Review.** [13th Aug., 2022].\u003Cbr>\n*Zhaoshan Liu, Lei Shen.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.06643)] \n \n **Transforming medical imaging with Transformers? 
A comparative review of key properties, current progresses, and future perspectives.** [3rd June, 2022].\u003Cbr>\n*Jun Li, Junyu Chen, Yucheng Tang, Ce Wang, Bennett A. Landman, S. Kevin Zhou.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.01136)] \n\n**Vision Transformers in Medical Computer Vision -- A Contemplative Retrospection.** [29th March, 2022].\u003Cbr>\n*Arshi Parvaiz, Muhammad Anwaar Khalid, Rukhsana Zafar, Huma Ameer, Muhammad Ali, Muhammad Moazam Fraz.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.15269)] \n\n**Transformers in Medical Image Analysis: A Review.** [24th Feb., 2022].\u003Cbr>\n*Kelei He, Chen Gan, Zhuoyuan Li, Islem Rekik, Zihao Yin, Wen Ji, Yang Gao, Qian Wang, Junfeng Zhang, Dinggang Shen.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.12165)] \n\n**Application of Transformer in Medical Image Segmentation.** [25th Oct., 2021].\u003Cbr>\n*Wenyin Zhang, Weijie Hao, Yuan Qi and Yong Wu.*\u003Cbr>\n [[PDF](https:\u002F\u002Fbiomedgrid.com\u002Fpdf\u002FAJBSR.MS.ID.002014.pdf)] \n\n\n\n# Segmentation\n\n**Attention-Based Transformers for Instance Segmentation of Cells in Microstructures.** [20th Nov., 2020] [BIBM, 2020].\u003Cbr>\n*Tim Prangemeier, Christoph Reich, Heinz Koeppl.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2011.09763)] [[Github](https:\u002F\u002Fgithub.com\u002FChristophReich1996\u002FCell-DETR)]\n \n **TransUNet: Transformers Make Strong Encoders for Medical Image Segmentation.** [8th Feb., 2021].\u003Cbr>\n*Jieneng Chen, Yongyi Lu, Qihang Yu, Xiangde Luo, Ehsan Adeli, Yan Wang, Le Lu, Alan L. Yuille, Yuyin Zhou.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2102.04306)] [[Github](https:\u002F\u002Fgithub.com\u002FBeckschen\u002FTransUNet)]\n \n  **TransFuse: Fusing Transformers and CNNs for Medical Image Segmentation.** [16th Feb., 2021] [⚡MICCAI, 2021].\u003Cbr>\n*Yundong Zhang, Huiye Liu, Qiang Hu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2102.08005)] [[Github](https:\u002F\u002Fgithub.com\u002FRayicer\u002FTransFuse)]\n \n   **Unsupervised Brain Anomaly Detection and Segmentation with Transformers.** [23rd Feb., 2021] [MIDL, 2021].\u003Cbr>\n*Walter Hugo Lopez Pinaya, Petru-Daniel Tudosiu, Robert Gray, Geraint Rees, Parashkev Nachev, Sebastien Ourselin, M. 
Jorge Cardoso.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2102.11650)]\n \n  **Convolution-Free Medical Image Segmentation using Transformers.** [26th Feb., 2021] [⚡MICCAI, 2021].\u003Cbr>\n*Davood Karimi, Serge Vasylechko, Ali Gholipour.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2102.13645)]\n \n  **CoTr: Efficiently Bridging CNN and Transformer for 3D Medical Image Segmentation.** [4th March, 2021] [⚡MICCAI, 2021].\u003Cbr>\n*Yutong Xie, Jianpeng Zhang, Chunhua Shen, Yong Xia.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.03024)] [[Github](https:\u002F\u002Fgithub.com\u002FYtongXie\u002FCoTr)]\n \n  **SpecTr: Spectral Transformer for Hyperspectral Pathology Image Segmentation.** [5th March, 2021].\u003Cbr>\n*Boxiang Yun, Yan Wang, Jieneng Chen, Huiyu Wang, Wei Shen, Qingli Li.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.03604)] [[Github](https:\u002F\u002Fgithub.com\u002Fhfut-xc-yun\u002FSpecTr)]\n \n  **TransBTS: Multimodal Brain Tumor Segmentation Using Transformer.** [7th March, 2021] [⚡MICCAI, 2021].\u003Cbr>\n*Wenxuan Wang, Chen Chen, Meng Ding, Jiangyun Li, Hong Yu, Sen Zha.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.04430)] [[Github](https:\u002F\u002Fgithub.com\u002FWenxuan-1119\u002FTransBTS)]\n \n  **U-Net Transformer: Self and Cross Attention for Medical Image Segmentation.** [10th March, 2021].\u003Cbr>\n*Olivier Petit, Nicolas Thome, Clément Rambour, Luc Soler.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.06104)] [[Github](https:\u002F\u002Fgithub.com\u002FHXLH50K\u002FU-Net-Transformer)]\n \n   **UNETR: Transformers for 3D Medical Image Segmentation .** [18th March, 2021].\u003Cbr>\n*Ali Hatamizadeh, Yucheng Tang, Vishwesh Nath, Dong Yang, Andriy Myronenko, Bennett Landman, Holger Roth, Daguang Xu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.10504)] [[Github](https:\u002F\u002Fgithub.com\u002FProject-MONAI\u002Fresearch-contributions\u002Ftree\u002Fmaster\u002FUNETR\u002FBTCV)]\n \n   **Medical Transformer: Universal Brain Encoder for 3D MRI Analysis.** [28th April, 2021].\u003Cbr>\n*Eunji Jun, Seungwoo Jeong, Da-Woon Heo, Heung-Il Suk.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.13633)]\n \n   **Pyramid Medical Transformer for Medical Image Segmentation .** [29th April, 2021].\u003Cbr>\n*Zhuangzhuang Zhang, Baozhou Sun, Weixiong Zhang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.14702)]\n \n   **GasHis-Transformer: A Multi-scale Visual Transformer Approach for Gastric Histopathology Image Classification.** [29th April, 2021].\u003Cbr>\n*Haoyuan Chen, Chen Li, Xiaoyan Li, Ge Wang, Weiming Hu, Yixin Li, Wanli Liu, Changhao Sun, Yudong Yao, Yueyang Teng, Marcin Grzegorzek.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.14528)]\n \n   **Swin-Unet: Unet-like Pure Transformer for Medical Image Segmentationr.** [12th May, 2021].\u003Cbr>\n*Hu Cao, Yueyue Wang, Joy Chen, Dongsheng Jiang, Xiaopeng Zhang, Qi Tian, Manning Wang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2105.05537)]\n \n   **Medical Image Segmentation Using Squeeze-and-Expansion Transformers.** [20th May, 2021] [⚡IJCAI, 2021].\u003Cbr>\n*Shaohua Li, Xiuchao Sui, Xiangde Luo, Xinxing Xu, Yong Liu, Rick Goh.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2105.09511)] [[Github](https:\u002F\u002Fgithub.com\u002Faskerlee\u002Fsegtran)]\n \n   **A Multi-Branch Hybrid Transformer 
Network for Corneal Endothelial Cell Segmentation.** [21st May, 2021] [⚡MICCAI, 2021].\u003Cbr>\n*Yinglin Zhang, Risa Higashita, Huazhu Fu, Yanwu Xu, Yang Zhang, Haofeng Liu, Jian Zhang, Jiang Liu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.07557)] \n \n   **DS-TransUNet:Dual Swin Transformer U-Net for Medical Image Segmentation.** [12 June, 2021].\u003Cbr>\n*Ailiang Lin, Bingzhi Chen, Jiayu Xu, Zheng Zhang, Guangming Lu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.06716)] \n \n   **More than Encoder: Introducing Transformer Decoder to Upsample.** [20th June, 2021].\u003Cbr>\n*Yijiang Li, Wentian Cai, Ying Gao, Xiping Hu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.10637)]\n \n   **Multi-Compound Transformer for Accurate Biomedical Image Segmentation.** [28th June, 2021] [⚡MICCAI, 2021].\u003Cbr>\n*Yuanfeng Ji, Ruimao Zhang, Huijie Wang, Zhen Li, Lingyun Wu, Shaoting Zhang, Ping Luo.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.14385)] [[Github](https:\u002F\u002Fgithub.com\u002FJiYuanFeng\u002FMCTrans)]\n \n   **UTNet: A Hybrid Transformer Architecture for Medical Image Segmentation.** [2nd July, 2021] [⚡MICCAI, 2021].\u003Cbr>\n*Yunhe Gao, Mu Zhou, Dimitris Metaxas.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2107.04805)] [[Github](https:\u002F\u002Fgithub.com\u002Faskerlee\u002Fsegtran)]\n \n   **Few-Shot Domain Adaptation with Polymorphic Transformers.** [10th July, 2021] [⚡MICCAI, 2021].\u003Cbr>\n*Shaohua Li, Xiuchao Sui, Jie Fu, Huazhu Fu, Xiangde Luo, Yangqin Feng, Xinxing Xu, Yong Liu, Daniel Ting, Rick Siow Mong Goh.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.04430)] [[Github](https:\u002F\u002Fgithub.com\u002FWenxuan-1119\u002FTransBTS)]\n \n   **TransClaw U-Net: Claw U-Net with Transformers for Medical Image Segmentation.** [12th July, 2021].\u003Cbr>\n*Yao Chang, Hu Menghan, Zhai Guangtao, Zhang Xiao-Ping.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2107.05188)]\n \n   **TransAttUnet: Multi-level Attention-guided U-Net with Transformer for Medical Image Segmentation.** [12th July, 2021].\u003Cbr>\n*Bingzhi Chen, Yishu Liu, Zheng Zhang, Guangming Lu, David Zhang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2107.05274)]\n \n   **LeViT-UNet: Make Faster Encoders with Transformer for Medical Image Segmentation.** [19th July, 2021].\u003Cbr>\n*Guoping Xu, Xingrong Wu, Xuan Zhang, Xinwei He.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2107.08623)] [[Github](https:\u002F\u002Fgithub.com\u002Fapple1986\u002FLeViT_UNet)]\n \n   **Polyp-PVT: Polyp Segmentation with Pyramid Vision Transformers.** [16th August, 2021].\u003Cbr>\n*Bo Dong, Wenhai Wang, Deng-Ping Fan, Jinpeng Li, Huazhu Fu, Ling Shao.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2108.06932)] [[Github](https:\u002F\u002Fgithub.com\u002FDengPingFan\u002FPolyp-PVT)]\n \n   **Evaluating Transformer-based Semantic Segmentation Networks for Pathological Image Segmentation.** [26th August, 2021].\u003Cbr>\n*Cam Nguyen, Zuhayr Asad, Yuankai Huo.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2108.11993)]\n \n   **Automated Kidney Tumor Segmentation with Convolution and Transformer Network.** [30th August, 2021] [👍 MICCAI KiTS Challenge, 2021].\u003Cbr>\n*Zhiqiang Shen, Zhiqiang_Shen, Hua Yang, Zhen Zhang, Shaohua Zheng.*\u003Cbr>\n [[PDF](https:\u002F\u002Fopenreview.net\u002Fforum?id=voteINyy36u)]\n \n   **nnFormer: 
Interleaved Transformer for Volumetric Segmentation.** [7th Sep., 2021].\u003Cbr>\n*Hong-Yu Zhou, Jiansen Guo, Yinghao Zhang, Lequan Yu, Liansheng Wang, Yizhou Yu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2109.03201)] [[Github](https:\u002F\u002Fgithub.com\u002F282857341\u002Fnnformer)]\n \n   **UCTransNet: Rethinking the Skip Connections in U-Net from a Channel-wise Perspective with Transformer.** [9th Sep., 2021].\u003Cbr>\n*Haonan Wang, Peng Cao, Jiaqi Wang, Osmar R. Zaiane.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2109.04335)] [[Github](https:\u002F\u002Fgithub.com\u002Fmcgregorwwww\u002Fuctransnet)]\n \n   **MISSFormer: An Effective Medical Image Segmentation Transformer.** [15th Sep., 2021].\u003Cbr>\n*Xiaohong Huang, Zhifang Deng, Dandan Li, Xueguang Yuan.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2109.07162)]\n \n   **TransBridge: A Lightweight Transformer for Left Ventricle Segmentation in Echocardiography.** [21st Sep., 2021] [👍 MICCAI Simplifying Medical Ultrasound Workshop, 2021].\u003Cbr>\n*Kaizhong Deng, Yanda Meng, Dongxu Gao, Joshua Bridge, Yaochun Shen, Gregory Lip, Yitian Zhao, Yalin Zheng.*\u003Cbr>\n [[PDF](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-030-87583-1_7)] \n \n   **BiTr-Unet: a CNN-Transformer Combined Network for MRI Brain Tumor Segmentation.** [25th Sep., 2021] [👍 MICCAI BraTS DREAM Challenge ShuLab, 2021].\u003Cbr>\n*Qiran Jia, Hai Shu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2109.12271)]\n \n   **GT U-Net: A U-Net Like Group Transformer Network for Tooth Root Segmentation.** [30th Sep., 2021] [👍 MICCAI MLMI Workshop, 2021].\u003Cbr>\n*Yunxiang Li, Shuai Wang, Jun Wang, Guodong Zeng, Wenjun Liu, Qianni Zhang, Qun Jin, Yaqi Wang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2109.14813)] [[Github](https:\u002F\u002Fgithub.com\u002Fkent0n-li\u002Fgt-u-net)]\n \n   **Transformer Assisted Convolutional Network for Cell Instance Segmentation.** [5th Oct., 2021] [👍 ISBI Workshop, 2021].\u003Cbr>\n*Deepanshu Pandey, Pradyumna Gupta, Sumit Bhattacharya, Aman Sinha, Rohit Agarwal.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2110.02270)] [[Github](https:\u002F\u002Fgithub.com\u002Fdsciitism\u002Fsegpc-2021)]\n \n   **Boundary-aware Transformers for Skin Lesion Segmentation.** [8th Oct., 2021] [⚡MICCAI, 2021].\u003Cbr>\n*Jiacheng Wang, Lan Wei, Liansheng Wang, Qichao Zhou, Lei Zhu, Jing Qin.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2110.03864)] [[Github](https:\u002F\u002Fgithub.com\u002Fjcwang123\u002FBA-Transformer)]\n \n   **Spine-transformers: Vertebra labeling and segmentation in arbitrary field-of-view spine CTs via 3D transformers.** [10th Oct., 2021] [⚡MIA, 2021].\u003Cbr>\n*Rong Tao, Wenyong Liu, Guoyan Zheng.*\u003Cbr>\n [[PDF](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS1361841521003030)]\n \n   **AFTer-UNet: Axial Fusion Transformer UNet for Medical Image Segmentation.** [20th Oct., 2021].\u003Cbr>\n*Xiangyi Yan, Hao Tang, Shanlin Sun, Haoyu Ma, Deying Kong, Xiaohui Xie.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2110.10403)]\n \n   **Hepatic vessel segmentation based on 3D swin-transformer with inductive biased multi-head self-attention.** [5th Nov., 2021].\u003Cbr>\n*Mian Wu, Yinling Qian, Xiangyun Liao, Qiong Wang, Pheng-Ann Heng.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.03368)] \n \n   **Mixed Transformer U-Net For Medical Image 
Segmentation.** [8th, Nov. 2021].\u003Cbr>\n*Hongyi Wang, Shiao Xie, Lanfen Lin, Yutaro Iwamoto, Xian-Hua Han, Yen-Wei Chen, Ruofeng Tong.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.04734)] [[Github](https:\u002F\u002Fgithub.com\u002Fdootmaan\u002Fmt-unet)]\n \n   **T-AutoML: Automated Machine Learning for Lesion Segmentation using Transformers in 3D Medical Imaging.** [15th Nov., 2021] [⚡ICCV, 2021].\u003Cbr>\n*Dong Yang, Andriy Myronenko, Xiaosong Wang, Ziyue Xu, Holger R. Roth, Daguang Xu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.07535)]\n \n   **A Volumetric Transformer for Accurate 3D Tumor Segmentation.** [26th Nov., 2021].\u003Cbr>\n*Himashi Peiris, Munawar Hayat, Zhaolin Chen, Gary Egan, Mehrtash Harandi.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.13300)] [[Github](https:\u002F\u002Fgithub.com\u002Fhimashi92\u002Fvt-unet)]\n \n   **Exploiting full Resolution Feature Context for Liver Tumor and Vessel Segmentation via Fusion Encoder: Application to Liver Tumor and Vessel 3D reconstruction.** [26th Nov., 2021].\u003Cbr>\n*Xiangyu Meng, Xudong Zhang, Gan Wang, Ying Zhang, Xin Shi, Huanhuan Dai, Zixuan Wang, Xun Wang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.13299)]\n \n   **Self-Supervised Pre-Training of Swin Transformers for 3D Medical Image Analysis.** [29th Nov., 2021].\u003Cbr>\n*Yucheng Tang, Dong Yang, Wenqi Li, Holger Roth, Bennett Landman, Daguang Xu, Vishwesh Nath, Ali Hatamizadeh.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.14791)] [[Github](https:\u002F\u002Fgithub.com\u002FProject-MONAI\u002Fresearch-contributions\u002Ftree\u002Fmaster\u002FSwinUNETR)]\n \n   **MT-TransUNet: Mediating Multi-Task Tokens in Transformers for Skin Lesion Segmentation and Classification.** [3rd Dec., 2021].\u003Cbr>\n*Jingye Chen, Jieneng Chen, Zongwei Zhou, Bin Li, Alan Yuille, Yongyi Lu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.01767)] [[Github](https:\u002F\u002Fgithub.com\u002Fjingyechen\u002Fmt-transunet)]\n \n   **FAT-Net: Feature Adaptive Transformers for Automated Skin Lesion Segmentation.** [4th Dec., 2021] [⚡MIA, 2021].\u003Cbr>\n*Huisi Wu, Shihuai Chen, Guilian Chen, Wei Wang, Baiying Lei, Zhenkun Wen.*\u003Cbr>\n [[PDF](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS1361841521003728)] [[Github](https:\u002F\u002Fgithub.com\u002FSZUcsh\u002FFAT-Net)]\n \n   **Semi-Supervised Medical Image Segmentation via Cross Teaching between CNN and Transformer.** [9th Dec., 2021].\u003Cbr>\n*Xiangde Luo, Minhao Hu, Tao Song, Guotai Wang, Shaoting Zhang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.04894v1)] [[Github](https:\u002F\u002Fgithub.com\u002FHiLab-git\u002FSSL4MIS)]\n \n   **D-Former: A U-shaped Dilated Transformer for 3D Medical Image Segmentation.** [3rd Jan., 2022].\u003Cbr>\n*Yixuan Wu, Kuanlun Liao, Jintai Chen, Jinhong Wang, Danny Z. 
Chen, Honghao Gao, Jian Wu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2201.00462)]\n \n   **Swin UNETR: Swin Transformers for Semantic Segmentation of Brain Tumors in MRI Images.** [4th Jan., 2022].\u003Cbr>\n*Ali Hatamizadeh, Vishwesh Nath, Yucheng Tang, Dong Yang, Holger Roth, Daguang Xu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2201.01266)] [[Github](https:\u002F\u002Fgithub.com\u002FProject-MONAI\u002Fresearch-contributions\u002Ftree\u002Fmaster\u002FSwinUNETR\u002FBRATS21)]\n \n   **HT-Net: hierarchical context-attention transformer network for medical ct image segmentation.** [15th Jan., 2022].\u003Cbr>\n*Mingjun Ma, Haiying Xia, Yumei Tan, Haisheng Li, Shuxiang Song .*\u003Cbr>\n [[PDF](https:\u002F\u002Flink.springer.com\u002Farticle\u002F10.1007\u002Fs10489-021-03010-0)] \n \n   **SegTransVAE: Hybrid CNN - Transformer with Regularization for medical image segmentation.** [21st Jan., 2022].\u003Cbr>\n*Quan-Dung Pham, Hai Nguyen-Truong, Nam Nguyen Phuong, Khoa N. A. Nguyen.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2201.08582.pdf)] [[Github](https:\u002F\u002Fgithub.com\u002Fitruonghai\u002FSegTransVAE)]\n \n   **Class-Aware Generative Adversarial Transformers for Medical Image Segmentation.** [26th Jan., 2022].\u003Cbr>\n*Chenyu You, Ruihan Zhao, Fenglin Liu, Sandeep Chinchali, Ufuk Topcu, Lawrence Staib, James S. Duncan.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2201.10737)] \n \n   **RTNet: Relation Transformer Network for Diabetic Retinopathy Multi-lesion Segmentation.** [26th Jan., 2022] [⚡IEEE TMI, 2022]..\u003Cbr>\n*Shiqi Huang, Jianan Li, Yuze Xiao, Ning Shen, Tingfa Xu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2201.11037)]\n \n   **Joint Liver and Hepatic Lesion Segmentation using a Hybrid CNN with Transformer Layers.** [26th Jan., 2022].\u003Cbr>\n*Georg Hille, Shubham Agrawal, Christian Wybranski, Maciej Pech, Alexey Surov, Sylvia Saalfeld.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2201.10981)]\n \n   **Multi-scale boundary neural network for gastric tumor segmentation.** [28th Jan., 2022].\u003Cbr>\n*Pengfei Wang, Yunqi Li, Yaru Sun, Dongzhi He & Zhiqiang Wang.*\u003Cbr>\n [[PDF](https:\u002F\u002Flink.springer.com\u002Farticle\u002F10.1007\u002Fs00371-021-02374-1)]\n \n   **TransBTSV2: Wider Instead of Deeper Transformer for Medical Image Segmentation.** [30th Jan., 2022].\u003Cbr>\n*Jiangyun Li, Wenxuan Wang, Chen Chen, Tianxiang Zhang, Sen Zha, Hong Yu, Jing Wang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2201.12785)] [[Github](https:\u002F\u002Fgithub.com\u002FWenxuan-1119\u002FTransBTS)]\n \n   **TraSeTR: Track-to-Segment Transformer with Contrastive Query for Instance-level Instrument Segmentation in Robotic Surgery.** [30th Jan., 2022].\u003Cbr>\n*Zixu Zhao, Yueming Jin, Pheng-Ann Heng.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.08453)]\n\n   **A Multi-scale Transformer for Medical Image Segmentation: Architectures, Model Efficiency, and Benchmarks.** [28th Feb., 2022].\u003Cbr>\n*Yunhe Gao, Mu Zhou, Di Liu, Dimitris Metaxas.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.00131)] [[Github](https:\u002F\u002Fgithub.com\u002Fyhygao\u002FCBIM-Medical-Image-Segmentation)]\n\n   **Tempera: Spatial Transformer Feature Pyramid Network for Cardiac MRI Segmentation.** [1st March, 2022].\u003Cbr>\n*Christoforos Galazis, Huiyi Wu, Zhuoyu Li, Camille Petri, Anil A. 
Bharath, Marta Varela.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.00355)]\n\n   **Contextual Attention Network: Transformer Meets U-Net.** [2nd March, 2022].\u003Cbr>\n*Azad Reza, Heidari Moein, Wu Yuli, Merhof Dorit.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.01932v1)] [[Github](https:\u002F\u002Fgithub.com\u002Frezazad68\u002FTMUnet)]\n \n   **Simulation-Driven Training of Vision Transformers Enabling Metal Segmentation in X-Ray Images.** [17th March, 2022].\u003Cbr>\n*Fuxin Fan, Ludwig Ritschl, Marcel Beister, Ramyar Biniazan, Björn Kreher, Tristan M. Gottschalk, Steffen Kappler, Andreas Maier.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.09207)]\n \n   **TransFusion: Multi-view Divergent Fusion for Medical Image Segmentation with Transformers.** [21st March, 2022].\u003Cbr>\n*Di Liu, Yunhe Gao, Qilong Zhangli, Zhennan Yan, Mu Zhou, Dimitris Metaxas.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.10726)]\n \n   **CAT-Net: A Cross-Slice Attention Transformer Model for Prostate Zonal Segmentation in MRI.** [29th March, 2022].\u003Cbr>\n*Alex Ling Yu Hung, Haoxin Zheng, Qi Miao, Steven S. Raman, Demetri Terzopoulos, Kyunghyun Sung.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.15163)]\n \n   **UNetFormer: A Unified Vision Transformer Model and Pre-Training Framework for 3D Medical Image Segmentation.** [1st April, 2022].\u003Cbr>\n*Ali Hatamizadeh, Ziyue Xu, Dong Yang, Wenqi Li, Holger Roth, Daguang Xu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.00631)] [[Github](https:\u002F\u002Fgithub.com\u002FProject-MONAI\u002Fresearch-contributions)]\n \n   **CCAT-NET: A Novel Transformer Based Semi-supervised Framework for Covid-19 Lung Lesion Segmentation.** [6th April, 2022].\u003Cbr>\n*Mingyang Liu, Li Xiao, Huiqin Jiang, Qing He.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.02839)] \n \n   **Continual Hippocampus Segmentation with Transformers.** [17th April, 2022].\u003Cbr>\n*Amin Ranem, Camila González, Anirban Mukhopadhyay.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.08043)] \n \n   **TranSiam: Fusing Multimodal Visual Features Using Transformer for Medical Image Segmentation.** [26th April, 2022].\u003Cbr>\n*Xuejian Li, Shiqiang Ma, Jijun Tang, Fei Guo.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.12185)]\n \n   **ColonFormer: An Efficient Transformer based Method for Colon Polyp Segmentation.** [17th May, 2022].\u003Cbr>\n*Nguyen Thanh Duc, Nguyen Thi Oanh, Nguyen Thi Thuy, Tran Minh Triet, Dinh Viet Sang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.08473)]\n \n   **Transformer based multiple instance learning for weakly supervised histopathology image segmentation.** [18th May, 2022].\u003Cbr>\n*Ziniu Qian, Kailu Li, Maode Lai, Eric I-Chao Chang, Bingzheng Wei, Yubo Fan, Yan Xu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.08878)]\n \n   **Transformer based Generative Adversarial Network for Liver Segmentation.** [21st May, 2022].\u003Cbr>\n*Ugur Demir, Zheyuan Zhang, Bin Wang, Matthew Antalek, Elif Keles, Debesh Jha, Amir Borhani, Daniela Ladner, Ulas Bagci.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.10663)][[Github](https:\u002F\u002Fgithub.com\u002FProject-MONAI\u002Fresearch-contributions)]\n\n   **An uncertainty-aware transformer for MRI cardiac semantic segmentation via mean teachers.** [25th Jul, 2022] [MIUA, 
2022].\u003Cbr>\n*Ziyang Wang, Jian-Qing Zheng, Irina Voiculescu.*\u003Cbr>\n [[PDF](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-031-12053-4_37)][[Github](https:\u002F\u002Fgithub.com\u002Fziyangwang007\u002FCV-SSL-MIS)]\n\n   **Computationally-Efficient Vision Transformer for Medical Image Semantic Segmentation via Dual Pseudo-Label Supervision.** [16th Oct, 2022] [ICIP, 2022].\u003Cbr>\n*Ziyang Wang, Nanqing Dong, Irina Voiculescu.*\u003Cbr>\n [[PDF](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F9897482\u002F)][[Github](https:\u002F\u002Fgithub.com\u002Fziyangwang007\u002FCV-SSL-MIS)]\n\n   **Adversarial Vision Transformer for Medical Image Semantic Segmentation with Limited Annotations.** [21st Nov, 2022] [BMVC, 2022].\u003Cbr>\n*Ziyang Wang, Chengkuan Zhao, Zixuan Ni.*\u003Cbr>\n [[PDF](https:\u002F\u002Fbmvc2022.mpi-inf.mpg.de\u002F1002.pdf)][[Github](https:\u002F\u002Fgithub.com\u002Fziyangwang007\u002FCV-SSL-MIS)]\n\n   **DuAT: Dual-Aggregation Transformer Network for Medical Image Segmentation.** [21st Dec., 2022].\u003Cbr>\n*Feilong Tang, Qiming Huang, Jinfeng Wang, Xianxu Hou, Jionglong Su, Jingxin Liu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.11677)]\n\n   **When cnn meet with vit: Towards semi-supervised learning for multi-class medical image semantic segmentation.** [12th Feb., 2023] [ECCV Workshop, 2022].\u003Cbr>\n*Ziyang Wang, Tianze Li, Jian-Qing Zheng, Baoru Huang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.06449)][[Github](https:\u002F\u002Fgithub.com\u002Fziyangwang007\u002FCV-SSL-MIS)]\n\n   **DAE-Former: Dual Attention-guided Efficient Transformer for Medical Image Segmentation.** [27th Jan., 2023].\u003Cbr>\n*Reza Azad, René Arimond, Ehsan Khodapanah Aghdam, Amirhossein Kazerouni, Dorit Merhof.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.13504)][[Github](https:\u002F\u002Fgithub.com\u002Fmindflow-institue\u002FDAEFormer)]\n\n \n \n# Classification\n\n  **TransMed: Transformers Advance Multi-modal Medical Image Classification.** [10th March, 2021].\u003Cbr>\n*Yin Dai, Yifan Gao.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.05940)]\n \n   **Medical Transformer: Universal Brain Encoder for 3D MRI Analysis.** [28th April, 2021].\u003Cbr>\n*Eunji Jun, Seungwoo Jeong, Da-Woon Heo, Heung-Il Suk.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.13633)] \n \n   **TransMIL: Transformer based Correlated Multiple Instance Learning for Whole Slide Image Classification.** [2nd June, 2021] [⚡NeurIPS, 2021].\u003Cbr>\n*Zhuchen Shao, Hao Bian, Yang Chen, Yifeng Wang, Jian Zhang, Xiangyang Ji, Yongbing Zhang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.00908)] [[Github](https:\u002F\u002Fgithub.com\u002Fszc19990412\u002FTransMIL)]\n \n   **Vision Transformer-based recognition of diabetic retinopathy grade.** [15 July, 2021] [⚡CVPR, 2021].\u003Cbr>\n*Rui Sun, Yihao Li, Tianzhu Zhang, Zhendong Mao, Feng Wu, Yongdong Zhang.*\u003Cbr>\n [[PDF](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fhtml\u002FSun_Lesion-Aware_Transformers_for_Diabetic_Retinopathy_Grading_CVPR_2021_paper.html)] \n \n   **Is it Time to Replace CNNs with Transformers for Medical Images?** [20th Aug., 2021] [👍ICCV Workshop of Automated Medical Diagnosis, 2021].\u003Cbr>\n*Christos Matsoukas, Johan Fredin Haslum, Magnus Söderberg, Kevin Smith.*\u003Cbr>\n 
[[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2108.09038)] [[Github](https:\u002F\u002Fgithub.com\u002Fchrismats\u002Fmedical_transformers)]\n \n   **Gene Transformer: Transformers for the Gene Expression-based Classification of Lung Cancer Subtypes** [26th Aug., 2021].\u003Cbr>\n*Anwar Khan, Boreom Lee.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2108.11833)]\n \n   **A transformer-based deep learning approach for classifying brain metastases into primary organ sites using clinical whole brain MRI.** [7th Oct., 2021].\u003Cbr>\n*Qing Lyu, Sanjeev V. Namjoshi, Emory McTyre, Umit Topaloglu, Richard Barcus, Michael D. Chan, Christina K. Cramer, Waldemar Debinski, Metin N. Gurcan, Glenn J. Lesser, Hui-Kuan Lin, Reginald F. Munden, Boris C. Pasche, Kiran Kumar Solingapuram Sai, Roy E. Strowd, Stephen B. Tatter, Kounosuke Watabe, Wei Zhang, Ge Wang, Christopher T. Whitlow.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2110.03588)] \n \n   **CAE-Transformer: Transformer-based Model to Predict Invasiveness of Lung Adenocarcinoma Subsolid Nodules from Non-thin Section 3D CT Scans.** [17th Oct., 2021].\u003Cbr>\n*Shahin Heidarian, Parnian Afshar, Anastasia Oikonomou, Konstantinos N. Plataniotis, Arash Mohammadi.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2110.08721)] \n \n   **Vision Transformer-based recognition of diabetic retinopathy grade.** [25th Oct., 2021].\u003Cbr>\n*Jianfang Wu, Ruo Hu, Zhenghong Xiao, Jiaxu Chen, Jingwei Liu.*\u003Cbr>\n [[PDF](https:\u002F\u002Fpubmed.ncbi.nlm.nih.gov\u002F34693536\u002F)] \n \n   **VISION TRANSFORMERS FOR CLASSIFICATION OF BREAST ULTRASOUND IMAGES.** [27th Oct., 2021].\u003Cbr>\n*Behnaz Gheflati, Hassan Rivaz.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2110.14731)]\n \n   **Indication as Prior Knowledge for Multimodal Disease Classification in Chest Radiographs with Transformers** [12th Feb., 2022] [👍ISBI, 2022].\u003Cbr>\n*Grzegorz Jacenków, Alison Q. O'Neil, Sotirios A. 
Tsaftaris.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.06076)] [[Github](https:\u002F\u002Fgithub.com\u002Fjacenkow\u002Fmmbt)]\n \n   **AI can evolve without labels: self-evolving vision transformer for chest X-ray diagnosis through knowledge distillation.** [13th Feb., 2022].\u003Cbr>\n*Sangjoon Park, Gwanghyun Kim, Yujin Oh, Joon Beom Seo, Sang Min Lee, Jin Hwan Kim, Sungjun Moon, Jae-Kwang Lim, Chang Min Park, Jong Chul Ye.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.06431)]\n \n   **ScoreNet: Learning Non-Uniform Attention and Augmentation for Transformer-Based Histopathological Image Classification.** [15th Feb., 2022].\u003Cbr>\n*Thomas Stegmüller, Antoine Spahr, Behzad Bozorgtabar, Jean-Philippe Thiran.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.07570v1)]\n \n   **A hybrid 2-stage vision transformer for AI-assisted 5 class pathologic diagnosis of gastric endoscopic biopsies.** [17th Feb., 2022].\u003Cbr>\n*Yujin Oh, Go Eun Bae, Kyung-Hee Kim, Min-Kyung Yeo, Jong Chul Ye.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.08510)]\n\n   **RadioTransformer: A Cascaded Global-Focal Transformer for Visual Attention-guided Disease Classification.** [23rd Feb., 2022].\u003Cbr>\n*Moinak Bhattacharya, Shubham Jain, Prateek Prasanna.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.11781)]\n \n   **Uni4Eye: Unified 2D and 3D Self-supervised Pre-training via Masked Image Modeling Transformer for Ophthalmic Image Classification.** [9th March, 2022].\u003Cbr>\n*Zhiyuan Cai, Li Lin, Huaqing He, Xiaoying Tang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.04614)]\n \n   **Noise-reducing attention cross fusion learning transformer for histological image classification of osteosarcoma.** [29th April, 2022].\u003Cbr>\n*Liangrui Pan, Hetian Wang, Lian Wang, Boya Ji, Mingting Liu, Mitchai Chongcheawchamnan, Jin Yuan, Shaoliang Peng.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.13838)]\n \n   **HoVer-Trans: Anatomy-aware HoVer-Transformer for ROI-free Breast Cancer Diagnosis in Ultrasound Images.** [17th May, 2022].\u003Cbr>\n*Yuhao Mo, Chu Han, Yu Liu, Min Liu, Zhenwei Shi, Jiatai Lin, Bingchao Zhao, Chunwang Huang, Bingjiang Qiu, Yanfen Cui, Lei Wu, Xipeng Pan, Zeyan Xu, Xiaomei Huang, Zaiyi Liu, Ying Wang, Changhong Liang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.08390)]\n \n   **A graph-transformer for whole slide image classification** [19th May, 2022].\u003Cbr>\n*Yi Zheng, Rushin H. Gindra, Emily J. Green, Eric J. Burks, Margrit Betke, Jennifer E. Beane, Vijaya B. 
Kolachalama.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.09671)] [[Github](https:\u002F\u002Fgithub.com\u002Fjacenkow\u002Fmmbt)]\n \n   **A Comparative Study of Gastric Histopathology Sub-size Image Classification: from Linear Regression to Visual Transformer.** [19th May, 2022].\u003Cbr>\n*Weiming Hu, Haoyuan Chen, Wanli Liu, Xiaoyan Li, Hongzan Sun, Xinyu Huang, Marcin Grzegorzek, Chen Li.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.09671)]\n \n   **Zero-Shot and Few-Shot Learning for Lung Cancer Multi-Label Classification using Vision Transformer.** [30th May, 2022].\u003Cbr>\n*Fu-Ming Guo, Yingfang Fan.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.15290)]\n \n   **Detecting Severity of Diabetic Retinopathy from Fundus Images using Ensembled Transformers.** [3rd Jan., 2023].\u003Cbr>\n*Chandranath Adak, Tejas Karkera, Soumi Chattopadhyay, Muhammad Saqib.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.00973)]\n \n \n## Classification COVID19 (Separate section due to its current significance)\n\n  **Vision Transformer for COVID-19 CXR Diagnosis using Chest X-ray Feature Corpus.** [12th March, 2021].\u003Cbr>\n*Sangjoon Park, Gwanghyun Kim, Yujin Oh, Joon Beom Seo, Sang Min Lee, Jin Hwan Kim, Sungjun Moon, Jae-Kwang Lim, Jong Chul Ye.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.07055)]\n \n   **Vision Transformer using Low-level Chest X-ray Feature Corpus for COVID-19 Diagnosis and Severity Quantification.** [15th April, 2021].\u003Cbr>\n*Sangjoon Park, Gwanghyun Kim, Yujin Oh, Joon Beom Seo, Sang Min Lee, Jin Hwan Kim, Sungjun Moon, Jae-Kwang Lim, Jong Chul Ye.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.07235)]\n \n   **POCFormer: A Lightweight Transformer Architecture for Detection of COVID-19 Using Point of Care Ultrasound.** [15th May, 2021].\u003Cbr>\n*Shehan Perera, Srikar Adhikari, Alper Yilmaz.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2105.09913)] \n \n   **Automatic Diagnosis of COVID-19 Using a tailored Transformer-Like Network.** [20th May, 2021].\u003Cbr>\n*Chengeng Liu, Qingshan Yin.*\u003Cbr>\n [[PDF](https:\u002F\u002Fiopscience.iop.org\u002Farticle\u002F10.1088\u002F1742-6596\u002F2010\u002F1\u002F012175)]\n \n   **COVID-VIT: Classification of COVID-19 from CT chest images based on vision transformer models.** [4th July, 2021].\u003Cbr>\n*Xiaohong Gao, Yu Qian, Alice Gao.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2107.01682)] [[Github](https:\u002F\u002Fgithub.com\u002Fxiaohong1\u002FCOVID-ViT)]\n \n   **xViTCOS: Explainable Vision Transformer Based COVID-19 Screening Using Radiography.** [6th July, 2021].\u003Cbr>\n*Arnab Kumar Mondal, Arnab Bhattacharjee, Parag Singla, Prathosh AP.*\u003Cbr>\n [[PDF](https:\u002F\u002Fwww.techrxiv.org\u002Farticles\u002Fpreprint\u002FxViTCOS_Explainable_Vision_Transformer_Based_COVID-19_Screening_Using_Radiography\u002F14912367\u002F1)]\n \n   **Visual Transformer with Statistical Test for COVID-19 Classification.** [12th July, 2021] [👍ICCV MIA Workshop, 2021].\u003Cbr>\n*Chih-Chung Hsu, Guan-Lin Chen, Mei-Hsuan Wu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2107.05334)] \n \n   **MIA-COV19D: A transformer-based framework for COVID19 classification in chest CTs.** [15th July, 2021] [👍ICCV MIA Workshop, 2021].\u003Cbr>\n*Lei Zhang, Yan Wen.*\u003Cbr>\n 
[[PDF](https:\u002F\u002Fwww.researchgate.net\u002Fpublication\u002F353105641_MIA-COV19D_A_transformer-based_framework_for_COVID19_classification_in_chest_CTs)]\n \n   **COViT-GAN: Vision Transformer for COVID-19 Detection in CT images.** [10th August, 2021].\u003Cbr>\n*Ara Abigail E. Ambita, Eujene Nikka V. Boquio, Prospero C. Naval Jr.*\u003Cbr>\n [[PDF](https:\u002F\u002Fwww.springerprofessional.de\u002Fen\u002Fcovit-gan-vision-transformer-forcovid-19-detection-in-ct-scan-im\u002F19652482)] \n \n   **COVID-Transformer: Interpretable COVID-19 Detection Using Vision Transformer for Healthcare.** [23rd Sep., 2021].\u003Cbr>\n*Debaditya Shome, T Kar, Sachi Nandan Mohanty, Prayag Tiwari, Khan Muhammad, Abdullah AlTameem, Yazhou Zhang, Abdul Khader Jilani Saudagar.*\u003Cbr>\n [[PDF](https:\u002F\u002Fpubmed.ncbi.nlm.nih.gov\u002F34769600\u002F)]\n \n   **Vision Transformer based COVID-19 Detection using Chest X-rays.** [9th Oct., 2021].\u003Cbr>\n*Koushik Sivarama Krishnan, Karthik Sivarama Krishnan.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2110.04458)]  \n \n   **COVID-19 Detection in Chest X-ray Images Using Swin-Transformer and Transformer in Transformer.** [16th Oct., 2021].\u003Cbr>\n*Juntao Jiang, Shuyi Lin.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2110.08427)] \n \n   **Federated Split Vision Transformer for COVID-19 CXR Diagnosis using Task-Agnostic Training.** [2nd Nov., 2021] [⚡NeurIPS, 2021].\u003Cbr>\n*Sangjoon Park, Gwanghyun Kim, Jeongsol Kim, Boah Kim, Jong Chul Ye.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.01338)]\n \n   **Multi-task vision transformer using low-level chest X-ray feature corpus for COVID-19 diagnosis and severity quantification.** [4th Nov., 2021] [⚡MIA, 2021].\u003Cbr>\n*Sangjoon Park, Gwanghyun Kim, Yujin Oh, Joon Beom Seo, Sang Min Lee, Jin Hwan Kim, Sungjun Moon, Jae-Kwang Lim, Jong Chul Ye.*\u003Cbr>\n [[PDF](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS1361841521003443)]\n \n   **COVID-19 CT Image Recognition Algorithm Based on Transformer and CNN.** [24th Jan., 2022].\u003Cbr>\n*Xiaole Fan, Xiufang Feng, Yunyun Dong, Huichao Hou.*\u003Cbr>\n [[PDF](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0141938222000026)]\n \n\n# Reconstruction\n\n   **TransCT: Dual-path Transformer for Low Dose Computed Tomography.** [28th Feb., 2021] [⚡MICCAI, 2021].\u003Cbr>\n*Zhicheng Zhang, Lequan Yu, Xiaokun Liang, Wei Zhao, Lei Xing.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.00634)] [[Github](https:\u002F\u002Fgithub.com\u002Fzzc623\u002FTransCT)]\n \n   **Unsupervised MRI Reconstruction via Zero-Shot Learned Adversarial Transformers.** [15th May, 2021].\u003Cbr>\n*Yilmaz Korkmaz, Salman UH Dar, Mahmut Yurt, Muzaffer Özbey, Tolga Çukur.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.04430)] [[Github](https:\u002F\u002Fgithub.com\u002FWenxuan-1119\u002FTransBTS)]\n \n   **TED-net: Convolution-free T2T Vision Transformer-based Encoder-decoder Dilation network for Low-dose CT Denoising.** [8th June, 2021].\u003Cbr>\n*Dayang Wang, Zhan Wu, Hengyong Yu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.04650)]\n \n   **Task Transformer Network for Joint MRI Reconstruction and Super-Resolution.** [12th June, 2021] [⚡MICCAI, 2021].\u003Cbr>\n*Chun-Mei Feng, Yunlu Yan, Huazhu Fu, Li Chen, Yong Xu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.06742)] 
[[Github](https:\u002F\u002Fgithub.com\u002Fchunmeifeng\u002FT2Net)]\n \n   **Accelerated Multi-Modal MR Imaging with Transformers.** [27th June, 2021].\u003Cbr>\n*Chun-Mei Feng, Yunlu Yan, Geng Chen, Huazhu Fu, Yong Xu, Ling Shao.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.14248)] [[Github](https:\u002F\u002Fgithub.com\u002Fchunmeifeng\u002FMTrans)]\n \n   **E-DSSR: Efficient Dynamic Surgical Scene Reconstruction with Transformer-based Stereoscopic Depth Perception.** [1st July, 2021] [⚡MICCAI, 2021].\u003Cbr>\n*Yonghao Long, Zhaoshuo Li, Chi Hang Yee, Chi Fai Ng, Russell H. Taylor, Mathias Unberath, Qi Dou.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2107.00229)]\n \n   **Eformer: Edge Enhancement based Transformer for Medical Image Denoising.** [16th Sep., 2021] [👍ICCV Workshop, 2021].\u003Cbr>\n*Achleshwar Luthra, Harsh Sulakhe, Tanish Mittal, Abhishek Iyer, Santosh Yadav.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2109.08044)] \n \n   **Transformer-Unet: Raw Image Processing with Unet.** [17th Sep., 2021].\u003Cbr>\n*Youyang Sha, Yonghong Zhang, Xuquan Ji, Lei Hu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2109.08417)]\n \n   **Deep MRI Reconstruction with Generative Vision Transformers.** [25th Sep., 2021].\u003Cbr>\n*Yilmaz Korkmaz, Mahmut Yurt, Salman Ul Hassan Dar, Muzaffer Özbey, Tolga Çukur.*\u003Cbr>\n [[PDF](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-030-88552-6_6)] [[Github](https:\u002F\u002Fgithub.com\u002Ficon-lab\u002FSLATER)]\n \n   **3D Transformer-GAN for High-quality PET Reconstruction.** [29th Sep., 2021] [⚡MICCAI, 2021].\u003Cbr>\n*Yanmei Luo, Yan Wang, Chen Zu, Bo Zhan, Xi Wu, Jiliu Zhou, Dinggang Shen, Luping Zhou.*\u003Cbr>\n [[PDF](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-030-87231-1_27)] \n \n   **TranSMS: Transformers for Super-Resolution Calibration in Magnetic Particle Imaging.** [3rd Nov., 2021].\u003Cbr>\n*Alper Güngör, Baris Askin, Damla Alptekin Soydan, Emine Ulku Saritas, Can Barış Top, Tolga Çukur.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.02163)] \n \n   **DuDoTrans: Dual-Domain Transformer Provides More Attention for Sinogram Restoration in Sparse-View CT Reconstruction.** [21st Nov., 2021].\u003Cbr>\n*Ce Wang, Kun Shang, Haimiao Zhang, Qian Li, Yuan Hui, S. Kevin Zhou.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.10790)]\n \n   **Self-supervised CT super-resolution with hybrid model.** [23rd Nov., 2021].\u003Cbr>\n*Zhicheng Zhang, Shaode Yu, Wenjian Qin, Xiaokun Liang, Yaoqin Xie, Guohua Cao.*\u003Cbr>\n [[PDF](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0010482521005692)] \n \n   **MIST-net: Multi-domain Integrative Swin Transformer network for Sparse-View CT Reconstruction.** [29th Nov., 2021].\u003Cbr>\n*Jiayi Pan, Weiwen Wu, Zhifan Gao, Heye Zhang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.14831)]\n \n   **ReconFormer: Accelerated MRI Reconstruction Using Recurrent Transformer.** [23rd Jan., 2022].\u003Cbr>\n*Pengfei Guo, Yiqun Mei, Jinyuan Zhou, Shanshan Jiang, Vishal M. 
Patel.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2201.09376)][[Github](https:\u002F\u002Fgithub.com\u002Fguopengf\u002Freconformer)]\n \n   **DSFormer: A Dual-domain Self-supervised Transformer for Accelerated Multi-contrast MRI Reconstruction.** [26th Jan., 2022] .\u003Cbr>\n*Bo Zhou, Jo Schlemper, Neel Dey, Seyed Sadegh Mohseni Salehi, Chi Liu, James S. Duncan, Michal Sofka.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2201.10776)]\n\n   **CTformer: Convolution-free Token2Token Dilated Vision Transformer for Low-dose CT Denoising.** [28th Feb., 2022] .\u003Cbr>\n*Dayang Wang, Fenglei Fan, Zhan Wu, Rui Liu, Fei Wang, Hengyong Yu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.13517)][[Github](https:\u002F\u002Fgithub.com\u002Fwdayang\u002FCTformer)]\n \n   **Adaptively Re-weighting Multi-Loss Untrained Transformer for Sparse-View Cone-Beam CT Reconstruction.** [23rd March, 2022] .\u003Cbr>\n*Minghui Wu, Yangdi Xu, Yingying Xu, Guangwei Wu, Qingqing Chen, Hongxiang Lin.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.12476)]\n \n   **Transformer-empowered Multi-scale Contextual Matching and Aggregation for Multi-contrast MRI Super-resolution.** [26th March, 2022] .\u003Cbr>\n*Guangyuan Li, Jun Lv, Yapeng Tian, Qi Dou, Chengyan Wang, Chenliang Xu, Jing Qin.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.13963)]\n \n   **Cross-Modality High-Frequency Transformer for MR Image Super-Resolution.** [29th March, 2022] .\u003Cbr>\n*Chaowei Fang, Dingwen Zhang, Liang Wang, Yulun Zhang, Lechao Cheng, Junwei Han.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.15314)]\n \n   **Data and Physics Driven Learning Models for Fast MRI -- Fundamentals and Methodologies from CNN, GAN to Attention and Transformers.** [1st April, 2022] .\u003Cbr>\n*Jiahao Huang, Yingying Fang, Yang Nan, Huanjun Wu, Yinzhe Wu, Zhifan Gao, Yang Li, Zidong Wang, Pietro Lio, Daniel Rueckert, Yonina C. Eldar, Guang Yang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.01706)]\n \n   **Low-Dose CT Denoising via Sinogram Inner-Structure Transformer.** [7th April, 2022] .\u003Cbr>\n*Liutao Yang, Zhongnian Li, Rongjun Ge, Junyong Zhao, Haipeng Si, Daoqiang Zhang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.03163)]\n \n   **Masked Co-attentional Transformer reconstructs 100x ultra-fast\u002Flow-dose whole-body PET from longitudinal images and anatomically guided MRI.** [9th May, 2022] .\u003Cbr>\n*Yan-Ran Wang, Liangqiong Qu, Natasha Diba Sheybani, Xiaolong Luo, Jiangshan Wang, Kristina Elizabeth Hawk, Ashok Joseph Theruvath, Sergios Gatidis, Xuerong Xiao, Allison Pribnow, Daniel Rubin, Heike E. Daldrup-Link.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.04044)]\n \n    **Transformer and GAN Based Super-Resolution Reconstruction Network for Medical Images.** [26th Dec., 2022] .\u003Cbr>\n*Weizhi Du, Harvery Tian.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.13068)]\n\n# Registration\n\n  **ViT-V-Net: Vision Transformer for Unsupervised Volumetric Medical Image Registration.** [13th April, 2021] [👍MIDL Short Paper, 2021].\u003Cbr>\n*Junyu Chen, Yufan He, Eric C. 
Frey, Ye Li, Yong Du.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.06468)] [[Github](https:\u002F\u002Fgithub.com\u002Fjunyuchen245\u002FViT-V-Net_for_3D_Image_Registration_Pytorch)]\n \n   **Attention for Image Registration (AiR): an unsupervised Transformer approach.** [5th May, 2021].\u003Cbr>\n*Zihao Wang, Hervé Delingette.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2105.02282)]\n \n   **Learning Dual Transformer Network for Diffeomorphic Registration.** [21st Sep., 2021] [⚡MICCAI, 2021].\u003Cbr>\n*Yungeng Zhang, Yuru Pei, Hongbin Zha.*\u003Cbr>\n [[PDF](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-030-87202-1_13)]\n \n   **TransMorph: Transformer for unsupervised medical image registration.** [19th Nov., 2021].\u003Cbr>\n*Junyu Chen, Yong Du, Yufan He, William P. Segars, Ye Li, Eric C. Frey.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.10480)] [[Github](https:\u002F\u002Fgithub.com\u002Fjunyuchen245\u002FTransMorph_Transformer_for_Medical_Image_Registration)]\n\n   **A Transformer-based Network for Deformable Medical Image Registration.** [24th Feb., 2022].\u003Cbr>\n*Yibo Wang, Wen Qian, Xuming Zhang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.12104)]\n \n   **Affine Medical Image Registration with Coarse-to-Fine Vision Transformer.** [29th March, 2022].\u003Cbr>\n*Tony C. W. Mok, Albert C. S. Chung.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.15216)] [[Github](https:\u002F\u002Fgithub.com\u002Fcwmok\u002FC2FViT)]\n\n# Synthesis\n\n  **VTGAN: Semi-supervised Retinal Image Synthesis and Disease Prediction using Vision Transformers.** [14th April, 2021] [👍ICCV Workshop on Computer Vision for Automated Medical Diagnosi, 2021].\u003Cbr>\n*Sharif Amit Kamran, Khondker Fariha Hossain, Alireza Tavakkoli, Stewart Lee Zuckerbrod, Salah A. 
Baker.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.06757)] [[Github](https:\u002F\u002Fgithub.com\u002FSharifAmit\u002FVTGAN)]\n \n   **PTNet: A High-Resolution Infant MRI Synthesizer Based on Transformer.** [28th May, 2021].\u003Cbr>\n*Xuzhe Zhang, Xinzi He, Jia Guo, Nabil Ettehadi, Natalie Aw, David Semanek, Jonathan Posner, Andrew Laine, Yun Wang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2105.13993)] [[Github](https:\u002F\u002Fgithub.com\u002FXuzheZ\u002FPTNet)]\n \n   **ResViT: Residual vision transformers for multi-modal medical image synthesis.** [30th June, 2021].\u003Cbr>\n*Onat Dalmaz, Mahmut Yurt, Tolga Çukur.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.16031)] [[Github](https:\u002F\u002Fgithub.com\u002Ficon-lab\u002FResViT)]\n \n   **CyTran: Cycle-Consistent Transformers for Non-Contrast to Contrast CT Translation.** [12th Oct., 2021].\u003Cbr>\n*Nicolae-Catalin Ristea, Andreea-Iuliana Miron, Olivian Savencu, Mariana-Iuliana Georgescu, Nicolae Verga, Fahad Shahbaz Khan, Radu Tudor Ionescu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2110.06400)] [[Github](https:\u002F\u002Fgithub.com\u002Fristea\u002Fcycle-transformer)]\n \n   **One Model to Synthesize Them All: Multi-contrast Multi-scale Transformer for Missing Data Imputation.** [28th April, 2022].\u003Cbr>\n*Jiang Liu, Srivathsa Pasumarthi, Ben Duffy, Enhao Gong, Greg Zaharchuk, Keshav Datta.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.13738)] \n\n# Detection\n \n   **COTR: Convolution in Transformer Network for End to End Polyp Detection.** [23rd May, 2021].\u003Cbr>\n*Zhiqiang Shen, Chaonan Lin, Shaohua Zheng.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2105.10925)]\n  \n   **Transformer for Polyp Detection.** [14th Oct., 2021].\u003Cbr>\n*Shijie Liu, Hongyu Zhou, Xiaozhou Shi, Junwen Pan.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.07918)]\n  \n   **Lymph Node Detection in T2 MRI with Transformers.** [9th Nov., 2021].\u003Cbr>\n*Tejas Sudharshan Mathai, Sungwon Lee, Daniel C. Elton, Thomas C. Shen, Yifan Peng, Zhiyong Lu, Ronald M. Summers.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.04885)]\n \n   **SATr: Slice Attention with Transformer for Universal Lesion Detection.** [13th March, 2022].\u003Cbr>\n*Han Li, Long Chen, Hu Han, S. Kevin Zhou.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.07373)]\n \n   **Contrastive Transformer-based Multiple Instance Learning for Weakly Supervised Polyp Frame Detection.** [13th March, 2022].\u003Cbr>\n*Yu Tian, Guansong Pang, Fengbei Liu, Yuyuan Liu, Chong Wang, Yuanhong Chen, Johan W Verjans, Gustavo Carneiro.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.12121)]\n \n   **Unsupervised Contrastive Learning based Transformer for Lung Nodule Detection.** [30th April, 2022].\u003Cbr>\n*Chuang Niu, Ge Wang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.00122)]\n\n   **SwinFPN: Leveraging Vision Transformers for 3D Organs-At-Risk Detection.** [9th May, 2022] [MIDL Short Paper, 2022].\u003Cbr>\n*Bastian Wittmann, Suprosanna Shit, Fernando Navarro, Jan C. Peeken, Stephanie E. 
Combs, Bjoern Menze.*\u003Cbr>\n [[PDF](https:\u002F\u002Fopenreview.net\u002Fforum?id=yiIz7DhgRU5)] [[Github](https:\u002F\u002Fgithub.com\u002Fbwittmann\u002Ftransoar)]\n \n   **An Effective Transformer-based Solution for RSNA Intracranial Hemorrhage Detection Competition.** [16th May, 2022].\u003Cbr>\n*Fangxin Shang, Siqi Wang, Xiaorong Wang, Yehui Yang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.07556)] [[Github](https:\u002F\u002Fgithub.com\u002FPaddlePaddle\u002FResearch)]\n\n   **Focused Decoding Enables 3D Anatomical Detection by Transformers.** [27th February, 2023] [MELBA, 2023].\u003Cbr>\n*Bastian Wittmann, Fernando Navarro, Suprosanna Shit, Bjoern Menze.*\u003Cbr>\n [[PDF](https:\u002F\u002Fwww.melba-journal.org\u002Fpapers\u002F2023:003.html)] [[Github](https:\u002F\u002Fgithub.com\u002Fbwittmann\u002Ftransoar)]\n\n# Clinical-Report-Generation\n\n   **Reinforced Transformer for Medical Image Captioning.** [10th Oct., 2019].[MLMI, 2019]\u003Cbr>\n*Yuxuan Xiong, Bo Du, Pingkun Yan.*\u003Cbr>\n [[PDF](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-030-32692-0_77)] \n\n   **Improving Factual Completeness and Consistency of Image-to-Text Radiology Report Generation.** [20th Oct., 2020].[NAACL, 2020]\u003Cbr>\n*Yasuhide Miura, Yuhao Zhang, Emily Bao Tsai, Curtis P. Langlotz, Dan Jurafsky.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2010.10042)] [[Github](https:\u002F\u002Fgithub.com\u002Fysmiura\u002Fifcc)]\n\n   **Generating Radiology Reports via Memory-driven Transformer.** [30th Oct., 2020].[EMNLP, 2020]\u003Cbr>\n*Zhihong Chen, Yan Song, Tsung-Hui Chang, Xiang Wan.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2010.16056)] [[Github](https:\u002F\u002Fgithub.com\u002Fcuhksz-nlp\u002FR2Gen)]\n \n   **Hierarchical X-Ray Report Generation via Pathology tags and Multi Head Attention.** [30th Nov., 2020].[ACCV, 2020]\u003Cbr>\n*Preethi Srinivasan, Daksh Thapar, Arnav Bhavsar, Aditya Nigam.*\u003Cbr>\n [[PDF](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FACCV2020\u002Fhtml\u002FSrinivasan_Hierarchical_X-Ray_Report_Generation_via_Pathology_tags_and_Multi_Head_ACCV_2020_paper.html)]\n \n   **Learning Domain Adaptation with Model Calibration for Surgical Report Generation in Robotic Surgery.** [31st March, 2021].[⚡ICRA, 2021]\u003Cbr>\n*Mengya Xu, Mobarakol Islam, Chwee Ming Lim, Hongliang Ren.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.17120)] [[Github](https:\u002F\u002Fgithub.com\u002FXuMengyaAmy\u002FReportDALS)]\n \n   **Confidence-Guided Radiology Report Generation.** [21st June, 2021].[⚡MICCAI, 2021]\u003Cbr>\n*Yixin Wang, Zihao Lin, Jiang Tian, Zhongchao Shi, Yang Zhang, Jianping Fan, Zhiqiang He.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.10887)]\n \n   **Exploring and Distilling Posterior and Prior Knowledge for Radiology Report Generation.** [13th June, 2021].[⚡CVPR, 2021]\u003Cbr>\n*Fenglin Liu, Xian Wu, Shen Ge, Wei Fan, Yuexian Zou.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.06963)]\n \n   **RATCHET: Medical Transformer for Chest X-ray Diagnosis and Reporting.** [5th July, 2021].[⚡MICCAI, 2021]\u003Cbr>\n*Benjamin Hou, Georgios Kaissis, Ronald Summers, Bernhard Kainz.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2107.02104)] [[Github](https:\u002F\u002Fgithub.com\u002Ffarrell236\u002FRATCHET)]\n \n   **Surgical Instruction Generation 
with Transformers.** [14th July, 2021].[⚡MICCAI, 2021]\u003Cbr>\n*Jinglu Zhang, Yinyu Nie, Jian Chang, Jian Jun Zhang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2107.06964)]\n   \n   **Class-Incremental Domain Adaptation with Smoothing and Calibration for Surgical Report Generation.** [23rd July, 2021].[⚡MICCAI, 2021]\u003Cbr>\n*Mengya Xu, Mobarakol Islam, Chwee Ming Lim, Hongliang Ren.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2107.11091)] [[Github](https:\u002F\u002Fgithub.com\u002FXuMengyaAmy\u002FCIDACaptioning)]\n \n   **Automated Generation of Accurate and Fluent Medical X-ray Reports.** [27th Aug., 2021].[EMNLP, 2021]\u003Cbr>\n*Hoang T.N. Nguyen, Dong Nie, Taivanbat Badamdorj, Yujie Liu, Yingying Zhu, Jason Truong, Li Cheng.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2108.12126)] [[Github](https:\u002F\u002Fgithub.com\u002Fginobilinie\u002Fxray_report_generation)]\n \n   **AlignTransformer: Hierarchical Alignment of Visual Regions and Disease Tags for Medical Report Generation.** [1st Sep., 2021].[⚡MICCAI, 2021]\u003Cbr>\n*Di You, Fenglin Liu, Shen Ge, Xiaoxia Xie, Jing Zhang, Xian Wu.*\u003Cbr>\n [[PDF](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-030-87199-4_7)] \n \n   **Automatic Generation of Chest X-ray Reports Using a Transformer-based Deep Learning Model.** [20th Oct., 2021].[ICDS, 2021]\u003Cbr>\n*Ayoub Benali Amjoud; Mustapha Amrouch.*\u003Cbr>\n [[PDF](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9626725\u002F)]\n  \n   **FFA-IR: Towards an Explainable and Reliable Medical Report Generation Benchmark.** [31st Oct., 2021].[⚡NeurIPS, 2021]\u003Cbr>\n*Mingjie Li, Wenjia Cai, Rui Liu, Yuetian Weng, Xiaoyun Zhao, Cong Wang, Xin Chen, Zhong Liu, Caineng Pan, Mengke Li, yingfeng zheng, Yizhi Liu, Flora D. Salim, Karin Verspoor, Xiaodan Liang, Xiaojun Chang.*\u003Cbr>\n [[PDF](https:\u002F\u002Fopenreview.net\u002Fpdf?id=FgYTwJbjbf)] [[Github](https:\u002F\u002Fgithub.com\u002Fmlii0117\u002FFFA-IR)]\n \n   **Generalized Radiograph Representation Learning via Cross-supervision between Images and Free-text Radiology Reports.** [4th Nov., 2021].[Nature Machine Intelligence, 2022]\u003Cbr>\n*Hong-Yu Zhou, Xiaoyu Chen, Yinghao Zhang, Ruibang Luo, Liansheng Wang, Yizhou Yu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.03452)][[Github](https:\u002F\u002Fgithub.com\u002Ffunnyzhou\u002Frefers)]\n \n   **Auto-Encoding Knowledge Graph for Unsupervised Medical Report Generation.** [8th Nov., 2021].[⚡NeurIPS, 2021]\u003Cbr>\n*Fenglin Liu, Chenyu You, Xian Wu, Shen Ge, Sheng Wang, Xu Sun.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.04318)]\n \n   **Understanding Transfer Learning for Chest Radiograph Clinical Report Generation with Modified Transformer Architectures.** [5th May, 2022].\u003Cbr>\n*Edward Vendrow, Ethan Schonfeld.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.02841)]\n \n\n# Others\n\n   **Multimodal Co-Attention Transformer for Survival Prediction in Gigapixel Whole Slide Images.** [22nd Sep., 2021]. 
[⚡ICCV, 2021]\u003Cbr>\n*Chen, Richard J and Lu, Ming Y and Weng, Wei-Hung and Chen, Tiffany Y and Williamson, Drew FK and Manz, Trevor and Shady, Maha and Mahmood, Faisal.*\u003Cbr>\n [[PDF](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FChen_Multimodal_Co-Attention_Transformer_for_Survival_Prediction_in_Gigapixel_Whole_Slide_ICCV_2021_paper.pdf)][[Github](https:\u002F\u002Fgithub.com\u002Fmahmoodlab\u002Fmcat)]\n \n   **Limitations of Transformers on Clinical Text Classification.** [25th Sep., 2021].\u003Cbr>\n*Shang Gao, Mohammed Alawad, M Todd Young, John Gounley, Noah Schaefferkoetter, Hong Jun Yoon, Xiao-Cheng Wu, Eric B Durbin, Jennifer Doherty, Antoinette Stroup, Linda Coyle, Georgia Tourassi.*\u003Cbr>\n [[PDF](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?arnumber=9364676)]\n\n   **Explainable Transformer-Based Neural Network for the Prediction of Survival Outcomes in Non-Small Cell Lung Cancer (NSCLC).** [14th Oct., 2021].\u003Cbr>\n*Elly Kipkogei, Gustavo Alonso Arango Argoty, Ioannis Kagiampakis, Arijit Patra, Etai Jacob.*\u003Cbr>\n [[PDF](https:\u002F\u002Fwww.medrxiv.org\u002Fcontent\u002F10.1101\u002F2021.10.11.21264761v1)]\n\n   **3D Medical Point Transformer: Introducing Convolution to Attention Networks for Medical Point Cloud Analysis.** [9th Dec., 2021].\u003Cbr>\n*Jianhui Yu, Chaoyi Zhang, Heng Wang, Dingxin Zhang, Yang Song, Tiange Xiang, Dongnan Liu, Weidong Cai.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.04863)]\n \n   **Pre-training and Fine-tuning Transformers for fMRI Prediction Tasks.** [10th Dec., 2021].\u003Cbr>\n*Itzik Malkiel, Gony Rosenman, Lior Wolf, Talma Hendler.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.05761)]\n \n   **Does CLIP Benefit Visual Question Answering in the Medical Domain as Much as it Does in the General Domain?.** [27th Dec., 2021].\u003Cbr>\n*Sedigheh Eslami, Gerard de Melo, Christoph Meinel.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.13906)]\n \n   **TransPPG: Two-stream Transformer for Remote Heart Rate Estimate.** [26th Jan., 2022].\u003Cbr>\n*Jiaqi Kang, Su Yang, Weishan Zhang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2201.10873)]\n \n   **Brain Cancer Survival Prediction on Treatment-na ive MRI using Deep Anchor Attention Learning with Vision Transformer.** [3rd Feb., 2022].\u003Cbr>\n*Xuan Xu, Prateek Prasanna.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.01857)]\n\n   **Using Multi-scale SwinTransformer-HTC with Data augmentation in CoNIC Challenge.** [28th Feb., 2022].\u003Cbr>\n*Chia-Yen Lee, Hsiang-Chin Chien, Ching-Ping Wang, Hong Yen, Kai-Wen Zhen, Hong-Kun Lin.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.13588)]\n\n   **Self-Supervised Vision Transformers Learn Visual Concepts in Histopathology.** [1st March, 2022].\u003Cbr>\n*Richard J. Chen, Rahul G. Krishnan.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.00585)][[Github](https:\u002F\u002Fgithub.com\u002FRicharizardd\u002FSelf-Supervised-ViT-Path)]\n\n   **Characterizing Renal Structures with 3D Block Aggregate Transformers.** [4th March, 2022].\u003Cbr>\n*Xin Yu, Yucheng Tang, Yinchi Zhou, Riqiang Gao, Qi Yang, Ho Hin Lee, Thomas Li, Shunxing Bao, Yuankai Huo, Zhoubing Xu, Thomas A. Lasko, Richard G. Abramson, Bennett A. 
Landman.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.02430)]\n \n   **Joint rotational invariance and adversarial training of a dual-stream Transformer yields state of the art Brain-Score for Area V4.** [8th March, 2022].\u003Cbr>\n*William Berrios, Arturo Deza.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.06649)]\n \n   **Active Phase-Encode Selection for Slice-Specific Fast MR Scanning Using a Transformer-Based Deep Reinforcement Learning Framework.** [11th March, 2022].\u003Cbr>\n*Yiming Liu, Yanwei Pang, Ruiqi Jin, Zhenchang Wang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.05756)]\n \n   **Surface Vision Transformers: Attention-Based Modelling applied to Cortical Analysis.** [29th March, 2022]. [MIDL, 2022]\u003Cbr>\n*Simon Dahan, Abdulah Fawaz, Logan Z. J. Williams, Chunhui Yang, Timothy S. Coalson, Matthew F. Glasser, A. David Edwards, Daniel Rueckert, Emma C. Robinson.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.16414)][[Github](https:\u002F\u002Fgithub.com\u002Fmetrics-lab\u002Fsurface-vision-transformers)]\n \n   **Surface Vision Transformers: Flexible Attention-Based Modelling of Biomedical Surfaces.** [7th April, 2022]. [MIDL, 2022]\u003Cbr>\n*Simon Dahan, Hao Xu, Logan Z. J. Williams, Abdulah Fawaz, Chunhui Yang, Timothy S. Coalson, Michelle C. Williams, David E. Newby, A. David Edwards, Matthew F. Glasser, Alistair A. Young, Daniel Rueckert, Emma C. Robinson.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.03408)][[Github](https:\u002F\u002Fgithub.com\u002Fmetrics-lab\u002Fsurface-vision-transformers)]\n \n   **3D Shuffle-Mixer: An Efficient Context-Aware Vision Learner of Transformer-MLP Paradigm for Dense Prediction in Medical Volume.** [14th April, 2022]. [MIDL, 2022]\u003Cbr>\n*Jianye Pang, Cheng Jiang, Yihao Chen, Jianbo Chang, Ming Feng, Renzhi Wang, Jianhua Yao.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.06779)]\n \n   **Local Attention Graph-based Transformer for Multi-target Genetic Alteration Prediction.** [13th May, 2022].\u003Cbr>\n*Daniel Reisenbüchler, Sophia J. Wagner, Melanie Boxberg, Tingying Peng.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.06672)]\n \n   **A microstructure estimation Transformer inspired by sparse representation for diffusion MRI.** [13th May, 2022].\u003Cbr>\n*Tianshu Zheng, Cong Sun, Weihao Zheng, Wen Shi, Haotian Li, Yi Sun, Yi Zhang, Guangbin Wang, Chuyang Ye, Dan Wu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.06450)]\n \n   **BabyNet: Residual Transformer Module for Birth Weight Prediction on Fetal Ultrasound Video.** [19th May, 2022].\u003Cbr>\n*Szymon Płotka, Michal K. Grzeszczyk, Robert Brawura-Biskupski-Samaha, Paweł Gutaj, Michał Lipa, Tomasz Trzciński, Arkadiusz Sitek.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.09382)][[Github](https:\u002F\u002Fgithub.com\u002Fsanoscience\u002Fbabynet)]\n \n   **Transformer-based out-of-distribution detection for clinically safe segmentation.** [21st May, 2022]. 
[MIDL, 2022 (Oral)]\u003Cbr>\n*Mark S Graham, Petru-Daniel Tudosiu, Paul Wright, Walter Hugo Lopez Pinaya, U Jean-Marie, Yee Mah, James Teo, Rolf H Jäger, David Werring, Parashkev Nachev, Sebastien Ourselin, M Jorge Cardoso.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.10650)]\n \n   **MS-DINO: Efficient Distributed Training of Vision Transformer Foundation Model in Medical Domain through Masked Sampling.** [5th Jan., 2023].\u003Cbr>\n*Sangjoon Park, Ik-Jae Lee, Jun Won Kim, Jong Chul Ye.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.02064)]\n \n   **Cooperation Learning Enhanced Colonic Polyp Segmentation Based on Transformer-CNN Fusion.** [17th Jan., 2023].\u003Cbr>\n*Yuanyuan Wang, Zhaohong Deng, Qiongdan Lou, Shudong Hu, Kup-sze Choi, Shitong Wang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.06892)]\n \n  **ViT-AE++: Improving Vision Transformer Autoencoder for Self-supervised Medical Image Representations.** [18th Jan., 2023].\u003Cbr>\n*Chinmay Prabhakar, Hongwei Bran Li, Jiancheng Yang, Suprosanna Shit, Benedikt Wiestler, Bjoern Menze.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.07382)]\n\n# Citation\n\nIf you find the listing and survey useful for your work, please cite the paper:\n\n```\n@misc{shamshad2022transformers,\n      title={Transformers in Medical Imaging: A Survey},\n      author={Shamshad, Fahad and Khan, Salman and Zamir, Syed Waqas and Khan, Muhammad Haris and Hayat, Munawar and Khan, Fahad Shahbaz and Fu, Huazhu},\n      year={2022},\n      eprint={2201.09873},\n      archivePrefix={arXiv},\n      primaryClass={cs.CV}\n}\n```\n\n","[![Maintenance](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FMaintained%3F-yes-green.svg)](https:\u002F\u002FGitHub.com\u002FNaereen\u002FStrapDown.js\u002Fgraphs\u002Fcommit-activity)\n[![PR's Welcome](https:\u002F\u002Fimg.shields.io\u002Fbadge\u002FPRs-welcome-brightgreen.svg?style=flat)](http:\u002F\u002Fmakeapullrequest.com) \n[![Awesome](https:\u002F\u002Fcdn.rawgit.com\u002Fsindresorhus\u002Fawesome\u002Fd7305f38d29fed78fa85652e3a63e154dd8e8829\u002Fmedia\u002Fbadge.svg)](https:\u002F\u002Fgithub.com\u002Fsindresorhus\u002Fawesome)\n\n# \u003Cp align=center> 本仓库是对我们发表在《Medical Image Analysis》上的综述论文 [Transformers in Medical Imaging: A Survey](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS1361841523000634) 的补充。\n\n#  📢📢🏆🏆🏆 亮点：我们的文章现已跻身《Medical Image Analysis》期刊下载量前三的文章！🏆🏆🏆\n\n\n\n作者：[Fahad Shamshad](https:\u002F\u002Fscholar.google.com.pk\u002Fcitations?user=d7QL4wkAAAAJ&hl=en), [Salman Khan](https:\u002F\u002Fsalman-h-khan.github.io\u002F), [Syed Waqas Zamir](https:\u002F\u002Fscholar.google.ae\u002Fcitations?hl=en&user=POoai-QAAAAJ), [Muhammad Haris Khan](https:\u002F\u002Fscholar.google.com\u002Fcitations?user=ZgERfFwAAAAJ&hl=en), [Munawar Hayat](https:\u002F\u002Fscholar.google.com\u002Fcitations?user=Mx8MbWYAAAAJ&hl=en), [Fahad Shahbaz Khan](https:\u002F\u002Fscholar.google.es\u002Fcitations?user=zvaeYnUAAAAJ&hl=en), 和 [Huazhu Fu](https:\u002F\u002Fhzfu.github.io\u002F)\n\u003C\u002Fp>\n\n\n\n![](https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Ffahadshamshad_awesome-transformers-in-medical-imaging_readme_5e3281930d32.gif)\n\n\u003Chr \u002F>\n\n# \u003Cp align=center>`Awesome Transformers in Medical Imaging`\u003C\u002Fp>\n\n一份精选的医学影像领域 Transformers（Transformer 模型）资源列表（**按时间顺序排列**），灵感来源于其他 awesome 项目。我们计划定期在此页面上更新相关的最新论文及其开源实现。 \n\n我们强烈鼓励希望向社区推广其优秀成果的研究人员提交 Pull 
Request（拉取请求）以更新其论文信息！\n\n## 概览\n- [综述论文](#survey)\n- [医学图像分割](#segmentation)\n- [医学图像分类](#classification)\n- [医学图像重建](#reconstruction)\n- [医学图像配准](#registration)\n- [医学图像合成](#synthesis)\n- [医学图像检测](#detection)\n- [临床报告生成](#clinical-report-generation)\n- [其他](#others)\n\n# 综述\n\n[**Transformers in Medical Imaging: A survey.**](https:\u002F\u002Farxiv.org\u002Fabs\u002F2201.09873) [2022 年 1 月 25 日] \u003Cbr>.\n*[Fahad Shamshad](https:\u002F\u002Fscholar.google.com.pk\u002Fcitations?user=d7QL4wkAAAAJ&hl=en), [Salman Khan](https:\u002F\u002Fsalman-h-khan.github.io\u002F), [Syed Waqas Zamir](https:\u002F\u002Fscholar.google.es\u002Fcitations?user=WNGPkVQAAAAJ&hl=en), [Muhammad Haris Khan](https:\u002F\u002Fscholar.google.com\u002Fcitations?user=ZgERfFwAAAAJ&hl=en), [Munawar Hayat](https:\u002F\u002Fscholar.google.com\u002Fcitations?user=Mx8MbWYAAAAJ&hl=en), [Fahad Shahbaz Khan](https:\u002F\u002Fscholar.google.es\u002Fcitations?user=zvaeYnUAAAAJ&hl=en), 和 [Huazhu Fu](https:\u002F\u002Fhzfu.github.io\u002F).*\u003Cbr>\n[[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2201.09873)] \n\n**Advances in Medical Image Analysis with Vision Transformers: A Comprehensive Review.** [2023 年 1 月 9 日].\u003Cbr>\nReza Azad, Amirhossein Kazerouni, Moein Heidari, Ehsan Khodapanah Aghdam, Amirali Molaei, Yiwei Jia, Abin Jose, Rijo Roy, Dorit Merhof\n[[Paper]](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2301.03505.pdf)\n\n**Medical image analysis based on transformer: A Review.** [2022 年 8 月 13 日].\u003Cbr>\n*Zhaoshan Liu, Lei Shen.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.06643)] \n \n **Transforming medical imaging with Transformers? A comparative review of key properties, current progresses, and future perspectives.** [2022 年 6 月 3 日].\u003Cbr>\n*Jun Li, Junyu Chen, Yucheng Tang, Ce Wang, Bennett A. Landman, S. Kevin Zhou.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2206.01136)] \n\n**Vision Transformers in Medical Computer Vision -- A Contemplative Retrospection.** [2022 年 3 月 29 日].\u003Cbr>\n*Arshi Parvaiz, Muhammad Anwaar Khalid, Rukhsana Zafar, Huma Ameer, Muhammad Ali, Muhammad Moazam Fraz.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.15269)] \n\n**Transformers in Medical Image Analysis: A Review.** [2022 年 2 月 24 日].\u003Cbr>\n*Kelei He, Chen Gan, Zhuoyuan Li, Islem Rekik, Zihao Yin, Wen Ji, Yang Gao, Qian Wang, Junfeng Zhang, Dinggang Shen.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.12165)] \n\n**Application of Transformer in Medical Image Segmentation.** [2021 年 10 月 25 日].\u003Cbr>\n*Wenyin Zhang, Weijie Hao, Yuan Qi and Yong Wu.*\u003Cbr>\n [[PDF](https:\u002F\u002Fbiomedgrid.com\u002Fpdf\u002FAJBSR.MS.ID.002014.pdf)] \n\n\n\n# 医学图像分割\n\n**基于注意力的 Transformer（变换器）用于微结构中细胞的实例分割。** [2020 年 11 月 20 日] [BIBM, 2020].\u003Cbr>\n*Tim Prangemeier, Christoph Reich, Heinz Koeppl.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2011.09763)] [[Github](https:\u002F\u002Fgithub.com\u002FChristophReich1996\u002FCell-DETR)]\n \n **TransUNet：Transformer 使医学图像分割的编码器更强。** [2021 年 2 月 8 日].\u003Cbr>\n*Jieneng Chen, Yongyi Lu, Qihang Yu, Xiangde Luo, Ehsan Adeli, Yan Wang, Le Lu, Alan L. 
Yuille, Yuyin Zhou.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2102.04306)] [[Github](https:\u002F\u002Fgithub.com\u002FBeckschen\u002FTransUNet)]\n \n **TransFuse：融合 Transformer 和 CNN（卷积神经网络）以进行医学图像分割。** [2021 年 2 月 16 日] [⚡MICCAI, 2021].\u003Cbr>\n*Yundong Zhang, Huiye Liu, Qiang Hu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2102.08005)] [[Github](https:\u002F\u002Fgithub.com\u002FRayicer\u002FTransFuse)]\n \n **使用 Transformer 进行无监督脑异常检测与分割。** [2021 年 2 月 23 日] [MIDL, 2021].\u003Cbr>\n*Walter Hugo Lopez Pinaya, Petru-Daniel Tudosiu, Robert Gray, Geraint Rees, Parashkev Nachev, Sebastien Ourselin, M. Jorge Cardoso.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2102.11650)]\n \n **使用 Transformer 的免卷积医学图像分割。** [2021 年 2 月 26 日] [⚡MICCAI, 2021].\u003Cbr>\n*Davood Karimi, Serge Vasylechko, Ali Gholipour.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2102.13645)]\n \n **CoTr：高效桥接 CNN 和 Transformer 以进行 3D 医学图像分割。** [2021 年 3 月 4 日] [⚡MICCAI, 2021].\u003Cbr>\n*Yutong Xie, Jianpeng Zhang, Chunhua Shen, Yong Xia.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.03024)] [[Github](https:\u002F\u002Fgithub.com\u002FYtongXie\u002FCoTr)]\n \n **SpecTr：用于高光谱病理图像分割的光谱 Transformer。** [2021 年 3 月 5 日].\u003Cbr>\n*Boxiang Yun, Yan Wang, Jieneng Chen, Huiyu Wang, Wei Shen, Qingli Li.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.03604)] [[Github](https:\u002F\u002Fgithub.com\u002Fhfut-xc-yun\u002FSpecTr)]\n \n **TransBTS：使用 Transformer 的多模态脑肿瘤分割。** [2021 年 3 月 7 日] [⚡MICCAI, 2021].\u003Cbr>\n*Wenxuan Wang, Chen Chen, Meng Ding, Jiangyun Li, Hong Yu, Sen Zha.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.04430)] [[Github](https:\u002F\u002Fgithub.com\u002FWenxuan-1119\u002FTransBTS)]\n \n **U-Net Transformer：自注意力与交叉注意力用于医学图像分割。** [2021 年 3 月 10 日].\u003Cbr>\n*Olivier Petit, Nicolas Thome, Clément Rambour, Luc Soler.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.06104)] [[Github](https:\u002F\u002Fgithub.com\u002FHXLH50K\u002FU-Net-Transformer)]\n \n **UNETR：用于 3D 医学图像分割的 Transformer。** [2021 年 3 月 18 日].\u003Cbr>\n*Ali Hatamizadeh, Yucheng Tang, Vishwesh Nath, Dong Yang, Andriy Myronenko, Bennett Landman, Holger Roth, Daguang Xu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.10504)] [[Github](https:\u002F\u002Fgithub.com\u002FProject-MONAI\u002Fresearch-contributions\u002Ftree\u002Fmaster\u002FUNETR\u002FBTCV)]\n \n **Medical Transformer：用于 3D MRI 分析的通用大脑编码器。** [2021 年 4 月 28 日].\u003Cbr>\n*Eunji Jun, Seungwoo Jeong, Da-Woon Heo, Heung-Il Suk.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.13633)]\n \n **金字塔医学 Transformer 用于医学图像分割。** [2021 年 4 月 29 日].\u003Cbr>\n*Zhuangzhuang Zhang, Baozhou Sun, Weixiong Zhang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.14702)]\n \n **GasHis-Transformer：一种用于胃组织病理学图像分类的多尺度视觉 Transformer 方法。** [2021 年 4 月 29 日].\u003Cbr>\n*Haoyuan Chen, Chen Li, Xiaoyan Li, Ge Wang, Weiming Hu, Yixin Li, Wanli Liu, Changhao Sun, Yudong Yao, Yueyang Teng, Marcin Grzegorzek.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.14528)]\n \n **Swin-Unet：用于医学图像分割的类 U-Net 纯 Transformer。** [2021 年 5 月 12 日].\u003Cbr>\n*Hu Cao, Yueyue Wang, Joy Chen, Dongsheng Jiang, Xiaopeng Zhang, Qi Tian, Manning Wang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2105.05537)]\n \n **使用挤压与扩展 Transformer 进行医学图像分割。** [2021 年 5 月 20 日] [⚡IJCAI, 2021].\u003Cbr>\n*Shaohua Li, Xiuchao Sui, 
Xiangde Luo, Xinxing Xu, Yong Liu, Rick Goh.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2105.09511)] [[Github](https:\u002F\u002Fgithub.com\u002Faskerlee\u002Fsegtran)]\n \n **用于角膜内皮细胞分割的多分支混合 Transformer 网络。** [2021 年 5 月 21 日] [⚡MICCAI, 2021].\u003Cbr>\n*Yinglin Zhang, Risa Higashita, Huazhu Fu, Yanwu Xu, Yang Zhang, Haofeng Liu, Jian Zhang, Jiang Liu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.07557)] \n \n **DS-TransUNet：用于医学图像分割的双 Swin Transformer U-Net。** [2021 年 6 月 12 日].\u003Cbr>\n*Ailiang Lin, Bingzhi Chen, Jiayu Xu, Zheng Zhang, Guangming Lu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.06716)] \n \n **不止于编码器：引入 Transformer 解码器进行上采样。** [2021 年 6 月 20 日].\u003Cbr>\n*Yijiang Li, Wentian Cai, Ying Gao, Xiping Hu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.10637)]\n \n **多复合 Transformer 用于精确生物医学图像分割。** [2021 年 6 月 28 日] [⚡MICCAI, 2021].\u003Cbr>\n*Yuanfeng Ji, Ruimao Zhang, Huijie Wang, Zhen Li, Lingyun Wu, Shaoting Zhang, Ping Luo.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.14385)] [[Github](https:\u002F\u002Fgithub.com\u002FJiYuanFeng\u002FMCTrans)]\n \n **UTNet：用于医学图像分割的混合 Transformer 架构。** [2021 年 7 月 2 日] [⚡MICCAI, 2021].\u003Cbr>\n*Yunhe Gao, Mu Zhou, Dimitris Metaxas.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2107.04805)] [[Github](https:\u002F\u002Fgithub.com\u002Faskerlee\u002Fsegtran)]\n \n **使用多形态 Transformer 进行少样本域适应。** [2021 年 7 月 10 日] [⚡MICCAI, 2021].\u003Cbr>\n*Shaohua Li, Xiuchao Sui, Jie Fu, Huazhu Fu, Xiangde Luo, Yangqin Feng, Xinxing Xu, Yong Liu, Daniel Ting, Rick Siow Mong Goh.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.04430)] [[Github](https:\u002F\u002Fgithub.com\u002FWenxuan-1119\u002FTransBTS)]\n \n **TransClaw U-Net：用于医学图像分割的带 Transformer 的 Claw U-Net。** [2021 年 7 月 12 日].\u003Cbr>\n*Yao Chang, Hu Menghan, Zhai Guangtao, Zhang Xiao-Ping.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2107.05188)]\n \n **TransAttUnet：用于医学图像分割的带 Transformer 的多级注意力引导 U-Net。** [2021 年 7 月 12 日].\u003Cbr>\n*Bingzhi Chen, Yishu Liu, Zheng Zhang, Guangming Lu, David Zhang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2107.05274)]\n \n **LeViT-UNet：使用 Transformer 构建更快的医学图像分割编码器。** [2021 年 7 月 19 日].\u003Cbr>\n*Guoping Xu, Xingrong Wu, Xuan Zhang, Xinwei He.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2107.08623)] [[Github](https:\u002F\u002Fgithub.com\u002Fapple1986\u002FLeViT_UNet)]\n \n **Polyp-PVT：使用金字塔视觉 Transformer 进行息肉分割。** [2021 年 8 月 16 日].\u003Cbr>\n*Bo Dong, Wenhai Wang, Deng-Ping Fan, Jinpeng Li, Huazhu Fu, Ling Shao.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2108.06932)] [[Github](https:\u002F\u002Fgithub.com\u002FDengPingFan\u002FPolyp-PVT)]\n \n **评估基于 Transformer 的语义分割网络用于病理图像分割。** [2021 年 8 月 26 日].\u003Cbr>\n*Cam Nguyen, Zuhayr Asad, Yuankai Huo.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2108.11993)]\n \n **使用卷积和 Transformer 网络的自动化肾肿瘤分割。** [2021 年 8 月 30 日] [👍 MICCAI KiTS Challenge, 2021].\u003Cbr>\n*Zhiqiang Shen, Zhiqiang_Shen, Hua Yang, Zhen Zhang, Shaohua Zheng.*\u003Cbr>\n [[PDF](https:\u002F\u002Fopenreview.net\u002Fforum?id=voteINyy36u)]\n \n **nnFormer：用于体积分割的交错 Transformer。** [2021 年 9 月 7 日].\u003Cbr>\n*Hong-Yu Zhou, Jiansen Guo, Yinghao Zhang, Lequan Yu, Liansheng Wang, Yizhou Yu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2109.03201)] 
[[Github](https:\u002F\u002Fgithub.com\u002F282857341\u002Fnnformer)]\n \n **UCTransNet：从通道视角重新思考 U-Net 中的跳跃连接与 Transformer。** [2021 年 9 月 9 日].\u003Cbr>\n*Haonan Wang, Peng Cao, Jiaqi Wang, Osmar R. Zaiane.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2109.04335)] [[Github](https:\u002F\u002Fgithub.com\u002Fmcgregorwwww\u002Fuctransnet)]\n \n **MISSFormer：一种有效的医学图像分割 Transformer。** [2021 年 9 月 15 日].\u003Cbr>\n*Xiaohong Huang, Zhifang Deng, Dandan Li, Xueguang Yuan.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2109.07162)]\n \n **TransBridge：用于超声心动图左心室分割的轻量级 Transformer。** [2021 年 9 月 21 日] [👍 MICCAI Simplifying Medical Ultrasound Workshop, 2021].\u003Cbr>\n*Kaizhong Deng, Yanda Meng, Dongxu Gao, Joshua Bridge, Yaochun Shen, Gregory Lip, Yitian Zhao, Yalin Zheng.*\u003Cbr>\n [[PDF](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-030-87583-1_7)] \n \n **BiTr-Unet：用于 MRI 脑肿瘤分割的 CNN-Transformer 组合网络。** [2021 年 9 月 25 日] [👍 MICCAI BraTS DREAM Challenge ShuLab, 2021].\u003Cbr>\n*Qiran Jia, Hai Shu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2109.12271)]\n \n **GT U-Net：用于牙根分割的类 U-Net 组 Transformer 网络。** [2021 年 9 月 30 日] [👍 MICCAI MLMI Workshop, 2021].\u003Cbr>\n*Yunxiang Li, Shuai Wang, Jun Wang, Guodong Zeng, Wenjun Liu, Qianni Zhang, Qun Jin, Yaqi Wang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2109.14813)] [[Github](https:\u002F\u002Fgithub.com\u002Fkent0n-li\u002Fgt-u-net)]\n \n **Transformer 辅助卷积网络用于细胞实例分割。** [2021 年 10 月 5 日] [👍 ISBI Workshop, 2021].\u003Cbr>\n*Deepanshu Pandey, Pradyumna Gupta, Sumit Bhattacharya, Aman Sinha, Rohit Agarwal.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2110.02270)] [[Github](https:\u002F\u002Fgithub.com\u002Fdsciitism\u002Fsegpc-2021)]\n \n **边界感知 Transformer 用于皮肤病变分割。** [2021 年 10 月 8 日] [⚡MICCAI, 2021].\u003Cbr>\n*Jiacheng Wang, Lan Wei, Liansheng Wang, Qichao Zhou, Lei Zhu, Jing Qin.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2110.03864)] [[Github](https:\u002F\u002Fgithub.com\u002Fjcwang123\u002FBA-Transformer)]\n \n **脊柱 Transformer：通过 3D Transformer 在任意视野脊柱 CT 中进行椎骨标记和分割。** [2021 年 10 月 10 日] [⚡MIA, 2021].\u003Cbr>\n*Rong Tao, Wenyong Liu, Guoyan Zheng.*\u003Cbr>\n [[PDF](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS1361841521003030)]\n \n **AFTer-UNet：用于医学图像分割的轴向融合 Transformer UNet。** [2021 年 10 月 20 日].\u003Cbr>\n*Xiangyi Yan, Hao Tang, Shanlin Sun, Haoyu Ma, Deying Kong, Xiaohui Xie.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2110.10403)]\n \n **基于具有归纳偏差多头自注意力的 3D Swin Transformer 的肝血管分割。** [2021 年 11 月 5 日].\u003Cbr>\n*Mian Wu, Yinling Qian, Xiangyun Liao, Qiong Wang, Pheng-Ann Heng.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.03368)] \n \n **混合 Transformer U-Net 用于医学图像分割。** [2021 年 11 月 8 日].\u003Cbr>\n*Hongyi Wang, Shiao Xie, Lanfen Lin, Yutaro Iwamoto, Xian-Hua Han, Yen-Wei Chen, Ruofeng Tong.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.04734)] [[Github](https:\u002F\u002Fgithub.com\u002Fdootmaan\u002Fmt-unet)]\n \n **T-AutoML：用于 3D 医学成像中病变分割的自动化机器学习 Transformer。** [2021 年 11 月 15 日] [⚡ICCV, 2021].\u003Cbr>\n*Dong Yang, Andriy Myronenko, Xiaosong Wang, Ziyue Xu, Holger R. 
Roth, Daguang Xu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.07535)]\n \n **用于精确 3D 肿瘤分割的体积 Transformer。** [2021 年 11 月 26 日].\u003Cbr>\n*Himashi Peiris, Munawar Hayat, Zhaolin Chen, Gary Egan, Mehrtash Harandi.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.13300)] [[Github](https:\u002F\u002Fgithub.com\u002Fhimashi92\u002Fvt-unet)]\n \n **利用全分辨率特征上下文通过融合编码器进行肝肿瘤和血管分割：应用于肝肿瘤和血管 3D 重建。** [2021 年 11 月 26 日].\u003Cbr>\n*Xiangyu Meng, Xudong Zhang, Gan Wang, Ying Zhang, Xin Shi, Huanhuan Dai, Zixuan Wang, Xun Wang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.13299)]\n \n **Swin Transformer 用于 3D 医学图像分析的自监督预训练。** [2021 年 11 月 29 日].\u003Cbr>\n*Yucheng Tang, Dong Yang, Wenqi Li, Holger Roth, Bennett Landman, Daguang Xu, Vishwesh Nath, Ali Hatamizadeh.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.14791)] [[Github](https:\u002F\u002Fgithub.com\u002FProject-MONAI\u002Fresearch-contributions\u002Ftree\u002Fmaster\u002FSwinUNETR)]\n \n **MT-TransUNet：在 Transformer 中介导多任务令牌以进行皮肤病变分割和分类。** [2021 年 12 月 3 日].\u003Cbr>\n*Jingye Chen, Jieneng Chen, Zongwei Zhou, Bin Li, Alan Yuille, Yongyi Lu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.01767)] [[Github](https:\u002F\u002Fgithub.com\u002Fjingyechen\u002Fmt-transunet)]\n \n **FAT-Net：用于自动化皮肤病变分割的特征自适应 Transformer。** [2021 年 12 月 4 日] [⚡MIA, 2021].\u003Cbr>\n*Huisi Wu, Shihuai Chen, Guilian Chen, Wei Wang, Baiying Lei, Zhenkun Wen.*\u003Cbr>\n [[PDF](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS1361841521003728)] [[Github](https:\u002F\u002Fgithub.com\u002FSZUcsh\u002FFAT-Net)]\n \n **通过 CNN 和 Transformer 之间的交叉教学进行半监督医学图像分割。** [2021 年 12 月 9 日].\u003Cbr>\n*Xiangde Luo, Minhao Hu, Tao Song, Guotai Wang, Shaoting Zhang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.04894v1)] [[Github](https:\u002F\u002Fgithub.com\u002FHiLab-git\u002FSSL4MIS)]\n \n **D-Former：用于 3D 医学图像分割的 U 形膨胀 Transformer。** [2022 年 1 月 3 日].\u003Cbr>\n*Yixuan Wu, Kuanlun Liao, Jintai Chen, Jinhong Wang, Danny Z. Chen, Honghao Gao, Jian Wu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2201.00462)]\n \n **Swin UNETR：用于 MRI 图像中脑肿瘤语义分割的 Swin Transformer。** [2022 年 1 月 4 日].\u003Cbr>\n*Ali Hatamizadeh, Vishwesh Nath, Yucheng Tang, Dong Yang, Holger Roth, Daguang Xu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2201.01266)] [[Github](https:\u002F\u002Fgithub.com\u002FProject-MONAI\u002Fresearch-contributions\u002Ftree\u002Fmaster\u002FSwinUNETR\u002FBRATS21)]\n \n **HT-Net：用于医学 CT 图像分割的分层上下文注意力 Transformer 网络。** [2022 年 1 月 15 日].\u003Cbr>\n*Mingjun Ma, Haiying Xia, Yumei Tan, Haisheng Li, Shuxiang Song .*\u003Cbr>\n [[PDF](https:\u002F\u002Flink.springer.com\u002Farticle\u002F10.1007\u002Fs10489-021-03010-0)] \n \n **SegTransVAE：用于医学图像分割的带正则化的混合 CNN-Transformer。** [2022 年 1 月 21 日].\u003Cbr>\n*Quan-Dung Pham, Hai Nguyen-Truong, Nam Nguyen Phuong, Khoa N. A. Nguyen.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fpdf\u002F2201.08582.pdf)] [[Github](https:\u002F\u002Fgithub.com\u002Fitruonghai\u002FSegTransVAE)]\n \n **类别感知生成对抗 Transformer 用于医学图像分割。** [2022 年 1 月 26 日].\u003Cbr>\n*Chenyu You, Ruihan Zhao, Fenglin Liu, Sandeep Chinchali, Ufuk Topcu, Lawrence Staib, James S. 
Duncan.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2201.10737)] \n \n **RTNet：用于糖尿病视网膜病变多病变分割的关系 Transformer 网络。** [2022 年 1 月 26 日] [⚡IEEE TMI, 2022]..\u003Cbr>\n*Shiqi Huang, Jianan Li, Yuze Xiao, Ning Shen, Tingfa Xu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2201.11037)]\n \n **使用带 Transformer 层的混合 CNN 联合进行肝脏和肝病变分割。** [2022 年 1 月 26 日].\u003Cbr>\n*Georg Hille, Shubham Agrawal, Christian Wybranski, Maciej Pech, Alexey Surov, Sylvia Saalfeld.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2201.10981)]\n \n **用于胃癌分割的多尺度边界神经网络。** [2022 年 1 月 28 日].\u003Cbr>\n*Pengfei Wang, Yunqi Li, Yaru Sun, Dongzhi He & Zhiqiang Wang.*\u003Cbr>\n [[PDF](https:\u002F\u002Flink.springer.com\u002Farticle\u002F10.1007\u002Fs00371-021-02374-1)]\n \n **TransBTSV2：用于医学图像分割的更宽而非更深的 Transformer。** [2022 年 1 月 30 日].\u003Cbr>\n*Jiangyun Li, Wenxuan Wang, Chen Chen, Tianxiang Zhang, Sen Zha, Hong Yu, Jing Wang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2201.12785)] [[Github](https:\u002F\u002Fgithub.com\u002FWenxuan-1119\u002FTransBTS)]\n \n **TraSeTR：用于机器人手术实例级器械分割的带对比查询的 Track-to-Segment Transformer。** [2022 年 1 月 30 日].\u003Cbr>\n*Zixu Zhao, Yueming Jin, Pheng-Ann Heng.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.08453)]\n\n**一种用于医学图像分割的多尺度 Transformer：架构、模型效率与基准测试。** 2022 年 2 月 28 日。\u003Cbr>\n*Yunhe Gao, Mu Zhou, Di Liu, Dimitris Metaxas.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.00131)] [[Github](https:\u002F\u002Fgithub.com\u002Fyhygao\u002FCBIM-Medical-Image-Segmentation)]\n\n   **Tempera：用于心脏 MRI 分割的空间 Transformer 特征金字塔网络。** 2022 年 3 月 1 日。\u003Cbr>\n*Christoforos Galazis, Huiyi Wu, Zhuoyu Li, Camille Petri, Anil A. Bharath, Marta Varela.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.00355)]\n\n   **上下文注意力网络：Transformer 与 U-Net 的结合。** 2022 年 3 月 2 日。\u003Cbr>\n*Azad Reza, Heidari Moein, Wu Yuli, Merhof Dorit.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.01932v1)] [[Github](https:\u002F\u002Fgithub.com\u002Frezazad68\u002FTMUnet)]\n \n   **基于模拟驱动的视觉 Transformer 训练以实现 X 射线图像中的金属分割。** 2022 年 3 月 17 日。\u003Cbr>\n*Fuxin Fan, Ludwig Ritschl, Marcel Beister, Ramyar Biniazan, Björn Kreher, Tristan M. Gottschalk, Steffen Kappler, Andreas Maier.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.09207)]\n \n   **TransFusion：面向医学图像分割的 Transformer 多视图发散融合。** 2022 年 3 月 21 日。\u003Cbr>\n*Di Liu, Yunhe Gao, Qilong Zhangli, Zhennan Yan, Mu Zhou, Dimitris Metaxas.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.10726)]\n \n   **CAT-Net：一种用于 MRI 前列腺分区分割的跨切片注意力 Transformer 模型。** 2022 年 3 月 29 日。\u003Cbr>\n*Alex Ling Yu Hung, Haoxin Zheng, Qi Miao, Steven S. 
Raman, Demetri Terzopoulos, Kyunghyun Sung.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.15163)]\n \n   **UNetFormer：用于 3D 医学图像分割的统一视觉 Transformer 模型及预训练框架。** 2022 年 4 月 1 日。\u003Cbr>\n*Ali Hatamizadeh, Ziyue Xu, Dong Yang, Wenqi Li, Holger Roth, Daguang Xu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.00631)] [[Github](https:\u002F\u002Fgithub.com\u002FProject-MONAI\u002Fresearch-contributions)]\n \n   **CCAT-NET：一种基于 Transformer 的新型半监督 Covid-19 肺部病变分割框架。** 2022 年 4 月 6 日。\u003Cbr>\n*Mingyang Liu, Li Xiao, Huiqin Jiang, Qing He.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.02839)] \n \n   **使用 Transformer 进行海马体持续分割。** 2022 年 4 月 17 日。\u003Cbr>\n*Amin Ranem, Camila González, Anirban Mukhopadhyay.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.08043)] \n \n   **TranSiam：利用 Transformer 融合多模态视觉特征以进行医学图像分割。** 2022 年 4 月 26 日。\u003Cbr>\n*Xuejian Li, Shiqiang Ma, Jijun Tang, Fei Guo.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.12185)]\n \n   **ColonFormer：一种基于 Transformer 的高效结肠息肉分割方法。** 2022 年 5 月 17 日。\u003Cbr>\n*Nguyen Thanh Duc, Nguyen Thi Oanh, Nguyen Thi Thuy, Tran Minh Triet, Dinh Viet Sang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.08473)]\n \n   **基于 Transformer 的弱监督组织病理学图像分割多示例学习。** 2022 年 5 月 18 日。\u003Cbr>\n*Ziniu Qian, Kailu Li, Maode Lai, Eric I-Chao Chang, Bingzheng Wei, Yubo Fan, Yan Xu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.08878)]\n \n   **基于 Transformer 的生成对抗网络 (GAN) 用于肝脏分割。** 2022 年 5 月 21 日。\u003Cbr>\n*Ugur Demir, Zheyuan Zhang, Bin Wang, Matthew Antalek, Elif Keles, Debesh Jha, Amir Borhani, Daniela Ladner, Ulas Bagci.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.10663)][[Github](https:\u002F\u002Fgithub.com\u002FProject-MONAI\u002Fresearch-contributions)]\n\n   **一种通过均值教师实现 MRI 心脏语义分割的不确定性感知 Transformer。** 2022 年 7 月 25 日 [MIUA, 2022]。\u003Cbr>\n*Ziyang Wang, Jian-Qing Zheng, Irina Voiculescu.*\u003Cbr>\n [[PDF](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-031-12053-4_37)][[Github](https:\u002F\u002Fgithub.com\u002Fziyangwang007\u002FCV-SSL-MIS)]\n\n   **通过双重伪标签监督实现的计算高效医学图像语义分割视觉 Transformer。** 2022 年 10 月 16 日 [ICIP, 2022]。\u003Cbr>\n*Ziyang Wang, Nanqing Dong, Irina Voiculescu.*\u003Cbr>\n [[PDF](https:\u002F\u002Fieeexplore.ieee.org\u002Fabstract\u002Fdocument\u002F9897482\u002F)][[Github](https:\u002F\u002Fgithub.com\u002Fziyangwang007\u002FCV-SSL-MIS)]\n\n   **用于有限标注医学图像语义分割的对抗性视觉 Transformer。** 2022 年 11 月 21 日 [BMVC, 2022]。\u003Cbr>\n*Ziyang Wang, Chengkuan Zhao, Zixuan Ni.*\u003Cbr>\n [[PDF](https:\u002F\u002Fbmvc2022.mpi-inf.mpg.de\u002F1002.pdf)][[Github](https:\u002F\u002Fgithub.com\u002Fziyangwang007\u002FCV-SSL-MIS)]\n\n   **DuAT：用于医学图像分割的双聚合 Transformer 网络。** 2022 年 12 月 21 日。\u003Cbr>\n*Feilong Tang, Qiming Huang, Jinfeng Wang, Xianxu Hou, Jionglong Su, Jingxin Liu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.11677)]\n\n   **当 CNN 遇见 ViT：迈向多类医学图像语义分割的半监督学习。** 2023 年 2 月 12 日 [ECCV 研讨会，2022]。\u003Cbr>\n*Ziyang Wang, Tianze Li, Jian-Qing Zheng, Baoru Huang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2208.06449)][[Github](https:\u002F\u002Fgithub.com\u002Fziyangwang007\u002FCV-SSL-MIS)]\n\n   **DAE-Former：用于医学图像分割的双注意力引导高效 Transformer。** 2023 年 1 月 27 日。\u003Cbr>\n*Reza Azad, René Arimond, Ehsan Khodapanah Aghdam, Amirhossein Kazerouni, Dorit Merhof.*\u003Cbr>\n 
[[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.13504)][[Github](https:\u002F\u002Fgithub.com\u002Fmindflow-institue\u002FDAEFormer)]\n\n# 分类\n\n  **TransMed：Transformer 推动多模态医学图像分类。** [2021 年 3 月 10 日].\u003Cbr>\n*Yin Dai, Yifan Gao.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.05940)]\n \n   **Medical Transformer：用于 3D MRI 分析的通用大脑编码器。** [2021 年 4 月 28 日].\u003Cbr>\n*Eunji Jun, Seungwoo Jeong, Da-Woon Heo, Heung-Il Suk.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.13633)] \n \n   **TransMIL：基于 Transformer 的相关多示例学习用于全切片图像分类。** [2021 年 6 月 2 日] [⚡NeurIPS, 2021].\u003Cbr>\n*Zhuchen Shao, Hao Bian, Yang Chen, Yifeng Wang, Jian Zhang, Xiangyang Ji, Yongbing Zhang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.00908)] [[GitHub](https:\u002F\u002Fgithub.com\u002Fszc19990412\u002FTransMIL)]\n \n   **基于 Vision Transformer 的糖尿病视网膜病变分级识别。** [2021 年 7 月 15 日] [⚡CVPR, 2021].\u003Cbr>\n*Rui Sun, Yihao Li, Tianzhu Zhang, Zhendong Mao, Feng Wu, Yongdong Zhang.*\u003Cbr>\n [[PDF](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FCVPR2021\u002Fhtml\u002FSun_Lesion-Aware_Transformers_for_Diabetic_Retinopathy_Grading_CVPR_2021_paper.html)] \n \n   **是时候用 Transformer 取代医学图像中的 CNN 了吗？** [2021 年 8 月 20 日] [👍ICCV 自动医疗诊断研讨会，2021].\u003Cbr>\n*Christos Matsoukas, Johan Fredin Haslum, Magnus Söderberg, Kevin Smith.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2108.09038)] [[GitHub](https:\u002F\u002Fgithub.com\u002Fchrismats\u002Fmedical_transformers)]\n \n   **Gene Transformer：基于基因表达分类肺癌亚型的 Transformer** [2021 年 8 月 26 日].\u003Cbr>\n*Anwar Khan, Boreom Lee.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2108.11833)]\n \n   **一种基于 Transformer 的深度学习方法，利用临床全脑 MRI 将脑转移瘤分类至原发器官部位。** [2021 年 10 月 7 日].\u003Cbr>\n*Qing Lyu, Sanjeev V. Namjoshi, Emory McTyre, Umit Topaloglu, Richard Barcus, Michael D. Chan, Christina K. Cramer, Waldemar Debinski, Metin N. Gurcan, Glenn J. Lesser, Hui-Kuan Lin, Reginald F. Munden, Boris C. Pasche, Kiran Kumar Solingapuram Sai, Roy E. Strowd, Stephen B. Tatter, Kounosuke Watabe, Wei Zhang, Ge Wang, Christopher T. Whitlow.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2110.03588)] \n \n   **CAE-Transformer：基于 Transformer 的模型，从非薄层 3D CT 扫描预测肺腺癌亚实性结节的侵袭性。** [2021 年 10 月 17 日].\u003Cbr>\n*Shahin Heidarian, Parnian Afshar, Anastasia Oikonomou, Konstantinos N. Plataniotis, Arash Mohammadi.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2110.08721)] \n \n   **基于 Vision Transformer 的糖尿病视网膜病变分级识别。** [2021 年 10 月 25 日].\u003Cbr>\n*Jianfang Wu, Ruo Hu, Zhenghong Xiao, Jiaxu Chen, Jingwei Liu.*\u003Cbr>\n [[PDF](https:\u002F\u002Fpubmed.ncbi.nlm.nih.gov\u002F34693536\u002F)] \n \n   **用于乳腺超声图像分类的 Vision Transformers。** [2021 年 10 月 27 日].\u003Cbr>\n*Behnaz Gheflati, Hassan Rivaz.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2110.14731)]\n \n   **以指示作为先验知识，使用 Transformer 进行胸部 X 光片的多模态疾病分类** [2022 年 2 月 12 日] [👍ISBI, 2022].\u003Cbr>\n*Grzegorz Jacenków, Alison Q. O'Neil, Sotirios A. 
Tsaftaris.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.06076)] [[GitHub](https:\u002F\u002Fgithub.com\u002Fjacenkow\u002Fmmbt)]\n \n   **无需标签即可进化的 AI：通过知识蒸馏实现胸部 X 光诊断的自进化 Vision Transformer。** [2022 年 2 月 13 日].\u003Cbr>\n*Sangjoon Park, Gwanghyun Kim, Yujin Oh, Joon Beom Seo, Sang Min Lee, Jin Hwan Kim, Sungjun Moon, Jae-Kwang Lim, Chang Min Park, Jong Chul Ye.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.06431)]\n \n   **ScoreNet：学习非均匀注意力与增强，用于基于 Transformer 的组织病理学图像分类。** [2022 年 2 月 15 日].\u003Cbr>\n*Thomas Stegmüller, Antoine Spahr, Behzad Bozorgtabar, Jean-Philippe Thiran.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.07570v1)]\n \n   **一种混合双阶段 Vision Transformer，用于 AI 辅助的胃镜活检 5 类病理诊断。** [2022 年 2 月 17 日].\u003Cbr>\n*Yujin Oh, Go Eun Bae, Kyung-Hee Kim, Min-Kyung Yeo, Jong Chul Ye.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.08510)]\n\n   **RadioTransformer：用于视觉注意力引导疾病分类的级联全局 - 焦点 Transformer。** [2022 年 2 月 23 日].\u003Cbr>\n*Moinak Bhattacharya, Shubham Jain, Prateek Prasanna.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.11781)]\n \n   **Uni4Eye：通过掩码图像建模 Transformer 统一 2D 和 3D 自监督预训练，用于眼科图像分类。** [2022 年 3 月 9 日].\u003Cbr>\n*Zhiyuan Cai, Li Lin, Huaqing He, Xiaoying Tang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.04614)]\n \n   **用于骨肉瘤组织学图像分类的降噪注意力交叉融合学习 Transformer。** [2022 年 4 月 29 日].\u003Cbr>\n*Liangrui Pan, Hetian Wang, Lian Wang, Boya Ji, Mingting Liu, Mitchai Chongcheawchamnan, Jin Yuan, Shaoliang Peng.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.13838)]\n \n   **HoVer-Trans：解剖感知的 HoVer-Transformer，用于超声图像中无 ROI 的乳腺癌诊断。** [2022 年 5 月 17 日].\u003Cbr>\n*Yuhao Mo, Chu Han, Yu Liu, Min Liu, Zhenwei Shi, Jiatai Lin, Bingchao Zhao, Chunwang Huang, Bingjiang Qiu, Yanfen Cui, Lei Wu, Xipeng Pan, Zeyan Xu, Xiaomei Huang, Zaiyi Liu, Ying Wang, Changhong Liang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.08390)]\n \n   **用于全切片图像分类的图 Transformer** [2022 年 5 月 19 日].\u003Cbr>\n*Yi Zheng, Rushin H. Gindra, Emily J. Green, Eric J. Burks, Margrit Betke, Jennifer E. Beane, Vijaya B. 
Kolachalama.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.09671)] [[GitHub](https:\u002F\u002Fgithub.com\u002Fjacenkow\u002Fmmbt)]\n \n   **胃癌组织病理学子尺寸图像分类的比较研究：从线性回归到视觉 Transformer** [2022 年 5 月 19 日].\u003Cbr>\n*Weiming Hu, Haoyuan Chen, Wanli Liu, Xiaoyan Li, Hongzan Sun, Xinyu Huang, Marcin Grzegorzek, Chen Li.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.09671)]\n \n   **使用 Vision Transformer 进行肺癌多标签分类的零样本和少样本学习** [2022 年 5 月 30 日].\u003Cbr>\n*Fu-Ming Guo, Yingfang Fan.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.15290)]\n \n   **使用集成 Transformer 从眼底图像检测糖尿病视网膜病变的严重程度。** [2023 年 1 月 3 日].\u003Cbr>\n*Chandranath Adak, Tejas Karkera, Soumi Chattopadhyay, Muhammad Saqib.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.00973)]\n\n## COVID-19 分类（因其当前的重要性而单独列出）\n\n  **Vision Transformer (视觉 Transformer) 用于 COVID-19 CXR (胸部 X 光) 诊断，采用胸部 X 光特征语料库。** [2021 年 3 月 12 日].\u003Cbr>\n*Sangjoon Park, Gwanghyun Kim, Yujin Oh, Joon Beom Seo, Sang Min Lee, Jin Hwan Kim, Sungjun Moon, Jae-Kwang Lim, Jong Chul Ye.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.07055)]\n \n   **使用低级胸部 X 光特征语料库的 Vision Transformer 用于 COVID-19 诊断和严重程度量化。** [2021 年 4 月 15 日].\u003Cbr>\n*Sangjoon Park, Gwanghyun Kim, Yujin Oh, Joon Beom Seo, Sang Min Lee, Jin Hwan Kim, Sungjun Moon, Jae-Kwang Lim, Jong Chul Ye.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.07235)]\n \n   **POCFormer：一种基于床旁超声检测 COVID-19 的轻量级 Transformer 架构。** [2021 年 5 月 15 日].\u003Cbr>\n*Shehan Perera, Srikar Adhikari, Alper Yilmaz.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2105.09913)] \n \n   **使用定制类 Transformer 网络自动诊断 COVID-19。** [2021 年 5 月 20 日].\u003Cbr>\n*Chengeng Liu1, Qingshan Yin.*\u003Cbr>\n [[PDF](https:\u002F\u002Fiopscience.iop.org\u002Farticle\u002F10.1088\u002F1742-6596\u002F2010\u002F1\u002F012175)]\n \n   **COVID-VIT：基于 Vision Transformer 模型对 CT (计算机断层扫描) 胸部图像进行 COVID-19 分类。** [2021 年 7 月 4 日].\u003Cbr>\n*Xiaohong Gao, Yu Qian, Alice Gao.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2107.01682)] [[GitHub](https:\u002F\u002Fgithub.com\u002Fxiaohong1\u002FCOVID-ViT)]\n \n   **xViTCOS：基于可解释 Vision Transformer 的放射学 COVID-19 筛查。** [2021 年 7 月 6 日].\u003Cbr>\n*Arnab Kumar Mondal, Arnab Bhattacharjee, Parag Singla, Prathosh AP.*\u003Cbr>\n [[PDF](https:\u002F\u002Fwww.techrxiv.org\u002Farticles\u002Fpreprint\u002FxViTCOS_Explainable_Vision_Transformer_Based_COVID-19_Screening_Using_Radiography\u002F14912367\u002F1)]\n \n   **用于 COVID-19 分类的带有统计检验的 Visual Transformer。** [2021 年 7 月 12 日] [👍ICCV MIA Workshop, 2021].\u003Cbr>\n*Chih-Chung Hsu, Guan-Lin Chen, Mei-Hsuan Wu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2107.05334)] \n \n   **MIA-COV19D：一种基于 Transformer 的胸部 CT 中 COVID-19 分类框架。** [2021 年 7 月 15 日] [👍ICCV MIA Workshop, 2021].\u003Cbr>\n*Lei Zhang, Yan Wen.*\u003Cbr>\n [[PDF](https:\u002F\u002Fwww.researchgate.net\u002Fpublication\u002F353105641_MIA-COV19D_A_transformer-based_framework_for_COVID19_classification_in_chest_CTs)]\n \n   **COViT-GAN：用于 CT 图像中 COVID-19 检测的 Vision Transformer (生成对抗网络)。** [2021 年 8 月 10 日].\u003Cbr>\n*Ara Abigail E. Ambita, Eujene Nikka V. Boquio, Prospero C. 
Naval Jr.*\u003Cbr>\n [[PDF](https:\u002F\u002Fwww.springerprofessional.de\u002Fen\u002Fcovit-gan-vision-transformer-forcovid-19-detection-in-ct-scan-im\u002F19652482)] \n \n   **COVID-Transformer：利用 Vision Transformer 进行医疗领域可解释的 COVID-19 检测。** [2021 年 9 月 23 日].\u003Cbr>\n*Debaditya Shome, T Kar, Sachi Nandan Mohanty, Prayag Tiwari, Khan Muhammad, Abdullah AlTameem, Yazhou Zhang, Abdul Khader Jilani Saudagar.*\u003Cbr>\n [[PDF](https:\u002F\u002Fpubmed.ncbi.nlm.nih.gov\u002F34769600\u002F)]\n \n   **基于 Vision Transformer 使用胸部 X 光进行 COVID-19 检测。** [2021 年 10 月 9 日].\u003Cbr>\n*Koushik Sivarama Krishnan, Karthik Sivarama Krishnan.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2110.04458)]  \n \n   **使用 Swin-Transformer 和 Transformer in Transformer 在胸部 X 光图像中进行 COVID-19 检测。** [2021 年 10 月 16 日].\u003Cbr>\n*Juntao Jiang, Shuyi Lin.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2110.08427)] \n \n   **使用任务无关训练的 Federated Split Vision Transformer 用于 COVID-19 CXR 诊断。** [2021 年 11 月 2 日] [⚡NeurIPS, 2021].\u003Cbr>\n*Sangjoon Park, Gwanghyun Kim, Jeongsol Kim, Boah Kim, Jong Chul Ye.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.01338)]\n \n   **使用低级胸部 X 光特征语料库的多任务 Vision Transformer 用于 COVID-19 诊断和严重程度量化。** [2021 年 11 月 4 日] [⚡MIA, 2021].\u003Cbr>\n*Sangjoon Park, Gwanghyun Kim, Yujin Oh, Joon Beom Seo, Sang Min Lee, Jin Hwan Kim, Sungjun Moon, Jae-Kwang Lim, Jong Chul Ye.*\u003Cbr>\n [[PDF](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS1361841521003443)]\n \n   **基于 Transformer 和 CNN (卷积神经网络) 的 COVID-19 CT 图像识别算法。** [2022 年 1 月 24 日].\u003Cbr>\n*Xiaole Fan, Xiufang Feng, Yunyun Dong, Huichao Hou.*\u003Cbr>\n [[PDF](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0141938222000026)]\n\n# 重建\n\n   **TransCT：用于低剂量计算机断层扫描 (CT) 的双路径 Transformer（变换器）。** [28th Feb., 2021] [⚡MICCAI, 2021].\u003Cbr>\n*Zhicheng Zhang, Lequan Yu, Xiaokun Liang, Wei Zhao, Lei Xing.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.00634)] [[Github](https:\u002F\u002Fgithub.com\u002Fzzc623\u002FTransCT)]\n \n   **通过零样本学习对抗性 Transformer 进行无监督 MRI（磁共振成像）重建。** [15th May, 2021].\u003Cbr>\n*Yilmaz Korkmaz, Salman UH Dar, Mahmut Yurt, Muzaffer Özbey, Tolga Çukur.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.04430)] [[Github](https:\u002F\u002Fgithub.com\u002FWenxuan-1119\u002FTransBTS)]\n \n   **TED-net：基于卷积自由 T2T 视觉 Transformer 的编码器 - 解码器膨胀网络用于低剂量 CT 去噪。** [8th June, 2021].\u003Cbr>\n*Dayang Wang, Zhan Wu, Hengyong Yu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.04650)]\n \n   **用于联合 MRI 重建和超分辨率的任务 Transformer 网络。** [12th June, 2021] [⚡MICCAI, 2021].\u003Cbr>\n*Chun-Mei Feng, Yunlu Yan, Huazhu Fu, Li Chen, Yong Xu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.06742)] [[Github](https:\u002F\u002Fgithub.com\u002Fchunmeifeng\u002FT2Net)]\n \n   **使用 Transformer 加速多模态 MR 成像。** [27th June, 2021].\u003Cbr>\n*Chun-Mei Feng, Yunlu Yan, Geng Chen, Huazhu Fu, Yong Xu, Ling Shao.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.14248)] [[Github](https:\u002F\u002Fgithub.com\u002Fchunmeifeng\u002FMTrans)]\n \n   **E-DSSR：基于 Transformer 的立体深度感知的高效动态手术场景重建。** [1st July, 2021] [⚡MICCAI, 2021].\u003Cbr>\n*Yonghao Long, Zhaoshuo Li, Chi Hang Yee, Chi Fai Ng, Russell H. 
Taylor, Mathias Unberath, Qi Dou.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2107.00229)]\n \n   **Eformer：基于边缘增强的 Transformer，用于医学图像去噪。** [16th Sep., 2021] [👍ICCV Workshop, 2021].\u003Cbr>\n*Achleshwar Luthra, Harsh Sulakhe, Tanish Mittal, Abhishek Iyer, Santosh Yadav.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2109.08044)] \n \n   **Transformer-Unet：使用 Unet 进行原始图像处理。** [17th Sep., 2021].\u003Cbr>\n*Youyang Sha, Yonghong Zhang, Xuquan Ji, Lei Hu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2109.08417)]\n \n   **使用生成式视觉 Transformer 进行深度 MRI 重建。** [25th Sep., 2021].\u003Cbr>\n*Yilmaz Korkmaz, Mahmut Yurt, Salman Ul Hassan Dar, Muzaffer Özbey, Tolga Cukur.*\u003Cbr>\n [[PDF](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-030-88552-6_6)] [[Github](https:\u002F\u002Fgithub.com\u002Ficon-lab\u002FSLATER)]\n \n   **用于高质量 PET（正电子发射断层扫描）重建的 3D Transformer-GAN（生成对抗网络）。** [29th Sep., 2021] [⚡MICCAI, 2021].\u003Cbr>\n*Yanmei Luo, Yan Wang, Chen Zu, Bo Zhan, Xi Wu, Jiliu Zhou, Dinggang Shen, Luping Zhou.*\u003Cbr>\n [[PDF](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-030-87231-1_27)] \n \n   **TranSMS：用于磁性粒子成像中超分辨率校准的 Transformers。** [3rd Nov., 2021] .\u003Cbr>\n*Alper Güngör, Baris Askin, Damla Alptekin Soydan, Emine Ulku Saritas, Can Barış Top, Tolga Çukur.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.02163)] \n \n   **DuDoTrans：双域 Transformer 为稀疏视图 CT 重建中的正弦图恢复提供更多注意力。** [21st Nov., 2021] .\u003Cbr>\n*Ce Wang, Kun Shang, Haimiao Zhang, Qian Li, Yuan Hui, S. Kevin Zhou.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.10790)]\n \n   **使用混合模型的自监督 CT 超分辨率。** [23rd Nov., 2021] .\u003Cbr>\n*Zhicheng Zhang, Shaode Yu, Wenjian Qin, Xiaokun Liang, Yaoqin Xie, Guohua Cao.*\u003Cbr>\n [[PDF](https:\u002F\u002Fwww.sciencedirect.com\u002Fscience\u002Farticle\u002Fpii\u002FS0010482521005692)] \n \n   **MIST-net：用于稀疏视图 CT 重建的多域集成 Swin Transformer 网络。** [29th Nov., 2021] .\u003Cbr>\n*Jiayi Pan, Weiwen Wu, Zhifan Gao, Heye Zhang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.14831)]\n \n   **ReconFormer：使用循环 Transformer 加速 MRI 重建。** [23rd Jan., 2022] .\u003Cbr>\n*Pengfei Guo, Yiqun Mei, Jinyuan Zhou, Shanshan Jiang, Vishal M. Patel.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2201.09376)][[Github](https:\u002F\u002Fgithub.com\u002Fguopengf\u002Freconformer)]\n \n   **DSFormer：一种用于加速多对比度 MRI 重建的双域自监督 Transformer。** [26th Jan., 2022] .\u003Cbr>\n*Bo Zhou, Jo Schlemper, Neel Dey, Seyed Sadegh Mohseni Salehi, Chi Liu, James S. 
Duncan, Michal Sofka.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2201.10776)]\n\n   **CTformer：用于低剂量 CT 去噪的无卷积 Token2Token 膨胀视觉 Transformer。** [28th Feb., 2022] .\u003Cbr>\n*Dayang Wang, Fenglei Fan, Zhan Wu, Rui Liu, Fei Wang, Hengyong Yu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.13517)][[Github](https:\u002F\u002Fgithub.com\u002Fwdayang\u002FCTformer)]\n \n   **自适应重加权多损失未训练 Transformer 用于稀疏视图锥形束 CT 重建。** [23rd March, 2022] .\u003Cbr>\n*Minghui Wu, Yangdi Xu, Yingying Xu, Guangwei Wu, Qingqing Chen, Hongxiang Lin.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.12476)]\n \n   **Transformer 赋能的多尺度上下文匹配与聚合，用于多对比度 MRI 超分辨率。** [26th March, 2022] .\u003Cbr>\n*Guangyuan Li, Jun Lv, Yapeng Tian, Qi Dou, Chengyan Wang, Chenliang Xu, Jing Qin.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.13963)]\n \n   **跨模态高频 Transformer，用于 MR 图像超分辨率。** [29th March, 2022] .\u003Cbr>\n*Chaowei Fang, Dingwen Zhang, Liang Wang, Yulun Zhang, Lechao Cheng, Junwei Han.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.15314)]\n \n   **数据和物理驱动的深度学习模型用于快速 MRI——从卷积神经网络 (CNN)、GAN 到注意力机制和 Transformer 的基础与方法论。** [1st April, 2022] .\u003Cbr>\n*Jiahao Huang, Yingying Fang, Yang Nan, Huanjun Wu, Yinzhe Wu, Zhifan Gao, Yang Li, Zidong Wang, Pietro Lio, Daniel Rueckert, Yonina C. Eldar, Guang Yang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.01706)]\n \n   **通过正弦图内部结构 Transformer 进行低剂量 CT 去噪。** [7th April, 2022] .\u003Cbr>\n*Liutao Yang, Zhongnian Li, Rongjun Ge, Junyong Zhao, Haipeng Si, Daoqiang Zhang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.03163)]\n \n   **掩码协同注意力 Transformer 从纵向图像和解剖引导 MRI 重建 100 倍超快\u002F低剂量全身 PET。** [9th May, 2022] .\u003Cbr>\n*Yan-Ran Wang, Liangqiong Qu, Natasha Diba Sheybani, Xiaolong Luo, Jiangshan Wang, Kristina Elizabeth Hawk, Ashok Joseph Theruvath, Sergios Gatidis, Xuerong Xiao, Allison Pribnow, Daniel Rubin, Heike E. Daldrup-Link.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.04044)]\n \n    **基于 Transformer 和 GAN 的医学图像超分辨率重建网络。** [26th Dec., 2022] .\u003Cbr>\n*Weizhi Du, Harvery Tian.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2212.13068)]\n\n# 配准\n\n  **ViT-V-Net: Vision Transformer (视觉 Transformer) for Unsupervised (无监督) Volumetric Medical Image Registration (医学图像配准).** [2021 年 4 月 13 日] [👍MIDL 短文论文，2021].\u003Cbr>\n*Junyu Chen, Yufan He, Eric C. Frey, Ye Li, Yong Du.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.06468)] [[GitHub](https:\u002F\u002Fgithub.com\u002Fjunyuchen245\u002FViT-V-Net_for_3D_Image_Registration_Pytorch)]\n \n   **Attention for Image Registration (AiR): an unsupervised Transformer approach.** [2021 年 5 月 5 日].\u003Cbr>\n*Zihao Wang, Hervé Delingette.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2105.02282)]\n \n   **Learning Dual Transformer Network for Diffeomorphic (微分同胚) Registration.** [2021 年 9 月 21 日] [⚡MICCAI, 2021].\u003Cbr>\n*Yungeng Zhang, Yuru Pei, Hongbin Zha.*\u003Cbr>\n [[PDF](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-030-87202-1_13)]\n \n   **TransMorph: Transformer for unsupervised medical image registration.** [2021 年 11 月 19 日].\u003Cbr>\n*Junyu Chen, Yong Du, Yufan He, William P. Segars, Ye Li, Eric C. 
Frey.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.10480)] [[GitHub](https:\u002F\u002Fgithub.com\u002Fjunyuchen245\u002FTransMorph_Transformer_for_Medical_Image_Registration)]\n\n   **A Transformer-based Network for Deformable (可变形) Medical Image Registration.** [2022 年 2 月 24 日].\u003Cbr>\n*Yibo Wang, Wen Qian, Xuming Zhang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.12104)]\n \n   **Affine (仿射) Medical Image Registration with Coarse-to-Fine (由粗到细) Vision Transformer.** [2022 年 3 月 29 日].\u003Cbr>\n*Tony C. W. Mok, Albert C. S. Chung.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.15216)] [[GitHub](https:\u002F\u002Fgithub.com\u002Fcwmok\u002FC2FViT)]\n\n# 合成\n\n  **VTGAN: Semi-supervised (半监督) Retinal (视网膜) Image Synthesis and Disease Prediction using Vision Transformers.** [2021 年 4 月 14 日] [👍ICCV Workshop on Computer Vision for Automated Medical Diagnosi, 2021].\u003Cbr>\n*Sharif Amit Kamran, Khondker Fariha Hossain, Alireza Tavakkoli, Stewart Lee Zuckerbrod, Salah A. Baker.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2104.06757)] [[GitHub](https:\u002F\u002Fgithub.com\u002FSharifAmit\u002FVTGAN)]\n \n   **PTNet: A High-Resolution Infant MRI (磁共振成像) Synthesizer Based on Transformer.** [2021 年 5 月 28 日].\u003Cbr>\n*Xuzhe Zhang, Xinzi He, Jia Guo, Nabil Ettehadi, Natalie Aw, David Semanek, Jonathan Posner, Andrew Laine, Yun Wang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2105.13993)] [[GitHub](https:\u002F\u002Fgithub.com\u002FXuzheZ\u002FPTNet)]\n \n   **ResViT: Residual vision transformers for Multi-modal (多模态) Medical Image Synthesis.** [2021 年 6 月 30 日].\u003Cbr>\n*Onat Dalmaz, Mahmut Yurt, Tolga Çukur.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.16031)] [[GitHub](https:\u002F\u002Fgithub.com\u002Ficon-lab\u002FResViT)]\n \n   **CyTran: Cycle-Consistent (循环一致性) Transformers for Non-Contrast to Contrast CT (计算机断层扫描) Translation.** [2021 年 10 月 12 日].\u003Cbr>\n*Nicolae-Catalin Ristea, Andreea-Iuliana Miron, Olivian Savencu, Mariana-Iuliana Georgescu, Nicolae Verga, Fahad Shahbaz Khan, Radu Tudor Ionescu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2110.06400)] [[GitHub](https:\u002F\u002Fgithub.com\u002Fristea\u002Fcycle-transformer)]\n \n   **One Model to Synthesize Them All: Multi-contrast Multi-scale Transformer for Missing Data Imputation (缺失数据插补).** [2022 年 4 月 28 日].\u003Cbr>\n*Jiang Liu, Srivathsa Pasumarthi, Ben Duffy, Enhao Gong, Greg Zaharchuk, Keshav Datta.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.13738)] \n\n# 检测\n \n   **COTR: Convolution in Transformer Network for End to End (端到端) Polyp (息肉) Detection.** [2021 年 5 月 23 日].\u003Cbr>\n*Zhiqiang Shen, Chaonan Lin, Shaohua Zheng.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2105.10925)]\n  \n   **Transformer for Polyp Detection.** [2021 年 10 月 14 日].\u003Cbr>\n*Shijie Liu, Hongyu Zhou, Xiaozhou Shi, Junwen Pan.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.07918)]\n  \n   **Lymph Node Detection in T2 MRI with Transformers.** [2021 年 11 月 9 日].\u003Cbr>\n*Tejas Sudharshan Mathai, Sungwon Lee, Daniel C. Elton, Thomas C. Shen, Yifan Peng, Zhiyong Lu, Ronald M. Summers.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.04885)]\n \n   **SATr: Slice Attention with Transformer for Universal Lesion (病灶) Detection.** [2022 年 3 月 13 日].\u003Cbr>\n*Han Li, Long Chen, Hu Han, S. 
Kevin Zhou.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.07373)]\n \n   **Contrastive Transformer-based Multiple Instance Learning (实例学习) for Weakly Supervised (弱监督) Polyp Frame Detection.** [2022 年 3 月 13 日].\u003Cbr>\n*Yu Tian, Guansong Pang, Fengbei Liu, Yuyuan Liu, Chong Wang, Yuanhong Chen, Johan W Verjans, Gustavo Carneiro.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.12121)]\n \n   **Unsupervised Contrastive Learning (对比学习) based Transformer for Lung Nodule (肺结节) Detection.** [2022 年 4 月 30 日].\u003Cbr>\n*Chuang Niu, Ge Wang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.00122)]\n\n   **SwinFPN: Leveraging Vision Transformers for 3D Organs-At-Risk (风险器官) Detection.** [2022 年 5 月 9 日] [MIDL 短文论文，2022].\u003Cbr>\n*Bastian Wittmann, Suprosanna Shit, Fernando Navarro, Jan C. Peeken, Stephanie E. Combs, Bjoern Menze.*\u003Cbr>\n [[PDF](https:\u002F\u002Fopenreview.net\u002Fforum?id=yiIz7DhgRU5)] [[GitHub](https:\u002F\u002Fgithub.com\u002Fbwittmann\u002Ftransoar)]\n \n   **An Effective Transformer-based Solution for RSNA Intracranial Hemorrhage (颅内出血) Detection Competition.** [2022 年 5 月 16 日].\u003Cbr>\n*Fangxin Shang, Siqi Wang, Xiaorong Wang, Yehui Yang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.07556)] [[GitHub](https:\u002F\u002Fgithub.com\u002FPaddlePaddle\u002FResearch)]\n\n   **Focused Decoding Enables 3D Anatomical Detection by Transformers.** [2023 年 2 月 27 日] [MELBA, 2023].\u003Cbr>\n*Bastian Wittmann, Fernando Navarro, Suprosanna Shit, Bjoern Menze.*\u003Cbr>\n [[PDF](https:\u002F\u002Fwww.melba-journal.org\u002Fpapers\u002F2023:003.html)] [[GitHub](https:\u002F\u002Fgithub.com\u002Fbwittmann\u002Ftransoar)]\n\n# 临床报告生成\n\n   **用于医学图像描述（Medical Image Captioning）的强化 Transformer（变换器）。** [2019 年 10 月 10 日].[MLMI, 2019]\u003Cbr>\n*Yuxuan Xiong, Bo Du, Pingkun Yan.*\u003Cbr>\n [[PDF](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-030-32692-0_77)] \n\n   **提高图像到文本放射学报告（Radiology Report）生成的事实完整性和一致性。** [2020 年 10 月 20 日].[NAACL, 2020]\u003Cbr>\n*Yasuhide Miura, Yuhao Zhang, Emily Bao Tsai, Curtis P. 
Langlotz, Dan Jurafsky.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2010.10042)] [[Github](https:\u002F\u002Fgithub.com\u002Fysmiura\u002Fifcc)]\n\n   **通过记忆驱动 Transformer 生成放射学报告。** [2020 年 10 月 30 日].[EMNLP, 2020]\u003Cbr>\n*Zhihong Chen, Yan Song, Tsung-Hui Chang, Xiang Wan.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2010.16056)] [[Github](https:\u002F\u002Fgithub.com\u002Fcuhksz-nlp\u002FR2Gen)]\n \n   **通过病理标签和多头注意力机制（Multi Head Attention）进行分层 X 射线（X-Ray）报告生成。** [2020 年 11 月 30 日].[ACCV, 2020]\u003Cbr>\n*Preethi Srinivasan, Daksh Thapar, Arnav Bhavsar, Aditya Nigam.*\u003Cbr>\n [[PDF](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FACCV2020\u002Fhtml\u002FSrinivasan_Hierarchical_X-Ray_Report_Generation_via_Pathology_tags_and_Multi_Head_ACCV_2020_paper.html)]\n \n   **在机器人手术中，通过模型校准学习领域自适应（Domain Adaptation）以生成手术报告。** [2021 年 3 月 31 日].[⚡ICRA, 2021]\u003Cbr>\n*Mengya Xu, Mobarakol Islam, Chwee Ming Lim, Hongliang Ren.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2103.17120)] [[Github](https:\u002F\u002Fgithub.com\u002FXuMengyaAmy\u002FReportDALS)]\n \n   **置信度引导的放射学报告生成。** [2021 年 6 月 21 日].[⚡MICCAI, 2021]\u003Cbr>\n*Yixin Wang, Zihao Lin, Jiang Tian, Zhongchao Shi, Yang Zhang, Jianping Fan, Zhiqiang He.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.10887)]\n \n   **探索并蒸馏后验知识和先验知识以用于放射学报告生成。** [2021 年 6 月 13 日].[⚡CVPR, 2021]\u003Cbr>\n*Fenglin Liu, Xian Wu, Shen Ge, Wei Fan, Yuexian Zou.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2106.06963)]\n \n   **RATCHET：用于胸部 X 射线诊断和报告的医疗 Transformer。** [2021 年 7 月 5 日].[⚡MICCAI, 2021]\u003Cbr>\n*Benjamin Hou, Georgios Kaissis, Ronald Summers, Bernhard Kainz.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2107.02104)] [[Github](https:\u002F\u002Fgithub.com\u002Ffarrell236\u002FRATCHET)]\n \n   **使用 Transformer 生成手术指令。** [2021 年 7 月 14 日].[⚡MICCAI, 2021]\u003Cbr>\n*Jinglu Zhang, Yinyu Nie, Jian Chang, Jian Jun Zhang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2107.06964)]\n   \n   **具有平滑和校准的类增量领域自适应用于手术报告生成。** [2021 年 7 月 23 日].[⚡MICCAI, 2021]\u003Cbr>\n*Mengya Xu, Mobarakol Islam, Chwee Ming Lim, Hongliang Ren.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2107.11091)] [[Github](https:\u002F\u002Fgithub.com\u002FXuMengyaAmy\u002FCIDACaptioning)]\n \n   **准确流畅的医学 X 射线报告自动生成。** [2021 年 8 月 27 日].[EMNLP, 2021]\u003Cbr>\n*Hoang T.N. Nguyen, Dong Nie, Taivanbat Badamdorj, Yujie Liu, Yingying Zhu, Jason Truong, Li Cheng.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2108.12126)] [[Github](https:\u002F\u002Fgithub.com\u002Fginobilinie\u002Fxray_report_generation)]\n \n   **AlignTransformer：用于医学报告生成的视觉区域和疾病标签的分层对齐。** [2021 年 9 月 1 日].[⚡MICCAI, 2021]\u003Cbr>\n*Di You, Fenglin Liu, Shen Ge, Xiaoxia Xie, Jing Zhang, Xian Wu.*\u003Cbr>\n [[PDF](https:\u002F\u002Flink.springer.com\u002Fchapter\u002F10.1007\u002F978-3-030-87199-4_7)] \n \n   **使用基于 Transformer 的深度学习模型自动生成胸部 X 射线报告。** [2021 年 10 月 20 日].[ICDS, 2021]\u003Cbr>\n*Ayoub Benali Amjoud, Mustapha Amrouch.*\u003Cbr>\n [[PDF](https:\u002F\u002Fieeexplore.ieee.org\u002Fdocument\u002F9626725\u002F)]\n  \n   **FFA-IR：迈向可解释且可靠的医学报告生成基准。** [2021 年 10 月 31 日].[⚡NeurIPS, 2021]\u003Cbr>\n*Mingjie Li, Wenjia Cai, Rui Liu, Yuetian Weng, Xiaoyun Zhao, Cong Wang, Xin Chen, Zhong Liu, Caineng Pan, Mengke Li, Yingfeng Zheng, Yizhi Liu, Flora D. 
Salim, Karin Verspoor, Xiaodan Liang, Xiaojun Chang.*\u003Cbr>\n [[PDF](https:\u002F\u002Fopenreview.net\u002Fpdf?id=FgYTwJbjbf)] [[Github](https:\u002F\u002Fgithub.com\u002Fmlii0117\u002FFFA-IR)]\n \n   **通过图像与自由文本放射学报告之间的交叉监督实现泛化放射影像表示学习。** [2021 年 11 月 4 日].[Nature Machine Intelligence, 2022]\u003Cbr>\n*Hong-Yu Zhou, Xiaoyu Chen, Yinghao Zhang, Ruibang Luo, Liansheng Wang, Yizhou Yu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.03452)][[Github](https:\u002F\u002Fgithub.com\u002Ffunnyzhou\u002Frefers)]\n \n   **用于无监督医学报告生成的自动编码知识图谱（Knowledge Graph）。** [2021 年 11 月 8 日].[⚡NeurIPS, 2021]\u003Cbr>\n*Fenglin Liu, Chenyu You, Xian Wu, Shen Ge, Sheng Wang, Xu Sun.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2111.04318)]\n \n   **理解修改后的 Transformer 架构在胸部放射影像临床报告生成中的迁移学习（Transfer Learning）。** [2022 年 5 月 5 日].\u003Cbr>\n*Edward Vendrow, Ethan Schonfeld.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.02841)]\n \n\n# 其他\n\n   **用于吉像素全切片图像（Whole Slide Images）中生存预测（Survival Prediction）的多模态协同注意力 Transformer。** [2021 年 9 月 22 日]. [⚡ICCV, 2021]\u003Cbr>\n*Chen, Richard J and Lu, Ming Y and Weng, Wei-Hung and Chen, Tiffany Y and Williamson, Drew FK and Manz, Trevor and Shady, Maha and Mahmood, Faisal.*\u003Cbr>\n [[PDF](https:\u002F\u002Fopenaccess.thecvf.com\u002Fcontent\u002FICCV2021\u002Fpapers\u002FChen_Multimodal_Co-Attention_Transformer_for_Survival_Prediction_in_Gigapixel_Whole_Slide_ICCV_2021_paper.pdf)][[Github](https:\u002F\u002Fgithub.com\u002Fmahmoodlab\u002Fmcat)]\n \n   **Transformer 在临床文本分类上的局限性。** [2021 年 9 月 25 日].\u003Cbr>\n*Shang Gao, Mohammed Alawad, M Todd Young, John Gounley, Noah Schaefferkoetter, Hong Jun Yoon, Xiao-Cheng Wu, Eric B Durbin, Jennifer Doherty, Antoinette Stroup, Linda Coyle, Georgia Tourassi.*\u003Cbr>\n [[PDF](https:\u002F\u002Fieeexplore.ieee.org\u002Fstamp\u002Fstamp.jsp?arnumber=9364676)]\n\n   **用于非小细胞肺癌（NSCLC）生存结果预测的可解释 Transformer 基础神经网络。** [2021 年 10 月 14 日].\u003Cbr>\n*Elly Kipkogei, Gustavo Alonso Arango Argoty, Ioannis Kagiampakis, Arijit Patra, Etai Jacob.*\u003Cbr>\n [[PDF](https:\u002F\u002Fwww.medrxiv.org\u002Fcontent\u002F10.1101\u002F2021.10.11.21264761v1)]\n\n**3D 医学点 Transformer（变换器）：将卷积引入注意力网络以进行医学点云分析。** [2021 年 12 月 9 日].\u003Cbr>\n*Jianhui Yu, Chaoyi Zhang, Heng Wang, Dingxin Zhang, Yang Song, Tiange Xiang, Dongnan Liu, Weidong Cai.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.04863)]\n \n   **用于 fMRI 预测任务的 Transformer 预训练与微调。** [2021 年 12 月 10 日].\u003Cbr>\n*Itzik Malkiel, Gony Rosenman, Lior Wolf, Talma Hendler.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.05761)]\n \n   **CLIP 在医疗领域的视觉问答中是否像通用领域一样有益？** [2021 年 12 月 27 日].\u003Cbr>\n*Sedigheh Eslami, Gerard de Melo, Christoph Meinel.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2112.13906)]\n \n   **TransPPG：用于远程心率估计的双流 Transformer。** [2022 年 1 月 26 日].\u003Cbr>\n*Jiaqi Kang, Su Yang, Weishan Zhang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2201.10873)]\n \n   **基于深度锚点注意力学习与 Vision Transformer 的未经治疗 MRI 脑癌生存预测。** [2022 年 2 月 3 日].\u003Cbr>\n*Xuan Xu, Prateek Prasanna.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.01857)]\n\n   **在 CoNIC 挑战中使用多尺度 SwinTransformer-HTC 与数据增强。** [2022 年 2 月 28 日].\u003Cbr>\n*Chia-Yen Lee, Hsiang-Chin Chien, Ching-Ping Wang, Hong Yen, Kai-Wen Zhen, Hong-Kun Lin.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2202.13588)]\n\n   **自监督 Vision Transformers 学习组织病理学中的视觉概念。** [2022 年 3 月 1 
日].\u003Cbr>\n*Richard J. Chen, Rahul G. Krishnan.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.00585)][[Github](https:\u002F\u002Fgithub.com\u002FRicharizardd\u002FSelf-Supervised-ViT-Path)]\n\n   **使用 3D 块聚合 Transformer 表征肾脏结构。** [2022 年 3 月 4 日].\u003Cbr>\n*Xin Yu, Yucheng Tang, Yinchi Zhou, Riqiang Gao, Qi Yang, Ho Hin Lee, Thomas Li, Shunxing Bao, Yuankai Huo, Zhoubing Xu, Thomas A. Lasko, Richard G. Abramson, Bennett A. Landman.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.02430)]\n \n   **双流 Transformer 的联合旋转不变性和对抗训练产生 V4 区的最佳 Brain-Score。** [2022 年 3 月 8 日].\u003Cbr>\n*William Berrios, Arturo Deza.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.06649)]\n \n   **基于 Transformer 的深度强化学习框架用于切片特定快速 MR 扫描的主动相位编码选择。** [2022 年 3 月 11 日].\u003Cbr>\n*Yiming Liu, Yanwei Pang, Ruiqi Jin, Zhenchang Wang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.05756)]\n \n   **表面 Vision Transformers：应用于皮层分析的基于注意力的建模。** [2022 年 3 月 29 日]. [MIDL, 2022]\u003Cbr>\n*Simon Dahan, Abdulah Fawaz, Logan Z. J. Williams, Chunhui Yang, Timothy S. Coalson, Matthew F. Glasser, A. David Edwards, Daniel Rueckert, Emma C. Robinson.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2203.16414)][[Github](https:\u002F\u002Fgithub.com\u002Fmetrics-lab\u002Fsurface-vision-transformers)]\n \n   **表面 Vision Transformers：生物医学表面的灵活基于注意力的建模。** [2022 年 4 月 7 日]. [MIDL, 2022]\u003Cbr>\n*Simon Dahan, Hao Xu, Logan Z. J. Williams, Abdulah Fawaz, Chunhui Yang, Timothy S. Coalson, Michelle C. Williams, David E. Newby, A. David Edwards, Matthew F. Glasser, Alistair A. Young, Daniel Rueckert, Emma C. Robinson.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.03408)][[Github](https:\u002F\u002Fgithub.com\u002Fmetrics-lab\u002Fsurface-vision-transformers)]\n \n   **3D Shuffle-Mixer：用于医学体积密集预测的高效上下文感知 Vision Transformer-MLP 范式学习者。** [2022 年 4 月 14 日]. [MIDL, 2022]\u003Cbr>\n*Jianye Pang, Cheng Jiang, Yihao Chen, Jianbo Chang, Ming Feng, Renzhi Wang, Jianhua Yao.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2204.06779)]\n \n   **基于局部注意力图的 Transformer 用于多目标基因变异预测。** [2022 年 5 月 13 日].\u003Cbr>\n*Daniel Reisenbüchler, Sophia J. Wagner, Melanie Boxberg, Tingying Peng.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.06672)]\n \n   **受稀疏表示启发的用于扩散 MRI 的微观结构估计 Transformer。** [2022 年 5 月 13 日].\u003Cbr>\n*Tianshu Zheng, Cong Sun, Weihao Zheng, Wen Shi, Haotian Li, Yi Sun, Yi Zhang, Guangbin Wang, Chuyang Ye, Dan Wu.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.06450)]\n \n   **BabyNet：用于胎儿超声视频出生体重预测的残差 Transformer 模块。** [2022 年 5 月 19 日].\u003Cbr>\n*Szymon Płotka, Michal K. Grzeszczyk, Robert Brawura-Biskupski-Samaha, Paweł Gutaj, Michał Lipa, Tomasz Trzciński, Arkadiusz Sitek.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.09382)][[Github](https:\u002F\u002Fgithub.com\u002Fsanoscience\u002Fbabynet)]\n \n   **用于临床安全分割的 Transformer 分布外检测。** [2022 年 5 月 21 日]. 
[MIDL, 2022 (Oral)]\u003Cbr>\n*Mark S Graham, Petru-Daniel Tudosiu, Paul Wright, Walter Hugo Lopez Pinaya, U Jean-Marie, Yee Mah, James Teo, Rolf H Jäger, David Werring, Parashkev Nachev, Sebastien Ourselin, M Jorge Cardoso.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2205.10650)]\n \n   **MS-DINO：通过掩码采样在医疗领域高效分布式训练 Vision Transformer 基础模型。** [2023 年 1 月 5 日].\u003Cbr>\n*Sangjoon Park, Ik-Jae Lee, Jun Won Kim, Jong Chul Ye.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.02064)]\n \n   **基于 Transformer-CNN 融合的合作学习增强结肠息肉分割。** [2023 年 1 月 17 日].\u003Cbr>\n*Yuanyuan Wang, Zhaohong Deng, Qiongdan Lou, Shudong Hu, Kup-sze Choi, Shitong Wang.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.06892)]\n \n  **ViT-AE++：改进 Vision Transformer 自编码器以用于自监督医学图像表示。** [2023 年 1 月 18 日].\u003Cbr>\n*Chinmay Prabhakar, Hongwei Bran Li, Jiancheng Yang, Suprosanna Shit, Benedikt Wiestler, Bjoern Menze.*\u003Cbr>\n [[PDF](https:\u002F\u002Farxiv.org\u002Fabs\u002F2301.07382)]\n\n# 引用\n\n如果您发现该列表和综述对您的工作有用，请引用以下论文：\n\n```\n@misc{shamshad2022transformers,\n      title={Transformers in Medical Imaging: A Survey},\n      author={Shamshad, Fahad and Khan, Salman and Zamir, Syed Waqas and Khan, Muhammad Haris and Hayat, Munawar and Khan, Fahad Shahbaz and Fu, Huazhu},\n      year={2022},\n      eprint={2201.09873},\n      archivePrefix={arXiv},\n      primaryClass={cs.CV}\n}\n```","# Awesome Transformers in Medical Imaging 快速上手指南\n\n本仓库是一个精选的医学影像领域 Transformer 资源列表，收录了相关的综述论文、开源代码及实现方案。它本身不是一个可执行的软件包，而是作为研究者和开发者的导航库，帮助您快速定位特定任务（如分割、分类、重建等）的最新模型。\n\n## 环境准备\n\n由于本仓库主要提供资源索引和链接，运行前需确保本地具备以下基础环境：\n\n- **操作系统**: Linux \u002F macOS \u002F Windows\n- **版本控制工具**: Git (用于克隆仓库)\n- **编程语言**: Python 3.8+ (用于后续运行链接中的具体项目)\n- **网络访问**: 能够访问 GitHub 及 arXiv 服务器（国内用户建议使用加速代理或镜像源）\n\n## 安装步骤\n\n本仓库无需通过包管理器安装，直接通过 Git 克隆即可获取所有资源信息。\n\n1. **克隆仓库**\n   ```bash\n   git clone https:\u002F\u002Fgithub.com\u002Ffahadshamshad\u002Fawesome-transformers-in-medical-imaging.git\n   cd awesome-transformers-in-medical-imaging\n   ```\n\n2. **查看内容**\n   打开 `README.md` 文件，浏览目录结构。\n   ```bash\n   cat README.md\n   ```\n   *注：国内网络环境下克隆可能较慢，建议配置 Git 镜像或使用 GitHub 加速服务。*\n\n## 基本使用\n\n本仓库按任务类型分类整理了相关论文与代码，使用方法如下：\n\n### 1. 浏览资源分类\n根据您的需求，在 `README.md` 中查找对应的章节，例如：\n- **Medical Image Segmentation**: 医学图像分割\n- **Medical Image Classification**: 医学图像分类\n- **Medical Image Reconstruction**: 医学图像重建\n- 其他类别包括 Registration, Synthesis, Detection 等。\n\n### 2. 访问论文与代码\n每个条目包含论文标题、作者、日期以及 PDF 和 GitHub 链接。\n- **阅读论文**: 点击 `[PDF]` 或 `[Paper]` 链接下载原文。\n- **复现代码**: 点击 `[Github]` 链接进入具体的项目仓库。\n\n### 3. 运行具体模型\n本仓库仅做索引，实际模型代码位于各子项目的独立仓库中。以 **TransUNet** 为例：\n1. 在列表中找到 `TransUNet` 条目。\n2. 复制其 GitHub 链接（例如 `https:\u002F\u002Fgithub.com\u002FBeckschen\u002FTransUNet`）。\n3. 克隆该子项目并遵循其独立的 `README.md` 进行依赖安装和训练。\n   ```bash\n   git clone https:\u002F\u002Fgithub.com\u002FBeckschen\u002FTransUNet\n   cd TransUNet\n   # 参考该项目文档安装依赖并运行\n   ```\n\n### 4. 
贡献资源\n如果您有优秀的开源工作希望加入，欢迎提交 Pull Request 更新列表信息。","某医疗 AI 初创公司的算法团队，正计划开发一款基于 Transformer 架构的脑部 MRI 肿瘤分割系统。\n\n### 没有 awesome-transformers-in-medical-imaging 时\n- 需要跨多个学术数据库手动检索最新论文，效率极低且容易遗漏关键文献。\n- 难以区分通用视觉 Transformer 与针对医学图像优化的变体，试错成本高。\n- 查找开源代码实现分散，经常遇到论文有但无代码的情况，复现困难。\n- 对领域内最新进展缺乏系统性了解，技术选型盲目，导致项目初期方向偏差。\n\n### 使用 awesome-transformers-in-medical-imaging 后\n- 直接通过 awesome-transformers-in-medical-imaging 按任务分类获取精选资源，快速定位相关论文。\n- 列表包含官方论文链接及对应的开源实现仓库，大幅降低代码复现门槛。\n- 依据综述文章梳理的技术脉络，能清晰对比不同模型在医学场景下的性能差异。\n- 定期更新机制确保团队始终掌握 Transformer 在医学影像领域的最新 SOTA 方案。\n\nawesome-transformers-in-medical-imaging 将碎片化的研究资料整合为结构化导航，显著缩短从理论调研到工程落地的周期。","https:\u002F\u002Foss.gittoolsai.com\u002Fimages\u002Ffahadshamshad_awesome-transformers-in-medical-imaging_3486c75c.png","fahadshamshad","Fahad Shamshad","https:\u002F\u002Foss.gittoolsai.com\u002Favatars\u002Ffahadshamshad_fcd5668d.jpg","Working in AI Security and LLMs.","MBZUAI",null,"fahadshamshad_","fahadshamshad.github.io","https:\u002F\u002Fgithub.com\u002Ffahadshamshad",1283,193,"2026-04-05T04:19:22",1,"未说明",{"notes":91,"python":89,"dependencies":92},"此仓库为医学影像 Transformer 相关论文及代码的资源列表，并非独立的可执行软件包。因此本仓库本身无特定的运行环境要求。如需使用其中的模型或代码，请前往各条目对应的 GitHub 仓库查看具体的安装与环境配置说明。",[],[53,26,14],[95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111],"transformers","vision-transformers","medical-imaging","medical-image-analysis","clinical-report","medical-report-generate","deep-neural-networks","medical-image-segmentation","medical-image-classification","medical-image-detection","medical-image-reconstruction","medical-image-synthesis","medical-image-registration","covid-19","awesome-list","brats2021","brain-tumor-segmentation","2026-03-27T02:49:30.150509","2026-04-06T08:48:27.111315",[115,120,125,130],{"id":116,"question_zh":117,"answer_zh":118,"source_url":119},2691,"如何提交关于无监督脑异常检测与分割的 Transformer 论文？","维护者回复：感谢您的告知，我们已将该项有趣的工作添加到列表中。","https:\u002F\u002Fgithub.com\u002Ffahadshamshad\u002Fawesome-transformers-in-medical-imaging\u002Fissues\u002F1",{"id":121,"question_zh":122,"answer_zh":123,"source_url":124},2692,"如何提交关于表面视觉 Transformer 用于皮层分析的研究？","维护者回复：您好，我们已将您的工作添加到列表中。谢谢。","https:\u002F\u002Fgithub.com\u002Ffahadshamshad\u002Fawesome-transformers-in-medical-imaging\u002Fissues\u002F3",{"id":126,"question_zh":127,"answer_zh":128,"source_url":129},2693,"如何提交医学 Transformer 目标检测相关的多篇论文？","虽本条 Issue 暂无直接回复，但根据同仓库其他 Issue（如#1、#3）的处理惯例，此类包含论文链接和代码地址的贡献请求通常会被维护者审核并添加到列表中。","https:\u002F\u002Fgithub.com\u002Ffahadshamshad\u002Fawesome-transformers-in-medical-imaging\u002Fissues\u002F6",{"id":131,"question_zh":132,"answer_zh":133,"source_url":134},2694,"是否可以分享自己整理的 Transformer 论文集合并请求收录？","维护者表示欢迎（回复：谢谢），用户可以分享相关资源库并请求维护者查看或添加链接。","https:\u002F\u002Fgithub.com\u002Ffahadshamshad\u002Fawesome-transformers-in-medical-imaging\u002Fissues\u002F8",[]]