@inproceedings{cadd943c142b4b818fb70a5e53f0798c,
  title     = {Brain Lesion Synthesis via Progressive Adversarial Variational Auto-Encoder},
  abstract  = {Laser interstitial thermal therapy (LITT) is a novel minimally invasive treatment that is used to ablate intracranial structures to treat mesial temporal lobe epilepsy (MTLE). Region of interest (ROI) segmentation before and after LITT would enable automated lesion quantification to objectively assess treatment efficacy. Deep learning techniques, such as convolutional neural networks (CNNs) are state-of-the-art solutions for ROI segmentation, but require large amounts of annotated data during the training. However, collecting large datasets from emerging treatments such as LITT is impractical. In this paper, we propose a progressive brain lesion synthesis framework (PAVAE) to expand both the quantity and diversity of the training dataset. Concretely, our framework consists of two sequential networks: a mask synthesis network and a mask-guided lesion synthesis network. To better employ extrinsic information to provide additional supervision during network training, we design a condition embedding block (CEB) and a mask embedding block (MEB) to encode inherent conditions of masks to the feature space. Finally, a segmentation network is trained using raw and synthetic lesion images to evaluate the effectiveness of the proposed framework. Experimental results show that our method can achieve realistic synthetic results and boost the performance of down-stream segmentation tasks above traditional data augmentation techniques.},
  author    = {Huo, Jiayu and Vakharia, Vejay and Wu, Chengyuan and Sharan, Ashwini and Ko, {Andrew L.} and Ourselin, Sebastien and Sparks, Rachel},
  note      = {Funding Information: Acknowledgement. This work was supported by Centre for Doctoral Training in Surgical and Interventional Engineering at King{\textquoteright}s College London. This research was funded in whole, or in part, by the Wellcome Trust [218380/Z/19/Z, WT203148/Z/16/Z]. For the purpose of open access, the author has applied a CC BY public copyright licence to any Author Accepted Manuscript version arising from this submission. This research was supported by the UK Research and Innovation London Medical Imaging & Artificial Intelligence Centre for Value Based Healthcare. The research was funded/supported by the National Institute for Health Research (NIHR) Biomedical Research Centre based at Guy{\textquoteright}s and St Thomas{\textquoteright} NHS Foundation Trust and King{\textquoteright}s College London and supported by the NIHR Clinical Research Facility (CRF) at Guy{\textquoteright}s and St Thomas{\textquoteright}. The views expressed are those of the author(s) and not necessarily those of the NHS, the NIHR or the Department of Health. Publisher Copyright: {\textcopyright} 2022, The Author(s), under exclusive license to Springer Nature Switzerland AG.},
  year      = {2022},
  month     = sep,
  day       = {18},
  doi       = {10.1007/978-3-031-16980-9_10},
  language  = {English},
  isbn      = {978-3-031-16979-3},
  series    = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
  publisher = {Springer},
  address   = {Cham},
  pages     = {101--111},
  editor    = {Zhao, Can and Svoboda, David and Wolterink, {Jelmer M.} and Escobar, Maria},
  booktitle = {Simulation and Synthesis in Medical Imaging - 7th International Workshop, {SASHIMI} 2022, Held in Conjunction with {MICCAI} 2022, Proceedings},
}