@article{johnson2015itk, author = {Johnson, Hans J. and Ibanez, Luis and McCormick, Matthew and {Insight Software Consortium}}, doi = {1-930934-15-7}, isbn = {978-1-930934-28-3}, journal = {ITK}, keywords = {Guide,Registration,Segmentation}, publisher = {Kitware, Inc.}, title = {{The ITK Software Guide Book 2: Design and Functionality}}, url = {https://itk.org/ITKSoftwareGuide/html/Book2/ITKSoftwareGuide-Book2.html}, year = {2017} }
@incollection{Johnson2014, abstract = {The Insight Toolkit (ITK) is an open-source software toolkit for performing registration and segmentation. Segmentation is the process of identifying and classifying data found in a digitally sampled representation. Typically the sampled representation is an image acquired from such medical instrumentation as CT or MRI scanners. Registration is the task of aligning or developing correspondences between data. For example, in the medical environment, a CT scan may be aligned with an MRI scan in order to combine the information contained in both. ITK is a cross-platform software. It uses a build environment known as CMake to manage platform-specific project generation and compilation process in a platform-independent way. ITK is implemented in C++. ITK's implementation style employs generic programming, which involves the use of templates to generate, at compile-time, code that can be applied generically to any class or data-type that supports the operations used by the template. The use of C++ templating means that the code is highly efficient and many issues are discovered at compile-time, rather than at run-time during program execution. It also means that many of ITK's algorithms can be applied to arbitrary spatial dimensions and pixel types. An automated wrapping system integrated with ITK generates an interface between C++ and a high-level programming language Python. This enables rapid prototyping and faster exploration of ideas by shortening the edit-compile-execute cycle. In addition to automated wrapping, the SimpleITK project provides a streamlined interface to ITK that is available for C++, Python, Java, CSharp, R, Tcl and Ruby. Developers from around the world can use, debug, maintain, and extend the software because ITK is an open-source project. ITK uses a model of software development known as Extreme Programming. Extreme Programming collapses the usual software development methodology into a simultaneous iterative process of design-implement-test-release. The key features of Extreme Programming are communication and testing. Communication among the members of the ITK community is what helps manage the rapid evolution of the software. Testing is what keeps the software stable. An extensive testing process supported by the system known as CDash measures the quality of ITK code on a daily basis. The ITK Testing Dashboard is updated continuously, reflecting the quality of the code at any moment. The most recent version of this document is available online at http://itk.org/ItkSoftwareGuide.pdf. This book is a guide to developing software with ITK; it is the first of two companion books. This book covers building and installation, general architecture and design, as well as the process of contributing in the ITK community. The second book covers detailed design and functionality for reading and writing images, filtering, registration, segmentation, and performing statistical analysis.}, author = {Johnson, Hans J. and Ibanez, Luis and McCormick, Matthew and {Insight Software Consortium}}, booktitle = {The ITK Software Guide}, doi = {1-930934-15-7}, isbn = {978-1-930934-28-3}, keywords = {Guide,Registration,Segmentation}, pages = {805}, title = {{The ITK Software Guide Book 2: Design and Functionality Fourth Edition Updated for ITK version 4.6}}, year = {2014} }
@book{johnson2013itk, abstract = {This enables rapid prototyping and faster exploration of ideas by shortening the edit-compile-execute cycle. In addition to automated wrapping, the SimpleITK project provides a streamlined interface to ITK that is available for C++, Python, Java, CSharp, R, Tcl and Ruby. Developers from around the world can use, debug, maintain, and extend the software because ITK is an open-source project. ITK uses a model of software development known as Extreme Programming. Extreme Programming collapses the usual software development methodology into a simultaneous iterative process of design-implement-test-release. The key features of Extreme Programming are communication and testing. Communication among the members of the ITK community is what helps manage the rapid evolution of the software. Testing is what keeps the software stable. An extensive testing process supported by the system known as CDash measures the quality of ITK code on a daily basis. The ITK Testing Dashboard is updated continuously, reflecting the quality of the code at any moment. This book is a guide to developing software with ITK. It covers building and installation, architecture and design, image analysis theory and its applications, as well as the process of contributing to the ITK community. The most recent version of this document is available at http://itk.org/ItkSoftwareGuide.pdf.}, author = {Johnson, Hans J. and McCormick, Matt and Ibanez, Luis and {Insight Software Consortium}}, title = {{The ITK Software Guide Third Edition - Updated for ITK version 4.5}}, url = {http://itk.org/ItkSoftwareGuide.pdf}, year = {2013} }
@book{johnson2015itk, abstract = {The Insight Toolkit (ITK) is an open-source software toolkit for performing registration and segmentation. Segmentation is the process of identifying and classifying data found in a digitally sampled representation. Typically the sampled representation is an image acquired from such medical instrumentation as CT or MRI scanners. Registration is the task of aligning or developing correspondences between data. For example, in the medical environment, a CT scan may be aligned with an MRI scan in order to combine the information contained in both.}, author = {Johnson, Hans J. and McCormick, Matthew M. and Ibanez, Luis}, booktitle = {Kitware, Inc. (January 2015)}, edition = {4.7}, editor = {Johnson, Hans J. and McCormick, Matthew M. and Ibanez, Luis}, isbn = {978-1930934276}, keywords = {Guide,Registration,Segmentation}, pages = {248}, publisher = {Kitware, Inc.}, title = {{The ITK Software Guide Book 1: Introduction and Development Guidelines Fourth Edition Updated for ITK version 4.7}}, url = {https://itk.org/}, year = {2015} }
@book{johnson2014itk, abstract = {The Insight Toolkit (ITK) is an open-source software toolkit for performing registration and segmentation. Segmentation is the process of identifying and classifying data found in a digitally sampled representation. Typically the sampled representation is an image acquired from such medical instrumentation as CT or MRI scanners. Registration is the task of aligning or developing correspondences between data. For example, in the medical environment, a CT scan may be aligned with an MRI scan in order to combine the information contained in both.}, author = {Johnson, Hans J. and McCormick, Matthew M. and Ibanez, Luis}, booktitle = {Kitware, Inc. (January 2015)}, edition = {4.7}, editor = {Johnson, Hans J. and McCormick, Matthew M. and Ibanez, Luis}, isbn = {978-1930934276}, title = {{The ITK Software Guide Book 1: Introduction and Development Guidelines Fourth Edition Updated for ITK version 4.7}}, url = {https://itk.org/}, year = {2015} }
@article{beare2011citk, author = {Beare, Richard and Micevski, Daniel and Share, Chris and Parkinson, Luke and Ward, Phil and Goscinski, Wojtek and Kuiper, Mike}, journal = {The Insight Journal}, title = {{CITK - an architecture and examples of CUDA enabled ITK filters, Release 0.00}}, year = {2011} }
@article{RN826, abstract = {Large-scale consortium efforts such as Enhancing NeuroImaging Genetics through Meta-Analysis (ENIGMA) and other collaborative efforts show that combining statistical data from multiple independent studies can boost statistical power and achieve more accurate estimates of effect sizes, contributing to more reliable and reproducible research. A meta-analysis would pool effects from studies conducted in a similar manner, yet to date, no such harmonized protocol exists for resting state fMRI (rsfMRI) data. Here, we propose an initial pipeline for multi-site rsfMRI analysis to allow research groups around the world to analyze scans in a harmonized way, and to perform coordinated statistical tests. The challenge lies in the fact that resting state fMRI measurements collected by researchers over the last decade vary widely, with variable quality and differing spatial or temporal signal-to-noise ratio (tSNR). An effective harmonization must provide optimal measures for all quality data. Here we used rsfMRI data from twenty-two independent studies with approximately fifty corresponding T1-weighted and rsfMRI datasets each, to (A) review and aggregate the state of existing rsfMRI data, (B) demonstrate utility of principal component analysis (PCA)-based denoising and (C) develop a deformable ENIGMA EPI template based on the representative anatomy that incorporates spatial distortion patterns from various protocols and populations.}, author = {Adhikari, Bhim M. and Jahanshad, Neda and Shukla, Dinesh and Turner, Jessica and Grotegerd, Dominik and Dannlowski, Udo and Kugel, Harald and Engelen, Jennifer and Dietsche, Bruno and Krug, Axel and Kircher, Tilo and Fieremans, Els and Veraart, Jelle and Novikov, Dmitry S. and Boedhoe, Premika S.W. and van der Werf, Ysbrand D. and van den Heuvel, Odile A. and Ipser, Jonathan and Uhlmann, Anne and Stein, Dan J. and Dickie, Erin and Voineskos, Aristotle N. and Malhotra, Anil K. and Pizzagalli, Fabrizio and Calhoun, Vince D. and Waller, Lea and Veer, Ilja M. and Walter, Henrik and Buchanan, Robert W. and Glahn, David C. and Hong, L. Elliot and Thompson, Paul M.
and Kochunov, Peter}, doi = {10.1007/s11682-018-9941-x}, issn = {19317565}, journal = {Brain Imaging and Behavior}, keywords = {ENIGMA EPI template,Large multi-site studies,Processing pipelines}, number = {5}, pages = {1453--1467}, title = {{A resting state fMRI analysis pipeline for pooling inference across diverse cohorts: an ENIGMA rs-fMRI protocol}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {13}, year = {2019} } @article{RN973, abstract = {The choice of a reference image typically influences the results of deformable image registration, thereby making it asymmetric. This is a consequence of a spatially non-uniform weighting in the cost function integral that leads to general registration inaccuracy. The inhomogeneous integral measure - which is the local volume change in the transformation, thus varying through the course of the registration - causes image regions to contribute differently to the objective function. More importantly, the optimization algorithm is allowed to minimize the cost function by manipulating the volume change, instead of aligning the images. The approaches that restore symmetry to deformable registration successfully achieve inverse-consistency, but do not eliminate the regional bias that is the source of the error. In this work, we address the root of the problem: the non-uniformity of the cost function integral. We introduce a new quasi-volume-preserving constraint that allows for volume change only in areas with well-matching image intensities, and show that such a constraint puts a bound on the error arising from spatial non-uniformity. We demonstrate the advantages of adding the proposed constraint to standard (asymmetric and symmetrized) demons and diffeomorphic demons algorithms through experiments on synthetic images, and real X-ray and 2D/3D brain MRI data. Specifically, the results show that our approach leads to image alignment with more accurate matching of manually defined neuroanatomical structures, better tradeoff between image intensity matching and registration-induced distortion, improved native symmetry, and lower susceptibility to local optima. In summary, the inclusion of this space- and time-varying constraint leads to better image registration along every dimension that we have measured it.}, author = {Aganj, Iman and Reuter, Martin and Sabuncu, Mert R. and Fischl, Bruce}, doi = {10.1016/j.neuroimage.2014.10.059}, issn = {10959572}, journal = {NeuroImage}, keywords = {Deformable image registration,Integral non-uniformity,Inverse-consistency,Symmetry,Volume-preserving constraints}, pages = {238--251}, title = {{Avoiding symmetry-breaking spatial non-uniformity in deformable image registration via a quasi-volume-preserving constraint}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84920130398{\&}doi=10.1016{\%}2Fj.neuroimage.2014.10.059{\&}partnerID=40{\&}md5=537fee2b2ea9e7af63a87f230be26ded}, volume = {106}, year = {2015} } @article{RN788, abstract = {Mammography is the gold standard screening technique in breast cancer, but it has some limitations for women with dense breasts. In such cases, sonography is usually recommended as an additional imaging technique. A traditional sonogram produces a two-dimensional (2D) visualization of the breast and is highly operator dependent. Automated breast ultrasound (ABUS) has also been proposed to produce a full 3D scan of the breast automatically with reduced operator dependency, facilitating double reading and comparison with past exams. 
When using ABUS, lesion segmentation and tracking changes over time are challenging tasks, as the three-dimensional (3D) nature of the images makes the analysis difficult and tedious for radiologists. The goal of this work is to develop a semi-automatic framework for breast lesion segmentation in ABUS volumes which is based on the Watershed algorithm. The effect of different de-noising methods on segmentation is studied showing a significant impact (p {\textless} 0.05) on the performance using a dataset of 28 temporal pairs resulting in a total of 56 ABUS volumes. The volumetric analysis is also used to evaluate the performance of the developed framework. A mean Dice Similarity Coefficient of 0.69 ± 0.11 with a mean False Positive ratio of 0.35 ± 0.14 has been obtained. The Pearson correlation coefficient between the segmented volumes and the corresponding ground truth volumes is r2 = 0.960 (p = 0.05). Similar analysis, performed on 28 temporal (prior and current) pairs, resulted in a good correlation coefficient r2 = 0.967 (p {\textless} 0.05) for prior and r2 = 0.956 (p {\textless} 0.05) for current cases. The developed framework showed prospects to help radiologists to perform an assessment of ABUS lesion volumes, as well as to quantify volumetric changes during lesion diagnosis and follow-up.}, author = {Agarwal, Richa and Diaz, Oliver and Llad{\'{o}}, Xavier and Gubern-M{\'{e}}rida, Albert and Vilanova, Joan C. and Mart{\'{i}}, Robert}, doi = {10.1177/0161734617737733}, issn = {01617346}, journal = {Ultrasonic Imaging}, keywords = {ABUS (Automated Breast Ultrasound),breast cancer,lesion segmentation,temporal,volumetric analysis,watershed}, number = {2}, pages = {97--112}, title = {{Lesion Segmentation in Automated 3D Breast Ultrasound: Volumetric Analysis}}, type = {Journal Article}, volume = {40}, year = {2018} }
@article{RN971, abstract = {Medical image registration is the process of mapping two or more medical images into a single more informative image for the purpose of receiving precise and complementary information. The precise mapping of medical images obtained in different time-frames and by the same or different modalities is now possible due to the availability of a large number of registration techniques. The purpose of this paper is to present and analyse intrinsic registration techniques for medical imaging in a comprehensive manner. Our approach of analysis is unique from already published work because we have performed a detailed investigation of each registration technique, and analyse similarity measures and assessments according to various parameters. The knowledge on the work that has been developed in the area is presented in a compact form.
This work is expected to provide a useful platform for the researchers in the field of medical image registration in general and in intrinsic registration in particular.}, author = {Alam, Fakhre and Rahman, Sami Ur}, issn = {18119387}, journal = {Journal of Postgraduate Medical Institute}, keywords = {Image guided surgery,Intrinsic registration,Medical image registration,Medical imaging modalities}, number = {2}, pages = {119--132}, title = {{Intrinsic registration techniques for medical images: A state-of-the-art review}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84964707329{\&}partnerID=40{\&}md5=77f7b0fdb11032a6d00501e3da414f03}, volume = {30}, year = {2016} } @article{RN830, abstract = {Background: Whole-body diffusion weighted imaging (WB-DWI) has proven value to detect multiple myeloma (MM) lesions. However, the large volume of imaging data and the presence of numerous lesions makes the reading process challenging. The aim of the current study was to develop a semi-automatic lesion segmentation algorithm for WB-DWI images in MM patients and to evaluate this smart-algorithm (SA) performance by comparing it to the manual segmentations performed by radiologists. Methods: An atlas-based segmentation was developed to remove the high-signal intensity normal tissues on WB-DWI and to restrict the lesion area to the skeleton. Then, an outlier threshold-based segmentation was applied to WB-DWI images, and the segmented area's signal intensity was compared to the average signal intensity of a low-fat muscle on T1-weighted images. This method was validated in 22 whole-body DWI images of patients diagnosed with MM. Dice similarity coefficient (DSC), sensitivity and positive predictive value (PPV) were computed to evaluate the SA performance against the gold standard (GS) and to compare with the radiologists. A non-parametric Wilcoxon test was also performed. Apparent diffusion coefficient (ADC) histogram metrics and lesion volume were extracted for the GS segmentation and for the correctly identified lesions by SA and their correlation was assessed. Results: The mean inter-radiologists DSC was 0.323 ± 0.268. The SA vs GS achieved a DSC of 0.274 ± 0.227, sensitivity of 0.764 ± 0.276 and PPV 0.217 ± 0.207. Its distribution was not significantly different from the mean DSC of inter-radiologist segmentation (p = 0.108, Wilcoxon test). ADC and lesion volume intraclass correlation coefficient (ICC) of the GS and of the correctly identified lesions by the SA was 0.996 for the median and 0.894 for the lesion volume (p {\textless} 0.001). The duration of the lesion volume segmentation by the SA was, on average, 10.22 ± 0.86 min, per patient. Conclusions: The SA provides equally reproducible segmentation results when compared to the manual segmentation of radiologists. Thus, the proposed method offers robust and efficient segmentation of MM lesions on WB-DWI. This method may aid accurate assessment of tumor burden and therefore provide insights to treatment response assessment.}, author = {Almeida, S{\'{i}}lvia D. and Santinha, Joa{\~{o}} and Oliveira, Francisco P.M. 
and Ip, Joana and Lisitskaya, Maria and Louren{\c{c}}o, Jo{\~{a}}o and Uysal, Aycan and Matos, Celso and Jo{\~{a}}o, Cristina and Papanikolaou, Nikolaos}, doi = {10.1186/s40644-020-0286-5}, issn = {14707330}, journal = {Cancer Imaging}, keywords = {Atlas-based segmentation,Diffusion weighted imaging,Multiple myeloma,Semi-automatic segmentation,Total lesion burden}, number = {1}, title = {{Quantification of tumor burden in multiple myeloma by atlas-based semi-automatic segmentation of WB-DWI}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85077786257{\&}doi=10.1186{\%}2Fs40644-020-0286-5{\&}partnerID=40{\&}md5=c2b44a405ae7ba7bfa48478311ee70a6}, volume = {20}, year = {2020} }
@inproceedings{RN969, abstract = {The aim of this research is to compare some image segmentation methods for lungs based on performance evaluation parameters (Mean Square Error (MSE) and Peak Signal to Noise Ratio (PSNR)). In this study, the methods compared were connected threshold, neighborhood connected, and the threshold level set segmentation on the image of the lungs. These three methods require one important parameter, i.e. the threshold. The threshold interval was obtained from the histogram of the original image. The software used to segment the image here was InsightToolkit-4.7.0 (ITK). This research used 5 lung images to be analyzed. Then, the results were compared using the performance evaluation parameters determined by using MATLAB. The segmentation method is said to have a good quality if it has the smallest MSE value and the highest PSNR. The results show that four sample images match the criteria of connected threshold, while one sample refers to the threshold level set segmentation. Therefore, it can be concluded that the connected threshold method is better than the other two methods for these cases.}, author = {Amanda, A. R. and Widita, R.}, booktitle = {Journal of Physics: Conference Series}, doi = {10.1088/1742-6596/694/1/012048}, editor = {Haryanto, F and Ng, K H and Peralta, A P and Arimura, H and Viridi, S}, isbn = {17426588 (ISSN)}, issn = {17426596}, number = {1}, publisher = {Institute of Physics Publishing}, title = {{Comparison of image segmentation of lungs using methods: Connected threshold, neighborhood connected, and threshold level set segmentation}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84971601490{\&}doi=10.1088{\%}2F1742-6596{\%}2F694{\%}2F1{\%}2F012048{\&}partnerID=40{\&}md5=211e94ed993c83699b57654dc8bcf38d}, volume = {694}, year = {2016} }
@article{RN886, abstract = {Objective: To assess the accuracy and precision of segmentation of the maxillary sinus in MR images to evaluate the potential usefulness of this modality in longitudinal studies of sinus development. Methods: A total of 15 healthy subjects who had been both craniofacial CT and MR scanned were included and the 30 maxillary sinus volumes were evaluated using segmentation. Two of the authors did segmentation of MRI and one of these authors did double segmentation. Agreement in results between CT and MRI as well as inter- and intraexaminer errors were evaluated by statistical and three-dimensional analysis. Results: The intraclass correlation coefficient for volume measurements for both method error, inter- and intraexaminer agreement were {\textgreater} 0.9 [maximal 95{\%} confidence interval of 0.989-0.997, p {\textless} 0.001] and the limit of agreement for all parameters were {\textless} 5.1{\%}. Segmentation errors were quantified in terms of overlap [Dice Coefficient (DICE) {\textgreater} 0.9 = excellent agreement] and border distance [95{\%} percentile Hausdorff Distance (HD) {\textless} 2 mm = acceptable agreement]. The results were replicable and not influenced by systematic errors. Conclusion: We found a high accuracy and precision of manual segmentation of the maxillary sinus in MR images. The largest mean errors were found close to the orbit and the teeth. Advances in knowledge: MRI can be used for 3D models of the paranasal sinuses with equally good results as CT and allows longitudinal follow-up of sinus development.}, author = {Andersen, Tobias N. and Darvann, Tron A. and Murakami, Shumei and Larsen, Per and Senda, Yurie and Bilde, Anders and Buchwald, Christian V. and Kreiborg, Sven}, doi = {10.1259/bjr.20170663}, issn = {00071285}, journal = {British Journal of Radiology}, number = {1085}, title = {{Accuracy and precision of manual segmentation of the maxillary sinus in MR images - A method study}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85045980566{\&}doi=10.1259{\%}2Fbjr.20170663{\&}partnerID=40{\&}md5=fb09baba9be82130126233860365e1f2}, volume = {91}, year = {2018} }
@inproceedings{RN847, abstract = {Due to a lack of ubiquitous tools for volume data visualization, 3D rendering of volumetric content is shared and distributed as 2D media (video and static images). This work shows how using open web technologies (HTML5, JavaScript, WebGL and SVG), high quality volume rendering is achievable in an interactive manner with any WebGL-enabled device. In the web platform, real-time volume rendering algorithms are constrained to small datasets. This work presents a WebGL progressive ray-casting volume rendering approach that allows the interactive visualization of larger datasets with a higher rendering quality. This approach is better suited for devices with low compute capacity such as tablets and mobile devices. As a validation case, the presented method is used in an industrial quality inspection use case to visually assess the air void distribution of a plastic injection mould component in the web browser.}, author = {Arbelaiz, Ander and Moreno, Aitor and Barandiaran, I{\~{n}}igo and Garc{\'{i}}a-Alonso, Alejandro}, booktitle = {Proceedings - Web3D 2019: 24th International ACM Conference on 3D Web Technology}, doi = {10.1145/3329714.3338131}, editor = {Spencer, S N}, isbn = {9781450367981}, keywords = {Industrial application,Progressive rendering,Quality control,Ray-casting,Ubiquitous platforms,Void segmentation,Volume rendering,Web,WebGL}, publisher = {Association for Computing Machinery, Inc}, title = {{Progressive ray-casting volume rendering with WebGL for visual assessment of air void distribution in quality control}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85071158969{\&}doi=10.1145{\%}2F3329714.3338131{\&}partnerID=40{\&}md5=2a6a9a2b43279419c898a8f0a38d80a6}, year = {2019} }
@article{RN819, abstract = {Many types of medical and scientific experiments acquire raw data in the form of images. Various forms of image processing and image analysis are used to transform the raw image data into quantitative measures that are the basis of subsequent statistical analysis. In this article we describe the SimpleITK R package. SimpleITK is a simplified interface to the Insight Segmentation and Registration Toolkit (ITK).
ITK is an open source C++ toolkit that has been actively developed over the past 18 years and is widely used by the medical image analysis community. SimpleITK provides packages for many interpreter environments, including R. Currently, it includes several hundred classes for image analysis including a wide range of image input and output, filtering operations, and higher level components for segmentation and registration. Using SimpleITK, development of complex combinations of image and statistical analysis procedures is feasible. This article includes several examples of computational image analysis tasks implemented using SimpleITK, including spherical marker localization, multi-modal image registration, segmentation evaluation, and cell image analysis.}, author = {Beare, Richard and Lowekamp, Bradley and Yaniv, Ziv}, doi = {10.18637/jss.v086.i08}, issn = {15487660}, journal = {Journal of Statistical Software}, keywords = {Image processing,Image registration,Image segmentation,Medical imaging,R}, number = {8}, pages = {1--35}, title = {{Image segmentation, registration and characterization in R with SimpleITK}}, type = {Journal Article}, volume = {86}, year = {2018} }
@article{RN816, abstract = {Accurate measurement of knee alignment, quantified by the hip-knee-ankle (HKA) angle (varus-valgus), serves as an essential biomarker in the diagnosis of various orthopaedic conditions and selection of appropriate therapies. Such angular deformities are assessed from standing X-ray panoramas. However, the limited field-of-view of traditional X-ray imaging systems necessitates the acquisition of several sector images to capture an individual's standing posture, and their subsequent 'stitching' to reconstruct a panoramic image. Such panoramas are typically constructed manually by an X-ray imaging technician, often using various external markers attached to the individual's clothing and visible in two adjacent sector images. To eliminate human error, user-induced variability, improve consistency and reproducibility, and reduce the time associated with the traditional manual 'stitching' protocol, here we propose an automatic panorama construction method that only relies on anatomical features reliably detected in the images, eliminating the need for any external markers or manual input from the technician. The method first performs a rough segmentation of the femur and the tibia, then the sector images are registered by evaluating a distance metric between the corresponding bones along their medial edge. The identified translations are then used to generate the standing panorama image. The method was evaluated on 95 patient image datasets from a database of X-ray images acquired across 10 clinical sites as part of the screening process for a multi-site clinical trial. The panorama reconstruction parameters yielded by the proposed method were compared to those used for the manual panorama construction, which served as gold-standard. The horizontal translation differences were 0.43 ± 1.95 mm and 0.26 ± 1.43 mm for the femur and tibia respectively, while the vertical translation differences were 3.76 ± 22.35 mm and 1.85 ± 6.79 mm for the femur and tibia, respectively. Our results showed no statistically significant differences between the HKA angles measured using the automated vs. the manually generated panoramas, and also led to similar decisions with regards to the patient inclusion/exclusion in the clinical trial. Thus, the proposed method was shown to provide comparable performance to manual panorama construction, with increased efficiency, consistency and robustness.}, author = {Ben-Zikri, Yehuda K. and Yaniv, Ziv R. and Baum, Karl and Linte, Cristian A.}, doi = {10.1080/21681163.2018.1537859}, issn = {21681171}, journal = {Computer Methods in Biomechanics and Biomedical Engineering: Imaging and Visualization}, keywords = {Long-limb X-ray,axial deformity at the knee,hip-knee-ankle angle,image registration,panorama reconstruction,segmentation}, number = {4}, pages = {464--478}, title = {{A marker-free registration method for standing X-ray panorama reconstruction for hip-knee-ankle axis deformity assessment}}, type = {Journal Article}, volume = {7}, year = {2019} }
@inproceedings{RN912, abstract = {A large diversity of image registration methodologies has emerged from the research community. The scattering of methods over toolboxes impedes rigorous comparison to select the appropriate method for a given application. Toolboxes typically tailor their implementations to a mathematical registration paradigm, which makes internal functionality nonexchangeable. Subsequently, this forms a barrier for adoption of registration technology in the clinic. We therefore propose a unifying, role-based software design that can integrate a broad range of functional registration components. These components can be configured into an algorithmic network via a single high-level user interface. A generic component handshake mechanism provides users feedback on incompatibilities. We demonstrate the viability of our design by incorporating two paradigms from different code bases. The implementation is done in C++ and is available as open source. The progress of embedding more paradigms can be followed via https://github.com/kaspermarstal/SuperElastix.}, author = {Berendsen, Floris F. and Marstal, Kasper and Klein, Stefan and Staring, Marius}, booktitle = {IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops}, doi = {10.1109/CVPRW.2016.69}, isbn = {9781467388504}, issn = {21607516}, pages = {498--506}, publisher = {IEEE Computer Society}, title = {{The Design of SuperElastix - A Unifying Framework for a Wide Range of Image Registration Methodologies}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85010223955{\&}doi=10.1109{\%}2FCVPRW.2016.69{\&}partnerID=40{\&}md5=dc7baf79ffe3f2bf7316f023fd6aa7a1}, year = {2016} }
@article{RN921, abstract = {Describing and analyzing heart multiphysics requires the acquisition and fusion of multisensor cardiac images. Multisensor image fusion enables a combined analysis of these heterogeneous modalities. We propose to register intra-patient multiview 2D+t ultrasound (US) images with multiview late gadolinium-enhanced (LGE) images acquired during cardiac magnetic resonance imaging (MRI), in order to fuse mechanical and tissue state information. The proposed procedure registers both US and LGE to cine MRI. The correction of slice misalignment and the rigid registration of multiview LGE and cine MRI are studied, to select the most appropriate similarity measure. It showed that mutual information performs the best for LGE slice misalignment correction and for LGE and cine registration. Concerning US registration, dynamic endocardial contours resulting from speckle tracking echocardiography were exploited in a geometry-based dynamic registration.
We propose the use of an adapted dynamic time warping procedure to synchronize cardiac dynamics in multiview US and cine MRI. The registration of US and LGE MRI was evaluated on a dataset of patients with hypertrophic cardiomyopathy. A visual assessment of 330 left ventricular regions from US images of 28 patients resulted in 92.7{\%} of regions successfully aligned with cardiac structures in LGE. Successfully-aligned regions were then used to evaluate the abilities of strain indicators to predict the presence of fibrosis. Longitudinal peak-strain and peak-delay of aligned left ventricular regions were computed from corresponding regional strain curves from US. The Mann-Withney test proved that the expected values of these indicators change between the populations of regions with and without fibrosis (p {\textless} 0.01). ROC curves otherwise proved that the presence of fibrosis is one factor amongst others which modifies longitudinal peak-strain and peak-delay.}, author = {Betancur, Juli{\'{a}}n and Simon, Antoine and Halbert, Edgar and Tavard, Fran{\c{c}}ois and Carr{\'{e}}, Fran{\c{c}}ois and Hern{\'{a}}ndez, Alfredo and Donal, Erwan and Schnell, Fr{\'{e}}d{\'{e}}ric and Garreau, Mireille}, doi = {10.1016/j.media.2015.10.010}, issn = {13618423}, journal = {Medical Image Analysis}, keywords = {Cardiac ultrasound,Hypertrophic cardiomyopathy,Late gadolinium-enhanced magnetic resonance,Multimodal image registration,Speckle tracking echocardiography}, pages = {13--21}, title = {{Registration of dynamic multiview 2D ultrasound and late gadolinium enhanced images of the heart: Application to hypertrophic cardiomyopathy characterization}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84948142529{\&}doi=10.1016{\%}2Fj.media.2015.10.010{\&}partnerID=40{\&}md5=7d93ff8e0d80ef9b1730c036b8529e1d}, volume = {28}, year = {2016} } @inproceedings{RN943, abstract = {Voice hoarseness can have various reasons, one of them is a change of the vocal fold mucus. This change can be examined with micro endoscopes. Cell detection in these images is a difficult task, due to bad image quality, caused by noise and illumination variations. In previous works, it was observed that the repetitive pattern of the cell walls cause an elliptical shape in the Fourier domain [1, 2]. A manual segmentation and back transformation of this shape results in filtered images, where the cell detection is much easier [3]. The goal of this work is to automatically segment the elliptical shape in Fourier domain. Two different approaches are developed to get a suitable band-pass filter: a thresholding and an active contour method. 
After the band-pass filter is applied, the achieved results are superior to the manual segmentation case.}, author = {Bier, Bastian and Mualla, Firas and Steidl, Stefan and Bohr, Christopher and Neumann, Helmut and Maier, Andreas and Hornegger, Joachim}, booktitle = {Informatik aktuell}, doi = {10.1007/978-3-662-46224-9_71}, editor = {Deserno, T M and Tolxdorff, T and Handels, H and Meinzer, H P}, isbn = {9783662462232}, issn = {1431472X}, pages = {413--418}, publisher = {Kluwer Academic Publishers}, title = {{Band-pass filter design by segmentation in frequency domain for detection of epithelial cells in endomicroscope images}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85012231802{\&}doi=10.1007{\%}2F978-3-662-46224-9{\_}71{\&}partnerID=40{\&}md5=a2320e0e42ae26842cd9d13f26bc03b8}, volume = {0}, year = {2015} } @inproceedings{RN970, abstract = {Image processing is important in diagnosing diseases or damages of human organs. One of the important stages of image processing is segmentation process. Segmentation is a separation process of the image into regions of certain similar characteristics. It is used to simplify the image to make an analysis easier. The case raised in this study is image segmentation of bones. Bone's image segmentation is a way to get bone dimensions, which is needed in order to make prosthesis that is used to treat broken or cracked bones. Segmentation methods chosen in this study are fast marching and geodesic active contours. This study uses ITK (Insight Segmentation and Registration Toolkit) software. The success of the segmentation was then determined by calculating its accuracy, sensitivity, and specificity. Based on the results, the Active Contours method has slightly higher accuracy and sensitivity values than the fast marching method. As for the value of specificity, fast marching has produced three image results that have higher specificity values compared to those of geodesic active contour's. The result also indicates that both methods have succeeded in performing bone's image segmentation. Overall, geodesic active contours method is quite better than fast marching in segmenting bone images.}, author = {Bilqis, A. and Widita, R.}, booktitle = {Journal of Physics: Conference Series}, doi = {10.1088/1742-6596/694/1/012044}, editor = {Haryanto, F and Ng, K H and Peralta, A P and Arimura, H and Viridi, S}, isbn = {17426588 (ISSN)}, issn = {17426596}, number = {1}, publisher = {Institute of Physics Publishing}, title = {{Comparison of segmentation using fast marching and geodesic active contours methods for bone}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84971658619{\&}doi=10.1088{\%}2F1742-6596{\%}2F694{\%}2F1{\%}2F012044{\&}partnerID=40{\&}md5=5864b0c4fb7c08baa7e2a09f54dd0b9e}, volume = {694}, year = {2016} } @inproceedings{RN974, abstract = {Human brain is a set of four communicating network of ventricles heaving with cerebrospinal fluid (CSF) which is located inside the brain parenchyma. An efficient segmentation of cerebral lateral ventricles one in each hemisphere can support the study of efficient pathologies for successful conclusion of various diseases. In this paper, an efficient and fast energy optimised technique for volumetric segmentation of lateral ventricles from MR images of human brain is proposed which is based on geodesic active contours using level set method. The proposed approach consists of mainly four main stages: 1. 
Preprocessing stage, 2. Presegmentation stage, 3. Contour Evolution with Energy optimisation stage, 4. Termination stage. Experiments on multislice MRI data obtained dice coefficient of 0.955, jaccard coefficient of 0.915 and other surface distance measures demonstrate the advantages of the proposed approach in both accuracy and efficiency.}, author = {Biswas, Ankur and Bhattacharya, P. and Maity, S. P.}, booktitle = {Procedia Computer Science}, doi = {10.1016/j.procs.2018.07.084}, editor = {Zoppi, M and Muthuswamy, S}, isbn = {18770509 (ISSN)}, issn = {18770509}, keywords = {Geodesic active contour,Lateral Ventricle,MR Images,Volumetric,level set methods}, pages = {561--568}, publisher = {Elsevier B.V.}, title = {{An efficient volumetric segmentation of cerebral lateral ventricles}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85051328846{\&}doi=10.1016{\%}2Fj.procs.2018.07.084{\&}partnerID=40{\&}md5=e21c6be1fb5f5e12be609a506f661f8b}, volume = {133}, year = {2018} } @incollection{RN975, abstract = {An efficient Segmentation of lateral ventricles plays a vital role in quantitatively analyzing the global and regional information in magnetic resonance imaging (MRI) of human brain. In this paper, a semi automatic segmentation methodology to support the study of efficient pathologies of the lateral ventricles along with white matter and gray matter of human brain is proposed. The segmentation is executed using an optimal geometric active contour with level set methods. A nominal anatomical knowledge is incorporated into the methodology in order to choose the most probable surfaces of the lateral ventricles of human brain, even if they are disconnected, and to eliminate addition of non ventricle cerebrospinal fluid (CSF) regions. The proposed segmentation method is applied to multislice MRI data and compared with region growing algorithms. The results Dice similarity coefficient 0.955, Jaccard similarity coefficient 0.815 demonstrates the reliability and efficiency.}, author = {Biswas, Ankur and Maity, Santi P. and Bhattacharya, Paritosh}, booktitle = {Communications in Computer and Information Science}, doi = {10.1007/978-981-13-1343-1_53}, editor = {Mandal, J K and Sinha, D}, isbn = {9789811313424}, issn = {18650929}, keywords = {Dice similarity coefficient,Geometric active contour,Jaccard similarity coefficient,Level set,Magnetic resonance imaging (MRI)}, pages = {646--657}, publisher = {Springer Verlag}, title = {{Optimal Geometric Active Contours: Application to Human Brain Segmentation}}, type = {Serial}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85053181661{\&}doi=10.1007{\%}2F978-981-13-1343-1{\_}53{\&}partnerID=40{\&}md5=65a97c1936282996fbe6ce8dfeb09631}, volume = {836}, year = {2018} } @incollection{RN968, abstract = {In many scientific research fields, Matlab has been established as de facto tool for application design. This approach offers multiple advantages such as rapid deployment prototyping and the use of high performance linear algebra, among others. However, the applications developed are highly dependent of the Matlab runtime, limiting the deployment in heterogeneous platforms. In this paper we present the migration of a Matlab-implemented application to the C++ programming language, allowing the parallelization in GPUs. In particular, we have chosen RUMBA-SD, a spherical deconvolution algorithm, which estimates the intravoxel white-matter fiber orientations from diffusion MRI data. 
We describe the methodology used along with the tools and libraries leveraged during the translation task of such application. To demonstrate the benefits of the migration process, we perform a series of experiments using different high performance computing heterogeneous platforms and linear algebra libraries. This work aims to be a guide for future developments that are implemented out of Matlab. The results show that the C++ version attains, on average, a speedup of 8× over the Matlab one.}, author = {Blas, Javier Garcia and Dolz, Manuel F. and {Daniel Garcia}, J. and Carretero, Jesus and Daducci, Alessandro and Aleman, Yasser and Canales-Rodriguez, Erick Jorge}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, doi = {10.1007/978-3-319-49583-5_49}, editor = {Carretero, J and Nakano, K and Ko, R K L and Mueller, P and Garcia-Blas, J}, isbn = {9783319495828}, issn = {16113349}, keywords = {Linear algebra,Magnetic resonance imaging,Matlab}, pages = {630--643}, publisher = {Springer Verlag}, title = {{Porting Matlab applications to high-performance C++ codes: CPU/GPU-accelerated spherical deconvolution of diffusion MRI data}}, type = {Serial}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85007236124{\&}doi=10.1007{\%}2F978-3-319-49583-5{\_}49{\&}partnerID=40{\&}md5=aa237e3a3fce5dcf7978db850e1b2615}, volume = {10048 LNCS}, year = {2016} } @article{RN832, abstract = {Transparent research in musculoskeletal imaging is fundamental to reliably investigate diseases such as knee osteoarthritis (OA), a chronic disease impairing femoral knee cartilage. To study cartilage degeneration, researchers have developed algorithms to segment femoral knee cartilage from magnetic resonance (MR) images and to measure cartilage morphology and relaxometry. The majority of these algorithms are not publicly available or require advanced programming skills to be compiled and run. However, to accelerate discoveries and findings, it is crucial to have open and reproducible workflows. We present pyKNEEr, a framework for open and reproducible research on femoral knee cartilage from MR images. pyKNEEr is written in python, uses Jupyter notebook as a user interface, and is available on GitHub with a GNU GPLv3 license. It is composed of three modules: 1) image preprocessing to standardize spatial and intensity characteristics; 2) femoral knee cartilage segmentation for intersubject, multimodal, and longitudinal acquisitions; and 3) analysis of cartilage morphology and relaxometry. Each module contains one or more Jupyter notebooks with narrative, code, visualizations, and dependencies to reproduce computational environments. pyKNEEr facilitates transparent image-based research of femoral knee cartilage because of its ease of installation and use, and its versatility for publication and sharing among researchers. Finally, due to its modular structure, pyKNEEr favors code extension and algorithm comparison. We tested our reproducible workflows with experiments that also constitute an example of transparent research with pyKNEEr, and we compared pyKNEEr performances to existing algorithms in literature review visualizations. We provide links to executed notebooks and executable environments for immediate reproducibility of our findings.}, author = {Bonaretti, Serena and Gold, Garry E. 
and Beaupre, Gary S.}, doi = {10.1371/journal.pone.0226501}, issn = {19326203}, journal = {PLoS ONE}, number = {1}, title = {{PyKNEEr: An image analysis workflow for open and reproducible research on femoral knee cartilage}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85078319647{\&}doi=10.1371{\%}2Fjournal.pone.0226501{\&}partnerID=40{\&}md5=72a88df7ae12079c45940349b7f58af9}, volume = {15}, year = {2020} } @article{RN865, abstract = {Purpose: To investigate a novel alternative diffusion-weighted imaging (DWI) approach using oscillating gradients preparation (OGSE) to obtain much shorter effective diffusion times ($\Delta$eff) for tumor response monitoring by apparent diffusion coefficient (ADC) mapping in a glioblastoma mouse model. Methods and Materials: Twenty-four BALB/c nude mice inoculated with U87 glioblastoma cells were randomized into a control group and an irradiation group, which underwent a 15-day fractioned radiation therapy (RT) course with 2 Gy/d. Therapy response was assessed by mapping of ADCs at 6 time points using an in-house implementation of a cos-OGSE DWI sequence with $\Delta$eff = 1.25 ms and compared with a standard pulsed gradient DWI protocol (PGSE) with typical clinical diffusion time $\Delta$eff = 18 ms. Longitudinal ADC changes in tumor and contralateral white matter (WM) were statistically assessed using repeated-measures analysis of variance and post hoc (Sidak) testing. Results: On short $\Delta$eff OGSE maps tumor ADC was generally 30{\%}-50{\%} higher than in surrounding WM. Areas correlated well with histology. Tumor identification was generally more difficult on PGSE maps owing to nonsignificant WM/tumor contrast. During RT, OGSE maps also showed significant tumor ADC increase (approximately 15{\%}) in response to radiation, consistently seen after 14-Gy RT dose. The clinical reference (PGSE) showed lower sensitivity to radiation changes, and no significant response across the radiation group and time course could be detected. Conclusion: Our short $\Delta$eff DWI method using OGSE better reflected histologically defined tumor areas and enabled more consistent and earlier detection of microstructural radiation changes than conventional methods. Oscillating gradients preparation offers significant potential as a robust microstructural RT response biomarker, potentially helping to shift important therapy decisions to earlier stages in the RT time course.}, author = {Bongers, Andre and Hau, Eric and Shen, Han}, doi = {10.1016/j.ijrobp.2017.12.280}, issn = {1879355X}, journal = {International Journal of Radiation Oncology Biology Physics}, number = {4}, pages = {1014--1023}, title = {{Short Diffusion Time Diffusion-Weighted Imaging With Oscillating Gradient Preparation as an Early Magnetic Resonance Imaging Biomarker for Radiation Therapy Response Monitoring in Glioblastoma: A Preclinical Feasibility Study}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85041512823{\&}doi=10.1016{\%}2Fj.ijrobp.2017.12.280{\&}partnerID=40{\&}md5=acf090aa80c436bcbeed0e2695f55e04}, volume = {102}, year = {2018} } @incollection{RN959, abstract = {Contrast-enhanced computed tomography angiography (CE-CTA) provides valuable, non-invasive assessment of lower extremity peripheral arterial disease (PAD). The advent of wide beam CT scanners has enabled multiple CT acquisitions over the same structure at a high frame rate, facilitating time-resolved CTA acquisitions. 
In this study, we investigate the technical feasibility of automatically quantifying the bolus arrival time and blood velocity in the arteries below the knee from time-resolved CTA. Our approach is based on arterial segmentation and local estimation of the bolus arrival time. The results are compared to values obtained through manual reading of the datasets and show good agreement. Based on a small patient study, we explore initial utility of these quantitative measures for the diagnosis of lower extremity PAD.}, author = {Boonen, Pieter Thomas and Buls, Nico and {Van Gompel}, Gert and {De Brucker}, Yannick and Aerden, Dimitri and {De Mey}, Johan and Vandemeulebroucke, Jef}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, doi = {10.1007/978-3-030-01364-6_2}, editor = {Lee, S and Trucco, E and Maier-Hein, L and Moriconi, S and Albarqouni, S and Jannin, P and Balocco, S and Zahnd, G and Mateus, D and Taylor, Z and Demirci, S and Stoyanov, D and Sznitman, R and Martel, A and Cheplygina, V and Granger, E and Duong, L}, isbn = {9783030013639}, issn = {16113349}, keywords = {Artery segmentation,Blood velocity,Lower extremities,Peripheral arterial disease,Time-resolved CTA}, pages = {11--18}, publisher = {Springer Verlag}, title = {{Automated quantification of blood flow velocity from time-resolved CT angiography}}, type = {Serial}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85055830741{\&}doi=10.1007{\%}2F978-3-030-01364-6{\_}2{\&}partnerID=40{\&}md5=9917ad93753ce090a308f5318500b9e3}, volume = {11043 LNCS}, year = {2018} } @inproceedings{RN874, abstract = {Signal inhomogeneity in MRI can influence significantly automatic data processing like segmentation, etc. or even affect the diagnostic procedure. In this work, a new method of intensity nonuniformity correction is presented. Our idea was to replace FCM clustering by k-harmonic means in the method that uses a standard N3 correction procedure. The algorithm was tested with MRI dataset acquired from a phantom object using a breast MRI coil to simulate real conditions during the study. Results were compared with five other methods using two indexes-integral uniformity and standard deviation of the signal inside the object. 
For the presented and improved method, the lowest integral uniformity and the reasonable low signal deviation were obtained.}, author = {Borys, Damian and Serafin, Wojciech and Frackiewicz, Mariusz and Psiuk-Maksymowicz, Krzysztof and Palus, Henryk}, booktitle = {Proceedings - 14th International Conference on Signal Image Technology and Internet Based Systems, SITIS 2018}, doi = {10.1109/SITIS.2018.00055}, editor = {Chbeir, R and di Baja, G S and Gallo, L and Yetongnon, K and Dipanda, A and Castrillon-Santana, M}, isbn = {9781538693858}, keywords = {Intensity nonuniformity correction,MRI,Phantom}, pages = {314--319}, publisher = {Institute of Electrical and Electronics Engineers Inc.}, title = {{A Phantom Study of New Bias Field Correction Method Combining N3 and KHM for MRI Imaging}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85065920171{\&}doi=10.1109{\%}2FSITIS.2018.00055{\&}partnerID=40{\&}md5=db972e30f4168923938969ac8921de9b}, year = {2018} } @article{RN845, abstract = {Objective: To propose a hybrid multiatlas fusion and correction approach to estimate a pseudo–computed tomography (pCT) image from T2-weighted brain magnetic resonance (MR) images in the context of MRI-only radiotherapy. Materials and Methods: A set of eleven pairs of T2-weighted MR and CT brain images was included. Using leave-one-out cross-validation, atlas MR images were registered to the target MRI with multimetric, multiresolution deformable registration. The subsequent deformations were applied to the atlas CT images, producing uncorrected pCT images. Afterward, a three-dimensional hybrid CT number correction technique was used. This technique uses information about MR intensity, spatial location, and tissue label from segmented MR images with the fuzzy c-means algorithm and combines them in a weighted fashion to correct Hounsfield unit values of the uncorrected pCT images. The corrected pCT images were then fused into a final pCT image. Results: The proposed hybrid approach proved to be performant in correcting Hounsfield unit values in terms of qualitative and quantitative measures. Average correlation was 0.92 and 0.91 for the proposed approach by taking the mean and the median, respectively, compared with 0.86 for the uncorrected unfused version. Average values of dice similarity coefficient for bone were 0.68 and 0.72 for the fused corrected pCT images by taking the mean and the median, respectively, compared with 0.65 for the uncorrected unfused version indicating a significant bone estimation improvement. 
Conclusion: A hybrid fusion and correction method is presented to estimate a pCT image from T2-weighted brain MR images.}, author = {Boukellouz, Wafa and Moussaoui, Abdelouahab and Taleb-Ahmed, Abdelmalik and Boydev, Christine}, doi = {10.1016/j.jmir.2019.03.184}, issn = {18767982}, journal = {Journal of Medical Imaging and Radiation Sciences}, keywords = {MR-only radiotherapy,Pseudo-CT,brain,hybrid CT number correction,multiatlas fusion}, number = {3}, pages = {425--440}, title = {{Multiatlas Fusion with a Hybrid CT Number Correction Technique for Subject-Specific Pseudo-CT Estimation in the Context of MRI-Only Radiation Therapy}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85065894779{\&}doi=10.1016{\%}2Fj.jmir.2019.03.184{\&}partnerID=40{\&}md5=710b20f7941f0fb981b12e9164a50e6a}, volume = {50}, year = {2019} } @article{RN797, abstract = {Using magnetic resonance imaging (MRI) as the sole imaging modality for patient modeling in radiation therapy (RT) is a challenging task due to the need to derive electron density information from MRI and construct a so-called pseudo-computed tomography (pCT) image. We have previously published a new method to derive pCT images from head T1-weighted (T1-w) MR images using a single-atlas propagation scheme followed by a post hoc correction of the mapped CT numbers using local intensity information. The purpose of this study was to investigate the performance of our method with head zero echo time (ZTE) MR images. To evaluate results, the mean absolute error in bins of 20 HU was calculated with respect to the true planning CT scan of the patient. We demonstrated that applying our method using ZTE MR images instead of T1-w improved the correctness of the pCT in case of bone resection surgery prior to RT (that is, an example of large anatomical difference between the atlas and the patient).}, author = {Boydev, C. and Demol, B. and Pasquier, D. and Saint-Jalmes, H. and Delpon, G. and Reynaert, N.}, doi = {10.1016/j.ejmp.2017.04.028}, issn = {1724191X}, journal = {Physica Medica}, keywords = {Atlas-based method,Head cancer,MRI-only treatment planning,Pseudo-CT,ZTE sequence}, pages = {332--338}, title = {{Zero echo time MRI-only treatment planning for radiation therapy of brain tumors after resection}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {42}, year = {2017} } @article{RN848, abstract = {Motion artefacts from involuntary changes in eye fixation remain a major imaging issue in optical coherence tomography (OCT). This paper reviews the state-of-the-art of retrospective procedures to correct retinal motion and axial eye motion artefacts in OCT imaging. Following an overview of motion induced artefacts and correction strategies, a chronological survey of retrospective approaches since the introduction of OCT until the current days is presented. Pre-processing, registration, and validation techniques are described. 
The review finishes by discussing the limitations of the current techniques and the challenges to be tackled in future developments.}, author = {Brea, Luisa S{\'{a}}nchez and {De Jesus}, Danilo Andrade and Shirazi, Muhammad Faizan and Pircher, Michael and van Walsum, Theo and Klein, Stefan}, doi = {10.3390/app9132700}, issn = {20763417}, journal = {Applied Sciences (Switzerland)}, keywords = {Image registration,Optical coherence tomography,Retinal motion artefacts}, number = {13}, title = {{Review on retrospective procedures to correct retinal motion artefacts in OCT imaging}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85068857722{\&}doi=10.3390{\%}2Fapp9132700{\&}partnerID=40{\&}md5=609d603321731686ea1ae522250c7c6f}, volume = {9}, year = {2019} } @article{RN840, abstract = {The alveolar capillary network (ACN) has a large surface area that provides the basis for an optimized gas exchange in the lung. It needs to adapt to morphological changes during early lung development and alveolarization. Structural alterations of the pulmonary vasculature can lead to pathological functional conditions such as in bronchopulmonary dysplasia and various other lung diseases. To understand the development of the ACN and its impact on the pathogenesis of lung diseases, methods are needed that enable comparative analyses of the complex three-dimensional structure of the ACN at different developmental stages and under pathological conditions. In this study a newborn mouse lung was imaged with serial block-face scanning electron microscopy (SBF-SEM) to investigate the ACN and its surrounding structures before the alveolarization process begins. Most parts but not all of the examined ACN contain two layers of capillaries, which were repeatedly connected with each other. A path from an arteriole to a venule was extracted and straightened to allow cross-sectional visualization of the data along the path within a plane. This allows a qualitative characterization of the structures that erythrocytes pass on their way through the ACN. One way to define regions of the ACN supplied by specific arterioles is presented and used for analyses. Pillars, possibly intussusceptive, were found in the vasculature but no specific pattern was observed in regard to parts of the saccular septa. This study provides 3D information with a resolution of about 150 nm on the microscopic structure of a newborn mouse lung and outlines some of the potentials and challenges of SBF-SEM for 3D analyses of the ACN.}, author = {Buchacker, Tobias and M{\"{u}}hlfeld, Christian and Wrede, Christoph and Wagner, Willi L. 
and Beare, Richard and McCormick, Matt and Grothausmann, Roman}, doi = {10.3389/fphys.2019.01357}, issn = {1664042X}, journal = {Frontiers in Physiology}, keywords = {3D reconstruction,capillary network,lung,segmentation,serial block-face scanning electron microscopy}, title = {{Assessment of the Alveolar Capillary Network in the Postnatal Mouse Lung in 3D Using Serial Block-Face Scanning Electron Microscopy}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85076709753{\&}doi=10.3389{\%}2Ffphys.2019.01357{\&}partnerID=40{\&}md5=06fa9637c496f5d61d2f175943a787c0}, volume = {10}, year = {2019} } @article{RN850, abstract = {Purpose: Proton CT (pCT) has the ability to reduce inherent uncertainties in proton treatment by directly measuring the relative proton stopping power with respect to water, thereby avoiding the uncertain conversion of X-ray CT Hounsfield unit to relative stopping power and the deleterious effect of X- ray CT artifacts. The purpose of this work was to further evaluate the potential of pCT for pretreatment positioning using experimental pCT data of a head phantom. Methods: The performance of a 3D image registration algorithm was tested with pCT reconstructions of a pediatric head phantom. A planning pCT simulation scan of the phantom was obtained with 200 MeV protons and reconstructed with a 3D filtered back projection (FBP) algorithm followed by iterative reconstruction and a representative pretreatment pCT scan was reconstructed with FBP only to save reconstruction time. The pretreatment pCT scan was rigidly transformed by prescribing random errors with six degrees of freedom or deformed by the deformation field derived from a head and neck cancer patient to the pretreatment pCT reconstruction, respectively. After applying the rigid or deformable image registration algorithm to retrieve the original pCT image before transformation, the accuracy of the registration was assessed. To simulate very low-dose imaging for patient setup, the proton CT images were reconstructed with 100{\%}, 50{\%}, 25{\%}, and 12.5{\%} of the total number of histories of the original planning pCT simulation scan, respectively. Results: The residual errors in image registration were lower than 1 mm and 1° of magnitude regardless of the anatomic directions and imaging dose. The mean residual errors ranges found for rigid image registration were from −0.29 ± 0.09 to 0.51 ± 0.50 mm for translations and from −0.05 ± 0.13 to 0.08 ± 0.08 degrees for rotations. The percentages of sub-millimetric errors found, for deformable image registration, were between 63.5{\%} and 100{\%}. Conclusion: This experimental head phantom study demonstrated the potential of low-dose pCT imaging for 3D image registration. 
Further work is needed to confirm the value of pCT for pretreatment image-guided proton therapy.}, author = {Cassetta, Roberto and Piersimoni, Pierluigi and Riboldi, Marco and Giacometti, Valentina and Bashkirov, Vladmir and Baroni, Guido and Ordonez, Caesar and Coutrakon, George and Schulte, Reinhard}, doi = {10.1002/acm2.12565}, issn = {15269914}, journal = {Journal of Applied Clinical Medical Physics}, keywords = {deformable image registration,image reconstruction,proton CT,rigid image registration}, number = {4}, pages = {83--90}, title = {{Accuracy of low-dose proton CT image registration for pretreatment alignment verification in reference to planning proton CT}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85063859861{\&}doi=10.1002{\%}2Facm2.12565{\&}partnerID=40{\&}md5=cf8e867163571978728a5b92c8fc3fab}, volume = {20}, year = {2019} } @inproceedings{RN883, author = {{Cespedes Sanchez}, Pedro Pablo and {Legal Ayala}, Horacio and {Pinto Roa}, Diego and Gimenez, Gabriel Alberto and Lopez, Lorenzo and {Vazquez Noguera}, Jose Luis}, booktitle = {14th International Symposium on Medical Information Processing and Analysis, SIPAIM 2018}, doi = {10.1117/12.2506687}, editor = {Lepore, N and Romero, E and Brieva, J}, isbn = {9781510626058}, issn = {1996756X}, pages = {8}, publisher = {SPIE}, title = {{Intrapatient multimodal medical image registration of brain CT-MRI 3D: an approach based on metaheuristics}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85060546679{\&}doi=10.1117{\%}2F12.2506687{\&}partnerID=40{\&}md5=5125dd9c739585e2fc9a96de92ac84ae}, volume = {10975}, year = {2018} } @article{RN885, abstract = {This study proposes a regional fringe analysis (RFA) method to detect the regions of a target object in captured shifted images to improve depth measurement in phase-shifting fringe projection profilometry (PS-FPP). In the RFA method, region-based segmentation is exploited to segment the de-fringed image of a target object, and a multi-level fuzzy-based classification with five presented features is used to analyze and discriminate the regions of an object from the segmented regions, which were associated with explicit fringe information. Then, in the experiment, the performance of the proposed method is tested and evaluated on 26 test cases made of five types of materials. The qualitative and quantitative results demonstrate that the proposed RFA method can effectively detect the desired regions of an object to improve depth measurement in the PS-FPP system.}, author = {Chien, Kuang Che Chang and Tu, Han Yen and Hsieh, Ching Huang and Cheng, Chau Jern and Chang, Chun Yen}, doi = {10.1088/1361-6501/aa94a5}, issn = {13616501}, journal = {Measurement Science and Technology}, keywords = {Phase-shifting,classification,depth measurement,fringe projection profilometry,fuzzy analysis,region-based segmentation}, number = {1}, title = {{Regional fringe analysis for improving depth measurement in phase-shifting fringe projection profilometry}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85039165127{\&}doi=10.1088{\%}2F1361-6501{\%}2Faa94a5{\&}partnerID=40{\&}md5=2a3721b607941509869568960082b198}, volume = {29}, year = {2018} } @article{RN916, abstract = {Purpose: To develop an individually optimized contrast-enhanced (CE) 4D-computed tomography (CT) for radiotherapy simulation in pancreatic ductal adenocarcinomas (PDA). Methods: Ten PDA patients were enrolled.
Each underwent three CT scans: a 4D-CT immediately following a CE 3D-CT and an individually optimized CE 4D-CT using test injection. Three physicians contoured the tumor and pancreatic tissues. Image quality scores, tumor volume, motion, tumor-to-pancreas contrast, and contrast-to-noise ratio (CNR) were compared in the three CTs. Interobserver variations were also evaluated in contouring the tumor using simultaneous truth and performance level estimation. Results: Average image quality scores for CE 3D-CT and CE 4D-CT were comparable (4.0 and 3.8, respectively; P = 0.082), and both were significantly better than that for 4D-CT (2.6, P {\textless} 0.001). Tumor-to-pancreas contrast results were comparable in CE 3D-CT and CE 4D-CT (15.5 and 16.7 Hounsfield units (HU), respectively; P = 0.21), and the latter was significantly higher than in 4D-CT (9.2 HU, P = 0.001). Image noise in CE 3D-CT (12.5 HU) was significantly lower than in CE 4D-CT (22.1 HU, P = 0.013) and 4D-CT (19.4 HU, P = 0.009). CNRs were comparable in CE 3D-CT and CE 4D-CT (1.4 and 0.8, respectively; P = 0.42), and both were significantly better in 4D-CT (0.6, P = 0.008 and 0.014). Mean tumor volumes were significantly smaller in CE 3D-CT (29.8 cm3, P = 0.03) and CE 4D-CT (22.8 cm3, P = 0.01) than in 4D-CT (42.0 cm3). Mean tumor motion was comparable in 4D-CT and CE 4D-CT (7.2 and 6.2 mm, P = 0.17). Interobserver variations were comparable in CE 3D-CT and CE 4D-CT (Jaccard index 66.0{\%} and 61.9{\%}, respectively) and were worse for 4D-CT (55.6{\%}) than CE 3D-CT. Conclusions: CE 4D-CT demonstrated characteristics comparable to CE 3D-CT, with high potential for simultaneously delineating the tumor and quantifying tumor motion with a single scan.}, author = {Choi, Wookjin and Xue, Ming and Lane, Barton F. and Kang, Min Kyu and Patel, Kruti and Regine, William F. and Klahr, Paul and Wang, Jiahui and Chen, Shifeng and D'Souza, Warren and Lu, Wei}, doi = {10.1118/1.4963213}, issn = {00942405}, journal = {Medical Physics}, keywords = {4D-CT,contrast enhancement,pancreatic ductal adenocarcinoma,radiotherapy simulation}, number = {10}, pages = {5659--5666}, title = {{Individually optimized contrast-enhanced 4D-CT for radiotherapy simulation in pancreatic ductal adenocarcinoma}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84988808490{\&}doi=10.1118{\%}2F1.4963213{\&}partnerID=40{\&}md5=d555238a514a92d5a318ee19a0427dba}, volume = {43}, year = {2016} } @article{RN843, abstract = {Cone beam computed tomography (CBCT) is a medical imaging technique employed for diagnosis and treatment of patients with cranio-maxillofacial deformities. CBCT 3D reconstruction and segmentation of bones such as mandible or maxilla are essential procedures in surgical and orthodontic treatments. However, CBCT image processing may be impaired by features such as low contrast, inhomogeneity, noise and artifacts. Besides, values assigned to voxels are relative Hounsfield units unlike traditional computed tomography (CT). Such drawbacks render CBCT segmentation a difficult and time-consuming task, usually performed manually with tools designed for medical image processing. We present an interactive two-stage method for the segmentation of CBCT: (i) we first perform an automatic segmentation of bone structures with super-voxels, allowing a compact graph representation of the 3D data; (ii) next, a user-placed seed process guides a graph partitioning algorithm, splitting the extracted bones into mandible and skull. 
We have evaluated our segmentation method in three different scenarios and compared the results with ground truth data of the mandible and the skull. Results show that our method produces accurate segmentation and is robust to changes in parameters. We also compared our method with two similar segmentation strategies and showed that it produces more accurate segmentation. Finally, we evaluated our method for CT data of patients with deformed or missing bones and the segmentation was accurate for all data. The segmentation of a typical CBCT takes on average 5 min, which is faster than most techniques currently available.}, author = {{Cuadros Linares}, Oscar and Bianchi, Jonas and Raveli, Dirceu and {Batista Neto}, Jo{\~{a}}o and Hamann, Bernd}, doi = {10.1007/s00371-018-1511-0}, issn = {01782789}, journal = {Visual Computer}, keywords = {Bone segmentation,Cone beam computed tomography,Graph clustering,Mandible,Skull,Super-voxels}, number = {10}, pages = {1461--1474}, title = {{Mandible and skull segmentation in cone beam computed tomography using super-voxels and graph clustering}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85045951942{\&}doi=10.1007{\%}2Fs00371-018-1511-0{\&}partnerID=40{\&}md5=a294525f068f2ad564fa94a0fcd267bf}, volume = {35}, year = {2019} } @article{RN875, abstract = {Introduction: Linear registration to a standard space is one of the major steps in processing and analyzing magnetic resonance images (MRIs) of the brain. Here we present an overview of linear stereotaxic MRI registration and compare the performance of 5 publicly available and extensively used linear registration techniques in medical image analysis. Methods: A set of 9693 T1-weighted MR images were obtained for testing from 4 datasets: ADNI, PREVENT-AD, PPMI, and HCP, two of which have multi-center and multi-scanner data and three of which have longitudinal data. Each individual native image was linearly registered to the MNI ICBM152 average template using five versions of MRITOTAL from MINC tools, FLIRT from FSL, two versions of Elastix, spm{\_}affreg from SPM, and ANTs linear registration techniques. Quality control (QC) images were generated from the registered volumes and viewed by an expert rater to assess the quality of the registrations. The QC image contained 60 sub-images (20 of each of axial, sagittal, and coronal views at different levels throughout the brain) overlaid with contours of the ICBM152 template, enabling the expert rater to label the registration as acceptable or unacceptable. The performance of the registration techniques was then compared across different datasets. In addition, the effect of image noise, intensity non-uniformity, age, head size, and atrophy on the performance of the techniques was investigated by comparing differences between age, scaling factor, ventricle volume, brain volume, and white matter hyperintensity (WMH) volumes between passed and failed cases for each method. Results: The average registration failure rate among all datasets was 27.41{\%}, 27.14{\%}, 12.74{\%}, 13.03{\%}, 0.44{\%} for the five versions of MRITOTAL techniques, 8.87{\%} for ANTs, 11.11{\%} for FSL, 12.35{\%} for Elastix Affine, 24.40{\%} for Elastix Similarity, and 30.66{\%} for SPM. There were significant effects of signal to noise ratio, image intensity non-uniformity estimates, as well as age, head size, and atrophy related changes between passed and failed registrations.
Conclusion: Our experiments show that the Revised BestLinReg had the best performance among the evaluated registration techniques while all techniques performed worse for images with higher levels of noise and non-uniformity as well as atrophy related changes.}, author = {Dadar, Mahsa and Fonov, Vladimir S. and Collins, D. Louis}, doi = {10.1016/j.neuroimage.2018.03.025}, issn = {10959572}, journal = {NeuroImage}, keywords = {Linear registration,MRI,Quality control}, pages = {191--200}, title = {{A comparison of publicly available linear MRI stereotaxic registration techniques}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85044150830{\&}doi=10.1016{\%}2Fj.neuroimage.2018.03.025{\&}partnerID=40{\&}md5=a707203bafd861c84c210833597b8ad5}, volume = {174}, year = {2018} } @inproceedings{RN908, abstract = {{\textcopyright} 2017 SPIE. Accurate tumor segmentation is a critical step in the development of the computer-aided detection (CAD) based quantitative image analysis scheme for early stage prognostic evaluation of ovarian cancer patients. The purpose of this investigation is to assess the efficacy of several different methods to segment the metastatic tumors occurred in different organs of ovarian cancer patients. In this study, we developed a segmentation scheme consisting of eight different algorithms, which can be divided into three groups: 1) Region growth based methods; 2) Canny operator based methods; and 3) Partial differential equation (PDE) based methods. A number of 138 tumors acquired from 30 ovarian cancer patients were used to test the performance of these eight segmentation algorithms. The results demonstrate each of the tested tumors can be successfully segmented by at least one of the eight algorithms without the manual boundary correction. Furthermore, modified region growth, classical Canny detector, and fast marching, and threshold level set algorithms are suggested in the future development of the ovarian cancer related CAD schemes. This study may provide meaningful reference for developing novel quantitative image feature analysis scheme to more accurately predict the response of ovarian cancer patients to the chemotherapy at early stage.}, author = {Danala, Gopichandh and Wang, Yunzhi and Thai, Theresa and Gunderson, Camille C. and Moxley, Katherine M. and Moore, Kathleen and Mannel, Robert S. and Cheng, Samuel and Liu, Hong and Zheng, Bin and Qiu, Yuchen}, booktitle = {Biophotonics and Immune Responses XII}, doi = {10.1117/12.2250978}, editor = {Chen, W R}, isbn = {9781510605718}, issn = {16057422}, pages = {100650J}, publisher = {SPIE}, title = {{Improving efficacy of metastatic tumor segmentation to facilitate early prediction of ovarian cancer patients' response to chemotherapy}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85019246953{\&}doi=10.1117{\%}2F12.2250978{\&}partnerID=40{\&}md5=d74ac0c887f79b14d956f34ba1490c94}, volume = {10065}, year = {2017} } @article{RN833, abstract = {Purpose: Cardiac image segmentation is a critical process for generating personalized models of the heart and for quantifying cardiac performance parameters. Fully automatic segmentation of the left ventricle (LV), the right ventricle (RV), and the myocardium from cardiac cine MR images is challenging due to variability of the normal and abnormal anatomy, as well as the imaging protocols. 
This study proposes a multi-task learning (MTL)-based regularization of a convolutional neural network (CNN) to obtain accurate segmentation of the cardiac structures from cine MR images. Methods: We train a CNN network to perform the main task of semantic segmentation, along with the simultaneous, auxiliary task of pixel-wise distance map regression. The network also predicts uncertainties associated with both tasks, such that their losses are weighted by the inverse of their corresponding uncertainties. As a result, during training, the task featuring a higher uncertainty is weighted less and vice versa. The proposed distance map regularizer is a decoder network added to the bottleneck layer of an existing CNN architecture, facilitating the network to learn robust global features. The regularizer block is removed after training, so that the original number of network parameters does not change. The trained network outputs per-pixel segmentation when a new patient cine MR image is provided as an input. Results: We show that the proposed regularization method improves both binary and multi-class segmentation performance over the corresponding state-of-the-art CNN architectures. The evaluation was conducted on two publicly available cardiac cine MRI datasets, yielding average Dice coefficients of 0.84 ± 0.03 and 0.91 ± 0.04. We also demonstrate improved generalization performance of the distance map regularized network on cross-dataset segmentation, showing as much as 42{\%} improvement in myocardium Dice coefficient from 0.56 ± 0.28 to 0.80 ± 0.14. Conclusions: We have presented a method for accurate segmentation of cardiac structures from cine MR images. Our experiments verify that the proposed method exceeds the segmentation performance of three existing state-of-the-art methods. Furthermore, several cardiac indices that often serve as diagnostic biomarkers, specifically blood pool volume, myocardial mass, and ejection fraction, computed using our method are better correlated with the indices computed from the reference, ground truth segmentation. Hence, the proposed method has the potential to become a non-invasive screening and diagnostic tool for the clinical assessment of various cardiac conditions, as well as a reliable aid for generating patient specific models of the cardiac anatomy for therapy planning, simulation, and guidance.}, archivePrefix = {arXiv}, arxivId = {1901.01238}, author = {Dangi, Shusil and Linte, Cristian A. and Yaniv, Ziv}, doi = {10.1002/mp.13853}, eprint = {1901.01238}, issn = {00942405}, journal = {Medical Physics}, keywords = {cardiac segmentation,convolutional neural network,magnetic resonance imaging,multi-task learning,regularization,task uncertainty weighting}, number = {12}, pages = {5637--5651}, title = {{A distance map regularized CNN for cardiac cine MR image segmentation}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85074595228{\&}doi=10.1002{\%}2Fmp.13853{\&}partnerID=40{\&}md5=e49d5b643280b5d657eba92082261710}, volume = {46}, year = {2019} } @incollection{RN838, abstract = {Segmentation of the left ventricle and quantification of various cardiac contractile functions is crucial for the timely diagnosis and treatment of cardiovascular diseases. Traditionally, the two tasks have been tackled independently.
Here we propose a convolutional neural network based multi-task learning approach to perform both tasks simultaneously, such that, the network learns better representation of the data with improved generalization performance. Probabilistic formulation of the problem enables learning the task uncertainties during the training, which are used to automatically compute the weights for the tasks. We performed a five fold cross-validation of the myocardium segmentation obtained from the proposed multi-task network on 97 patient 4-dimensional cardiac cine-MRI datasets available through the STACOM LV segmentation challenge against the provided gold-standard myocardium segmentation, obtaining a Dice overlap of (Formula Presented) and mean surface distance of (Formula Presented) mm, while simultaneously estimating the myocardial area with mean absolute difference error of (Formula Presented).}, archivePrefix = {arXiv}, arxivId = {1809.10221}, author = {Dangi, Shusil and Yaniv, Ziv and Linte, Cristian A.}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, doi = {10.1007/978-3-030-12029-0_3}, editor = {Li, S and McLeod, K and Young, A and Rhode, K and Pop, M and Zhao, J and Sermesant, M and Mansi, T}, eprint = {1809.10221}, isbn = {9783030120283}, issn = {16113349}, pages = {21--31}, publisher = {Springer Verlag}, title = {{Left Ventricle Segmentation and Quantification from Cardiac Cine MR Images via Multi-task Learning}}, type = {Serial}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85064043381{\&}doi=10.1007{\%}2F978-3-030-12029-0{\_}3{\&}partnerID=40{\&}md5=89d8de887e5c7b9bb24cf49448d058fe}, volume = {11395 LNCS}, year = {2019} } @article{RN966, abstract = {In this study, we present several image segmentation techniques for various image scales and modalities. We consider cellular-, organ-, and whole organism-levels of biological structures in cardiovascular applications. Several automatic segmentation techniques are presented and discussed in this work. The overall pipeline for reconstruction of biological structures consists of the following steps: image pre-processing, feature detection, initial mask generation, mask processing, and segmentation post-processing. Several examples of image segmentation are presented, including patient-specific abdominal tissues segmentation, vascular network identification and myocyte lipid droplet micro-structure reconstruction.}, author = {Danilov, Alexander and Pryamonosov, Roman and Yurova, Alexandra}, doi = {10.3390/computation4030035}, issn = {20793197}, journal = {Computation}, keywords = {Abdominal tissues,Cardiovascular applications,Cerebral arteries,Coronary arteries,Electron microscopy,Image segmentation}, number = {3}, title = {{Image segmentation for cardiovascular biomedical applications at different scales}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85050390538{\&}doi=10.3390{\%}2Fcomputation4030035{\&}partnerID=40{\&}md5=ec04fcf1cbad06c8db55480c5b77767f}, volume = {4}, year = {2016} } @article{RN941, abstract = {Current osteoporosis treatments improve bone mass by increasing net bone formation: anti-resorptive drugs such as bisphosphonates block osteoclast activity, while anabolic agents such as parathyroid hormone (PTH) increase bone remodeling, with a greater effect on formation. 
Although these drugs are widely used, their role in modulating formation and resorption is not fully understood, due in part to technical limitations in the ability to longitudinally assess bone remodeling. Importantly, it is not known whether or not PTH-induced bone formation is independent of resorption, resulting in controversy over the effectiveness of combination therapies that use both PTH and an anti-resorptive. In this study, we developed a $\mu$CT-based, in vivo dynamic bone histomorphometry technique for rat tibiae, and applied this method to longitudinally track changes in bone resorption and formation as a result of treatment with alendronate (ALN), PTH, or combination therapy of both PTH and ALN (PTH+ALN). Correlations between our $\mu$CT-based measures of bone formation and measures of bone formation based on calcein-labeled histology (r = 0.72-0.83) confirm the accuracy of this method. Bone remodeling parameters measured through $\mu$CT-based in vivo dynamic bone histomorphometry indicate an increased rate of bone formation in rats treated with PTH and PTH+ALN, together with a decrease in bone resorption measures in rats treated with ALN and PTH+ALN. These results were further supported by traditional histology-based measurements, suggesting that PTH was able to induce bone formation while bone resorption was suppressed.}, author = {de Bakker, Chantal M.J. and Altman, Allison R. and Tseng, Wei Ju and Tribble, Mary Beth and Li, Connie and Chandra, Abhishek and Qin, Ling and Liu, X. Sherry}, doi = {10.1016/j.bone.2014.12.061}, issn = {87563282}, journal = {Bone}, keywords = {Animal models/rodent,Anti-resorptive treatment,Bone stiffness,In vivo $\mu$CT,Parathyroid hormone,Trabecular bone microstructure}, pages = {198--207}, title = {{$\mu$CT-based, in vivo dynamic bone histomorphometry allows 3D evaluation of the early responses of bone resorption and formation to PTH and alendronate combination therapy}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84920973040{\&}doi=10.1016{\%}2Fj.bone.2014.12.061{\&}partnerID=40{\&}md5=ce16efe68357b4c7c5a21d428c6efe79}, volume = {73}, year = {2015} } @article{RN903, abstract = {Pregnancy, lactation, and weaning result in dramatic changes in maternal calcium metabolism. In particular, the increased calcium demand during lactation causes a substantial degree of maternal bone loss. This reproductive bone loss has been suggested to be largely reversible, as multiple clinical studies have found that parity and lactation history have no adverse effect on postmenopausal fracture risk. However, the precise effects of pregnancy, lactation, and post-weaning recovery on maternal bone structure are not well understood. Our study aimed to address this question by longitudinally tracking changes in trabecular and cortical bone microarchitecture at the proximal tibia in rats throughout three cycles of pregnancy, lactation, and post-weaning using in vivo $\mu$CT. We found that the trabecular thickness underwent a reversible deterioration during pregnancy and lactation, which was fully recovered after weaning, whereas other parameters of trabecular microarchitecture (including trabecular number, spacing, connectivity density, and structure model index) underwent a more permanent deterioration, which recovered minimally. Thus, pregnancy and lactation resulted in both transient and long-lasting alterations in trabecular microstructure. 
In the meantime, multiple reproductive cycles appeared to improve the robustness of cortical bone (resulting in an elevated cortical area and polar moment of inertia), as well as increase the proportion of the total load carried by the cortical bone at the proximal tibia. Taken together, changes in the cortical and trabecular compartments suggest that whereas rat tibial trabecular bone appears to be highly involved in maintaining calcium homeostasis during female reproduction, cortical bone adapts to increase its load-bearing capacity, allowing the overall mechanical function of the tibia to be maintained. {\textcopyright} 2017 American Society for Bone and Mineral Research.}, author = {de Bakker, Chantal M.J. and Altman-Singles, Allison R. and Li, Yihan and Tseng, Wei Ju and Li, Connie and Liu, X. Sherry}, doi = {10.1002/jbmr.3084}, issn = {15234681}, journal = {Journal of Bone and Mineral Research}, keywords = {BONE FORMATION,BONE MICROARCHITECTURE,LACTATION,PREGNANCY,STIFFNESS,WEANING}, number = {5}, pages = {1014--1026}, title = {{Adaptations in the Microarchitecture and Load Distribution of Maternal Cortical and Trabecular Bone in Response to Multiple Reproductive Cycles in Rats}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85013042443{\&}doi=10.1002{\%}2Fjbmr.3084{\&}partnerID=40{\&}md5=18906d1bf5b2d3d0e94d19b1af8384bf}, volume = {32}, year = {2017} } @inproceedings{RN936, abstract = {Planning safe trajectories in keyhole neurosurgery requires a high level of accuracy in order to access to small structures either by biopsies, stimulating deep brain and others. We propose a computer system that carries out decision making based on rules using fuzzy logic to plan safe trajectories for preoperative neurosurgery. The processes to generate input values of membership functions, and implementation of the system for decision function will be explained. The results of risk weights for each candidate trajectory are evaluated and the safest calculated trajectories taking into account the risk structures that there are in the brain from the insertion points to the target point are visualized.}, author = {{De Le{\'{o}}n Cuevas}, Alejandro and Tovar-Arriaga, Sa{\'{u}}l and Gonz{\'{a}}lez-Guti{\'{e}}rrez, Arturo and Aceves-Fern{\'{a}}ndez, Marco Antonio}, booktitle = {2015 12th International Conference on Electrical Engineering, Computing Science and Automatic Control, CCE 2015}, doi = {10.1109/ICEEE.2015.7357927}, isbn = {9781467378390}, keywords = {Image guided surgery,Trajectory planning,artificial intelligence in surgery,fuzzy logic}, publisher = {Institute of Electrical and Electronics Engineers Inc.}, title = {{Trajectory planning for keyhole neurosurgery using fuzzy logic for risk evaluation}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84962882000{\&}doi=10.1109{\%}2FICEEE.2015.7357927{\&}partnerID=40{\&}md5=2ec0aab6774734025966c01139e1fb7a}, year = {2015} } @inproceedings{RN837, abstract = {Spectral imaging is a ubiquitous tool in modern biochemistry. Despite acquiring dozens to thousands of spectral channels, existing technology cannot capture spectral images at the same spatial resolution as structural microscopy. Due to partial voluming and low light exposure, spectral images are often difficult to interpret and analyze. 
This highlights a need to upsample the low-resolution spectral image by using spatial information contained in the high-resolution image, thereby creating a fused representation with high specificity both spatially and spectrally. In this paper, we propose a framework for the fusion of co-registered structural and spectral microscopy images to create super-resolved representations of spectral images. As a first application, we super-resolve spectral images of ex-vivo retinal tissue imaged with confocal laser scanning microscopy, by using spatial information from structured illumination microscopy. Second, we super-resolve mass spectroscopic images of mouse brain tissue, by using spatial information from high-resolution histology images. We present a systematic validation of model assumptions crucial towards maintaining the original nature of spectra and the applicability of super-resolution. Goodness-of-fit for spectral predictions are evaluated through functional R2 values, and the spatial quality of the super-resolved images are evaluated using normalized mutual information. {\textcopyright} COPYRIGHT SPIE. Downloading of the abstract is permitted for personal use only.}, author = {Dey, Neel and Li, Shijie and Bermond, Katharina and Heintzmann, Rainer and Curcio, Christine A. and Ach, Thomas and Gerig, Guido}, booktitle = {Medical Imaging 2019: Image Processing}, doi = {10.1117/12.2512598}, editor = {Angelini, E D and Angelini, E D and Angelini, E D and Landman, B A}, isbn = {9781510625457}, issn = {0277-786X}, pages = {12}, publisher = {SPIE}, title = {{Multi-modal image fusion for multispectral super-resolution in microscopy}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85068325681{\&}doi=10.1117{\%}2F12.2512598{\&}partnerID=40{\&}md5=bb0775b42af9f68ef3a088e55974d94e}, volume = {10949}, year = {2019} } @article{RN814, abstract = {Purpose: To develop a methodological framework to simultaneously measure R2* and magnetic susceptibility in trabecularized yellow bone marrow and to investigate the sensitivity of Quantitative Susceptibility Mapping (QSM) for measuring trabecular bone density using a non-UTE multi-gradient echo sequence. Methods: The ankle of 16 healthy volunteers and two patients was scanned using a time-interleaved multi-gradient-echo (TIMGRE) sequence. After field mapping based on water–fat separation methods and background field removal based on the Laplacian boundary value method, three different QSM dipole inversion schemes were implemented. Mean susceptibility values in regions of different trabecular bone density in the calcaneus were compared to the corresponding values in the R2* maps, bone volume to total volume ratios (BV/TV) estimated from high resolution imaging (in 14 subjects), and CT attenuation (in two subjects). In addition, numerical simulations were performed in a simplified trabecular bone model of randomly positioned spherical bone inclusions to verify and compare the scaling of R2* and susceptibility with BV/TV. Results: Differences in calcaneus trabecularization were well depicted in susceptibility maps, in good agreement with high-resolution MR and CT images. Simulations and in vivo scans showed a linear relationship of measured susceptibility with BV/TV and R2*. The ankle in vivo results showed a strong linear correlation between susceptibility and R2* (R2 = 0.88, p {\textless} 0.001) with a slope and intercept of −0.004 and 0.2 ppm, respectively. 
Conclusions: A method for multi-parametric mapping, including R2*-mapping and QSM, was developed for measuring trabecularized yellow bone marrow, showing good sensitivity of QSM for measuring trabecular bone density.}, author = {Diefenbach, Maximilian N. and Meineke, Jakob and Ruschke, Stefan and Baum, Thomas and Gersing, Alexandra and Karampinos, Dimitrios C.}, doi = {10.1002/mrm.27531}, issn = {15222594}, journal = {Magnetic Resonance in Medicine}, keywords = {susceptibility mapping,trabecular bone density}, number = {3}, pages = {1739--1754}, title = {{On the sensitivity of quantitative susceptibility mapping for measuring trabecular bone density}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {81}, year = {2019} } @inproceedings{RN926, author = {Dmitriev, Konstantin and Gutenko, Ievgeniia and Nadeem, Saad and Kaufman, Arie}, booktitle = {Medical Imaging 2016: Image Processing}, doi = {10.1117/12.2216537}, editor = {Styner, M A and Angelini, E D and Angelini, E D}, isbn = {9781510600195}, issn = {16057422}, pages = {97842C}, publisher = {SPIE}, title = {{Pancreas and cyst segmentation}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84981736248{\&}doi=10.1117{\%}2F12.2216537{\&}partnerID=40{\&}md5=0c6892196eefd59bd4552f6498d124be}, volume = {9784}, year = {2016} } @article{RN931, abstract = {The nonrigid registration algorithm based on B-spline Free-Form Deformation (FFD) plays a key role and is widely applied in medical image processing due to the good flexibility and robustness. However, it requires a tremendous amount of computing time to obtain more accurate registration results especially for a large amount of medical image data. To address the issue, a parallel nonrigid registration algorithm based on B-spline is proposed in this paper. First, the Logarithm Squared Difference (LSD) is considered as the similarity metric in the B-spline registration algorithm to improve registration precision. After that, we create a parallel computing strategy and lookup tables (LUTs) to reduce the complexity of the B-spline registration algorithm. As a result, the computing time of three time-consuming steps including B-splines interpolation, LSD computation, and the analytic gradient computation of LSD, is efficiently reduced, for the B-spline registration algorithm employs the Nonlinear Conjugate Gradient (NCG) optimization method. Experimental results of registration quality and execution efficiency on the large amount of medical images show that our algorithm achieves a better registration accuracy in terms of the differences between the best deformation fields and ground truth and a speedup of 17 times over the single-threaded CPU implementation due to the powerful parallel computing ability of Graphics Processing Unit (GPU).}, author = {Du, Xiaogang and Dang, Jianwu and Wang, Yangping and Wang, Song and Lei, Tao}, doi = {10.1155/2016/7419307}, issn = {17486718}, journal = {Computational and Mathematical Methods in Medicine}, title = {{A Parallel Nonrigid Registration Algorithm Based on B-Spline for Medical Images}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85008957755{\&}doi=10.1155{\%}2F2016{\%}2F7419307{\&}partnerID=40{\&}md5=94c05725af9454145efeff48e39a8b1e}, volume = {2016}, year = {2016} } @article{RN905, abstract = {The use of medical imaging in diagnosing brain disease is growing. The challenges are related to the big size of data and complexity of the image processing.
High standards of hardware and software are demanded, which can only be provided in big hospitals. Our purpose was to provide a smart cloud system to help diagnose brain diseases for hospitals with limited infrastructure. The expertise of neurologists was first implemented in the cloud server to conduct an automatic diagnosis in real time using an image processing technique developed based on the ITK library and web services. Users upload images through a website and the result, in this case the size of the tumor, was sent back immediately. A specific image compression technique was developed for this purpose. The smart cloud system was able to measure the area and location of tumors, with an average size of 19.91 ± 2.38 cm2 and an average response time of 7.0 ± 0.3 s. The capability of the server decreased when multiple clients accessed the system simultaneously: 14 ± 0 s (5 parallel clients) and 27 ± 0.2 s (10 parallel clients). The cloud system was successfully developed to process and analyze medical images for diagnosing brain diseases, in this case tumors.}, author = {Fahmi, Fahmi and Nasution, Tigor H. and Anggreiny}, doi = {10.3233/THC-171298}, issn = {09287329}, journal = {Technology and Health Care}, keywords = {Cloud system,ITK,brain diseases,medical image,web services}, number = {3}, pages = {607--610}, title = {{Smart cloud system with image processing server in diagnosing brain diseases dedicated for hospitals with limited resources}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85020190896{\&}doi=10.3233{\%}2FTHC-171298{\&}partnerID=40{\&}md5=43aa37ad080ba10e71273ea68294d0c5}, volume = {25}, year = {2017} } @inproceedings{RN963, abstract = {{\textcopyright} 2017 SPIE. A 3D kinematic measurement of joint movement is crucial for orthopedic surgery assessment and diagnosis. This is usually obtained through a frame-by-frame registration of the 3D bone volume to a fluoroscopy video of the joint movement. The high cost of a high-quality fluoroscopy imaging system has hindered the access of many labs to this application. Meanwhile, the more affordable and low-dosage version, the mini C-Arm, is not commonly used for this application due to low image quality. In this paper, we introduce a novel method for kinematic analysis of joint movement using the mini C-Arm. In this method the bone of interest is recovered and isolated from the rest of the image using a non-rigid registration of an atlas to each frame. The 3D/2D registration is then performed using the weighted histogram of image gradients as an image feature. In our experiments, the registration error was 0.89 mm and 2.36° for the human C2 vertebra. While the precision still lags behind a high-quality fluoroscopy machine, it is a good starting point facilitating the use of mini C-Arms for motion analysis, making this application available to lower-budget environments. Moreover, the registration was highly resistant to the initial distance from the true registration, converging to the answer from anywhere within ±90° of it.}, author = {Ghafurian, Soheil and Hacihaliloglu, Ilker and Metaxas, Dimitris N.
and Tan, Virak and Li, Kang}, booktitle = {Medical Imaging 2017: Image-Guided Procedures, Robotic Interventions, and Modeling}, doi = {10.1117/12.2254678}, editor = {Webster, R J and Fei, B}, isbn = {9781510607156}, issn = {16057422}, pages = {101350B}, publisher = {SPIE}, title = {{3D/2D image registration method for joint motion analysis using low-quality images from mini C-arm machines}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85020389194{\&}doi=10.1117{\%}2F12.2254678{\&}partnerID=40{\&}md5=6fd25bdbdca508f33b2b3846f0f0d7fc}, volume = {10135}, year = {2017} } @inproceedings{RN929, abstract = {{\textcopyright} 2016 SPIE.Digitally reconstructed radiographs (DRR) are a simulation of radiographic images produced through a perspective projection of the three-dimensional (3D) image (volume) onto a two-dimensional (2D) image plane. The traditional method for the generation of DRRs, namely ray-casting, is a computationally intensive process and accounts for most of solution time in 3D/2D medical image registration frameworks, where a large number of DRRs is required. A few alternate methods for a faster DRR generation have been proposed, the most successful of which are based on the idea of pre-calculating the attenuation value of possible rays. Despite achieving good quality, these methods support a limited range of motion for the volume and entail long pre-calculation time. In this paper, we propose a new preprocessing procedure and data structure for the calculation of the ray attenuation values. This method supports all possible volume positions with practically small memory requirements in addition to reducing the complexity of the problem from O(n3) to O(n2). In our experiments, we generated DRRs of high quality in 63 milliseconds with a preprocessing time of 99.48 seconds and a memory size of 7.45 megabytes.}, author = {Ghafurian, Soheil and Metaxas, Dimitris N. and Tan, Virak and Li, Kang}, booktitle = {Medical Imaging 2016: Image-Guided Procedures, Robotic Interventions, and Modeling}, doi = {10.1117/12.2217756}, editor = {Webster, R J and Yaniv, Z R}, isbn = {9781510600218}, issn = {16057422}, pages = {97860C}, publisher = {SPIE}, title = {{Fast generation of digitally reconstructed radiograph through an efficient preprocessing of ray attenuation values}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84982108538{\&}doi=10.1117{\%}2F12.2217756{\&}partnerID=40{\&}md5=dc30594f8f1ae1c399aff0783e094425}, volume = {9786}, year = {2016} } @article{RN891, abstract = {Objectives: To assess the role in predicting nipple-areola complex (NAC) involvement of a newly developed automatic method which computes the 3D tumor-NAC distance. Patients and Methods: Ninety-nine patients scheduled to nipple sparing mastectomy (NSM) underwent magnetic resonance (MR) examination at 1.5 T, including sagittal T2w and dynamic contrast enhanced (DCE)-MR imaging. An automatic method was developed to segment the NAC and the tumor and to compute the 3D distance between them. The automatic measurement was compared with manual axial and sagittal 2D measurements. NAC involvement was defined by the presence of invasive ductal or lobular carcinoma and/or ductal carcinoma in situ or ductal intraepithelial neoplasia (DIN1c − DIN3). Results: Tumor-NAC distance was computed on 95/99 patients (25 NAC+), as three tumors were not correctly segmented (sensitivity = 97{\%}), and 1 NAC was not detected (sensitivity = 99{\%}). 
The automatic 3D distance reached the highest area under the receiver operating characteristic (ROC) curve (0.830) with respect to the manual axial (0.676), sagittal (0.664), and minimum distances (0.664). At the best cut-off point of 21 mm, the 3D distance obtained sensitivity = 72{\%}, specificity = 80{\%}, positive predictive value = 56{\%}, and negative predictive value = 89{\%}. Conclusions: This method could provide a reproducible biomarker to preoperatively select breast cancer patients candidates to NSM, thus helping surgical planning and intraoperative management of patients.}, author = {Giannini, Valentina and Bianchi, Veronica and Carabalona, Silvia and Mazzetti, Simone and Maggiorotto, Furio and Kubatzki, Franziska and Regge, Daniele and Ponzone, Riccardo and Martincich, Laura}, doi = {10.1002/jso.24788}, issn = {10969098}, journal = {Journal of Surgical Oncology}, keywords = {3D automatic distance,breast cancer,magnetic resonance imaging,mastectomy,nipple-areola sparing,tumor segmentation}, number = {8}, pages = {1069--1078}, title = {{MRI to predict nipple-areola complex (NAC) involvement: An automatic method to compute the 3D distance between the NAC and tumor}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85040316780{\&}doi=10.1002{\%}2Fjso.24788{\&}partnerID=40{\&}md5=759b8bc9fcaf202c7bea3274c812f4c2}, volume = {116}, year = {2017} } @article{RN852, abstract = {Purpose: Pathological complete response (pCR) following neoadjuvant chemoradiotherapy or radiotherapy in locally advanced rectal cancer (LARC) is reached in approximately 15–30{\%} of cases, therefore it would be useful to assess if pretreatment of 18 F-FDG PET/CT and/or MRI texture features can reliably predict response to neoadjuvant therapy in LARC. Methods: Fifty-two patients were dichotomized as responder (pR+) or non-responder (pR-) according to their pathological tumor regression grade (TRG) as follows: 22 as pR+ (nine with TRG = 1, 13 with TRG = 2) and 30 as pR- (16 with TRG = 3, 13 with TRG = 4 and 1 with TRG = 5). First-order parameters and 21 second-order texture parameters derived from the Gray-Level Co-Occurrence matrix were extracted from semi-automatically segmented tumors on T2w MRI, ADC maps, and PET/CT acquisitions. The role of each texture feature in predicting pR+ was assessed with monoparametric and multiparametric models. Results: In the mono-parametric approach, PET homogeneity reached the maximum AUC (0.77; sensitivity = 72.7{\%} and specificity = 76.7{\%}), while PET glycolytic volume and ADC dissimilarity reached the highest sensitivity (both 90.9{\%}). In the multiparametric analysis, a logistic regression model containing six second-order texture features (five from PET and one from T2w MRI) yields the highest predictivity in distinguish between pR+ and pR- patients (AUC = 0.86; sensitivity = 86{\%}, and specificity = 83{\%} at the Youden index). Conclusions: If preliminary results of this study are confirmed, pretreatment PET and MRI could be useful to personalize patient treatment, e.g., avoiding toxicity of neoadjuvant therapy in patients predicted pR-.}, author = {Giannini, V. and Mazzetti, S. and Bertotto, I. and Chiarenza, C. and Cauda, S. and Delmastro, E. and Bracco, C. and {Di Dia}, A. and Leone, F. and Medico, E. and Pisacane, A. and Ribero, D. and Stasi, M. 
and Regge, D.}, doi = {10.1007/s00259-018-4250-6}, issn = {16197089}, journal = {European Journal of Nuclear Medicine and Molecular Imaging}, keywords = {18 F-FDG PET/CT imaging,Locally advanced rectal cancer,Magnetic resonance imaging,Prediction of treatment response,Radiomics,Texture features}, number = {4}, pages = {878--888}, title = {{Predicting locally advanced rectal cancer response to neoadjuvant therapy with 18 F-FDG PET and MRI radiomics features}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85060031543{\&}doi=10.1007{\%}2Fs00259-018-4250-6{\&}partnerID=40{\&}md5=4bf80d097b9dd71dfebe905f5b3b38d8}, volume = {46}, year = {2019} } @article{RN911, abstract = {Objective: To assess whether a computer-aided diagnosis (CAD) system can predict pathological Complete Response (pCR) to neoadjuvant chemotherapy (NAC) prior to treatment using texture features. Methods: Response to treatment of 44 patients was defined according to the histopathology of resected tumour and extracted axillary nodes in two ways: (a) pCR+ (Smith's Grade = 5) vs pCR- (Smith's Grade {\textless} 5); (b) pCRN+ (pCR+ and absence of residual lymph node metastases) vs pCRN-. A CAD system was developed to: (i) segment the breasts; (ii) register the DCE-MRI sequence; (iii) detect the lesion and (iv) extract 27 3D texture features. The role of individual texture features, multiparametric models and Bayesian classifiers in predicting patients' response to NAC was evaluated. Results: A cross-validated Bayesian classifier fed with 6 features was able to predict pCR with a specificity of 72{\%} and a sensitivity of 67{\%}. Conversely, 2 features were used by the Bayesian classifier to predict pCRN, obtaining a sensitivity of 69{\%} and a specificity of 61{\%}. Conclusion: A CAD scheme that extracts texture features from an automatically segmented 3D mask of the tumour could predict pathological response to NAC. Additional research should be performed to validate these promising results on a larger cohort of patients and using different classification strategies. Advances in knowledge: This is the first study assessing the role of an automatic CAD system in predicting the pathological response to NAC before treatment. Fully automatic methods represent the backbone of standardized analysis and may help in the timely management of patients who are candidates for NAC.}, author = {Giannini, Valentina and Mazzetti, Simone and Marmo, Agnese and Montemurro, Filippo and Regge, Daniele and Martincich, Laura}, doi = {10.1259/bjr.20170269}, issn = {00071285}, journal = {British Journal of Radiology}, number = {1077}, title = {{A computer-aided diagnosis (CAD) scheme for pretreatment prediction of pathological response to neoadjuvant therapy using dynamic contrast-enhanced MRI texture features}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85028072747{\&}doi=10.1259{\%}2Fbjr.20170269{\&}partnerID=40{\&}md5=5c537a3d7909e31a3bd3d481046791ce}, volume = {90}, year = {2017} } @article{RN939, abstract = {Multiparametric (mp)-Magnetic Resonance Imaging (MRI) is emerging as a powerful test to diagnose and stage prostate cancer (PCa). However, its interpretation is a time-consuming and complex feat requiring dedicated radiologists. Computer-aided diagnosis (CAD) tools could allow better integration of data deriving from the different MRI sequences in order to obtain accurate, reproducible, non-operator dependent information useful to identify and stage PCa.
In this paper, we present a fully automatic CAD system conceived as a 2-stage process. First, a malignancy probability map for all voxels within the prostate is created. Then, a candidate segmentation step is performed to highlight suspected areas, thus evaluating both the sensitivity and the number of false positive (FP) regions detected by the system. Training and testing of the CAD scheme is performed using whole-mount histological sections as the reference standard. On a cohort of 56 patients (i.e. 65 lesions) the area under the ROC curve obtained during the voxel-wise step was 0.91, while, in the second step, a per-patient sensitivity of 97{\%} was reached, with a median number of FP equal to 3 in the whole prostate. The system here proposed could be potentially used as first or second reader to manage patients suspected to have PCa, thus reducing both the radiologist's reporting time and the inter-reader variability. As an innovative setup, it could also be used to help the radiologist in setting the MRI-guided biopsy target.}, author = {Giannini, Valentina and Mazzetti, Simone and Vignati, Anna and Russo, Filippo and Bollito, Enrico and Porpiglia, Francesco and Stasi, Michele and Regge, Daniele}, doi = {10.1016/j.compmedimag.2015.09.001}, issn = {18790771}, journal = {Computerized Medical Imaging and Graphics}, keywords = {Computer aided detection,Image analysis,Multiparametric MRI,Prostate cancer,SVM classifier}, pages = {219--226}, title = {{A fully automatic computer aided diagnosis system for peripheral zone prostate cancer detection using multi-parametric magnetic resonance imaging}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84983127840{\&}doi=10.1016{\%}2Fj.compmedimag.2015.09.001{\&}partnerID=40{\&}md5=3a43e32746c5917a989d8eb7f9942bb2}, volume = {46}, year = {2015} } @article{RN878, abstract = {Keyhole surgeries become increasingly important in clinical daily routine as they help minimizing the damage of a patient's healthy tissue. The planning of keyhole surgeries is based on medical imaging and an important factor that influences the surgeries' success. Due to the image reconstruction process, medical image data contains uncertainty that exacerbates the planning of a keyhole surgery. In this paper we present a visual workflow that helps clinicians to examine and compare different surgery paths as well as visualizing the patients' affected tissue. The analysis is based on the concept of hierarchical image semantics, that segment the underlying image data with respect to the input images' uncertainty and the users understanding of tissue composition. Users can define arbitrary surgery paths that they need to investigate further. The defined paths can be queried by a rating function to identify paths that fulfill user-defined properties. The workflow allows a visual inspection of the affected tissues and its substructures. Therefore, the workflow includes a linked view system indicating the three-dimensional location of selected surgery paths as well as how these paths affect the patients tissue. To show the effectiveness of the presented approach, we applied it to the planning of a keyhole surgery of a brain tumor removal and a kneecap surgery.}, author = {Gillmann, Christina and Maack, Robin G.C. 
and Post, Tobias and Wischgoll, Thomas and Hagen, Hans}, doi = {10.1016/j.visinf.2018.04.004}, issn = {2468502X}, journal = {Visual Informatics}, keywords = {Keyhole Surgery Planning,Uncertainty Visualization,Visual Analytics Workflow}, number = {1}, pages = {26--36}, title = {{An Uncertainty-aware Workflow for Keyhole Surgery Planning using Hierarchical Image Semantics}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85066739022{\&}doi=10.1016{\%}2Fj.visinf.2018.04.004{\&}partnerID=40{\&}md5=373c953331ead7b18b936c470ef6d581}, volume = {2}, year = {2018} } @article{RN868, abstract = {Surface extraction is an important step in the image processing pipeline to estimate the size and shape of an object. Unfortunately, state of the art surface extraction algorithms form a straight forward extraction based on a pre-defined value that can lead to surfaces, that are not accurate. Furthermore, most isosurface extraction algorithms lack the ability to communicate uncertainty originating from the image data. This can lead to a rejection of such algorithms in many applications. To solve this problem, we propose a methodology to extract and optimize surfaces from image data based on a defined uncertainty model. To identify optimal parameters, the presented method defines a parameter space that is evaluated and rates each extraction run based on the remaining surface uncertainty. The resulting surfaces can be explored intuitively in an interactive framework. We applied our methodology to a variety of datasets to demonstrate the quality of the resulting surfaces.}, author = {Gillmann, Christina and Wischgoll, Thomas and Hamann, Bernd and Hagen, Hans}, doi = {10.1016/j.gmod.2018.07.004}, issn = {15240703}, journal = {Graphical Models}, keywords = {Parameter space exploration,Surface extraction,Uncertainty visualization}, pages = {13--21}, title = {{Accurate and reliable extraction of surfaces from image data using a multi-dimensional uncertainty model}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85051412806{\&}doi=10.1016{\%}2Fj.gmod.2018.07.004{\&}partnerID=40{\&}md5=5d10dfa161a616c746fa9eae0597d362}, volume = {99}, year = {2018} } @article{RN786, abstract = {Purpose: To describe a new system for scanned ion beam therapy, named RIDOS (Real-time Ion DOse planning and delivery System), which performs real time delivered dose verification integrating the information from a clinical beam monitoring system with a Graphic Processing Unit (GPU) based dose calculation in patient Computed Tomography. Methods: A benchmarked dose computation algorithm for scanned ion beams has been parallelized and adapted to run on a GPU architecture. A workstation equipped with a NVIDIA GPU has been interfaced through a National Instruments PXI-crate with the dose delivery system of the Italian National Center of Oncological Hadrontherapy (CNAO) to receive in real-time the measured beam parameters. Data from a patient monitoring system are also collected to associate the respiratory phases with each spot during the delivery of the dose. Using both measured and planned spot properties, RIDOS evaluates during the few seconds of inter-spill time the cumulative delivered and prescribed dose distributions and compares them through a fast $\gamma$-index algorithm. Results: The accuracy of the GPU-based algorithms was assessed against the CPU-based ones and the differences were found below 1‰. 
The cumulative planned and delivered doses are computed at the end of each spill in about 300 ms, while the dose comparison takes approximatively 400 ms. The whole operation provides the results before the next spill starts. Conclusions: RIDOS system is able to provide a fast computation of the delivered dose in the inter-spill time of the CNAO facility and allows to monitor online the dose deposition accuracy all along the treatment.}, author = {Giordanengo, S. and Vignati, A. and Attili, A. and Ciocca, M. and Donetti, M. and Fausti, F. and Manganaro, L. and Milian, F. M. and Molinelli, S. and Monaco, V. and Russo, G. and Sacchi, R. and {Varasteh Anvar}, M. and Cirio, R.}, doi = {10.1016/j.ejmp.2019.03.029}, issn = {1724191X}, journal = {Physica Medica}, keywords = {Dose delivery,GPU,Online dose computation,Pencil beam scanning}, pages = {139--149}, title = {{RIDOS: A new system for online computation of the delivered dose distributions in scanning ion beam therapy}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {60}, year = {2019} } @article{RN957, abstract = {Objectives: To evaluate the accuracy of ITK-SNAP software for measuring volumes of a non-regular shape structure, using cone beam computed tomography (CBCT) scans, besides for developing a mathematical model to correct the software measurement error in case it existed. Methods: A phantom made by moulding a rubber duck's head was filled with total (38,000 mm3) and partial volumes of water (7000 mm3, 14,000 mm3, 21,000 mm3, 28,000 mm3 and 35,000 mm3), which constituted the gold standards. The sound phantom and the phantom filled with different volumes of water were scanned in a Picasso Trio CBCT unit set at 80 kVp, 3.7 mA, 0.2 mm3 voxel and 12 × 8.5 cm field of view. Semi-automatic segmentation was performed with ITK-SNAP 3.0 software by two trained oral radiologists. Linear regression analyzed the relation between ITK-SNAP calculated volumes and the gold standard. Intraclass correlation coefficient was applied to analyze the reproducibility of the method. Significance level was set at 5{\%}. Results: Linear regression analysis showed a significant relationship between ITK-SNAP volumes and the gold standard (F = 22,537.3, p {\textless} 0.0001), with an R2 of 0.9993. The average error found was 4.7 (± 4.3) {\%}. To minimize this error, a mathematical model was developed and provided a reduction of it. ICC revealed excellent intra-examiner agreements for both examiners 1 (ICC = 0.9991, p {\textless} 0.0001) and 2 (ICC = 0.9989, p {\textless} 0.0001). Likewise, inter-examiner agreement was excellent (ICC = 0.9991, p {\textless} 0.0001). Conclusion: The software showed to be accurate for evaluating non-regular shape structures. 
The mathematical model developed reduced an already small error on the software's measurements.}, author = {Gomes, Amanda Farias and Brasil, Danieli Moura and Silva, Amaro Il{\'{i}}dio Vespasiano and Freitas, Deborah Queiroz and Haiter-Neto, Francisco and Groppo, Francisco Carlos}, doi = {10.1007/s11282-019-00397-y}, issn = {16139674}, journal = {Oral Radiology}, keywords = {Cone beam computed tomography,Cross-sectional anatomy,Software,Three-dimensional imaging}, title = {{Accuracy of ITK-SNAP software for 3D analysis of a non-regular topography structure}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85068856165{\&}doi=10.1007{\%}2Fs11282-019-00397-y{\&}partnerID=40{\&}md5=951d995522aefad42b00463ceb33e155}, year = {2019} } @article{RN812, abstract = {The deposition of light-absorbing particles (LAPs) such as mineral dust and black carbon on snow is responsible for a highly effective climate forcing, through darkening of the snow surface and associated feedbacks. The interplay between post-depositional snow transformation (metamorphism) and the dynamics of LAPs in snow remains largely unknown. We obtained time series of X-ray tomography images of dust-contaminated samples undergoing dry snow metamorphism at around-2C. They provide the first observational evidence that temperature gradient metamorphism induces dust particle motion in snow, while no movement is observed under isothermal conditions. Under temperature gradient metamorphism, dust particles can enter the ice matrix due to sublimation-condensation processes and spread down mainly by falling into the pore space. Overall, such motions might reduce the radiative impact of dust in snow, in particular in arctic regions where temperature gradient metamorphism prevails.}, author = {Hagenmuller, Pascal and Flin, Frederic and Dumont, Marie and Tuzet, Fran{\c{c}}ois and Peinke, Isabel and Lapalus, Philippe and Dufour, Anne and Roulle, Jacques and P{\'{e}}zard, Laurent and Voisin, Didier and Ando, Edward and {Rolland Du Roscoat}, Sabine and Charrier, Pascal}, doi = {10.5194/tc-13-2345-2019}, issn = {19940424}, journal = {Cryosphere}, number = {9}, pages = {2345--2359}, title = {{Motion of dust particles in dry snow under temperature gradient metamorphism}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {13}, year = {2019} } @article{RN798, abstract = {Each year, approximately 300,000 heart valve repair or replacement procedures are performed worldwide, including approximately 70,000 aortic valve replacement surgeries in the United States alone. Computational platforms for simulating cardiovascular devices such as prosthetic heart valves promise to improve device design and assist in treatment planning, including patient-specific device selection. This paper describes progress in constructing anatomically and physiologically realistic immersed boundary (IB) models of the dynamics of the aortic root and ascending aorta. This work builds on earlier IB models of fluid–structure interaction (FSI) in the aortic root, which previously achieved realistic hemodynamics over multiple cardiac cycles, but which also were limited to simplified aortic geometries and idealized descriptions of the biomechanics of the aortic valve cusps. 
By contrast, the model described herein uses an anatomical geometry reconstructed from patient-specific computed tomography angiography (CTA) data, and employs a description of the elasticity of the aortic valve leaflets based on a fiber-reinforced constitutive model fit to experimental tensile test data. The resulting model generates physiological pressures in both systole and diastole, and yields realistic cardiac output and stroke volume at physiological Reynolds numbers. Contact between the valve leaflets during diastole is handled automatically by the IB method, yielding a fully competent valve model that supports a physiological diastolic pressure load without regurgitation. Numerical tests show that the model is able to resolve the leaflet biomechanics in diastole and early systole at practical grid spacings. The model is also used to examine differences in the mechanics and fluid dynamics yielded by fresh valve leaflets and glutaraldehyde-fixed leaflets similar to those used in bioprosthetic heart valves. Although there are large differences in the leaflet deformations during diastole, the differences in the open configurations of the valve models are relatively small, and nearly identical hemodynamics are obtained in all cases considered.}, archivePrefix = {arXiv}, arxivId = {1705.04279}, author = {Hasan, Ali and Kolahdouz, Ebrahim M. and Enquobahrie, Andinet and Caranasos, Thomas G. and Vavalle, John P. and Griffith, Boyce E.}, doi = {10.1016/j.medengphy.2017.05.007}, eprint = {1705.04279}, issn = {18734030}, journal = {Medical Engineering and Physics}, keywords = {Aortic valve,Finite element method,Fluid–structure interaction,Immersed boundary method,Nonlinear elasticity}, pages = {72--84}, title = {{Image-based immersed boundary model of the aortic root}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {47}, year = {2017} } @inproceedings{RN958, abstract = {Programs organization and advancement that endeavor to kill a portion of the pitfalls of traditional programming strategies by data organized in a hierarchy of classes with several powerful new concepts. This paper displays another strategy for examining and reengineering Constructor in class hierarchies. In object oriented Software (OOS), the accentuation is on data instead of procedure. Class is a way that ties the data and procedure together. Constructor is a special member procedure (SMP) class and destructor restores the memory delivers back to the program. The advancement comprises of exercises, for example, analyze, design, code, test, implement and maintenance. 
This paper gives the information about measuring the estimations of Object Oriented programming (OOP) advancement and proposed a Metric Measurement Model to guarantee that measurements of value properties are imperative in Object Oriented programming improvement.}, author = {Hassan, Nadia Moqbel}, booktitle = {2017 2nd Al-Sadiq International Conference on Multidisciplinary in IT and Communication Science and Applications, AIC-MITCSA 2017}, doi = {10.1109/AIC-MITCSA.2017.8722987}, isbn = {9781538642412}, keywords = {Constructors in derived class,Inheritance,Metrics measurement model,Object Oriented programming,Object oriented design metrics,Software Metrics}, pages = {7--12}, publisher = {Institute of Electrical and Electronics Engineers Inc.}, title = {{Analysis and Implementation of Constructor in Class Hierarchy}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85067129598{\&}doi=10.1109{\%}2FAIC-MITCSA.2017.8722987{\&}partnerID=40{\&}md5=2c3f65035b60b33f66bb21bb6842bc6a}, year = {2017} } @article{RN919, abstract = {Effective image-based artifact correction is an essential step in the analysis of diffusion MR images. Many current approaches are based on retrospective registration, which becomes challenging in the realm of high b-values and low signal-to-noise ratio, rendering the corresponding correction schemes more and more ineffective. We propose a novel registration scheme based on memetic search optimization that allows for simultaneous exploitation of different signal intensity relationships between the images, leading to more robust registration results. We demonstrate the increased robustness and efficacy of our method on simulated as well as in vivo datasets. In contrast to the state-of-art methods, the median target registration error (TRE) stayed below the voxel size even for high b-values (3000 s {\textperiodcentered} mm-2 and higher) and low SNR conditions. We also demonstrate the increased precision in diffusion-derived quantities by evaluating Neurite Orientation Dispersion and Density Imaging (NODDI) derived measures on a in vivo dataset with severe motion artifacts. These promising results will potentially inspire further studies on metaheuristic optimization in diffusion MRI artifact correction and image registration in general.}, author = {Hering, Jan and Wolf, Ivo and Maier-Hein, Klaus H.}, doi = {10.1109/TMI.2016.2557580}, issn = {1558254X}, journal = {IEEE Transactions on Medical Imaging}, keywords = {Artifact correction,diffusion-weighted images,motion correction,particle swarm optimization,registration}, number = {10}, pages = {2280--2291}, title = {{Multi-Objective Memetic Search for Robust Motion and Distortion Correction in Diffusion MRI}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84991444277{\&}doi=10.1109{\%}2FTMI.2016.2557580{\&}partnerID=40{\&}md5=c42388a9bec72c65c2504113fc52a566}, volume = {35}, year = {2016} } @article{RN882, abstract = {Polysaccharide gels assembled from the anionic biopolymers pectin and carrageenan have been studied using transmission electron microscopy (TEM). Gels were formed in several different ways: for pectin, hydrogen bonding was used to form junction zones between strands, whereas for carrageenan systems, several different ion types were used to form ionotropic networks. Using this approach, several distinct network architectures were realized. 
In addition to preparing gelled samples for electron microscopy, a set of samples was taken without performing the additional treatment necessitated by the TEM measurements, and these were studied directly by small-angle X-ray scattering (SAXS). Taking careful consideration of the relative merits of different image sizes and available processing techniques, the real-space images acquired by TEM were used via radial integration of the Fourier transform to produce simulated scattering patterns. These intensity-versus-wavevector plots were compared with the results of SAXS experiments carried out on the unadulterated gels using synchrotron radiation. Although information regarding chain thicknesses and flexibilities was found to be modified by labeling and changes in the dielectric constant and mechanical properties of the surroundings in the TEM, the studies carried out here show that careful protocols can produce data sets where information acquired above ∼20 nm is broadly consistent with that obtained by SAXS studies carried out on unadulterated samples. The fact that at larger length scale the structure of these water-rich networks seems largely preserved in the TEM samples suggests that three-dimensional (3D) TEM tomography experiments carried out with careful sample preparation will be valuable tools for measuring network architecture and connectivity; information that is lost in SAXS owing to the intrinsic averaging nature of the technique.}, author = {Hernandez-Cerdan, Pablo and Mansel, Bradley W. and Leis, Andrew and Lundin, Leif and Williams, Martin A.K.}, doi = {10.1021/acs.biomac.7b01773}, issn = {15264602}, journal = {Biomacromolecules}, number = {3}, pages = {989--995}, title = {{Structural Analysis of Polysaccharide Networks by Transmission Electron Microscopy: Comparison with Small-Angle X-ray Scattering}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85043595060{\&}doi=10.1021{\%}2Facs.biomac.7b01773{\&}partnerID=40{\&}md5=395df47da135ea25e2ffd52248d04f49}, volume = {19}, year = {2018} } @inproceedings{RN858, author = {Hernandez-Cerdan, Pablo and Paniagua, Beatriz and Prothero, Jack and Marron, James S. and Livingston, Eric and Bateman, Ted and McCormick, Matthew M.}, booktitle = {Medical Imaging 2019: Biomedical Applications in Molecular, Structural, and Functional Imaging}, doi = {10.1117/12.2513007}, editor = {Gimi, B and Krol, A}, isbn = {9781510625532}, issn = {0277-786X}, pages = {40}, publisher = {SPIE}, title = {{Methods for quantitative characterization of bone injury from computed-tomography images}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85068414398{\&}doi=10.1117{\%}2F12.2513007{\&}partnerID=40{\&}md5=919e41c5d51d55066df5c354c9c554f4}, volume = {10953}, year = {2019} } @inproceedings{RN914, abstract = {From the preoperative partial nephrectomy planning perspective, it is essential to expose separately different kidney structures and to analyze their mutual topological relations. Only then, the identification of possible conflicts prior to surgical intervention can be facilitated. To enable this, we propose a segmentation frameworks for renal vascular tree, kidney and pelvicalyceal system from corresponding CT phases. In order to compensate for both patient position changes and volumetric changes related to respiratory activity, registration of different CT phases is required. 
It is performed by combining global rigid transform with multilevel and multiresolution B-spline registration. The research material consisted of fifteen patients that underwent CT scanning preceding kidney cancer surgery. Presented results using checkerboards and differential images prove the effectiveness of the proposed method. In addition, visualizations of the segmented structures (renal arteries, kidney, pelvicalyceal system) from registered CT phases are provided to exemplary demonstrate individual model for preoperative planning. This kind of solution meets the expectations of urological oncology in terms of facilitating planning the optimal surgical approach in partial nephrectomy. To the best of our knowledge, such a comprehensive strategy involving both the proposed segmentation frameworks and registration has not been introduced yet.}, author = {Heryan, Katarzyna and Skalski, Andrzej and Gajda, Janusz and Drewniak, Tomasz and Jakubowski, Jacek}, booktitle = {IST 2016 - 2016 IEEE International Conference on Imaging Systems and Techniques, Proceedings}, doi = {10.1109/IST.2016.7738233}, isbn = {9781509018178}, keywords = {B-spline registration,image registration,medical image processing,partial nephrectomy,segmentation,surgery planning}, pages = {255--260}, publisher = {Institute of Electrical and Electronics Engineers Inc.}, title = {{Registration of different phases of contrast-enhanced CT for facilitation of partial nephrectomy}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85004072339{\&}doi=10.1109{\%}2FIST.2016.7738233{\&}partnerID=40{\&}md5=aa3e12d899e600e256992bc3fbaface3}, year = {2016} } @article{RN829, abstract = {Background and purpose: To evaluate the impact of deformation magnitude and image modality on deformable-image-registration (DIR) accuracy using Halcyon megavoltage cone beam CT images (MVCBCT). Materials and methods: Planning CT images of an anthropomorphic Head phantom were aligned rigidly with MVCBCT and re-sampled to achieve the same resolution, denoted as pCT. MVCBCT was warped with twenty simulated pre-known virtual deformation fields (Ti, i = 1–20) with increasing deformation magnitudes, yielding warped CBCT (wCBCT). The pCT and MVCBCT were registered to wCBCT respectively (Multi-modality and Uni-modality DIR), generating deformation vector fields Vi and Vi′ (i = 1–20). Vi and Vi′ were compared with Ti respectively to assess the DIR accuracy geometrically. In addition, Vi, Ti, and Vi′ were applied to pCT, generating deformed CT (dCTi), ground-truth CT (Gi) and deformed CT′ (dCTi′) respectively. The Hounsfield Unit (HU) on these virtual CT images were also compared. Results: The mean errors of vector displacement increased with the deformation magnitude. For deformation magnitudes between 2.82 mm and 7.71 mm, the errors of uni-modality DIR were 1.16 mm {\~{}} 1.73 mm smaller than that of multi-modality (p = 0.0001, Wilcoxon signed rank test). DIR could reduce the maximum signed and absolute HU deviations from 70.8 HU to 11.4 HU and 208 HU to 46.2 HU respectively. Conclusions: As deformation magnitude increases, DIR accuracy continues to deteriorate and uni-modality DIR consistently outperformed multi-modality DIR. 
DIR-based adaptive radiotherapy utilizing the noisy MVCBCT images is only conditionally applicable with caution.}, author = {Huang, Yuliang and Li, Chenguang and Wang, Haiyang and Hu, Qiaoqiao and Wang, Ruoxi and Chang, Cheng and Ma, Wenjun and Li, Weibo and Wu, Hao and Zhang, Yibao}, doi = {10.1016/j.ejmp.2020.02.016}, issn = {1724191X}, journal = {Physica Medica}, keywords = {Adaptive radiotherapy,Deformable image registration,MVCBCT}, pages = {82--87}, title = {{A quantitative evaluation of deformable image registration based on MV cone beam CT images: Impact of deformation magnitudes and image modalities}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85079849262{\&}doi=10.1016{\%}2Fj.ejmp.2020.02.016{\&}partnerID=40{\&}md5=ffe26ce9da5145f82cc70d5a9518fab4}, volume = {71}, year = {2020} } @inbook{RN821, abstract = {Accurate methods for computer aided diagnosis of breast cancer increase accuracy of detection and provide support to physicians in detecting challenging cases. In dynamic contrast enhancing magnetic resonance imaging (DCE-MRI), motion artifacts can appear as a result of patient displacements. Non-linear deformation algorithms for breast image registration provide with a solution to the correspondence problem in contrast with affine models. In this study we evaluate 3 popular non-linear registration algorithms: MIRTK, Demons, SyN Ants, and compare to the affine baseline. We propose automatic measures for reproducible evaluation on the DCE-MRI breast-diagnosis TCIA-database, based on edge detection and clustering algorithms, and provide a rank of the methods according to these measures.}, address = {Cham}, author = {Illan, I. A. and Ramirez, J. and Gorriz, J. M. and Pinker, K. and Meyer-Baese, A.}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, doi = {10.1007/978-3-030-00946-5_14}, editor = {Stoyanov, D and Taylor, Z and Kainz, B and Maicas, G and Beichel, R R}, isbn = {9783030009458}, issn = {16113349}, keywords = {DCE-MRI,Diffeomorphism,Medical image processing,Non-affine registration,Optical flow,Registration,Reproducibility}, pages = {124--131}, publisher = {Springer International Publishing Ag}, series = {Lecture Notes in Computer Science}, title = {{Reproducible evaluation of registration algorithms for movement correction in dynamic contrast enhancing magnetic resonance imaging for breast cancer diagnosis}}, type = {Book Section}, url = {{\%}3CGo to}, volume = {11040 LNCS}, year = {2018} } @article{RN811, abstract = {An increasing number of studies are analysing the shapes of objects using geometric morphometrics with tomographic data, which are often segmented and transformed to three-dimensional (3D) surface models before measurement. This study aimed to evaluate the effects of different image segmentation methods on geometric morphometric data collection using computed tomography data collected from non-human primate skulls. Three segmentation methods based on a visually selected threshold, a half-maximum height protocol and a gradient and watershed algorithm were compared. For each method, the efficiency of surface reconstruction, the accuracy of landmark placement and the level of variation in shape and size compared with various levels of biological variation were evaluated. 
The visual-based method inflated the surface in high-density anatomical regions, whereas the half-maximum height protocol resulted in a large number of artificial holes and erosion. However, the gradient-based method mitigated these issues and generated the most efficient surface model. The segmentation method used had a much smaller effect on shape and size variation than interspecific and inter-individual differences. However, this effect was statistically significant and not negligible when compared with intra-individual (fluctuating asymmetric) variation. Although the gradient-based method is not widely used in geometric morphometric analyses, it may be one of promising options for reconstructing 3D surfaces. When evaluating small variations, such as fluctuating asymmetry, care should be taken around combining 3D data that were obtained using different segmentation methods.}, author = {Ito, Tsuyoshi}, doi = {10.1111/2041-210X.13274}, issn = {2041210X}, journal = {Methods in Ecology and Evolution}, keywords = {computed tomography,geometric morphometrics,measurement error,repeatability,segmentation}, number = {11}, pages = {1972--1984}, title = {{Effects of different segmentation methods on geometric morphometric data collection from primate skulls}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {10}, year = {2019} } @inproceedings{RN859, abstract = {Magnetic Resonance Spectroscopy Imaging (MRSI) is a medical imaging method that measures per voxel a spectrum of signal intensities. It allows for the analysis of chemical compositions within the scanned tissue, which is particularly useful for tumor classification and measuring its infiltration of healthy tissue. Common analysis approaches consider one metabolite concentration at a time to produce intensity maps in the image space, which does not consider all relevant information at hand. We propose a system that uses coordinated views between image-space visualizations and visual representations of the spectral (or feature) space. Coordinated interaction allows for analyzing both aspects and relating the analysis results back to the other for further investigations. We demonstrate how our system can be used to analyze brain tumors.}, author = {Jawad, Muhammad and Molchanov, Vladimir and Linsen, Lars}, booktitle = {VISIGRAPP 2019 - Proceedings of the 14th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications}, doi = {10.5220/0007571801180128}, editor = {Kerren, A and Hurter, C and Braz, J}, isbn = {9789897583544}, keywords = {Coordinated views,Medical visualization,Multidimensional data visualization,Spectral imaging analysis}, pages = {118--128}, publisher = {SciTePress}, title = {{Coordinated image- and feature-space visualization for interactive magnetic resonance spectroscopy imaging data analysis}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85064760921{\&}partnerID=40{\&}md5=85bea0be829568fc67dd653faf50fc47}, volume = {3}, year = {2019} } @article{RN897, abstract = {This paper presents an overview of several efforts towards reproducible research in the field of medical imaging and visualization. In the first section, the components of Open Science are presented: open access, open data and open source. In the second section, the challenges of open-science are described and potential solutions are mentioned. 
Finally, a discussion on the potential future of open science and reproducible research is introduced.}, author = {Jomier, Julien}, doi = {10.3233/ISU-170846}, issn = {01675265}, journal = {Information Services and Use}, keywords = {Open access,Open data,Open science,Open source,Reproducibility}, number = {3}, pages = {361--367}, title = {{Open science - towards reproducible Research}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85032568749{\&}doi=10.3233{\%}2FISU-170846{\&}partnerID=40{\&}md5=e551028edd91b74209c5cb3d27e210c2}, volume = {37}, year = {2017} } @article{RN945, abstract = {This paper presents the development of the cloud software services for computational analysis of blood flows on a private university cloud. The main focus is on the software service level built on the top of the computational platform provided. Moreover, user friendly management tools have been developed by using the Apache jclouds API to enhance the management of OpenStack cloud infrastructure and to increase the accessibility of engineering software. The blood flow through an aortic valve is considered as a pilot application of the private cloud infrastructure. The investigated flows can be described using numerical models based on viscous incompressible Navier-Stokes equations. The modelling software environment based on ANSYS Fluent is developed as a software service (SaaS) for the numerical analysis of low flow, low pressure gradient aortic stenosis. The performance of the developed cloud infrastructure has been assessed testing CPU, memory IO, disk IO, network and the developed software service for computations of blood flow through an aortic valve. The results obtained have been compared with the performance obtained using the native hardware.}, author = {Kaceniauskas, A. and Pacevic, R. and Sta{\v{s}}kuniene, M. and Starikovicius, V. and Davidavicius, G.}, doi = {10.4203/ccp.107.23}, issn = {17593433}, journal = {Civil-Comp Proceedings}, keywords = {ANSYS Fluent,Aortic valve,Blood flows,Cloud computing,OpenStack,Performance analysis,Software as a service}, title = {{Development of cloud software services for computational analysis of blood flows}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84971597818{\&}partnerID=40{\&}md5=313cdcaa420e54b4e9dad1c9f2228cc8}, volume = {107}, year = {2015} } @article{RN827, abstract = {This paper presents a simulator tool that can simulate large databases of visually realistic longitudinal MRIs with known volume changes. The simulator is based on a previously proposed biophysical model of brain deformation due to atrophy in AD. In this work, we propose a novel way of reproducing realistic intensity variation in longitudinal brain MRIs, which is inspired by an approach used for the generation of synthetic cardiac sequence images. This approach combines a deformation field obtained from the biophysical model with a deformation field obtained by a non-rigid registration of two images. The combined deformation field is then used to simulate a new image with specified atrophy from the first image, but with the intensity characteristics of the second image. This allows to generate the realistic variations present in real longitudinal time-series of images, such as the independence of noise between two acquisitions and the potential presence of variable acquisition artifacts. Various options available in the simulator software are briefly explained in this paper. 
In addition, the software is released as an open-source repository. The availability of the software allows researchers to produce tailored databases of images with ground truth volume changes; we believe this will help developing more robust brain morphometry tools. Additionally, we believe that the scientific community can also use the software to further experiment with the proposed model, and add more complex models of brain deformation and atrophy generation.}, author = {Khanal, Bishesh and Ayache, Nicholas and Pennec, Xavier}, doi = {10.3389/fnins.2017.00132}, issn = {1662453X}, journal = {Frontiers in Neuroscience}, keywords = {Biomechanical simulation,Biophysical modeling,Neurodegeneration,Simulated database,Synthetic images,Synthetic longitudinal MRIs}, number = {MAR}, title = {{Simulating longitudinal brain MRIs with known volume changes and realistic variations in image intensity}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {11}, year = {2017} } @article{RN877, abstract = {Background: With the development of versatile magnetic resonance acquisition techniques there arises a need for more advanced imaging simulation tools to enable adequate image appearance prediction, measurement sequence design and testing thereof. Recently, there is a growing interest in phase contrast angiography (PCA) sequence due to the capabilities of blood flow quantification that it offers. Moreover, as it is a non-contrast enhanced protocol, it has become an attractive option in areas, where usage of invasive contrast agents is not indifferent for the imaged tissue. Monitoring of the kidney function is an example of such an application. Results: We present a computer framework for simulation of the PCA protocol, both conventional and accelerated with echo-planar imaging (EPI) readout, and its application to the numerical models of kidney vasculatures. Eight patient-specific renal arterial trees were reconstructed following vessel segmentation in real computed tomography angiograms. In addition, a synthetic model was designed using a vascular tree growth simulation algorithm. The results embrace a series of synthetic PCA images of the renal arterial trees giving insight into the image formation and quantification of kidney hemodynamics. Conclusions: The designed simulation framework enables quantification of the PCA measurement error in relation to ground-truth flow velocity data. The mean velocity measurement error for the reconstructed renal arterial trees range from 1.5 to 12.8{\%} of the aliasing velocity value, depending on image resolution and flip angle. No statistically significant difference was observed between measurements obtained using EPI with a number of echos (NETL) = 4 and conventional PCA. 
In case of higher NETL factors peak velocity values can be underestimated up to 34{\%}.}, author = {Klepaczko, Artur and Szczypi{\'{n}}ski, Piotr and Strzelecki, Micha{\l} and Stefa{\'{n}}czyk, Ludomir}, doi = {10.1186/s12938-018-0471-y}, issn = {1475925X}, journal = {BioMedical Engineering Online}, keywords = {Blood flow simulation,Kidney vasculature modeling,MRI simulation,Phase contrast angiography,Vessel segmentation}, number = {1}, title = {{Simulation of phase contrast angiography for renal arterial models}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85045392503{\&}doi=10.1186{\%}2Fs12938-018-0471-y{\&}partnerID=40{\&}md5=eb7e35093af28b9fca81c4e33c818b84}, volume = {17}, year = {2018} } @article{RN818, abstract = {Magnetic resonance imaging (MRI) has evolved into the gold standard for quantifying excess adiposity, but reliable, efficient use in longitudinal studies requires analysis of large numbers of images. The objective of this study is to develop and evaluate a segmentation method designed to identify cardiac, subcutaneous, and visceral adipose tissue (VAT) in Dixon MRI scans. The proposed method is evaluated using 10 scans from volunteer females 18- to 35-years old, with body mass indexes between 30 and 39.99  kg  /  m2. Cross-sectional area (CSA) for cardiac adipose tissue (CAT), subcutaneous adipose tissue (SAT), and VAT, is compared to manually-traced results from three observers. Comparisons of CSA are made in 191 images for CAT, 394 images for SAT, and 50 images for VAT. The segmentation correlated well with respect to average observer CSA with Pearson correlation coefficient (R2) values of 0.80 for CAT, 0.99 for SAT, and 0.99 for VAT. The proposed method provides accurate segmentation of CAT, SAT, and VAT and provides an option to support longitudinal studies of obesity intervention.}, author = {Klingensmith, Jon D. and Elliott, Addison L. and Givan, Amy H. and Faszold, Zechariah D. and Mahan, Cory L. and Doedtman, Adam M.}, doi = {10.1117/1.jmi.6.1.014004}, issn = {2329-4310}, journal = {Journal of Medical Imaging}, number = {01}, pages = {1}, title = {{Development and evaluation of a method for segmentation of cardiac, subcutaneous, and visceral adipose tissue from Dixon magnetic resonance images}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {6}, year = {2019} } @article{RN946, abstract = {A method for generating three-dimensional tomograms from multiple three-dimensional axial projections in STimulated Emission Depletion (STED) superresolution microscopy is introduced. Our STED{\textless} method, based on the use of a micromirror placed on top of a standard microscopic sample, is used to record a three-dimensional projection at an oblique angle in relation to the main optical axis. Combining the STED{\textless} projection with the regular STED image into a single view by tomographic reconstruction, is shown to result in a tomogram with three-to-four-fold improved apparent axial resolution. Registration of the different projections is based on the use of a mutual-information histogram similarity metric. Fusion of the projections into a single view is based on Richardson-Lucy iterative deconvolution algorithm, modified to work with multiple projections. 
Our tomographic reconstruction method is demonstrated to work with real biological STED superresolution images, including a data set with a limited signal-to-noise ratio (SNR); the reconstruction software (SuperTomo) and its source code will be released under BSD open-source license.}, author = {Koho, S. and Deguchi, T. and H{\"{a}}nninen, P. E.}, doi = {10.1111/jmi.12287}, issn = {13652818}, journal = {Journal of Microscopy}, keywords = {Axial tomography,Image fusion,Image processing,Image registration,Open source software,Reconstruction algorithms,STED,Superresolution microscopy}, number = {2}, pages = {208--218}, title = {{A software tool for tomographic axial superresolution in STED microscopy}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84945471664{\&}doi=10.1111{\%}2Fjmi.12287{\&}partnerID=40{\&}md5=a793356576a358ca87cce6a3b305aea9}, volume = {260}, year = {2015} } @article{RN873, abstract = {In this work, a data assimilation method is proposed following an optimise-then-discretise approach, and is applied in the context of computational haemodynamics. The methodology aims to make use of phase-contrast magnetic resonance imaging to perform optimal flow control in computational fluid dynamic simulations. Flow matching between observations and model predictions is performed in luminal regions, excluding near-wall areas, improving the near-wall flow reconstruction to enhance the estimation of related quantities such as wall shear stresses. The proposed approach remarkably improves the flow field at the aortic root and reveals a great potential for predicting clinically relevant haemodynamic phenomenology. This work presents model validation against an analytical solution using the standard 3-D Hagen-Poiseuille flow, and validation with real data involving the flow control problem in a glass replica of a human aorta imaged with a 3T magnetic resonance scanner. In vitro experiments consist of both a numerically generated reference flow solution, which is considered as the ground truth, as well as real flow MRI data obtained from phase-contrast flow acquisitions. The validation against the in vitro flow MRI experiments is performed for different flow regimes and model parameters including different mesh refinements.}, author = {Koltukluoǧlu, Taha S. and Blanco, Pablo J.}, doi = {10.1017/jfm.2018.329}, issn = {14697645}, journal = {Journal of Fluid Mechanics}, keywords = {blood flow,control theory,variational methods}, pages = {329--364}, title = {{Boundary control in computational haemodynamics}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85047266553{\&}doi=10.1017{\%}2Fjfm.2018.329{\&}partnerID=40{\&}md5=50aee576fbf3bbc2426b262c16341404}, volume = {847}, year = {2018} } @article{RN805, abstract = {Kondo et al. have generated a comprehensive library of convertible GAL4 knockin Drosophila strains for neurotransmitter receptor genes. The GAL4 lines can be converted into other reporters through the use of RMCE, providing a versatile toolkit. 
Expression profiling of receptor genes reveals neurochemical segmentation of the brain.}, author = {Kondo, Shu and Takahashi, Takahiro and Yamagata, Nobuhiro and Imanishi, Yasuhito and Katow, Hidetaka and Hiramatsu, Shun and Lynn, Katrina and Abe, Ayako and Kumaraswamy, Ajayrama and Tanimoto, Hiromu}, doi = {10.1016/j.celrep.2019.12.018}, issn = {22111247}, journal = {Cell Reports}, number = {1}, pages = {284--297.e5}, title = {{Neurochemical Organization of the Drosophila Brain Visualized by Endogenously Tagged Neurotransmitter Receptors}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {30}, year = {2020} } @article{RN866, abstract = {Watershed is a widespread technique for image segmentation. Many researchers apply the method implemented in open source libraries without a deep understanding of its characteristics and limitations. In the review, we describe benchmarking outcomes of six open-source marker-controlled watershed implementations for the segmentation of 2D and 3D images. Even though the considered solutions are based on the same algorithm by flooding having O(n)computational complexity, these implementations have significantly different performance. In addition, building of watershed lines grows processing time. High memory consumption is one more bottleneck for dealing with huge volumetric images. Sometimes, the usage of more optimal software is capable of mitigating the issues with the long processing time and insufficient memory space. We assume parallel processing is capable of overcoming the current limitations. However, the development of concurrent approaches for the watershed segmentation remains a challenging problem.}, author = {Kornilov, Anton S. and Safonov, Ilia V.}, doi = {10.3390/jimaging4100123}, issn = {2313433X}, journal = {Journal of Imaging}, keywords = {Computational complexity,Flooding,Memory consumption,Open source software,Processing speed,Rain falling,Watershed segmentation}, number = {10}, title = {{An overview of watershed algorithm implementations in open source libraries}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85059431965{\&}doi=10.3390{\%}2Fjimaging4100123{\&}partnerID=40{\&}md5=3ef917586e8b50f8e99994a08d52d2c9}, volume = {4}, year = {2018} } @incollection{RN956, abstract = {Biological membrane images contain a variety of objects and patterns, which convey information about the underlying biological structures and mechanisms. The field of image analysis includes methods of computation which convert features and objects identified in images into quantitative information about biological structures represented in these images. Microscopy images are complex, noisy, and full of artifacts and consequently require multiple image processing steps for the extraction of meaningful quantitative information. This review is focused on methods of analysis of images of cells and biological membranes such as detection, segmentation, classification and machine learning, registration, tracking, and visualization. These methods could make possible, for example, to automatically identify defects in the cell membrane which affect physiological processes. 
Detailed analysis of membrane images could facilitate understanding of the underlying physiological structures or help in the interpretation of biological experiments.}, author = {Kulbacki, Marek and Segen, Jakub and Bak, Artur}, booktitle = {Advances in Anatomy Embryology and Cell Biology}, doi = {10.1007/978-3-319-56895-9_8}, isbn = {03015556 (ISSN)}, issn = {03015556}, pages = {119--140}, publisher = {Springer Verlag}, title = {{Analysis, recognition, and classification of biological membrane images}}, type = {Serial}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85030665154{\&}doi=10.1007{\%}2F978-3-319-56895-9{\_}8{\&}partnerID=40{\&}md5=ee50a7c74dbc3a51f07be9c4038bb7b0}, volume = {227}, year = {2017} } @inproceedings{RN937, abstract = {Lung cancer is the leading cause of cancer-related death, and early-stage diagnosis is critical to survival. Biopsy is typically required for a definitive diagnosis, but current low-risk clinical options for lung biopsy cannot access all biopsy sites. We introduce a motion planner for a multilumen transoral lung access system, a new system that has the potential to perform safe biopsies anywhere in the lung, which could enable more effective early-stage diagnosis of lung cancer. The system consists of three stages in which a bronchoscope is deployed transorally to the lung, a concentric tube robot pierces through the bronchial tubes into the lung parenchyma, and a steerable needle deploys through a properly oriented concentric tube and steers through the lung parenchyma to the target site while avoiding anatomical obstacles such as significant blood vessels. A sampling-based motion planner computes actions for each stage of the system and considers the coupling of the stages in an efficient manner. We demonstrate the motion planner's fast performance and ability to compute plans with high clearance from obstacles in simulated anatomical scenarios.}, author = {Kuntz, Alan and Torres, Luis G. and Feins, Richard H. and Webster, Robert J. and Alterovitz, Ron}, booktitle = {IEEE International Conference on Intelligent Robots and Systems}, doi = {10.1109/IROS.2015.7353829}, isbn = {9781479999941}, issn = {21530866}, keywords = {Biomedical imaging,Biopsy,Electron tubes,Lungs,Needles,Planning,Robots}, pages = {3255--3261}, publisher = {Institute of Electrical and Electronics Engineers Inc.}, title = {{Motion planning for a three-stage multilumen transoral lung access system}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84958176573{\&}doi=10.1109{\%}2FIROS.2015.7353829{\&}partnerID=40{\&}md5=7513ff75c060dfaaf113a4487f0d9a72}, volume = {2015-Decem}, year = {2015} } @incollection{RN907, abstract = {CLARITY is a method for converting biological tissues into translucent and porous hydrogel-tissue hybrids. This facilitates interrogation with light sheet microscopy and penetration of molecular probes while avoiding physical slicing. In this work, we develop a pipeline for registering CLARIfied mouse brains to an annotated brain atlas. Due to the novelty of this microscopy technique it is impractical to use absolute intensity values to align these images to existing standard atlases. Thus we adopt a large deformation diffeomorphic approach for registering images via mutual information matching. Furthermore we show how a cascaded multi-resolution approach can improve registration quality while reducing algorithm run time. 
As acquired image volumes were over a terabyte in size, they were far too large for work on personal computers. Therefore the NeuroData computational infrastructure was deployed for multi-resolution storage and visualization of these images and aligned annotations on the web.}, archivePrefix = {arXiv}, arxivId = {1612.00356}, author = {Kutten, Kwame S. and Charon, Nicolas and Miller, Michael I. and Ratnanather, J. Tilak and Matelsky, Jordan and Baden, Alexander D. and Lillaney, Kunal and Deisseroth, Karl and Ye, Li and Vogelstein, Joshua T.}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, doi = {10.1007/978-3-319-66182-7_32}, editor = {Descoteaux, M and Duchesne, S and Franz, A and Jannin, P and Collins, D L and Maier-Hein, L}, eprint = {1612.00356}, isbn = {9783319661810}, issn = {16113349}, pages = {275--282}, publisher = {Springer Verlag}, title = {{A large deformation diffeomorphic approach to registration of CLARITY images via mutual information}}, type = {Serial}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85029360827{\&}doi=10.1007{\%}2F978-3-319-66182-7{\_}32{\&}partnerID=40{\&}md5=3534498089ac268864d5dd2937d2983d}, volume = {10433 LNCS}, year = {2017} } @inproceedings{RN922, abstract = {The CLARITY method renders brains optically transparent to enable high-resolution imaging in the structurally intact brain. Anatomically annotating CLARITY brains is necessary for discovering which regions contain signals of interest. Manually annotating whole-brain, terabyte CLARITY images is difficult, time-consuming, subjective, and error-prone. Automatically registering CLARITY images to a pre-annotated brain atlas offers a solution, but is difficult for several reasons. Removal of the brain from the skull and subsequent storage and processing cause variable non-rigid deformations, thus compounding inter-subject anatomical variability. Additionally, the signal in CLARITY images arises from various biochemical contrast agents which only sparsely label brain structures. This sparse labeling challenges the most commonly used registration algorithms that need to match image histogram statistics to the more densely labeled histological brain atlases. The standard method is a multiscale Mutual Information B-spline algorithm that dynamically generates an average template as an intermediate registration target. We determined that this method performs poorly when registering CLARITY brains to the Allen Institute's Mouse Reference Atlas (ARA), because the image histogram statistics are poorly matched. Therefore, we developed a method (Mask-LDDMM) for registering CLARITY images, that automatically find the brain boundary and learns the optimal deformation between the brain and atlas masks. Using Mask-LDDMM without an average template provided better results than the standard approach when registering CLARITY brains to the ARA. The LDDMM pipelines developed here provide a fast automated way to anatomically annotate CLARITY images. Our code is available as open source software at http://NeuroData.io.}, archivePrefix = {arXiv}, arxivId = {1605.02060}, author = {Kutten, Kwame S. and Vogelstein, Joshua T. 
and Charon, Nicolas and Ye, Li and Deisseroth, Karl and Miller, Michael I.}, booktitle = {Optics, Photonics and Digital Technologies for Imaging Applications IV}, doi = {10.1117/12.2227444}, editor = {Truchetet, F and Schelkens, P and Ebrahimi, T and Cristobal, G and Saarikko, P}, eprint = {1605.02060}, isbn = {9781510601413}, issn = {1996756X}, pages = {989616}, publisher = {SPIE}, title = {{Deformably registering and annotating whole CLARITY brains to an atlas via masked LDDMM}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84991503754{\&}doi=10.1117{\%}2F12.2227444{\&}partnerID=40{\&}md5=e5bfa771d1160566e24476b33887e0d9}, volume = {9896}, year = {2016} } @inproceedings{RN950, abstract = {In this paper we present a novel image registration software that has been specifically designed to suit the needs of the study of tumor response quantification. Joining a cross-platform architecture, the exclusive use of open-source libraries and some unique features, our tool is being successfully used in the frame of the ARTFIBio project, focused on the study of predictive individualized models of head and neck tumor response to radiotherapy. {\textcopyright} 2014 IEEE.}, author = {Landesa-Vazquez, Iago and Alba-Castro, Jose Luis and Mera-Iglesias, Moises and Aramburu-Nunez, David and Lopez-Medina, Antonio and Munoz-Garzon, Victor}, booktitle = {2014 IEEE-EMBS International Conference on Biomedical and Health Informatics, BHI 2014}, doi = {10.1109/BHI.2014.6864326}, isbn = {9781479921317}, pages = {149--152}, publisher = {IEEE Computer Society}, title = {{ARTFIBio: A cross-platform image registration tool for tumor response quantification in head and neck cancer}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84906848785{\&}doi=10.1109{\%}2FBHI.2014.6864326{\&}partnerID=40{\&}md5=edbfdf06c7b7a66d48a98f667bc3d4ad}, year = {2014} } @article{RN898, abstract = {This paper provides an overview of the highlights of the 2017 NFAIS Annual Conference, The Big Pivot: Re- Engineering Scholarly Communication, that was held in Alexandria, VA from February 26-28, 2017. The goal of the conference was to examine the scholarly record and its current evolution in a digital world - both in how it functions and how it serves the information and scholarly research communities. The program stressed how in today's environment, new and innovative advances in information technology are drafting a blueprint that will optimize the ways in which users create, access, and use data and information. 
New government mandates and policies continue to be implemented on a global basis to facilitate open access to research outputs while in parallel alternative methods for peer review and measuring impact are being utilized.Within the context of these changes, the conference attempted to look at where this blueprint may lead the information community over the next few years.}, author = {Lawlor, Bonnie}, doi = {10.3233/ISU-170854}, issn = {01675265}, journal = {Information Services and Use}, keywords = {Data management,Library cyberinfrastructure,Open access,Open data,Scholarly communication}, number = {3}, pages = {283--306}, title = {{An overview of the NFAIS 2017 annual conference: The big pivot: Re-engineering scholarly communication}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85032578212{\&}doi=10.3233{\%}2FISU-170854{\&}partnerID=40{\&}md5=b8ce9fb87b2e39ac050ef613e4bf6b5a}, volume = {37}, year = {2017} } @inproceedings{RN972, abstract = {A precise analysis of medical image is an important stage in the contouring phase throughout radiotherapy preparation. Medical images are mostly used as radiographic techniques in diagnosis, clinical studies and treatment planning Medical image processing tool are also similarly as important. With a medical image processing tool, it is possible to speed up and enhance the operation of the analysis of the medical image. This paper describes medical image processing software tool which attempts to secure the same kind of programmability advantage for exploring applications of the pipelined processors. These tools simulate complete systems consisting of several of the proposed processing components, in a configuration described by a graphical schematic diagram. In this paper, fifteen different medical image processing tools will be compared in several aspects. The main objective of the comparison is to gather and analysis on the tool in order to recommend users of different operating systems on what type of medical image tools to be used when analysing different types of imaging. A result table was attached and discussed in the paper.}, author = {Lee, Lay Khoon and Liew, Siau Chuin}, booktitle = {2015 4th International Conference on Software Engineering and Computer Systems, ICSECS 2015: Virtuous Software Solutions for Big Data}, doi = {10.1109/ICSECS.2015.7333105}, editor = {Zain, J M and Majid, M A and Ameedeen, M A and Arshah, R A and Azmi, Z R M and Mustaffa, Z M}, isbn = {9781467367226}, keywords = {computer vision,image processing,tools component}, pages = {171--176}, publisher = {Institute of Electrical and Electronics Engineers Inc.}, title = {{A survey of medical image processing tools}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84962073514{\&}doi=10.1109{\%}2FICSECS.2015.7333105{\&}partnerID=40{\&}md5=67e62e688838868692dd48230c893599}, year = {2015} } @inbook{RN902, abstract = {We are entering a new era of biomedical research that is driven by the demand for more effective therapeutics to prevent and treat human disease. Organoids, cultured ex vivo, are the future of this new era of biomedical research and are poised to replace preclinical 2D cell models, and in some cases animal models of human disease. Therefore, the drug discovery and development pipeline is retooling high-throughput technologies to accommodate organoids as the model of choice. 
In particular, the marriage of high-content screening (HCS) with organoid models for drug discovery will be a critical component in this new era of drug development. This book chapter is focused on the state-of-the-art HCS technology and how this technology is being retooled for drug discovery and development with human organoids.}, author = {Li, L. and LaBarbera, D. V.}, booktitle = {Comprehensive Medicinal Chemistry III}, doi = {10.1016/B978-0-12-409547-2.12329-7}, isbn = {9780128032008}, keywords = {3D imaging technology,3D tissue culture,Drug discovery,Extracellular matrix,High-content screening,High-throughput imaging,High-throughput screening,Image analysis software,Microenvironment,Multicellular tumor spheroids,Organoids,Organotypic,Pluripotent stem cells,Precision medicine,Spheroids}, pages = {388--415}, publisher = {Elsevier Inc.}, title = {{3D high-content screening of organoids for drug discovery}}, type = {Book Section}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85046151116{\&}doi=10.1016{\%}2FB978-0-12-409547-2.12329-7{\&}partnerID=40{\&}md5=e152840c738581970058285c2f8412a3}, volume = {2-8}, year = {2017} } @article{RN893, abstract = {Background Most existing objective surgical motion analysis schemes are limited to structured surgical tasks or recognition of motion patterns for certain categories of surgeries. Analyzing instrument motion data with respect to anatomical structures can break the limit, and an anatomical region segmentation algorithm is required for the analysis. Methods An atlas was generated by manually segmenting the skull base into nine regions, including left/right anterior/posterior ethmoid sinuses, frontal sinus, left and right maxillary sinuses, nasal airway, and sphenoid sinus. These regions were selected based on anatomical and surgical significance in skull base and sinus surgery. Six features, including left and right eye center, nasofrontal beak, anterior tip of nasal spine, posterior edge of hard palate at midline, and clival body at foramen magnum, were used for alignment. The B-spline deformable registration was adapted to fine tune the registration, and bony boundaries were automatically extracted for final precision improvement. The resultant deformation field was applied to the atlas, and the motion data were clustered according to the deformed atlas. Results Eight maxillofacial computed tomography scans were used in experiments. One was manually segmented as the atlas. The others were segmented by the proposed method. Motion data were clustered into nine groups for every dataset and outliers were filtered. Conclusions The proposed algorithm improved the efficiency of motion data clustering and requires limited human interaction in the process. The anatomical region segmentations effectively filtered out the portion of motion data that are out of surgery sites and grouped them according to anatomical similarities.}, author = {Li, Yangming and Bly, Randall A. and Harbison, R. Alex and Humphreys, Ian M. and Whipple, Mark E. 
and Hannaford, Blake and Moe, Kris S.}, doi = {10.1055/s-0037-1604406}, issn = {21936331}, journal = {Journal of Neurological Surgery, Part B: Skull Base}, keywords = {anatomical region,atlas-based segmentation,motion analysis,objective skill assessment,operating room data,sinus surgery,skull base}, number = {6}, pages = {490--496}, title = {{Anatomical Region Segmentation for Objective Surgical Skill Assessment with Operating Room Motion Data}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85026850018{\&}doi=10.1055{\%}2Fs-0037-1604406{\&}partnerID=40{\&}md5=e63320b0921b5580303ada6248b2e769}, volume = {78}, year = {2017} } @incollection{RN879, abstract = {Data fusion and specific visualization of CT and SPECT are important for diagnosis and research purposes. Selected problems are considered in the paper and are related to the developed CT–SPECT Analyzer software. Hierarchical mapping with SPECT priority for maximum value of rays is applied in this software. Three variants of color mappings are presented. Some practical aspects related to low quality of CT are considered also. The most promising is the rainbow gradient with gamma curve adjustment.}, author = {Listewnik, Maria H. and Piwowarska-Bilska, Hanna and Safranow, Krzysztof and Iwanowski, Jacek and Laszczy{\'{n}}ska, Maria and Chosia, Maria and Ostrowski, Marek and Birkenfeld, Bo{\.{z}}ena and Mazurek, Przemys{\l}aw}, booktitle = {Advances in Intelligent Systems and Computing}, doi = {10.1007/978-3-319-68720-9_2}, editor = {Choras, M and Choras, R S}, isbn = {9783319687193}, issn = {21945357}, keywords = {CT,Data fusion,SPECT,Volumetric visualization}, pages = {11--18}, publisher = {Springer Verlag}, title = {{CT–SPECT analyzer - A tool for CT and SPECT data fusion and volumetric visualization}}, type = {Serial}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85031410415{\&}doi=10.1007{\%}2F978-3-319-68720-9{\_}2{\&}partnerID=40{\&}md5=333a82c9184f554ff151234b1617f13b}, volume = {681}, year = {2018} } @article{RN841, abstract = {An abdominal aortic aneurysm (AAA) is a focal dilation of the abdominal aorta, that if not treated, tends to grow and may rupture. The most common treatment for AAAs is the endovascular aneurysm repair (EVAR), which requires that patients undergo Computed Tomography Angiography (CTA)-based post-operative lifelong surveillance due to the possible appearance of complications. These complications may again lead to AAA dilation and rupture. However, there is a lack of advanced quantitative image-analysis tools to support the clinicians in the follow-up. Currently, the approach is to evaluate AAA diameter changes along time to infer the progress of the patient and the post-operative risk of AAA rupture. An increased AAA diameter is usually associated with a higher rupture risk, but there are some small AAAs that rupture, whereas other larger aneurysms remain stable. This means that the diameter-based rupture risk assessment is not suitable for all the cases, and there is increasing evidence that the biomechanical behavior of the AAA may provide additional valuable information regarding the progression of the disease and the risk of rupture. Hence, we propose a promising methodology for post-operative CTA time-series registration and subsequent aneurysm biomechanical strain analysis. From these strains, quantitative image-based descriptors are extracted using a principal component analysis of the tensile and compressive strain fields. 
Evaluated on 22 patients, our approach yields a mean area under the curve of 88.6{\%} when correlating the strain-based quantitative descriptors with the long-term patient prognosis. This suggests that the strain information directly extracted from the CTA images is able to capture the biomechanical behavior of the aneurysm without relying on finite element modeling and simulation. Furthermore, the extracted descriptors set the basis for possible future imaging biomarkers that may be used in clinical practice. Apart from the diameter, these biomarkers may be used to assess patient prognosis and to enable informed decision making after an EVAR intervention, especially in difficult uncertain cases.}, author = {L{\'{o}}pez-Linares, Karen and Garc{\'{i}}a, Inmaculada and Garc{\'{i}}a, Ainhoa and Cortes, Camilo and Piella, Gemma and Mac{\'{i}}a, Iv{\'{a}}n and Noailly, J{\'{e}}r{\^{o}}me and {Gonz{\'{a}}lez Ballester}, Miguel A.}, doi = {10.3389/fbioe.2019.00267}, issn = {22964185}, journal = {Frontiers in Bioengineering and Biotechnology}, keywords = {abdominal aortic aneurysm,biomarker,biomechanics,computed tomography angiography,deformation,follow-up,prognosis,strain analysis}, title = {{Image-Based 3D Characterization of Abdominal Aortic Aneurysm Deformation After Endovascular Aneurysm Repair}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85075353420{\&}doi=10.3389{\%}2Ffbioe.2019.00267{\&}partnerID=40{\&}md5=a232028ba988f3c1b4c51cbbb0913dc4}, volume = {7}, year = {2019} } @article{RN961, abstract = {Facial nerve segmentation is of considerable importance for preoperative planning of cochlear implantation. However, it is strongly influenced by the relatively low resolution of the cone-beam computed tomography (CBCT) images used in clinical practice. In this paper, we propose a super-resolution classification method, which refines a given initial segmentation of the facial nerve to a subvoxel classification level from CBCT/CT images. The super-resolution classification method learns the mapping from low-resolution CBCT/CT images to high-resolution facial nerve label images, obtained from manual segmentation on micro-CT images. We present preliminary results on dataset, 15 ex vivo samples scanned including pairs of CBCT/CT scans and high-resolution micro-CT scans, with a leave-one-out evaluation, and manual segmentations on micro-CT images as ground truth. Our experiments achieved a segmentation accuracy with a Dice coefficient of 0.818 ± 0.052, surface-to-surface distance of 0.121 ± 0.030 mm, and Hausdorff distance of 0.715 ± 0.169 mm. 
We compared the proposed technique to two other semiautomated segmentation software tools, ITK-SNAP and GeoS, and show the ability of the proposed approach to yield subvoxel levels of accuracy in delineating the facial nerve.}, author = {Lu, Ping and Barazzetti, Livia and Chandran, Vimal and Gavaghan, Kate and Weber, Stefan and Gerber, Nicolas and Reyes, Mauricio}, doi = {10.1109/TBME.2017.2697916}, issn = {15582531}, journal = {IEEE Transactions on Biomedical Engineering}, keywords = {CBCT,CT,Cochlear implantation,Facial nerve,Micro-CT,Segmentation,Superresolution,Supervised learning}, number = {1}, pages = {178--188}, title = {{Highly accurate facial nerve segmentation refinement from CBCT/CT imaging using a super-resolution classification approach}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85047089305{\&}doi=10.1109{\%}2FTBME.2017.2697916{\&}partnerID=40{\&}md5=1e4c788c488d0055ad983d484d1cb30c}, volume = {65}, year = {2018} } @article{RN860, abstract = {Automatic and accurate segmentation of the prostate is still a challenging task due to intensity inhomogeneity and complicated deformation of MR images. To tackle these problems with multi-atlas segmentation, in this paper, we propose a new metric for image registration and new descriptor for label fusion. First, to reduce the amount of edges in entropic graph, a modified $\alpha$-mutual information ($\alpha$-MI) based on fast minimal spanning tree (MST) is implemented for deformable registration. Second, localized $\alpha$-MI allowing for the spatial information is proposed with the stochastic gradient optimization, and the feature space is encoded by a sparse auto-encoder. Finally, a multi-scale descriptor utilizing local self-similarity is integrated into the patch-based label fusion to obtain final segmentation. Experiments were performed on two subsets of totally 46 T2-weighted prostate MR images from 46 patients. Compared to $\alpha$-MI based on $k$-nearest neighbor graph, the registration time of $\alpha$-MI based on fast MST can be reduced by almost half. The median Dice overlap of registration using localized $\alpha$-MI on one subset is shown to improve significantly from 0.725 to 0.764 ($p = 1.14 \times 10^{-5}$), compared to using $\alpha$-MI without the spatial information.
The median Dice overlap of prostate segmentation using the proposed method on 20 testing images of another subset is 0.871, and the median Hausdorff distance is 8.013 mm, which demonstrates comparable accuracy to state-of-the-art methods.}, author = {Lu, Xuesong and Zha, Yunfei and Qiao, Yuchuan and Wang, Defeng}, doi = {10.1109/ACCESS.2019.2943485}, issn = {21693536}, journal = {IEEE Access}, keywords = {Deformable registration,local self-similarity,minimal spanning tree,patch-based label fusion,prostate segmentation,$\alpha$-mutual information}, pages = {138645--138656}, title = {{Feature-Based Deformable Registration Using Minimal Spanning Tree for Prostate MR Segmentation}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85077958932{\&}doi=10.1109{\%}2FACCESS.2019.2943485{\&}partnerID=40{\&}md5=daba943919bc91c3fa945bebda516afa}, volume = {7}, year = {2019} } @article{RN785, abstract = {In order to effectively improve the accuracy of multi-atlas hippocampus segmentation, a U-Net convolutional neural network is applied to the label fusion of multiple atlases. During atlas selection, the algorithm computes mutual information and gradient similarity, which avoids interference from the surrounding tissue structure and selects the floating image group best suited to the target image. In the pre-processing stage, extracting a region of interest centered on the hippocampus effectively reduces the data size. In the registration process, re-sampling is used instead of coarse registration, which reduces run time, followed by the diffeomorphic demons algorithm, which has good smoothness, continuity and topology preservation. In the label fusion stage, an improved U-Net network based on deep learning theory is proposed for multi-atlas MRI hippocampal segmentation. The experimental results show that the segmentation accuracy of the improved algorithm is about 5{\%} higher than that of the traditional algorithm, while the run time is reduced by about 50{\%}. The improved U-Net-based multi-atlas hippocampal segmentation algorithm offers high precision and high efficiency for segmentation of the hippocampus in the target image.}, author = {Lu, Yue and Ma, Yu and Wang, Hui and Wang, Yuan}, doi = {10.3788/YJYXS20193411.1091}, issn = {10072780}, journal = {Chinese Journal of Liquid Crystals and Displays}, keywords = {Hippocampus,Label fusion,Multi-atlas,U-Net}, number = {11}, pages = {1090--1103}, title = {{Multi-atlas label fusion based on U-Net}}, type = {Journal Article}, volume = {34}, year = {2019} } @article{RN828, abstract = {Techniques based on imaging serial sections of brain tissue provide insight into brain structure and function. However, to compare or combine them with results from three dimensional imaging methods, reconstruction into a volumetric form is required. Currently, there are no tools for performing such a task in a streamlined way. Here we propose the Possum volumetric reconstruction framework which provides a selection of 2D to 3D image reconstruction routines allowing one to build workflows tailored to one's specific requirements. The main components include routines for reconstruction with or without using external reference and solutions for typical issues encountered during the reconstruction process, such as propagation of the registration errors due to distorted sections.
We validate the implementation using synthetic datasets and actual experimental imaging data derived from publicly available resources. We also evaluate efficiency of a subset of the algorithms implemented. The Possum framework is distributed under MIT license and it provides researchers with a possibility of building reconstruction workflows from existing components, without the need for low-level implementation. As a consequence, it also facilitates sharing and data exchange between researchers and laboratories.}, author = {Majka, Piotr and W{\'{o}}jcik, Daniel K.}, doi = {10.1007/s12021-015-9286-1}, issn = {15392791}, journal = {Neuroinformatics}, keywords = {3D reconstruction,Brain atlas,Histology,Image analysis,Image registration,Light microscopy}, number = {3}, pages = {265--278}, title = {{Possum—A Framework for Three-Dimensional Reconstruction of Brain Images from Serial Sections}}, type = {Journal Article}, volume = {14}, year = {2016} } @inproceedings{RN951, abstract = {Peripheral Quantitative Computed Tomography (pQCT) is a non-invasive imaging technology that is well-suited for quantification of bone structural and material properties. Because of its increasing use and applicability, the development of automated quantification methods for pQCT images is an appealing field of research. In this paper we introduce a software system for hard and soft tissue quantification in the lower leg using pQCT imaging data. The main stages of our approach are the segmentation and identification of bone, muscle and fat, and the computation of densitometric and geometric variables of each regional tissue type. Our system was validated against reference area and densitometric measurements over a set of test images and produced encouraging results. {\textcopyright} 2014 Copyright SPIE.}, author = {Makrogiannis, Sokratis and Ferrucci, Luigi}, booktitle = {Sensing Technologies for Global Health, Military Medicine, and Environmental Monitoring IV}, doi = {10.1117/12.2050790}, isbn = {9781628410495}, issn = {1996756X}, pages = {911216}, publisher = {SPIE}, title = {{Software system for computing material and structural properties of bone and muscle in the lower extremity from pQCT}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84907042843{\&}doi=10.1117{\%}2F12.2050790{\&}partnerID=40{\&}md5=f686ccba07e29e0052a7bb5edd216e5b}, volume = {9112}, year = {2014} } @article{RN862, abstract = {With this work we release CLAIRE, a distributed-memory implementation of an effective solver for constrained large deformation diffeomorphic image registration problems in three dimensions. We consider an optimal control formulation. We invert for a stationary velocity field that parameterizes the deformation map. Our solver is based on a globalized, preconditioned, inexact reduced space Gauss-Newton-Krylov scheme. We exploit state-of-the-art techniques in scientific computing to develop an effective solver that scales to thousands of distributed memory nodes on high-end clusters. We present the formulation, discuss algorithmic features, describe the software package, and introduce an improved preconditioner for the reduced space Hessian to speed up the convergence of our solver. We test registration performance on synthetic and real data. We demonstrate registration accuracy on several neuroimaging datasets.
We compare the performance of our scheme against different flavors of the Demons algorithm for diffeomorphic image registration. We study convergence of our preconditioner and our overall algorithm. We report scalability results on state-of-the-art supercomputing platforms. We demonstrate that we can solve registration problems for clinically relevant data sizes in two to four minutes on a standard compute node with 20 cores, attaining excellent data fidelity. With the present work we achieve a speedup of (on average) 5$\times$ with a peak performance of up to 17$\times$ compared to our former work.}, archivePrefix = {arXiv}, arxivId = {1808.04487}, author = {Mang, Andreas and Gholami, Amir and Davatzikos, Christos and Biros, George}, doi = {10.1137/18M1207818}, eprint = {1808.04487}, issn = {10957197}, journal = {SIAM Journal on Scientific Computing}, keywords = {Diffeomorphic image registration,Distributed-memory algorithm,KKT preconditioner,LDDMM,Newton-Krylov method,Optimal control,PDE-constrained optimization}, number = {5}, pages = {C548--C584}, title = {{CLAIRE: A distributed-memory solver for constrained large deformation diffeomorphic image registration}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85074644073{\&}doi=10.1137{\%}2F18M1207818{\&}partnerID=40{\&}md5=e3c350f33ddb25e3d2f751778e14fc3c}, volume = {41}, year = {2019} } @article{RN900, abstract = {Diffusion weighted magnetic resonance imaging, or DWI, is one of the most promising tools for the analysis of neural microstructure and the structural connectome of the human brain. The application of DWI to map early development of the human connectome in-utero, however, is challenged by intermittent fetal and maternal motion that disrupts the spatial correspondence of data acquired in the relatively long DWI acquisitions. Fetuses move continuously during DWI scans. Reliable and accurate analysis of the fetal brain structural connectome requires careful compensation of motion effects and robust reconstruction to avoid introducing bias based on the degree of fetal motion. In this paper we introduce a novel robust algorithm to reconstruct in-vivo diffusion-tensor MRI (DTI) of the moving fetal brain and show its effect on structural connectivity analysis. The proposed algorithm involves multiple steps of image registration incorporating a dynamic registration-based motion tracking algorithm to restore the spatial correspondence of DWI data at the slice level and reconstruct DTI of the fetal brain in the standard (atlas) coordinate space. A weighted linear least squares approach is adapted to remove the effect of intra-slice motion and reconstruct DTI from motion-corrected data. The proposed algorithm was tested on data obtained from 21 healthy fetuses scanned in-utero at 22–38 weeks gestation. Significantly higher fractional anisotropy values in fiber-rich regions, and the analysis of whole-brain tractography and group structural connectivity, showed the efficacy of the proposed method compared to the analyses based on original data and previously proposed methods. The results of this study show that slice-level motion correction and robust reconstruction is necessary for reliable in-vivo structural connectivity analysis of the fetal brain.
Connectivity analysis based on graph theoretic measures show high degree of modularity and clustering, and short average characteristic path lengths indicative of small-worldness property of the fetal brain network. These findings comply with previous findings in newborns and a recent study on fetuses. The proposed algorithm can provide valuable information from DWI of the fetal brain not available in the assessment of the original 2D slices and may be used to more reliably study the developing fetal brain connectome.}, author = {Marami, Bahram and {Mohseni Salehi}, Seyed Sadegh and Afacan, Onur and Scherrer, Benoit and Rollins, Caitlin K. and Yang, Edward and Estroff, Judy A. and Warfield, Simon K. and Gholipour, Ali}, doi = {10.1016/j.neuroimage.2017.04.033}, issn = {10959572}, journal = {NeuroImage}, keywords = {Connectome,Diffusion-weighted MRI,Fetal brain,Motion Correction,Registration}, pages = {475--488}, pmid = {28433624}, title = {{Temporal slice registration and robust diffusion-tensor reconstruction for improved fetal brain structural connectivity analysis}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85018689711{\&}doi=10.1016{\%}2Fj.neuroimage.2017.04.033{\&}partnerID=40{\&}md5=2a9b47eff0a2637f2e0b729996723477}, volume = {156}, year = {2017} } @article{RN918, abstract = {This work proposes a novel approach for motion-robust diffusion-weighted (DW) brain MRI reconstruction through tracking temporal head motion using slice-to-volume registration. The slice-level motion is estimated through a filtering approach that allows tracking the head motion during the scan and correcting for out-of-plane inconsistency in the acquired images. Diffusion-sensitized image slices are registered to a base volume sequentially over time in the acquisition order where an outlier-robust Kalman filter, coupled with slice-to-volume registration, estimates head motion parameters. Diffusion gradient directions are corrected for the aligned DWI slices based on the computed rotation parameters and the diffusion tensors are directly estimated from the corrected data at each voxel using weighted linear least squares. The method was evaluated in DWI scans of adult volunteers who deliberately moved during scans as well as clinical DWI of 28 neonates and children with different types of motion. Experimental results showed marked improvements in DWI reconstruction using the proposed method compared to the state-of-the-art DWI analysis based on volume-to-volume registration. This approach can be readily used to retrieve information from motion-corrupted DW imaging data.}, author = {Marami, Bahram and Scherrer, Benoit and Afacan, Onur and Erem, Burak and Warfield, Simon K. 
and Gholipour, Ali}, doi = {10.1109/TMI.2016.2555244}, issn = {1558254X}, journal = {IEEE Transactions on Medical Imaging}, keywords = {Diffusion-weighted MRI,motion tracking,motion-robust MRI,outlier-robust kalman filter,slice registration}, number = {10}, pages = {2258--2269}, pmid = {27834639}, title = {{Motion-Robust Diffusion-Weighted Brain MRI Reconstruction Through Slice-Level Registration-Based Motion Tracking}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84991454091{\&}doi=10.1109{\%}2FTMI.2016.2555244{\&}partnerID=40{\&}md5=377eb413c86d8cfebc71fb33434e9bf4}, volume = {35}, year = {2016} } @inproceedings{RN977, abstract = {In this paper we present SimpleElastix, an extension of SimpleITK designed to bring the Elastix medical image registration library to a wider audience. Elastix is a modular collection of robust C++ image registration algorithms that is widely used in the literature. However, its command-line interface introduces overhead during prototyping, experimental setup, and tuning of registration algorithms. By integrating Elastix with SimpleITK, Elastix can be used as a native library in Python, Java, R, Octave, Ruby, Lua, Tcl and C{\#} on Linux, Mac and Windows. This allows Elastix to integrate naturally with many development environments so the user can focus more on the registration problem and less on the underlying C++ implementation. As a means of demonstration, we show how to register MR images of brains and natural pictures of faces using a minimal amount of code. SimpleElastix is open source, licensed under the permissive Apache License Version 2.0 and available at https://github.com/kaspermarstal/SimpleElastix.}, author = {Marstal, Kasper and Berendsen, Floris and Staring, Marius and Klein, Stefan}, booktitle = {IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops}, doi = {10.1109/CVPRW.2016.78}, isbn = {9781467388504}, issn = {21607516}, pages = {574--582}, publisher = {IEEE Computer Society}, title = {{SimpleElastix: A User-Friendly, Multi-lingual Library for Medical Image Registration}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85010216516{\&}doi=10.1109{\%}2FCVPRW.2016.78{\&}partnerID=40{\&}md5=0d777eb051999b3ff620a82e4ec05c67}, year = {2016} } @article{RN960, abstract = {Medical image atlases contain much information about human anatomy, but learning the shapes of anatomical regions and making sense of the overall structure defined in the atlas can be problematic. Atlases may contain hundreds of regions with complex shapes which can be tightly packed together. This makes visualisation difficult since the shapes can fit together in complex ways and visually obscure each other. In this work, we describe a technique which enables interactive exploration of medical image atlases that permits the hierarchical structure of the atlas and the content of an underlying medical image to be investigated simultaneously. Our method enables a user to create visualizations of the atlas similar to the exploded views used in technical illustrations to show the structure of mechanical assemblies. These views are constrained by the geometry of the atlas and the hierarchical structure to reduce the complexity of user interaction. We also enable the user to explode the atlas meshes themselves. The atlas meshes are registered with a medical image which is displayed on the cut surfaces of the meshes using raycasting.
Results from the AAL human brain atlas are presented and discussed.}, author = {McGraw, Tim and Guayaquil-Sosa, Alejandro}, doi = {10.1080/21681163.2017.1343686}, issn = {21681171}, journal = {Computer Methods in Biomechanics and Biomedical Engineering: Imaging and Visualization}, keywords = {Biomedical visualisation,brain atlas,volume rendering}, number = {6}, pages = {668--677}, title = {{Hybrid rendering of exploded views for medical image atlas visualization}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85024392095{\&}doi=10.1080{\%}2F21681163.2017.1343686{\&}partnerID=40{\&}md5=627a45812ac92f205dffd7dce1423f77}, volume = {6}, year = {2018} } @incollection{RN881, abstract = {Thoracic Aortic Aneurysm (TAA) is an enlargement of the aortic lumen at chest level. An accurate assessment of the geometry of the enlarged vessel is crucial when planning vascular interventions. This study developed an automatic method to extract aortic geometry and supra-aortic vessels from computerized tomography (CT) images. The proposed method consists of a fast-marching level-set method for detection of the initial aortic region from multiple seed points automatically selected along the pre-extracted vessel centerline, and a level-set method for extraction of the detailed aortic geometry from the initial aortic region. The automatic method was implemented inside Endosize (Therenva, Rennes), a commercially available software used for planning minimally invasive techniques. The performance of the algorithm was compare with the existing Endosize segmentation method (based on the region growing approach). For this comparison a CT dataset from an open source data file system (Osirix Advanced Imaging in 3D, 2016) was used. Results showed that, whilst the segmentation time increased (956 s for the new method, 0.308 s for the existing one), the new method produced a more accurate aortic segmentation, particularly in the region of supra-aortic branches. Further work to examine the efficacy of the proposed method should include a statistical study of performance across many datasets.}, author = {Mercuri, Massimiliano and Narracott, Andrew J. and Hose, Dr and G{\"{o}}ksu, Cemil}, booktitle = {Lecture Notes in Computational Vision and Biomechanics}, doi = {10.1007/978-3-319-68195-5_95}, isbn = {22129391 (ISSN)}, issn = {22129413}, keywords = {Automatic segmentation, Level-set method,Thoracic Aortic Aneurysm (TAA),Virtual aortic surgery planning}, pages = {875--882}, publisher = {Springer Netherlands}, title = {{An automatic method for aortic segmentation based on level-set methods using multiple seed points}}, type = {Serial}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85032380517{\&}doi=10.1007{\%}2F978-3-319-68195-5{\_}95{\&}partnerID=40{\&}md5=332ed513ca2cfe55ca8c7c67df29dd9d}, volume = {27}, year = {2018} } @inproceedings{RN896, abstract = {Lagrangian carotid strain imaging (LCSI) involves estimation of deformation in the carotid artery due to blood pressure variations under cardiac pulsation. Local strain over a cardiac cycle is tracked, which is computationally intensive. We incur long offline processing times for LCSI which becomes a limiting factor for clinical adoption. We report on the computational speedup obtained for a parallelized implementation of LCSI using CUDA programming for fast computation. LCSI is currently performed using a multi-level block matching algorithm written in C++ using the Insight Toolkit (ITK) system. 
We have implemented this code on a NVIDIA k40 GPU for running CUDA kernels called from the ITK C++ code. The multi-level algorithm consists of three processing stages; stage 1 performs block matching at the coarsest level while level 3 performs block-matching at the finest scale on radiofrequency signals. The regularization step which incurred the largest computational time was implemented on the GPU. Cross-correlation was then implemented with the regularization step thereby avoiding a CPU to GPU data transfer. Shared memory was used in the regularization step to further reduce processing time. The computation time per frame pairs for LCSI with our initial implementation was about 316.41 secs for an in vivo human carotid data set, thereby taking 131 minutes for an entire loop over a cardiac cycle with 25 frames. GPU implementation of regularization provided per frame results in 99.92 secs, a speedup of 3.16X. Further optimization with implementation of cross correlation on the GPU and use of shared memory improved the computation time to 23 secs per frame, a speed up of 13.75X, reducing processing time to 9.5 minutes over a cardiac cycle.}, author = {Meshram, Nirvedh H. and Varghese, Tomy}, booktitle = {IEEE International Ultrasonics Symposium, IUS}, doi = {10.1109/ULTSYM.2017.8092085}, isbn = {9781538633830}, issn = {19485727}, keywords = {CUDA,Carotid Plaque,GPU,Lagrangian,Strain Imaging,Ultrasound}, publisher = {IEEE Computer Society}, title = {{Fast multilevel Lagrangian carotid strain imaging with GPU computing}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85039446737{\&}doi=10.1109{\%}2FULTSYM.2017.8092085{\&}partnerID=40{\&}md5=6bb0c6a43db0421599b9f25e0926d036}, year = {2017} } @article{RN871, abstract = {A multilevel Lagrangian carotid strain imaging algorithm is analyzed to identify computational bottlenecks for implementation on a graphics processing unit (GPU). Displacement tracking including regularization was found to be the most computationally expensive aspect of this strain imaging algorithm taking about 2.2 h for an entire cardiac cycle. This intensive displacement tracking was essential to obtain Lagrangian strain tensors. However, most of the computational techniques used for displacement tracking are parallelizable, and hence GPU implementation is expected to be beneficial. A new scheme for subsample displacement estimation referred to as a multilevel global peak finder was also developed since the Nelder-Mead simplex optimization technique used in the CPU implementation was not suitable for GPU implementation. GPU optimizations to minimize thread divergence and utilization of shared and texture memories were also implemented. This enables efficient use of the GPU computational hardware and memory bandwidth. Overall, an application speedup of 168.75 × was obtained enabling the algorithm to finish in about 50 s for a cardiac cycle. Last, comparison of GPU and CPU implementations demonstrated no significant difference in the quality of displacement vector and strain tensor estimation with the two implementations up to a 5{\%} interframe deformation. Hence, a GPU implementation is feasible for clinical adoption and opens opportunity for other computationally intensive techniques.}, author = {Meshram, Nirvedh H. 
and Varghese, Tomy}, doi = {10.1109/TUFFC.2018.2841346}, issn = {08853010}, journal = {IEEE Transactions on Ultrasonics, Ferroelectrics, and Frequency Control}, keywords = {Carotid strain imaging,compute unified device architecture (CUDA),elastography,graphics processing unit (GPU),ultrasound}, number = {8}, pages = {1370--1379}, title = {{GPU accelerated multilevel lagrangian carotid strain imaging}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85047652346{\&}doi=10.1109{\%}2FTUFFC.2018.2841346{\&}partnerID=40{\&}md5=635a26fdcb2c9b17367aeddc67197444}, volume = {65}, year = {2018} } @article{RN817, abstract = {In this article we present the NeuTomPy Toolbox, a new Python package for tomographic data processing and reconstruction. The toolbox includes pre-processing algorithms, artifacts removal and a wide range of iterative reconstruction methods as well as the Filtered Back Projection algorithm. The NeuTomPy toolbox was conceived primarily for neutron tomography datasets and developed to support the need of users and researchers to compare state-of-the-art reconstruction methods and choose the optimal data processing workflow for their data. In fact, in several cases sparse-view datasets are acquired to reduce scan time during a neutron tomography experiment. Hence, there is great interest in improving quality of the reconstructed images by means of iterative methods and advanced image-processing algorithms. The toolbox has a modular design, multi-threading capabilities and it supports Windows, Linux and Mac OS operating systems. The NeuTomPy toolbox is open source and it is released under the GNU General Public License v3, encouraging researchers and developers to contribute. In this paper we present an overview of the main toolbox functionalities and finally we show a typical usage example.}, author = {Micieli, Davide and Minniti, Triestino and Gorini, Giuseppe}, doi = {10.1016/j.softx.2019.01.005}, issn = {23527110}, journal = {SoftwareX}, keywords = {Neutron imaging,Tomographic reconstruction software,Tomography}, pages = {260--264}, title = {{NeuTomPy toolbox, a Python package for tomographic data processing and reconstruction}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {9}, year = {2019} } @inproceedings{RN857, author = {Nain, Jagjeet and Mueller, Johannes}, booktitle = {Image and Signal Processing for Remote Sensing XXV 2019}, doi = {10.1117/12.2532730}, editor = {Bruzzone, L and Bovolo, F and Benediktsson, J A}, isbn = {9781510630130}, issn = {1996756X}, pages = {2}, publisher = {SPIE}, title = {{Improving band to band registration accuracy of SEVIRI level 1.5 products}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85078146492{\&}doi=10.1117{\%}2F12.2532730{\&}partnerID=40{\&}md5=9a2bd6db58344dccac47650014f184cb}, volume = {11155}, year = {2019} } @article{RN947, abstract = {Classical surgery is being overtaken by minimally invasive and transcatheter procedures. As there is no direct view or access to the affected anatomy, advanced imaging techniques such as 3D C-arm computed tomography (CT) and C-arm fluoroscopy are routinely used in clinical practice for intraoperative guidance. However, due to constraints regarding acquisition time and device configuration, intraoperative modalities have limited soft tissue image quality and reliable assessment of the cardiac anatomy typically requires contrast agent, which is harmful to the patient and requires complex acquisition protocols. 
We propose a probabilistic sparse matching approach to fuse high-quality preoperative CT images and nongated, noncontrast intraoperative C-arm CT images by utilizing robust machine learning and numerical optimization techniques. Thus, high-quality patient-specific models can be extracted from the preoperative CT and mapped to the intraoperative imaging environment to guide minimally invasive procedures. Extensive quantitative experiments on 95 clinical datasets demonstrate that our model-based fusion approach has an average execution time of 1.56 s, while the accuracy of 5.48 mm between the anchor anatomy in both images lies within expert user confidence intervals. In direct comparison with image-to-image registration based on an open-source state-of-the-art medical imaging library and a recently proposed quasi-global, knowledge-driven multi-modal fusion approach for thoracic-abdominal images, our model-based method exhibits superior performance in terms of registration accuracy and robustness with respect to both target anatomy and anchor anatomy alignment errors.}, author = {Neumann, Dominik and Grbic, Sasa and John, Matthias and Navab, Nassir and Hornegger, Joachim and Ionasec, Razvan}, doi = {10.1109/TMI.2014.2343936}, issn = {1558254X}, journal = {IEEE Transactions on Medical Imaging}, keywords = {Anatomical overlay,Procedure guidance,computed tomography (CT),model-based cardiac image registration}, number = {1}, pages = {49--60}, pmid = {25095250}, title = {{Probabilistic sparse matching for robust 3D/3D fusion in minimally invasive surgery}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84920159378{\&}doi=10.1109{\%}2FTMI.2014.2343936{\&}partnerID=40{\&}md5=a81867f10d05f5ddb53549f54f45df01}, volume = {34}, year = {2015} } @article{RN794, abstract = {Background and objective: Stereotactical procedures require exact trajectory planning to avoid blood vessels in the trajectory path. Innovations in imaging and image recognition techniques have facilitated the automatic detection of blood vessels during the planning process and may improve patient safety in the future. The aim was to assess the feasibility of a vessel detection and warning system using currently available imaging and vessel segmentation techniques. Methods: Image data were acquired from post-contrast, isovolumetric T1-weighted sequences (T1CE) and time-of-flight MR angiography at 3T or 7T from a total of nine subjects. Vessel segmentation by a combination of a vessel-enhancement filter with subsequent level-set segmentation was evaluated using three different methods (Vesselness, FastMarching and LevelSet) in 45 stereotactic trajectories. Segmentation results were compared to a gold-standard of manual segmentation performed jointly by two human experts. Results: The LevelSet method performed best with a mean interclass correlation coefficient (ICC) of 0.76 [0.73, 0.81] compared to the FastMarching method with ICC 0.70 [0.67, 0.73] respectively. The Vesselness algorithm achieved clearly inferior overall performance with a mean ICC of 0.56 [0.53, 0.59]. The differences in mean ICC between all segmentation methods were statistically significant (p {\textless} 0.001 with post-hoc p {\textless} 0.026). The LevelSet method performed equally well in MPRAGE and 3T-TOF images and excellently in 7T-TOF image data. The negative predictive value (NPV) was very high ({\textgreater}97{\%}) for all methods and modalities.
Positive predictive values (PPV) were found in the overall range of 65–90{\%} likewise depending on algorithm and modality. This pattern reflects the disposition of all segmentation methods – in case of misclassification - to produce preferentially false-positive than false-negative results. In a clinical setting, two to three potential collision warnings would be given per trajectory on average with a PPV of around 50{\%}. Conclusions: It is feasible to integrate a clinically meaningful vessel detection and collision warning system into stereotactical planning software. Both, T1CE and MRA sequences are suitable as image data for such an application.}, author = {Neumann, Jan Oliver and Campos, Benito and Younes, B. and Jakobs, Martin and Unterberg, Andreas and Kiening, Karl and Hubert, Alexander}, doi = {10.1016/j.cmpb.2019.105037}, issn = {18727565}, journal = {Computer Methods and Programs in Biomedicine}, keywords = {Decision support,Stereotaxy,Vessel segmentation}, pages = {8}, title = {{Evaluation of three automatic brain vessel segmentation methods for stereotactical trajectory planning}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {182}, year = {2019} } @inproceedings{RN944, abstract = {We analyse CT image denoising when applied to vessel segmentation. Proposed semi-global quality metric based on the contrast-to-noise ratio allowed us to estimate initial image quality and efficiency of denoising procedures without prior knowledge about a noise-free image. We show that the total variance filtering in L1 metric provides the best denoising when compared to other well-known denoising procedures such as nonlocal means denoising or anisotropic diffusion. Computational complexity of this denoising algorithm is addressed by comparing its implementation for Intel MIC and for NVIDIA CUDA HPC systems.}, author = {Nikonorov, A. and Kolsanov, A. and Petrov, M. and Yuzifovich, Y. and Prilepin, E. and Bychenkov, K.}, booktitle = {SIGMAP 2015 - 12th International Conference on Signal Processing and Multimedia Applications, Proceedings; Part of 12th International Joint Conference on e-Business and Telecommunications, ICETE 2015}, doi = {10.5220/0005542400590067}, editor = {Obaidat, M S and Lorenz, P and Cabello, E}, isbn = {9789897581182}, keywords = {CUDA,Contrast to noise ratio,Fast marching,GPGPU,Geodesic active contours,Liver,Proximal algorithms,Total variance de-noising,Vessels segmentation,Xeon phi}, pages = {59--67}, publisher = {SciTePress}, title = {{Contrast-to-noise based metric of denoising algorithms for liver vein segmentation}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84970959837{\&}partnerID=40{\&}md5=0d3090fef893d468c72b58001dc5eceb}, year = {2015} } @incollection{RN930, abstract = {This paper describes a comprehensive multi-step algorithm for vascular structure segmentation in CT scan data, from raw slice images to a 3D object, with an emphasis on improving segmentation quality and assessing computational complexity. To estimate initial image quality and to evaluate denoising in the absence of the noise-free image, we propose a semi-global contrast-to-noise quality metric. We show that total variation-based filtering in the L1 metric results in the best denoising when compared to widely used nonlocal means or anisotropic diffusion denoising. To address higher computational complexity of our denoising algorithm, we created two high performance implementations, using Intel MIC and NVIDIA CUDA and compared results. 
In combination with proposed nearly real-time incremental segmentation technique, it provides fast and framework with controlled quality.}, author = {Nikonorov, A. and Kolsanov, A. and Petrov, M. and Yuzifovich, Y. and Prilepin, E. and Chaplygin, S. and Zelter, P. and Bychenkov, K.}, booktitle = {Communications in Computer and Information Science}, doi = {10.1007/978-3-319-30222-5_23}, editor = {Obaidat, M S and Lorenz, P}, isbn = {9783319302218}, issn = {18650929}, keywords = {CUDA,Contrast to noise ratio,Fast marching,GPGPU,Geodesic active contours,Liver,Proximal algorithms,Total variance de-noising,Vessels segmentation,Xeon phi}, pages = {490--507}, publisher = {Springer Verlag}, title = {{Vessel segmentation for noisy CT data with quality measure based on single-point contrast-to-noise ratio}}, type = {Serial}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84960396227{\&}doi=10.1007{\%}2F978-3-319-30222-5{\_}23{\&}partnerID=40{\&}md5=00b082a6118fc326c7a012f9772959cf}, volume = {585}, year = {2016} } @article{RN806, abstract = {Patient-specific finite element (FE) modeling of atherosclerotic plaque is challenging, as there is limited information available clinically to characterize plaque components. This study proposes that for the limited data available in vivo, material properties of plaque and artery can be identified using inverse FE analysis and either a simple neo-Hookean constitutive model or assuming linear elasticity provides sufficient accuracy to capture the changes in vessel deformation, which is the available clinical metric. To test this, 10 human cadaveric femoral arteries were each pressurized ex vivo at 6 pressure levels, while intravascular ultrasound (IVUS) and virtual histology (VH) imaging were performed during controlled pull-back to determine vessel geometry and plaque structure. The VH images were then utilized to construct FE models with heterogeneous material properties corresponding to the vessel plaque components. The constitutive models were then fit to each plaque component by minimizing the difference between the experimental and the simulated geometry using the inverse FE method. Additionally, we further simplified the analysis by assuming the vessel wall had a homogeneous structure, i.e. lumping artery and plaque as one tissue. We found that for the heterogeneous wall structure, the simulated and experimental vessel geometries compared well when the fitted neo-Hookean parameters or elastic modulus, in the case of linear elasticity, were utilized. Furthermore, taking the median of these fitted parameters then inputting these as plaque component mechanical properties in the finite element simulation yielded differences between simulated and experimental geometries that were on average around 2{\%} greater (1.30–5.55{\%} error range to 2.33–11.71{\%} error range). For the homogeneous wall structure the simulated and experimental wall geometries had an average difference of around 4{\%} although when the difference was calculated using the median fitted value this difference was larger than for the heterogeneous fits. Finally, comparison to uniaxial tension data and to literature constitutive models also gave confidence to the suitability of this simplified approach for patient-specific arterial simulation based on data that may be acquired in the clinic.}, author = {Noble, Christopher and Carlson, Kent D. 
and Neumann, Erica and Dragomir-Daescu, Dan and Erdemir, Ahmet and Lerman, Amir and Young, Melissa}, doi = {10.1016/j.jmbbm.2019.103453}, issn = {18780180}, journal = {Journal of the Mechanical Behavior of Biomedical Materials}, keywords = {Intravascular ultrasound,Inverse finite element analysis,Peripheral artery disease,Pressure inflation testing,Virtual histology}, pages = {14}, title = {{Patient specific characterization of artery and plaque material properties in peripheral artery disease}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {101}, year = {2020} } @incollection{RN836, abstract = {We present a new framework for fine-scale vessel segmentation from fundus images through registration and segmentation of corresponding fluorescein angiography (FA) images. In FA, fluorescent dye is used to highlight the vessels and increase their contrast. Since these highlights are temporally dispersed among multiple FA frames, we first register the FA frames and aggregate the per-frame segmentations to construct a detailed vessel mask. The constructed FA vessel mask is then registered to the fundus image based on an initial fundus vessel mask. Postprocessing is performed to refine the final vessel mask. Registration of FA frames, as well as registration of FA vessel mask to the fundus image, are done by similar hierarchical coarse-to-fine frameworks, both comprising rigid and non-rigid registration. Two CNNs with identical network structures, both trained on public datasets but with different settings, are used for vessel segmentation. The resulting final vessel segmentation contains fine-scale, filamentary vessels extracted from FA and corresponding to the fundus image. We provide quantitative evaluation as well as qualitative examples which support the robustness and the accuracy of the proposed method.}, author = {Noh, Kyoung Jin and Park, Sang Jun and Lee, Soochahn}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, doi = {10.1007/978-3-030-32239-7_86}, editor = {Shen, D and Yap, P T and Liu, T and Peters, T M and Khan, A and Staib, L H and Essert, C and Zhou, S}, isbn = {9783030322380}, issn = {16113349}, keywords = {Filamentary vessels,Fine-scale vessel segmentation,Fluorescein angiography,Fundus images,Registration}, pages = {779--787}, publisher = {Springer}, title = {{Fine-scale vessel extraction in fundus images by registration with fluorescein angiography}}, type = {Serial}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85075630951{\&}doi=10.1007{\%}2F978-3-030-32239-7{\_}86{\&}partnerID=40{\&}md5=e5b88e05907541abbf2b36b425c9f544}, volume = {11764 LNCS}, year = {2019} } @inbook{RN822, abstract = {Convolutional neural networks (CNNs) have been widely used to address various image analysis problems at the cost of intensive computational load and large amounts of annotated training data. When it comes to Medical Imaging, annotation is often complicated and/or expensive, and innovative methods for dealing with small or very imbalanced training sets are mostly welcome. In this context, this paper proposes a novel approach for efficiently synthesizing volumetric patch data from a small amount of samples using their latent data. Our method consists of two major steps. 
First, we train a 3D CNN auto-encoder for unsupervised learning of volumetric latent data by means of multivariate Gaussian mixture models (GMMs): while the encoder finds latent representations of volumes using GMMs, the decoder uses the estimated GMMs parameters to reconstruct the volume observed in the input. Then, we modify latent data of samples at training time to generate similar, but different, new samples: we run non-rigid registrations between patches decoded from real latent data and patches decoded from modified latent data, and warp the corresponding original image patches using the resulting displacement fields. We evaluated our method in the context of lung nodules synthesis using the publicly available LUNA challenge dataset, and generated new realistic samples out of real lung nodules, preserving their original texture and neighbouring anatomical structures. Our results demonstrate that 3D CNNs trained using our synthesis method were able to consistently deliver lower lung nodule false positive rates, which indicates an improvement in the networks discriminant power.}, address = {Cham}, author = {Oliveira, Dario Augusto Borges and Viana, Matheus Palhares}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, doi = {10.1007/978-3-030-00536-8_12}, editor = {Gooya, A and Goksel, O and Oguz, I and Burgos, N}, isbn = {9783030005351}, issn = {16113349}, keywords = {Convolutional neural networks,Generative models,Lung nodule false positive reduction,Multivariate Gaussian mixture models,Nodules synthesis}, pages = {111--118}, publisher = {Springer International Publishing Ag}, series = {Lecture Notes in Computer Science}, title = {{Lung nodule synthesis using cnn-based latent data representation}}, type = {Book Section}, url = {{\%}3CGo to}, volume = {11037 LNCS}, year = {2018} } @article{RN768, abstract = {The deep grey matter (DGM) nuclei of the brain play a crucial role in learning, behaviour, cognition, movement and memory. Although automated segmentation strategies can provide insight into the impact of multiple neurological conditions affecting these structures, such as Multiple Sclerosis (MS), Huntington's disease (HD), Alzheimer's disease (AD), Parkinson's disease (PD) and Cerebral Palsy (CP), there are a number of technical challenges limiting an accurate automated segmentation of the DGM. Namely, the insufficient contrast of T1 sequences to completely identify the boundaries of these structures, as well as the presence of iso-intense white matter lesions or extensive tissue loss caused by brain injury. Therefore in this systematic review, 269 eligible studies were analysed and compared to determine the optimal approaches for addressing these technical challenges. The automated approaches used among the reviewed studies fall into three broad categories, atlas-based approaches focusing on the accurate alignment of atlas priors, algorithmic approaches which utilise intensity information to a greater extent, and learning-based approaches that require an annotated training set. Studies that utilise freely available software packages such as FIRST, FreeSurfer and LesionTOADS were also eligible, and their performance compared. Overall, deep learning approaches achieved the best overall performance, however these strategies are currently hampered by the lack of large-scale annotated data. 
Improving model generalisability to new datasets could be achieved in future studies with data augmentation and transfer learning. Multi-atlas approaches provided the second-best performance overall, and may be utilised to construct a “silver standard” annotated training set for deep learning. To address the technical challenges, providing robustness to injury can be improved by using multiple channels, highly elastic diffeomorphic transformations such as LDDMM, and by following atlas-based approaches with an intensity driven refinement of the segmentation, which has been done with the Expectation Maximisation (EM) and level sets methods. Accounting for potential lesions should be achieved with a separate lesion segmentation approach, as in LesionTOADS. Finally, to address the issue of limited contrast, R2*, T2* and QSM sequences could be used to better highlight the DGM due to its higher iron content. Future studies could look to additionally acquire these sequences by retaining the phase information from standard structural scans, or alternatively acquiring these sequences for only a training set, allowing models to learn the “improved” segmentation from T1-sequences alone.}, author = {Pagnozzi, Alex M. and Fripp, Jurgen and Rose, Stephen E.}, doi = {10.1016/j.neuroimage.2019.116018}, issn = {10959572}, journal = {NeuroImage}, keywords = {Deep grey matter,Magnetic resonance imaging,Segmentation,Subcortical anatomies}, pages = {20}, title = {{Quantifying deep grey matter atrophy using automated segmentation approaches: A systematic review of structural MRI studies}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {201}, year = {2019} } @inproceedings{RN915, abstract = {Current prostate cancer screening methods involve non-targeted needle biopsies and detection of clinically-insignificant lesions that receive excessive treatments, exposing patients to unnecessary adverse side effects and placing a burden on our health care systems. There is a strong clinical need for improved prostate imaging methods that are sensitive and specific for clinically-significant prostate cancer lesions to guide needle biopsies, target focal treatments, and improve overall patient outcomes. In this study, we compared 3D in vivo Acoustic Radiation Force Impulse (ARFI) imaging with 3 Tesla, endorectal coil, multi-parametric magnetic resonance imaging (mpMRI) to correlate the ability for each modality to identify clinically-significant prostate cancer lesions. We also correlated Apparent Diffusion Coefficient (ADC) values from Diffusion Weighted Imaging (DWI) MR sequences with ARFI indices of suspicion and MR Prostate Imaging - Reporting and Data Systems (PI-RADS) scores, testing the hypothesis that increased cellular density is associated with regions suspicious for prostate cancer in ARFI images. Overall, ARFI and mpMR imaging were well-correlated in identifying clinically-significant prostate cancer lesions. There were several cases where only one of the imaging modalities was able to identify the prostate cancer lesion, highlighting the potential to further improve prostate cancer lesion detection and localization with a fused ARFI:mpMRI imaging system. 
ADC values were decreased in all prostate cancer lesions identified with mpMRI, but there were no obvious trends between the absolute ADC values and the ARFI image indices of suspicion.}, author = {Palmeri, Mark and Glass, Tyler and Gupta, Rajan and McCormick, Matt and Brown, Alison and Polascik, Thomas and Rosenzweig, Stephen and Buck, Andrew and Nightingale, Kathy}, booktitle = {IEEE International Ultrasonics Symposium, IUS}, doi = {10.1109/ULTSYM.2016.7728618}, isbn = {9781467398978}, issn = {19485727}, publisher = {IEEE Computer Society}, title = {{Comparison between 3D ARFI imaging and mpMRI in detecting clinically-significant prostate cancer lesions}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84996562155{\&}doi=10.1109{\%}2FULTSYM.2016.7728618{\&}partnerID=40{\&}md5=06c5d391a733c4454dbbf008fae5b2ca}, volume = {2016-Novem}, year = {2016} } @article{RN807, abstract = {Abstract: Absolute quantification of radiotracer distribution using SPECT/CT imaging is of great importance for dosimetry aimed at personalized radionuclide precision treatment. However, its accuracy depends on many factors. Using phantom measurements, this multi-vendor and multi-center study evaluates the quantitative accuracy and inter-system variability of various SPECT/CT systems as well as the effect of patient size, processing software and reconstruction algorithms on recovery coefficients (RC). Methods: Five SPECT/CT systems were included: Discovery™ NM/CT 670 Pro (GE Healthcare), Precedence™ 6 (Philips Healthcare), Symbia Intevo™, and Symbia™ T16 (twice) (Siemens Healthineers). Three phantoms were used based on the NEMA IEC body phantom without lung insert simulating body mass indexes (BMI) of 25, 28, and 47 kg/m2. Six spheres (0.5–26.5 mL) and background were filled with 0.1 and 0.01 MBq/mL 99mTc-pertechnetate, respectively. Volumes of interest (VOI) of spheres were obtained by a region growing technique using a 50{\%} threshold of the maximum voxel value corrected for background activity. RC, defined as imaged activity concentration divided by actual activity concentration, were determined for maximum (RCmax) and mean voxel value (RCmean) in the VOI for each sphere diameter. Inter-system variability was expressed as median absolute deviation (MAD) of RC. Acquisition settings were standardized. Images were reconstructed using vendor-specific 3D iterative reconstruction algorithms with institute-specific settings used in clinical practice and processed using a standardized, in-house developed processing tool based on the SimpleITK framework. Additionally, all data were reconstructed with a vendor-neutral reconstruction algorithm (Hybrid Recon™; Hermes Medical Solutions). Results: RC decreased with decreasing sphere diameter for each system. Inter-system variability (MAD) was 16 and 17{\%} for RCmean and RCmax, respectively. Standardized reconstruction decreased this variability to 4 and 5{\%}. High BMI hampers quantification of small lesions ({\textless} 10 ml). Conclusion: Absolute SPECT quantification in a multi-center and multi-vendor setting is feasible, especially when reconstruction protocols are standardized, paving the way for a standard for absolute quantitative SPECT.}, author = {Peters, Steffie M.B. and van der Werf, Niels R. and Segbers, Marcel and van Velden, Floris H.P. and Wierts, Roel and Blokland, Koos (J ).A.K. and Konijnenberg, Mark W. and Lazarenko, Sergiy V. and Visser, Eric P. 
and Gotthardt, Martin}, doi = {10.1186/s40658-019-0268-5}, issn = {21977364}, journal = {EJNMMI Physics}, keywords = {SPECT/CT,absolute quantification,performance evaluation,recovery coefficient}, number = {1}, pages = {14}, title = {{Towards standardization of absolute SPECT/CT quantification: a multi-center and multi-vendor phantom study}}, type = {Journal Article}, volume = {6}, year = {2019} } @article{RN804, abstract = {Background: Quantitative SPECT imaging in targeted radionuclide therapy with lutetium-177 holds great potential for individualized treatment based on dose assessment. The establishment of dose-effect relations requires a standardized method for SPECT quantification. The purpose of this multi-center study is to evaluate quantitative accuracy and inter-system variations of different SPECT/CT systems with corresponding commercially available quantitative reconstruction algorithms. This is an important step towards a vendor-independent standard for quantitative lutetium-177 SPECT. Methods: Four state-of-the-art SPECT/CT systems were included: Discovery™ NM/CT 670Pro (GE Healthcare), Symbia Intevo™, and two Symbia™ T16 (Siemens Healthineers). Quantitative accuracy and inter-system variations were evaluated by repeatedly scanning a cylindrical phantom with 6 spherical inserts (0.5 – 113 ml). A sphere-to-background activity concentration ratio of 10:1 was used. Acquisition settings were standardized: medium energy collimator, body contour trajectory, photon energy window of 208 keV (± 10{\%}), adjacent 20{\%} lower scatter window, 2 × 64 projections, 128 × 128 matrix size, and 40 s projection time. Reconstructions were performed using GE Evolution with Q.Metrix™, Siemens xSPECT Quant™, Siemens Broad Quantification™ or Siemens Flash3D™ algorithms using vendor recommended settings. In addition, projection data were reconstructed using Hermes SUV SPECT™ with standardized reconstruction settings to obtain a vendor-neutral quantitative reconstruction for all systems. Volumes of interest (VOI) for the spheres were obtained by applying a 50{\%} threshold of the sphere maximum voxel value corrected for background activity. For each sphere, the mean and maximum recovery coefficient (RCmean and RCmax) of three repeated measurements was calculated, defined as the imaged activity concentration divided by the actual activity concentration. Inter-system variations were defined as the range of RC over all systems. Results: RC decreased with decreasing sphere volume. Inter-system variations with vendor-specific reconstructions were between 0.06 and 0.41 for RCmean depending on sphere size (maximum 118{\%} quantification difference), and improved to 0.02–0.19 with vendor-neutral reconstructions (maximum 38{\%} quantification difference). Conclusion: This study shows that eliminating sources of possible variation drastically reduces inter-system variation in quantification. This means that absolute SPECT quantification for 177Lu is feasible in a multi-center and multi-vendor setting; however, close agreement between vendors and sites is key for multi-center dosimetry and quantitative biomarker studies.}, author = {Peters, Steffie M.B. and {Meyer Viol}, Sebastiaan L. and van der Werf, Niels R. and de Jong, Nick and van Velden, Floris H.P. and Meeuwis, Antoi and Konijnenberg, Mark W. and Gotthardt, Martin and de Jong, Hugo W.A.M.
and Segbers, Marcel}, doi = {10.1186/s40658-020-0278-3}, issn = {21977364}, journal = {EJNMMI Physics}, number = {1}, pages = {13}, title = {{Variability in lutetium-177 SPECT quantification between different state-of-the-art SPECT/CT systems}}, type = {Journal Article}, volume = {7}, year = {2020} } @article{RN856, abstract = {Purpose The widely known field ‘Radiomics’ aims to provide an extensive image based phenotyping of e.g. tumors using a wide variety of feature values extracted from medical images. Therefore, it is of utmost importance that feature values calculated by different institutes follow the same feature definitions. For this purpose, the imaging biomarker standardization initiative (IBSI) provides detailed mathematical feature descriptions, as well as (mathematical) test phantoms and corresponding reference feature values. We present here an easy to use radiomic feature calculator, RaCaT, which provides the calculation of a large number of radiomic features for all kinds of medical images which are in compliance with the standard. Methods The calculator is implemented in C++ and comes as a standalone executable. Therefore, it can be easily integrated in any programming language, but can also be called from the command line. No programming skills are required to use the calculator. The software architecture is highly modularized so that it is easily extendible. The user can also download the source code, adapt it if needed and build the calculator from source. The calculated feature values are compliant with the ones provided by the IBSI standard. Source code, example files for the software configuration, and documentation can be found online on GitHub (https://github.com/ellipfaehlerUMCG/RaCat). Results The comparison with the standard values shows that all calculated features as well as image preprocessing steps, comply with the IBSI standard. The performance is also demonstrated on clinical examples. Conclusions The authors successfully implemented an easy to use Radiomics calculator that can be called from any programming language or from the command line. Image preprocessing and feature settings and calculations can be adjusted by the user.}, author = {Pfaehler, Elisabeth and Zwanenburg, Alex and de Jong, Johan R. and Boellaard, Ronald}, doi = {10.1371/journal.pone.0212223}, issn = {19326203}, journal = {PLoS ONE}, number = {2}, title = {{RACAT: An open source and easy to use radiomics calculator tool}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85061901064{\&}doi=10.1371{\%}2Fjournal.pone.0212223{\&}partnerID=40{\&}md5=3929eeb04d5b73fc66f936910e38e854}, volume = {14}, year = {2019} } @incollection{RN964, abstract = {The Virtual Pediatric Airways Workbench (VPAW) is a patient-centered surgical planning software system targeted to pediatric patients with airway obstruction. VPAW provides an intuitive surgical planning interface for clinicians and supports quantitative analysis regarding prospective surgeries to aid clinicians deciding on potential surgical intervention. VPAW enables a full surgical planning pipeline, including importing DICOM images, segmenting the airway, interactive 3D editing of airway geometries to express potential surgical treatment planning options, and creating input files for offline geometric analysis and computational fluid dynamics simulations for evaluation of surgical outcomes.
In this paper, we describe the VPAW system and its use in one case study with a clinician to successfully describe an intended surgery outcome.}, author = {Quammen, Cory W. and Taylor, Russell M. and Krajcevski, Pavel and Mitran, Sorin and Enquobahrie, Andinet and Superfine, Richard and Davis, Brad and Davis, Stephanie and Zdanski, Carlton}, booktitle = {Studies in Health Technology and Informatics}, doi = {10.3233/978-1-61499-625-5-295}, editor = {Fellander-Tsai, L and Vosburgh, K G and Westwood, J D and Senger, S and Westwood, S W and Fidopiastis, C M and Liu, A}, isbn = {9781614996248}, issn = {18798365}, keywords = {3D modeling,Surgery planning,Virtual reality}, pages = {295--300}, publisher = {IOS Press}, title = {{The virtual pediatric airways workbench}}, type = {Serial}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84965026283{\&}doi=10.3233{\%}2F978-1-61499-625-5-295{\&}partnerID=40{\&}md5=a0854888010669dc0a00ab47368c672c}, volume = {220}, year = {2016} } @article{RN884, abstract = {Lung cancer is still one of the most popular types of cancer worldwide. The analysis of consecutive timely spaced CT scans is a useful tool to analyze the lesion's malignant behavior during treatment or for the follow up of lesions undetermined as to its nature (malignant or benign). This paper aims to propose a method to obtain more detailed information about the lesion changes through complementary studies on the biological activity of pulmonary nodules. The methodology presented uses mixture of dynamic textures to cluster different tissues of lung lesions, in accordance with the density change over time, and to describe regional changes using similarity measures. The study was conducted on two chest computed tomography databases. The Public Lung Database (PLD), which has lesions that undergo evaluation for drug therapy and a private database of lung lesions of undetermined diagnosis. The lesions from the public database had areas with density variations in the range from 0.01{\%} to 110.53{\%}. Lesions from the private database showed regions with density variations from 0.11{\%} to 46.94{\%} range. The density change analysis proposed, that is done by regionally different locations, provides more detailed information about their change over time. Lesions considered volumetrically stable may contain locations that suffer noticeable changes, as well as lesions with large volumetric growth may not show a significant change in density.}, author = {Quintanilha, Darlan Bruno Pontes and Silva, Arist{\'{o}}fanes Corr{\^{e}}a and {De Paiva}, Anselmo Cardoso and Gattass, Marcelo}, doi = {10.1166/jctn.2018.7321}, issn = {15461963}, journal = {Journal of Computational and Theoretical Nanoscience}, keywords = {Dynamic texture mixture,Lung cancer,Medical image,Temporal analysis,Tissue change detection}, number = {6-7}, pages = {1839--1852}, title = {{Mixture of dynamic textures applied to temporal analysis of lung lesions}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85057328183{\&}doi=10.1166{\%}2Fjctn.2018.7321{\&}partnerID=40{\&}md5=332bb09efa18d59dcf58b44028959bc7}, volume = {15}, year = {2018} } @article{RN917, abstract = {Purpose The main focus of the current paper is the clinical implementation of a Monte Carlo based platform for treatment plan validation for Tomotherapy and Cyberknife, without adding additional tasks to the dosimetry department. 
Methods The Monte Carlo platform consists of C++ classes for the actual functionality and a web based GUI that allows accessing the system using a web browser. Calculations are based on BEAMnrc/DOSXYZnrc and/or GATE and are performed automatically after exporting the dicom data from the treatment planning system. For Cyberknife treatments of moving targets, the log files saved during the treatment (position of robot, internal fiducials and external markers) can be used in combination with the 4D planning CT to reconstruct the actually delivered dose. The Monte Carlo platform is also used for calculation on MRI images, using pseudo-CT conversion. Results For Tomotherapy treatments we obtain an excellent agreement (within 2{\%}) for almost all cases. However, we have been able to detect a problem regarding the CT Hounsfield units definition of the Toshiba Large Bore CT when using a large reconstruction diameter. For Cyberknife treatments we obtain an excellent agreement with the Monte Carlo algorithm of the treatment planning system. For some extreme cases, when treating small lung lesions in low density lung tissue, small differences are obtained due to the different cut-off energy of the secondary electrons. Conclusions A Monte Carlo based treatment plan validation tool has successfully been implemented in clinical routine and is used to systematically validate all Cyberknife and Tomotherapy plans.}, author = {Reynaert, N. and Demol, B. and Charoy, M. and Bouchoucha, S. and Crop, F. and Wagner, A. and Lacornerie, T. and Dubus, F. and Rault, E. and Comte, P. and Cayez, R. and Boydev, C. and Pasquier, D. and Mirabel, X. and Lartigau, E. and Sarrazin, T.}, doi = {10.1016/j.ejmp.2016.09.009}, issn = {1724191X}, journal = {Physica Medica}, keywords = {Delivered dose,Monte Carlo,QA,Treatment planning}, number = {10}, pages = {1225--1237}, title = {{Clinical implementation of a Monte Carlo based treatment plan QA platform for validation of Cyberknife and Tomotherapy treatments}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84992718263{\&}doi=10.1016{\%}2Fj.ejmp.2016.09.009{\&}partnerID=40{\&}md5=3de38bf62efd4bb22fcce5398852ce95}, volume = {32}, year = {2016} } @article{RN948, abstract = {In the context of head and neck cancer (HNC) adaptive radiation therapy (ART), the two purposes of the study were to compare the performance of multiple deformable image registration (DIR) methods and to quantify their impact for dose accumulation, in healthy structures. Fifteen HNC patients had a planning computed tomography (CT0) and weekly CTs during the 7 weeks of intensity-modulated radiation therapy (IMRT). Ten DIR approaches using different registration methods (demons or B-spline free form deformation (FFD)), preprocessing, and similarity metrics were tested. Two observers identified 14 landmarks (LM) on each CT-scan to compute LM registration error. The cumulated doses estimated by each method were compared. The two most effective DIR methods were the demons and the FFD, with both the mutual information (MI) metric and the filtered CTs. The corresponding LM registration accuracy (precision) was 2.44 mm (1.30 mm) and 2.54 mm (1.33 mm), respectively. The corresponding LM estimated cumulated dose accuracy (dose precision) was 0.85 Gy (0.93 Gy) and 0.88 Gy (0.95 Gy), respectively. 
The mean uncertainty (difference between maximal and minimal dose considering all the 10 methods) to estimate the cumulated mean dose to the parotid gland (PG) was 4.03 Gy (SD = 2.27 Gy, range: 1.06-8.91 Gy).}, author = {Rigaud, Bastien and Simon, Antoine and Castelli, Jo{\"{e}}l and Gobeli, Maxime and {Ospina Arango}, Juan David and Cazoulat, Guillaume and Henry, Olivier and Haigron, Pascal and {De Crevoisier}, Renaud}, doi = {10.1155/2015/726268}, issn = {23146141}, journal = {BioMed Research International}, title = {{Evaluation of deformable image registration methods for dose monitoring in head and neck radiotherapy}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84924164245{\&}doi=10.1155{\%}2F2015{\%}2F726268{\&}partnerID=40{\&}md5=d9d5d935280b872c6cac8a60ef5e6f65}, volume = {2015}, year = {2015} } @article{RN935, abstract = {The deposits of fat on the surroundings of the heart are correlated to several health risk factors such as atherosclerosis, carotid stiffness, coronary artery calcification, atrial fibrillation and many others. These deposits vary unrelated to obesity, which reinforces its direct segmentation for further quantification. However, manual segmentation of these fats has not been widely deployed in clinical practice due to the required human workload and consequential high cost of physicians and technicians. In this work, we propose a unified method for an autonomous segmentation and quantification of two types of cardiac fats. The segmented fats are termed epicardial and mediastinal, and stand apart from each other by the pericardium. Much effort was devoted to achieve minimal user intervention. The proposed methodology mainly comprises registration and classification algorithms to perform the desired segmentation. We compare the performance of several classification algorithms on this task, including neural networks, probabilistic models and decision tree algorithms. Experimental results of the proposed methodology have shown that the mean accuracy regarding both epicardial and mediastinal fats is 98.5{\%} (99.5{\%} if the features are normalized), with a mean true positive rate of 98.0{\%}. In average, the Dice similarity index was equal to 97.6{\%}.}, author = {Rodrigues, O. and Morais, F. F.C. and Morais, N. A.O.S. and Conci, L. S. and Neto, L. V. and Conci, A.}, doi = {10.1016/j.cmpb.2015.09.017}, issn = {18727565}, journal = {Computer Methods and Programs in Biomedicine}, keywords = {Atlas,Classification,Computed tomography,Image registration,Machine learning,Segmentation}, pages = {109--128}, title = {{A novel approach for the automated segmentation and volume quantification of cardiac fats on computed tomography}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84951273755{\&}doi=10.1016{\%}2Fj.cmpb.2015.09.017{\&}partnerID=40{\&}md5=03ff84a656a2d758e69a4052ed2af8ee}, volume = {123}, year = {2016} } @inproceedings{RN863, abstract = {According to the guidelines, patients with locally advanced colorectal cancer undergo neoadjuvant chemotherapy. However, response to therapy is reached only up to 30{\%} of cases. Therefore, it would be important to predict response to therapy before treatment. In this study, we demonstrated that the simultaneous optimization of feature subset and classifier parameters on different imaging datasets (T2w, DWI and PET) could improve classification performance. 
On a dataset of 51 patients (21 responders, 30 non responders), we obtained an accuracy of 90{\%}, 84{\%} and 76{\%} using three optimized SVM classifiers fed with selected features from PET, T2w and ADC images, respectively.}, author = {Rosati, S. and Gianfreda, C. M. and Balestra, G. and Giannini, V. and Mazzetti, S. and Regge, D.}, booktitle = {2018 IEEE Life Sciences Conference, LSC 2018}, doi = {10.1109/LSC.2018.8572194}, isbn = {9781538667095}, keywords = {Feature selection,Genetic algorithms,Rectal cancer,Response to chemoradiotherapy,SVM optimization}, pages = {65--68}, publisher = {Institute of Electrical and Electronics Engineers Inc.}, title = {{Radiomics to predict response to neoadjuvant chemotherapy in rectal cancer: Influence of simultaneous feature selection and classifier optimization}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85060201982{\&}doi=10.1109{\%}2FLSC.2018.8572194{\&}partnerID=40{\&}md5=2339615f5270587ffef9440d34bd5ca9}, year = {2018} } @article{RN888, abstract = {Mass spectrometry imaging (MSI) is a transformative imaging method that supports the untargeted, quantitative measurement of the chemical composition and spatial heterogeneity of complex samples with broad applications in life sciences, bioenergy, and health. While MSI data can be routinely collected, its broad application is currently limited by the lack of easily accessible analysis methods that can process data of the size, volume, diversity, and complexity generated by MSI experiments. The development and application of cutting-edge analytical methods is a core driver in MSI research for new scientific discoveries, medical diagnostics, and commercial-innovation. However, the lack of means to share, apply, and reproduce analyses hinders the broad application, validation, and use of novel MSI analysis methods. To address this central challenge, we introduce the Berkeley Analysis and Storage Toolkit (BASTet), a novel framework for shareable and reproducible data analysis that supports standardized data and analysis interfaces, integrated data storage, data provenance, workflow management, and a broad set of integrated tools. Based on BASTet, we describe the extension of the OpenMSI mass spectrometry imaging science gateway to enable web-based sharing, reuse, analysis, and visualization of data analyses and derived data products. We demonstrate the application of BASTet and OpenMSI in practice to identify and compare characteristic substructures in the mouse brain based on their chemical composition measured via MSI.}, author = {R{\"{u}}bel, Oliver and Bowen, Benjamin P.}, doi = {10.1109/TVCG.2017.2744479}, issn = {10772626}, journal = {IEEE Transactions on Visualization and Computer Graphics}, keywords = {Analysis Workflows,Data management,Data provenance,Data sharing,Mass spectrometry imaging,Visualization}, number = {1}, pages = {1025--1035}, title = {{BASTet: Shareable and Reproducible Analysis and Visualization of Mass Spectrometry Imaging Data via OpenMSI}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85028711599{\&}doi=10.1109{\%}2FTVCG.2017.2744479{\&}partnerID=40{\&}md5=ed08c67415ad9ee80b1a99e0116908fa}, volume = {24}, year = {2018} } @article{RN869, abstract = {Cerebral palsy (CP) is a common pediatric neurodevelopmental disorder, frequently resulting in motor and developmental deficits and often accompanied by cognitive impairments. 
A regular pathobiological hallmark of CP is oligodendrocyte maturation impairment resulting in white matter (WM) injury and reduced axonal myelination. Regeneration therapies based on cell replacement are currently limited, but neural precursor cells (NPCs), as cellular support for myelination, represent a promising regeneration strategy to treat CP, although the transplantation parameters (e.g., timing, dosage, mechanism) remain to be determined. We optimized a hemiplegic mouse model of neonatal hypoxia-ischemia that mirrors the pathobiological hallmarks of CP and transplanted NPCs into the corpus callosum (CC), a major white matter structure impacted in CP patients. The NPCs survived, engrafted, and differentiated morphologically in male and female mice. Histology and MRI showed repair of lesioned structures. Furthermore, electrophysiology revealed functional myelination of the CC (e.g., restoration of conduction velocity), while cylinder and CatWalk tests demonstrated motor recovery of the affected forelimb. Endogenous oligodendrocytes, recruited in the CC following transplantation of exogenous NPCs, are the principal actors in this recovery process. The lack of differentiation of the transplanted NPCs is consistent with enhanced recovery due to an indirect mechanism, such as a trophic and/or “bio-bridge” support mediated by endogenous oligodendrocytes. Our work establishes that transplantation of NPCs represents a viable therapeutic strategy for CP treatment, and that the enhanced recovery is mediated by endogenous oligodendrocytes. This will further our understanding and contribute to the improvement of cellular therapeutic strategies.}, author = {Rumajogee, Prakasham and Altamentova, Svetlana and Li, Lijun and Li, Junyi and Wang, Jian and Kuurstra, Alan and Khazaei, Mohamad and Beldick, Stephanie and Menon, Ravi S. and {Van der Kooy}, Derek and Fehlings, Michael G.}, doi = {10.1523/ENEURO.0369-18.2018}, issn = {23732822}, journal = {eNeuro}, keywords = {Cerebral palsy,Hypoxic-ischemia,Myelination,Neural precursor cells,Oligodendrocytes,White matter injury}, number = {5}, title = {{Exogenous neural precursor cell transplantation results in structural and functional recovery in a hypoxic-ischemic hemiplegic mouse model}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85057951115{\&}doi=10.1523{\%}2FENEURO.0369-18.2018{\&}partnerID=40{\&}md5=904cdf49f286dcf83962f118f9096771}, volume = {5}, year = {2018} } @article{RN846, abstract = {Human age prediction is an interesting and applicable issue in different fields. It can be based on various criteria such as face image, DNA methylation, chest plate radiographs, knee radiographs, dental images and etc. Most of the age prediction researches have mainly been based on images. Since the image processing and Machine Learning (ML) techniques have grown up, the investigations were led to use them in age prediction problem. The implementations would be used in different fields, especially in medical applications. Brain Age Estimation (BAE) has attracted more attention in recent years and it would be so helpful in early diagnosis of some neurodegenerative diseases such as Alzheimer, Parkinson, Huntington, etc. BAE is performed on Magnetic Resonance Imaging (MRI) images to compute the brain ages. Studies based on brain MRI shows that there is a relation between accelerated aging and accelerated brain atrophy.
This refers to the effects of neurodegenerative diseases on brain structure while making the whole of it older. This paper reviews and summarizes the main approaches for age prediction based on brain MRI images including preprocessing methods, useful tools used in different research works and the estimation algorithms. We categorize the BAE methods based on two factors, first the way of processing MRI images, which includes pixel-based, surface-based, or voxel-based methods and second, the generation of ML algorithms that includes traditional or Deep Learning (DL) methods. The modern techniques as DL methods help MRI based age prediction to get results that are more accurate. In recent years, more precise and statistical ML approaches have been utilized with the help of related tools for simplifying computations and getting accurate results. Pros and cons of each research and the challenges in each work are expressed and some guidelines and deliberations for future research are suggested.}, author = {Sajedi, Hedieh and Pardakhti, Nastaran}, doi = {10.1007/s10916-019-1401-7}, issn = {1573689X}, journal = {Journal of Medical Systems}, keywords = {Age prediction,BAE,Brain MRI,Brain age,Chronological age,Deep Learning,Image processing,Machine Learning}, number = {8}, pmid = {31297614}, title = {{Age Prediction Based on Brain MRI Image: A Survey}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85068766678{\&}doi=10.1007{\%}2Fs10916-019-1401-7{\&}partnerID=40{\&}md5=5d6cacfa42ec287f82160fe89c70fe3a}, volume = {43}, year = {2019} } @article{RN790, abstract = {Introduction Longitudinal magnetic resonance imaging (MRI) analysis has an important role in multiple sclerosis diagnosis and follow-up. The presence of new T2-w lesions on brain MRI scans is considered a prognostic and predictive biomarker for the disease. In this study, we propose a supervised approach for detecting new T2-w lesions using features from image intensities, subtraction values, and deformation fields (DF). Methods One year apart multi-channel brain MRI scans were obtained for 60 patients, 36 of them with new T2-w lesions. Images from both temporal points were preprocessed and co-registered. Afterwards, they were registered using multi-resolution affine registration, allowing their subtraction. In particular, the DFs between both images were computed with the Demons non-rigid registration algorithm. Afterwards, a logistic regression model was trained with features from image intensities, subtraction values, and DF operators. We evaluated the performance of the model following a leave-one-out cross-validation scheme. Results In terms of detection, we obtained a mean Dice similarity coefficient of 0.77 with a true-positive rate of 74.30{\%} and a false-positive detection rate of 11.86{\%}. In terms of segmentation, we obtained a mean Dice similarity coefficient of 0.56. The performance of our model was significantly higher than state-of-the-art methods. Conclusions The performance of the proposed method shows the benefits of using DF operators as features to train a supervised learning model. 
Compared to other methods, the proposed model decreases the number of false-positives while increasing the number of true-positives, which is relevant for clinical settings.}, author = {Salem, Mostafa and Cabezas, Mariano and Valverde, Sergi and Pareto, Deborah and Oliver, Arnau and Salvi, Joaquim and Rovira, {\`{A}}lex and Llad{\'{o}}, Xavier}, doi = {10.1016/j.nicl.2017.11.015}, issn = {22131582}, journal = {NeuroImage: Clinical}, keywords = {Automatic new lesion detection,Brain,MRI,Machine learning,Multiple sclerosis}, pages = {607--615}, title = {{A supervised framework with intensity subtraction and deformation field features for the detection of new T2-w lesions in multiple sclerosis}}, type = {Journal Article}, volume = {17}, year = {2018} } @article{RN892, abstract = {Purpose: Estimating the biodistribution and the pharmacokinetics from time-sequence SPECT images on a per-voxel basis is useful for studying activity nonuniformity or computing absorbed dose distributions by convolution of voxel kernels or Monte-Carlo radiation transport. Current approaches are either region-based, thus assuming uniform activity within the region, or voxel-based but using the same fitting model for all voxels. Methods: We propose a voxel-based multimodel fitting method (VoMM) that estimates a fitting function for each voxel by automatically selecting the most appropriate model among a predetermined set with Akaike criteria. This approach can be used to compute the time integrated activity (TIA) for all voxels in the image. To control fitting optimization that may fail due to excessive image noise, an approximated version based on trapezoid integration, named restricted method, is also studied. From this comparison, the number of failed fittings within images was estimated and analyzed. Numerical experiments were used to quantify uncertainties and feasibility was demonstrated with real patient data. Results: Regarding numerical experiments, root mean square errors of TIA obtained with VoMM were similar to those obtained with bi-exponential fitting functions, and were lower ({\textless} 5{\%} vs. {\textgreater} 10{\%}) than with single model approaches that consider the same fitting function for all voxels. Failure rates were lower with VoMM and restricted approaches than with single-model methods. On real clinical data, VoMM was able to fit 90{\%} of the voxels and led to fewer failed fits than single-model approaches. On regions of interest (ROI) analysis, the difference between ROI-based and voxel-based TIA estimations was low, less than 4{\%}. However, the computation of the mean residence time exhibited larger differences, up to 25{\%}. Conclusions: The proposed voxel-based multimodel fitting method, VoMM, is feasible on patient data. VoMM leads to organ-based TIA estimations similar to the conventional ROI-based method.
However, for pharmacokinetics analysis, studies of spatial heterogeneity or voxel-based absorbed dose assessment, VoMM could be used preferentially as it prevents model overfitting.}, author = {Sarrut, David and Halty, Adrien and Badel, Jean Noel and Ferrer, Ludovic and Bardi{\`{e}}s, Manuel}, doi = {10.1002/mp.12586}, issn = {00942405}, journal = {Medical Physics}, keywords = {SPECT,dosimetry,targeted radionuclide therapy,time activity curve,voxel-based}, number = {12}, pages = {6280--6288}, title = {{Voxel-based multimodel fitting method for modeling time activity curves in SPECT images:}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85037856250{\&}doi=10.1002{\%}2Fmp.12586{\&}partnerID=40{\&}md5=e15c5e0e2cbe8975290ae3d911a44b52}, volume = {44}, year = {2017} } @article{RN809, abstract = {Background: Men suspected of having clinically significant prostate cancer (sPC) increasingly undergo prostate MRI. The potential of deep learning to provide diagnostic support for human interpretation requires further evaluation. Purpose: To compare the performance of clinical assessment to a deep learning system optimized for segmentation trained with T2-weighted and diffusion MRI in the task of detection and segmentation of lesions suspicious for sPC. Materials and Methods: In this retrospective study, T2-weighted and diffusion prostate MRI sequences from consecutive men examined with a single 3.0-T MRI system between 2015 and 2016 were manually segmented. Ground truth was provided by combined targeted and extended systematic MRI-transrectal US fusion biopsy, with sPC defined as International Society of Urological Pathology Gleason grade group greater than or equal to 2. By using split-sample validation, U-Net was internally validated on the training set (80{\%} of the data) through cross validation and subsequently externally validated on the test set (20{\%} of the data). U-Net-derived sPC probability maps were calibrated by matching sextant-based cross-validation performance to clinical performance of Prostate Imaging Reporting and Data System (PI-RADS). Performance of PI-RADS and U-Net were compared by using sensitivities, specificities, predictive values, and Dice coefficient. Results: A total of 312 men (median age, 64 years; interquartile range [IQR], 58-71 years) were evaluated. The training set consisted of 250 men (median age, 64 years; IQR, 58-71 years) and the test set of 62 men (median age, 64 years; IQR, 60-69 years). In the test set, PI-RADS cutoffs greater than or equal to 3 versus cutoffs greater than or equal to 4 on a per-patient basis had sensitivity of 96{\%} (25 of 26) versus 88{\%} (23 of 26) at specificity of 22{\%} (eight of 36) versus 50{\%} (18 of 36). U-Net at probability thresholds of greater than or equal to 0.22 versus greater than or equal to 0.33 had sensitivity of 96{\%} (25 of 26) versus 92{\%} (24 of 26) (both P {\textgreater} .99) with specificity of 31{\%} (11 of 36) versus 47{\%} (17 of 36) (both P {\textgreater} .99), not statistically different from PI-RADS. Dice coefficients were 0.89 for prostate and 0.35 for MRI lesion segmentation. In the test set, coincidence of PI-RADS greater than or equal to 4 with U-Net lesions improved the positive predictive value from 48{\%} (28 of 58) to 67{\%} (24 of 36) for U-Net probability thresholds greater than or equal to 0.33 (P = .01), while the negative predictive value remained unchanged (83{\%} [25 of 30] vs 83{\%} [43 of 52]; P {\textgreater} .99).
Conclusion: U-Net trained with T2-weighted and diffusion MRI achieves similar performance to clinical Prostate Imaging Reporting and Data System assessment.}, author = {Schelb, Patrick and Kohl, Simon and Radtke, Jan Philipp and Wiesenfarth, Manuel and Kickingereder, Philipp and Bickelhaupt, Sebastian and Kuder, Tristan Anselm and Stenzinger, Albrecht and Hohenfellner, Markus and Schlemmer, Heinz Peter and Maier-Hein, Klaus H. and Bonekamp, David}, doi = {10.1148/radiol.2019190938}, issn = {15271315}, journal = {Radiology}, number = {3}, pages = {607--617}, title = {{Classification of cancer at prostate MRI: Deep Learning versus Clinical PI-RADS Assessment}}, type = {Journal Article}, volume = {293}, year = {2019} } @article{RN901, abstract = {Objective: Focused ultrasound (FUS) is rapidly gaining clinical acceptance for several target tissues in the human body. Yet, treating liver targets is not clinically applied due to a high complexity of the procedure (noninvasiveness, target motion, complex anatomy, blood cooling effects, shielding by ribs, and limited image-based monitoring). To reduce the complexity, numerical FUS simulations can be utilized for both treatment planning and execution. These use-cases demand highly accurate and computationally efficient simulations. Methods: We propose a numerical method for the simulation of abdominal FUS treatments during respiratory motion of the organs and target. Especially, a novel approach is proposed to simulate the heating during motion by solving Pennes' bioheat equation in a computational reference space, i.e., the equation is mathematically transformed to the reference. The approach allows for motion discontinuities, e.g., the sliding of the liver along the abdominal wall. Results: Implementing the solver completely on the graphics processing unit and combining it with an atlas-based ultrasound simulation approach yields a simulation performance faster than real time (less than 50-s computing time for 100 s of treatment time) on a modern off-the-shelf laptop. The simulation method is incorporated into a treatment planning demonstration application that allows to simulate real patient cases including respiratory motion. Conclusion: The high performance of the presented simulation method opens the door to clinical applications. Significance: The methods bear the potential to enable the application of FUS for moving organs.}, author = {Schwenke, Michael and Georgii, Joachim and Preusser, Tobias}, doi = {10.1109/TBME.2016.2619741}, issn = {15582531}, journal = {IEEE Transactions on Biomedical Engineering}, keywords = {Abdominal organs treatment,Biomedical computing,High-intensity focused ultrasound,Medical simulation,Medical treatment,Numerical models,Numerical simulation,Respiratory motion,Simulation during motion,Therapeutic ultrasound,Thermal Ablation,Thermal analysis}, number = {7}, pages = {1455--1468}, title = {{Fast numerical simulation of focused ultrasound treatments during respiratory motion with discontinuous motion boundaries}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85026725476{\&}doi=10.1109{\%}2FTBME.2016.2619741{\&}partnerID=40{\&}md5=b0cf2a924d4dd6d672f984b75f9bf20c}, volume = {64}, year = {2017} } @article{RN781, abstract = {Purpose: To present a method to enrich atlases for atlas based segmentation. Such enriched atlases can then be used as a single atlas or within a multiatlas framework.
Methods: In this paper, machine learning techniques have been used to enhance the atlas based segmentation approach. The enhanced atlas defined in this work is a pair composed of a gray level image alongside an image of multiclass classifiers with one classifier per voxel. Each classifier embeds local information from the whole training dataset that allows for the correction of some systematic errors in the segmentation and accounts for the possible local registration errors. The authors also propose to use these images of classifiers within a multiatlas framework: results produced by a set of such local classifier atlases can be combined using a label fusion method. Results: Experiments have been made on the in vivo images of the IBSR dataset and a comparison has been made with several state-of-the-art methods such as FreeSurfer and the multiatlas nonlocal patch based method of Coup{\'{e}} or Rousseau. These experiments show that their method is competitive with state-of-the-art methods while having a low computational cost. Further enhancement has also been obtained with a multiatlas version of their method. It is also shown that, in this case, nonlocal fusion is unnecessary. The multiatlas fusion can therefore be done efficiently. Conclusions: The single atlas version has similar quality to state-of-the-art multiatlas methods but with the computational cost of a naive single atlas segmentation. The multiatlas version offers an improvement in quality and can be done efficiently without a nonlocal strategy.}, author = {Sdika, Micha{\"{e}}l}, doi = {10.1118/1.4935946}, issn = {00942405}, journal = {Medical Physics}, keywords = {atlas based segmentation,machine learning,multiple atlas,support vector machine}, number = {12}, pages = {7169--7181}, title = {{Enhancing atlas based segmentation with multiclass linear classifiers}}, type = {Journal Article}, volume = {42}, year = {2015} } @article{RN800, abstract = {The ability to monitor tumor motion without implanted markers can potentially enable broad access to more accurate and precise lung radiotherapy. A major challenge is that kilovoltage (kV) imaging based methods are rarely able to continuously track the tumor due to the inferior tumor visibility on 2D kV images. Another challenge is the estimation of 3D tumor position based on only 2D imaging information. The aim of this work is to address both challenges by proposing a Bayesian approach for markerless tumor tracking for the first time. The proposed approach adopts the framework of the extended Kalman filter, which combines prediction and measurement steps to make the optimal tumor position update. For each imaging frame, the tumor position is first predicted by a respiratory-correlated model. The 2D tumor position on the kV image is then measured by template matching. Finally, the prediction and 2D measurement are combined based on the 3D distribution of tumor positions in the past 10 s and the estimated uncertainty of template matching. To investigate the clinical feasibility of the proposed method, a total of 13 lung cancer patient datasets were used for retrospective validation, including 11 cone-beam CT scan pairs and two stereotactic ablative body radiotherapy cases. The ground truths for tumor motion were generated from the 3D trajectories of implanted markers or beacons. The mean, standard deviation, and 95th percentile of the 3D tracking error were found to range from 1.6-2.9 mm, 0.6-1.5 mm, and 2.6-5.8 mm, respectively.
Markerless tumor tracking always resulted in smaller errors compared to the standard of care. The improvement was the most pronounced in the superior-inferior (SI) direction, with up to 9.5 mm reduction in the 95th-percentile SI error for patients with {\textgreater}10 mm 5th-to-95th percentile SI tumor motion. The percentage of errors with 3D magnitude {\textless}5 mm was 96.5{\%} for markerless tumor tracking and 84.1{\%} for the standard of care. The feasibility of 3D markerless tumor tracking has been demonstrated on realistic clinical scenarios for the first time. The clinical implementation of the proposed method will enable more accurate and precise lung radiotherapy using existing hardware and workflow. Future work is focused on the clinical and real-time implementation of this method.}, author = {Shieh, Chun Chien and Caillet, Vincent and Dunbar, Michelle and Keall, Paul J. and Booth, Jeremy T. and Hardcastle, Nicholas and Haddad, Carol and Eade, Thomas and Feain, Ilana}, doi = {10.1088/1361-6560/aa6393}, issn = {13616560}, journal = {Physics in Medicine and Biology}, keywords = {Bayesian,intrafraction imaging,lung cancer,markerless,tumor tracking}, number = {8}, pages = {3065--3080}, title = {{A Bayesian approach for three-dimensional markerless tumor tracking using kV imaging during lung radiotherapy}}, type = {Journal Article}, volume = {62}, year = {2017} } @article{RN962, abstract = {The ability to monitor tumor motion without implanted markers is clinically advantageous for lung image-guided radiotherapy (IGRT). Existing markerless tracking methods often suffer from overlapping structures and low visibility of tumors on kV projection images. We introduce the short arc tumor tracking (SATT) method to overcome these issues. The proposed method utilizes multiple kV projection images selected from a nine-degree imaging arc to improve tumor localization, and respiratory-correlated 4D cone-beam CT (CBCT) prior knowledge to minimize the effects of overlapping anatomies. The 3D tumor position is solved as an optimization problem with prior knowledge incorporated via regularization. We retrospectively validated SATT on 11 clinical scans from four patients with central tumors. These patients represent challenging scenarios for markerless tumor tracking due to the inferior adjacent contrast. The 3D trajectories of implanted fiducial markers were used as the ground truth for tracking accuracy evaluation. In all cases, the tumors were successfully tracked at all gantry angles. Compared to standard pre-treatment CBCT guidance alone, trajectory errors were significantly smaller with tracking in all cases, and the improvements were the most prominent in the superior-inferior direction. The mean 3D tracking error ranged from 2.2-9.9 mm, which was 0.4-2.6 mm smaller compared to pre-treatment CBCT. In conclusion, we were able to directly track tumors with inferior visibility on kV projection images using SATT. Tumor localization accuracies are significantly better with tracking compared to the current standard of care of lung IGRT. Future work involves the prospective evaluation and clinical implementation of SATT.}, author = {Shieh, Chun Chien and Keall, Paul J.
and Kuncic, Zdenka and Huang, Chen Yu and Feain, Ilana}, doi = {10.1088/0031-9155/60/24/9437}, issn = {13616560}, journal = {Physics in Medicine and Biology}, keywords = {IGRT,lung cancer,markerless,tumor tracking}, number = {24}, pages = {9437--9454}, title = {{Markerless tumor tracking using short kilovoltage imaging arcs for lung image-guided radiotherapy}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84957922433{\&}doi=10.1088{\%}2F0031-9155{\%}2F60{\%}2F24{\%}2F9437{\&}partnerID=40{\&}md5=b33e8bc47270bdc2d818b5af2cf039d3}, volume = {60}, year = {2015} } @article{RN787, abstract = {X-ray imaging is the conventional method for diagnosing the orthopedic condition of a patient. Computerized Tomography (CT) scanning is another diagnostic method that provides a patient's 3D anatomical information. However, both methods have limitations when diagnosing the whole leg; X-ray imaging does not provide 3D information, and normal CT scanning cannot be performed with a standing posture. Obtaining 3D data regarding the whole leg in a standing posture is clinically important because it enables 3D analysis in the weight bearing condition. Based on these clinical needs, a hardware-based bi-plane X-ray imaging system has been developed; it uses two orthogonal X-ray images. However, such methods have not been made available in general clinics because of the high cost. Therefore, we proposed a widely adaptive method for 2D X-ray image and 3D CT scan data. By this method, it is possible to three-dimensionally analyze the whole leg in a standing posture. The optimal position is the one that generates the image most similar to the captured X-ray image. We verified the performance of the proposed method by simulation-based experiments. Then, we analyzed the internal-external rotation angle of the femur using real patient data. Approximately 10.55 degrees of internal rotation were found relative to the defined anterior-posterior direction. In this paper, we present a useful registration method using the conventional X-ray image and 3D CT scan data to analyze the whole leg in the weight-bearing condition.}, author = {Shim, Eungjune and Kim, Youngjun and Lee, Deukhee and Lee, Byung Hoon and Woo, Sungkyung and Lee, Kunwoo}, doi = {10.1007/s11766-018-3459-2}, issn = {10051031}, journal = {Applied Mathematics}, keywords = {2D-3D registration,3D analysis,CT,X-ray,simulated annealing}, number = {1}, pages = {59--70}, title = {{2D-3D registration for 3D analysis of lower limb alignment in a weight-bearing condition}}, type = {Journal Article}, volume = {33}, year = {2018} } @article{RN854, abstract = {Hybrid 3D scaffolds composed of different biomaterials with fibrous structure or enriched with different inclusions (i.e., nano- and microparticles) have already demonstrated their positive effect on cell integration and regeneration. The analysis of fibers in hybrid biomaterials, especially in a 3D space is often difficult due to their various diameters (from micro to nanoscale) and compositions. Though biomaterials processing workflows are implemented, there are no software tools for fiber analysis that can be easily integrated into such workflows.
Due to the demand for reproducible science with Jupyter notebooks and the broad use of the Python programming language, we have developed the new Python package quanfima offering a complete analysis of hybrid biomaterials, that include the determination of fiber orientation, fiber and/or particle diameter and porosity. Here, we evaluate the provided tensor-based approach on a range of generated datasets under various noise conditions. Also, we show its application to the X-ray tomography datasets of polycaprolactone fibrous scaffolds pure and containing silicate-substituted hydroxyapatite microparticles, hydrogels enriched with bioglass contained strontium and alpha-tricalcium phosphate microparticles for bone tissue engineering and porous cryogel 3D scaffold for pancreatic cell culturing. The results obtained with the help of the developed package demonstrated high accuracy and performance of orientation, fibers and microparticles diameter and porosity analysis.}, author = {Shkarin, Roman and Shkarin, Andrei and Shkarina, Svetlana and Cecilia, Angelica and Surmenev, Roman A. and Surmeneva, Maria A. and Weinhardt, Venera and Baumbach, Tilo and Mikut, Ralf}, doi = {10.1371/journal.pone.0215137}, issn = {19326203}, journal = {PLoS ONE}, number = {4}, title = {{Quanfima: An open source Python package for automated fiber analysis of biomaterials}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85064242516{\&}doi=10.1371{\%}2Fjournal.pone.0215137{\&}partnerID=40{\&}md5=4418e4939d68c3c8368a16476e37b7fb}, volume = {14}, year = {2019} } @article{RN795, abstract = {Deformable image registration (DIR) is a rapidly developing discipline in the field of medical imaging that has found numerous applications in modern radiation therapy. To be used in the clinical environment, DIR requires an accurate and robust algorithm supported by the careful evaluation. The purpose of this study was to evaluate the performance of the non-linear Dense Anatomical Block Matching (DABM) algorithm for CT-CBCT image registration of prostate cancer patients. Pre-treatment CT (pCT) images of five prostate patients that underwent intensity modulated radiation therapy (IMRT) were selected for this work. Mid-treatment CBCT data sets acquired during radiotherapy course were used to help validate the algorithm performance and benchmark against other commonly used DIR algorithms. Rigid alignment was followed by the DIR of considered images. After registration, structures (PTV, GTV, Bladder and Rectum) delineated on the pCT were deformed using the obtained deformation vector fields (DVFs), then propagated to the CBCT images and compared to the analogous contours delineated on the CBCT by an experienced radiation oncologist. The accuracy of image registration was assessed by several quantitative metrics: Dice Similarity Coefficient (DSC), Hausdorff Distances (HD; average and 95th percentile), Center of the Mass Shift (COM) as well as by physician validation. The topology of the obtained deformation vector fields was analyzed by the Jacobian determinant. The accuracy of the inverted DFVs was investigated by the application of the Inverse Consistency Error (ICE). The performance of the DABM algorithm was quantitatively compared to Rigid, Affine and B-spline algorithms. Results indicate that for all the patients and anatomical structures considered here, both the accuracy and the consistency of the DABM algorithm are considerably better than the other evaluated registration methods. 
Generated DVFs have a well-preserved topology and small ICEs. Presented findings show that DABM is a promising alternative to the existing common strategies for CT-CBCT image registration and its application in the adaptive radiation therapy of the pelvic region.}, author = {Siciarz, Pawel and McCurdy, Boyd and Alshafa, Faiez and Greer, Peter and Hatton, Joan and Wright, Philip}, doi = {10.1088/2057-1976/aacada}, issn = {20571976}, journal = {Biomedical Physics and Engineering Express}, keywords = {CT-CBCT deformable image registration,dense anatomical block matching,prostate cancer,radiation therapy}, number = {4}, pages = {15}, title = {{Evaluation of CT to CBCT non-linear dense anatomical block matching registration for prostate patients}}, type = {Journal Article}, volume = {4}, year = {2018} } @article{RN934, abstract = {The authors proposed a solution to the over-segmentation of color images processed by watershed segmentation algorithm. The solution utilizes hierarchical cluster analysis and treats watersheds as objects characterized by a number of attributes. This paper briefly discusses the solution (clustering methods, their parameters, selected watershed attributes) and presents an algorithm used for selecting optimal parameters for cluster analysis. Detailed results obtained for a set of test images are presented and discussed.}, author = {Smo{\l}ka, Jakub and Skublewska-Paszkowska, Maria and {\L}ukasik, Edyta}, doi = {10.15199/48.2016.09.61}, issn = {00332097}, journal = {Przeglad Elektrotechniczny}, keywords = {Cluster analysis,Over-segmentation reduction,Watershed transformation}, number = {9}, pages = {250--256}, title = {{Algorytm doboru optymalnych parametr{\'{o}}w analizy skupie{\'{n}} zastosowanej do redukcji nadsegmentacji}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84986000629{\&}doi=10.15199{\%}2F48.2016.09.61{\&}partnerID=40{\&}md5=311e10c8007ae785c0cef57929b23170}, volume = {92}, year = {2016} } @article{RN910, abstract = {The paper presents efficient parallel computations of the patient-specific aortic valve on virtualized resources of the OpenStack cloud infrastructure. The main focus is on parallel performance analysis of the developed software service based on ANSYS Fluent platform, which runs on Docker containers of the private university cloud. The patient-specific aortic valve simulation, described by incompressible Navier-Stokes equations, is considered as a pilot application of the hosted cloud infrastructure. The parallel performance of the developed software service is assessed measuring parallel speedup of computations carried out on virtualized resources. The results obtained on Docker containers are compared with the performance measured by using the native hardware.}, author = {Staskuniene, M. and Kaceniauskas, A. and Starikovicius, V. and Maknickas, A. and Stupak, E. and Pacevic, R.}, doi = {10.4203/ccp.111.16}, issn = {17593433}, journal = {Civil-Comp Proceedings}, keywords = {ANSYS Fluent,Cloud computing,Docker,OpenStack,Patient-specific aortic valve simulation,Performance analysis}, title = {{Parallel simulation of the aortic valve flows on the openstack cloud}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85020416209{\&}partnerID=40{\&}md5=c98d54cb21401e2192b0bf6051b51636}, volume = {111}, year = {2017} } @inproceedings{RN864, abstract = {Medical imaging visualization technology takes a significant role in the medical community.
With the assistance of medical imaging visualization applications, huge convenience has been brought into clinical diagnosis, monitoring, and treatment. It allows doctors and researchers to see inside the human body, to identify medical problems, and to diagnose diseases. This article presents a lightweight, fast and user-friendly image viewer for medical imaging called SPIFFY. Some developing methodologies with the integration of VTK, ITK, and Qt will be presented in this article. Besides, the minimalist user interface(UI) design of SPIFFY with an application of Human-Computer Interaction(HCI) psychology principles will also be introduced. Moreover, this article will identify the benefits provided by SPIFFY and present a benchmark against some existing medical visualization applications. Experiments using cognitive walkthrough evaluation shows that SPIFFY provides both high effectiveness and efficiency.}, author = {Sun, Jiayu and Chandra, Shekhar S.}, booktitle = {Proceedings of 2018 IEEE 4th Information Technology and Mechatronics Engineering Conference, ITOEC 2018}, doi = {10.1109/ITOEC.2018.8740656}, editor = {Xu, B}, isbn = {9781538653739}, keywords = {Cognitive walkthrough evaluation,Human-computer interaction,Medical imaging,Visualizing software}, pages = {297--301}, publisher = {Institute of Electrical and Electronics Engineers Inc.}, title = {{SPIFFY: A simpler image viewer for medical imaging}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85068362296{\&}doi=10.1109{\%}2FITOEC.2018.8740656{\&}partnerID=40{\&}md5=1c3ff161aa23226e508668105edd9d2f}, year = {2018} } @incollection{RN880, abstract = {The main aim of this work was to perform rigid registration of Computed Tomography (CT) and scanner datasets. The surgeon applies CT and scanner datasets in computer aided surgery and performs registration in order to visualize the location of surgical instrument on screen. It is well known fact that the registration procedure is crucial for efficient computer aiding of surgery. Selected algorithm should take into account types of datasets, required accuracy and time of calculations. The algorithms are classified basing on the various criteria: e.g. precision (coarse and fine registration), types of pointset (set of pair of corresponding points – so called point-point method, unorganized sets of points – so called surface registration). The paper presents exemplary results of applying the following algorithms: Landmark Transform (point-point registration), two methods of uninitialized Iterative Closest Point type (surface registration) and a hybrid method. The evaluated factors were: distance error (mean, minimal and maximal value) and running time of algorithm. The algorithms were tested on various datasets: (1) two similar datasets from Computed Tomography (one is geometrically transformed), (2) Computed Tomography dataset and cloud of points recorded using 3D Artec Space Spider scanner. In the first case the mean error values equaled: 102.08 mm – 121.70 mm for uninitialized ICPs methods, 0.005 mm for Landmark Transform method, and 0.0003 mm for hybrid method. The slowest algorithms in our tests were ICPs methods, faster was hybrid algorithm, and the fastest was Landmark Transform method. In the second case the distance errors were evaluated in four selected points, and the smallest errors were: 23.21 mm for uninitialized ICPs method, 0.69 mm for Landmark Transform, 9.03 for hybrid method. 
All algorithms were relatively slow for these large datasets, the fastest was Landmark Transform. In the second part of research we analysed the Target Registration Error (TRE) for fused Computed Tomography and scanner-recorded dataset. The TRE values equaled 0.7 mm - 2.8 mm. The results of CT – scanner datasets registration highly depend on the similarity of sets, especially their overlapping, but also their resolutions and uniformities.}, author = {{\'{S}}wi{\c{a}}tek-Najwer, Ewelina and {\.{Z}}uk, Magdalena and Majak, Marcin and Popek, Micha{\l}}, booktitle = {Lecture Notes in Computational Vision and Biomechanics}, doi = {10.1007/978-3-319-68195-5_38}, isbn = {22129391 (ISSN)}, issn = {22129413}, pages = {345--353}, publisher = {Springer Netherlands}, title = {{The rigid registration of CT and scanner dataset for computer aided surgery}}, type = {Serial}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85032361988{\&}doi=10.1007{\%}2F978-3-319-68195-5{\_}38{\&}partnerID=40{\&}md5=f66e1b53e39dbae26c9578f6ea426d78}, volume = {27}, year = {2018} } @article{RN889, abstract = {FIB/SEM nanotomography (FIB-nt) is a powerful technique for the determination and quantification of the three-dimensional microstructure in subsurface features. Often times, the microstructure of a sample is the ultimate determiner of the overall performance of a system, and a detailed understanding of its properties is crucial in advancing the materials engineering of a resulting device. While the FIB-nt technique has developed significantly in the 15 years since its introduction, advanced nanotomographic analysis is still far from routine, and a number of challenges remain in data acquisition and post-processing. In this work, we present a number of techniques to improve the quality of the acquired data, together with easy-to-implement methods to obtain “advanced” microstructural quantifications. The techniques are applied to a solid oxide fuel cell cathode of interest to the electrochemistry community, but the methodologies are easily adaptable to a wide range of material systems. Finally, results from an analyzed sample are presented as a practical example of how these techniques can be implemented.}, author = {Taillon, Joshua A. and Pellegrinelli, Christopher and Huang, Yi Lin and Wachsman, Eric D. 
and Salamanca-Riba, Lourdes G.}, doi = {10.1016/j.ultramic.2017.07.017}, issn = {18792723}, journal = {Ultramicroscopy}, keywords = {3D reconstruction,Focused ion beam,Microstructure quantification,Scanning electron microscopy,Tortuosity,Triple phase boundaries}, pages = {24--38}, title = {{Improving microstructural quantification in FIB/SEM nanotomography}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85027830512{\&}doi=10.1016{\%}2Fj.ultramic.2017.07.017{\&}partnerID=40{\&}md5=ec5c5bcb14a5795c82946322610e647d}, volume = {184}, year = {2018} } @inproceedings{RN928, author = {Tan, Maxine and Li, Zheng and Moore, Kathleen and Thai, Theresa and Ding, Kai and Liu, Hong and Zheng, Bin}, booktitle = {Medical Imaging 2016: Computer-Aided Diagnosis}, doi = {10.1117/12.2216303}, editor = {Tourassi, G D and Armato, S G}, isbn = {9781510600201}, issn = {16057422}, pages = {97853D}, publisher = {SPIE}, title = {{A B-spline image registration based CAD scheme to evaluate drug treatment response of ovarian cancer patients}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84988892588{\&}doi=10.1117{\%}2F12.2216303{\&}partnerID=40{\&}md5=eab17b988f48475720c13a6e6b359caa}, volume = {9785}, year = {2016} } @article{RN932, abstract = {Although Response Evaluation Criteria in Solid Tumors (RECIST) is the current clinical guideline to assess size change of solid tumors after therapeutic treatment, it has a relatively lower association to the clinical outcome of progression free survival (PFS) of the patients. In this paper, we presented a new approach to assess responses of ovarian cancer patients to new chemotherapy drugs in clinical trials. We first developed and applied a multi-resolution B-spline based deformable image registration method to register two sets of computed tomography (CT) image data acquired pre- and post-treatment. The B-spline difference maps generated from the co-registered CT images highlight the regions related to the volumetric growth or shrinkage of the metastatic tumors, and density changes related to variation of necrosis inside the solid tumors. Using a testing dataset involving 19 ovarian cancer patients, we compared patients' response to the treatment using the new image registration method and RECIST guideline. The results demonstrated that using the image registration method yielded higher association with the six-month PFS outcomes of the patients than using RECIST. The image registration results also provided a solid foundation of developing new computerized quantitative image feature analysis schemes in the future studies.}, author = {Tan, Maxine and Li, Zheng and Qiu, Yuchen and McMeekin, Scott D. and Thai, Theresa C. and Ding, Kai and Moore, Kathleen N. 
and Liu, Hong and Zheng, Bin}, doi = {10.1109/TMI.2015.2473823}, issn = {1558254X}, journal = {IEEE Transactions on Medical Imaging}, keywords = {Computed tomography (CT),Computer-aided diagnosis (CAD),Deformable image registration,Metastatic tumors,Ovarian cancer,Response evaluation criteria in solid tumors (RECI,Tumor volume and necrosis tracking}, number = {1}, pages = {316--325}, title = {{A new approach to evaluate drug treatment response of ovarian cancer patients based on deformable image registration}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84959364855{\&}doi=10.1109{\%}2FTMI.2015.2473823{\&}partnerID=40{\&}md5=f4cd96ce6d440ec0f4576be7ed0f2e7c}, volume = {35}, year = {2016} } @article{RN933, abstract = {{\textcopyright} 2016 American Vacuum Society. In order to utilize complementary imaging techniques to supply higher resolution data for fusion with secondary ion mass spectrometry (SIMS) chemical images, there are a number of aspects that, if not given proper consideration, could produce results which are easy to misinterpret. One of the most critical aspects is that the two input images must be of the same exact analysis area. With the desire to explore new higher resolution data sources that exists outside of the mass spectrometer, this requirement becomes even more important. To ensure that two input images are of the same region, an implementation of the insight segmentation and registration toolkit (ITK) was developed to act as a preprocessing step before performing image fusion. This implementation of ITK allows for several degrees of movement between two input images to be accounted for, including translation, rotation, and scale transforms. First, the implementation was confirmed to accurately register two multimodal images by supplying a known transform. Once validated, two model systems, a copper mesh grid and a group of RAW 264.7 cells, were used to demonstrate the use of the ITK implementation to register a SIMS image with a microscopy image for the purpose of performing image fusion.}, author = {Tarolli, Jay Gage and Bloom, Anna and Winograd, Nicholas}, doi = {10.1116/1.4939892}, issn = {1934-8630}, journal = {Biointerphases}, number = {2}, pages = {02A311}, title = {{Multimodal image fusion with SIMS: Preprocessing with image registration}}, type = {Journal Article}, volume = {11}, year = {2016} } @article{RN899, abstract = {Region of interest (ROI) alignment in medical images plays a crucial role in diagnostics, procedure planning, treatment, and follow-up. Frequently, a model is represented as triangulated mesh while the patient data is provided from CAT scanners as pixel or voxel data. Previously, we presented a 2D method for curve-to-pixel registration. This paper contributes (i) a general mesh-to-raster (M2R) framework to register ROIs in multi-modal images; (ii) a 3D surface-to-voxel application, and (iii) a comprehensive quantitative evaluation in 2D using ground truth provided by the simultaneous truth and performance level estimation (STAPLE) method. The registration is formulated as a minimization problem where the objective consists of a data term, which involves the signed distance function of the ROI from the reference image, and a higher order elastic regularizer for the deformation. The evaluation is based on quantitative light-induced fluoroscopy (QLF) and digital photography (DP) of decalcified teeth. 
STAPLE is computed on 150 image pairs from 32 subjects, each showing one corresponding tooth in both modalities. The ROI in each image is manually marked by three experts (900 curves in total). In the QLF-DP setting, our approach significantly outperforms the mutual information-based registration algorithm implemented with the Insight Segmentation and Registration Toolkit (ITK) and Elastix.}, archivePrefix = {arXiv}, arxivId = {1703.01972}, author = {Tatano, Rosalia and Berkels, Benjamin and Deserno, Thomas M.}, doi = {10.1117/1.jmi.4.4.044002}, eprint = {1703.01972}, issn = {2329-4310}, journal = {Journal of Medical Imaging}, number = {04}, pages = {1}, title = {{Mesh-to-raster region-of-interest-based nonrigid registration of multimodal images}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85032914246{\&}doi=10.1117{\%}2F1.JMI.4.4.044002{\&}partnerID=40{\&}md5=c806ed79840aa4e089be7a24062199ec}, volume = {4}, year = {2017} } @article{RN839, abstract = {Purpose: For planning and guidance of minimally invasive mitral valve repair procedures, 3D+t transesophageal echocardiography (TEE) sequences are acquired before and after the intervention. The valve is then visually and quantitatively assessed in selected phases. To enable a quantitative assessment of valve geometry and pathological properties in all heart phases, as well as the changes achieved through surgery, we aim to provide a new 4D segmentation method. Methods: We propose a tracking-based approach combining gradient vector flow (GVF) and position-based dynamics (PBD). An open-state surface model of the valve is propagated through time to the closed state, attracted by the GVF field of the leaflet area. The PBD method ensures topological consistency during deformation. For evaluation, one expert in cardiac surgery annotated the closed-state leaflets in 10 TEE sequences of patients with normal and abnormal mitral valves, and defined the corresponding open-state models. Results: The average point-to-surface distance between the manual annotations and the final tracked model was 1.00mm±1.08mm. Qualitatively, four cases were satisfactory, five passable and one unsatisfactory. Each sequence could be segmented in 2–6 min. Conclusion: Our approach enables to segment the mitral valve in 4D TEE image data with normal and pathological valve closing behavior. 
With this method, in addition to the quantification of the remaining orifice area, shape and dimensions of the coaptation zone can be analyzed and considered for planning and surgical result assessment.}, author = {Tautz, Lennart and Walczak, Lars and Georgii, Joachim and Jazaerli, Amer and Vellguth, Katharina and Wamala, Isaac and S{\"{u}}ndermann, Simon and Falk, Volkmar and Hennemuth, Anja}, doi = {10.1007/s11548-019-02071-4}, issn = {18616429}, journal = {International Journal of Computer Assisted Radiology and Surgery}, keywords = {Echocardiography,Mitral valve,Position-based dynamics,Segmentation,Tracking}, number = {1}, pages = {119--128}, title = {{Combining position-based dynamics and gradient vector flow for 4D mitral valve segmentation in TEE sequences}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85074526687{\&}doi=10.1007{\%}2Fs11548-019-02071-4{\&}partnerID=40{\&}md5=d65f50a05a3b5b16c3d481c565ea1497}, volume = {15}, year = {2020} } @article{RN953, abstract = {We introduce a region template abstraction and framework for the efficient storage, management and processing of common data types in analysis of large datasets of high resolution images on clusters of hybrid computing nodes. The region template abstraction provides a generic container template for common data structures, such as points, arrays, regions, and object sets, within a spatial and temporal bounding box. It allows for different data management strategies and I/O implementations, while providing a homogeneous, unified interface to applications for data storage and retrieval. A region template application is represented as a hierarchical dataflow in which each computing stage may be represented as another dataflow of finer-grain tasks. The execution of the application is coordinated by a runtime system that implements optimizations for hybrid machines, including performance-aware scheduling for maximizing the utilization of computing devices and techniques to reduce the impact of data transfers between CPUs and GPUs. An experimental evaluation on a state-of-the-art hybrid cluster using a microscopy imaging application shows that the abstraction adds negligible overhead (about 3{\%}) and achieves good scalability and high data transfer rates. Optimizations in a high speed disk based storage implementation of the abstraction to support asynchronous data transfers and computation result in an application performance gain of about 1.13x. Finally, a processing rate of 11,730 4K x 4K tiles per minute was achieved for the microscopy imaging application on a cluster with 100 nodes (300 GPUs and 1200 CPU cores). 
This computation rate enables studies with very large datasets.}, archivePrefix = {arXiv}, arxivId = {1405.7958}, author = {Teodoro, George and Pan, Tony and Kurc, Tahsin and Kong, Jun and Cooper, Lee and Klasky, Scott and Saltz, Joel}, doi = {10.1016/j.parco.2014.09.003}, eprint = {1405.7958}, issn = {01678191}, journal = {Parallel Computing}, keywords = {GPGPU,Heterogeneous environments,Image analysis,Microscopy imaging,Storage and I/O}, number = {10}, pages = {589--610}, title = {{Region templates: Data representation and management for high-throughput image analysis}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84908545293{\&}doi=10.1016{\%}2Fj.parco.2014.09.003{\&}partnerID=40{\&}md5=811558beee3d6bf9758c3001bf55bb7b}, volume = {40}, year = {2014} } @article{RN799, abstract = {A comprehensive methodology for treatment simulation and evaluation of dose coverage probabilities is presented where a population based statistical shape model (SSM) provide samples of fraction specific patient geometry deformations. The learning data consists of vector fields from deformable image registration of repeated imaging giving intra-patient deformations which are mapped to an average patient serving as a common frame of reference. The SSM is created by extracting the most dominating eigenmodes through principal component analysis of the deformations from all patients. The sampling of a deformation is thus reduced to sampling weights for enough of the most dominating eigenmodes that describe the deformations. For the cervical cancer patient datasets in this work, we found seven eigenmodes to be sufficient to capture 90{\%} of the variance in the deformations of the, and only three eigenmodes for stability in the simulated dose coverage probabilities. The normality assumption of the eigenmode weights was tested and found relevant for the 20 most dominating eigenmodes except for the first. Individualization of the SSM is demonstrated to be improved using two deformation samples from a new patient. The probabilistic evaluation provided additional information about the trade-offs compared to the conventional single dataset treatment planning.}, author = {Tilly, David and {Van De Schoot}, Agustinus J.A.J. and Grusell, Erik and Bel, Arjan and Ahnesj{\"{o}}, Anders}, doi = {10.1088/1361-6560/aa64ef}, issn = {13616560}, journal = {Physics in Medicine and Biology}, keywords = {cervix,deformable image registration,probabilistic planning,radiotherapy,statistical shape model}, number = {10}, pages = {4140--4159}, title = {{Dose coverage calculation using a statistical shape model - Applied to cervical cancer radiotherapy}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {62}, year = {2017} } @inproceedings{RN952, abstract = {Concentric tube robots may enable new, safer minimally invasive surgical procedures by moving along curved paths to reach difficult-to-reach sites in a patient's anatomy. Operating these devices is challenging due to their complex, unintuitive kinematics and the need to avoid sensitive structures in the anatomy. In this paper, we present a motion planning method that computes collision-free motion plans for concentric tube robots at interactive rates. Our method's high speed enables a user to continuously and freely move the robot's tip while the motion planner ensures that the robot's shaft does not collide with any anatomical obstacles. 
Our approach uses a highly accurate mechanical model of tube interactions, which is important since small movements of the tip position may require large changes in the shape of the device's shaft. Our motion planner achieves its high speed and accuracy by combining offline precomputation of a collision-free roadmap with online position control. We demonstrate our interactive planner in a simulated neurosurgical scenario where a user guides the robot's tip through the environment while the robot automatically avoids collisions with the anatomical obstacles.}, author = {Torres, Luis G. and Baykal, Cenk and Alterovitz, Ron}, booktitle = {Proceedings - IEEE International Conference on Robotics and Automation}, doi = {10.1109/ICRA.2014.6907112}, isbn = {10504729 (ISSN)}, issn = {10504729}, pages = {1915--1921}, publisher = {Institute of Electrical and Electronics Engineers Inc.}, title = {{Interactive-rate motion planning for concentric tube robots}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84929208407{\&}doi=10.1109{\%}2FICRA.2014.6907112{\&}partnerID=40{\&}md5=fda323933ad36f2a50b9d3d5493cdb34}, year = {2014} } @article{RN813, abstract = {Quantitative imaging features (radiomics) extracted from apparent diffusion coefficient (ADC) maps of rectal cancer patients can provide additional information to support treatment decision. Most available radiomic computational packages allow extraction of hundreds to thousands of features. However, two major factors can influence the reproducibility of radiomic features: interobserver variability, and imaging filtering applied prior to features extraction. In this exploratory study we seek to determine to what extent various commonly-used features are reproducible with regards to the mentioned factors using ADC maps from two different clinics (56 patients). Features derived from intensity distribution histograms are less sensitive to manual tumour delineation differences, noise in ADC images, pixel size resampling and intensity discretization. Shape features appear to be strongly affected by delineation quality. On the whole, textural features appear to be poorly or moderately reproducible with respect to the image pre-processing perturbations we reproduced.}, author = {Traverso, Alberto and Kazmierski, Michal and Shi, Zhenwei and Kalendralis, Petros and Welch, Mattea and Nissen, Henrik Dahl and Jaffray, David and Dekker, Andre and Wee, Leonard}, doi = {10.1016/j.ejmp.2019.04.009}, issn = {1724191X}, journal = {Physica Medica}, keywords = {Apparent diffusion coefficient,Diffusion weighted imaging,Locally advanced rectal carcinoma,Magnetic resonance imaging,Radiomic feature reproducibility}, pages = {44--51}, title = {{Stability of radiomic features of apparent diffusion coefficient (ADC) maps for locally advanced rectal cancer in response to image pre-processing}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {61}, year = {2019} } @inproceedings{RN906, abstract = {{\textcopyright} 2017 SPIE. Image registration of preprocedural contrast-enhanced CTs to intraprocedual cone-beam computed tomography (CBCT) can provide additional information for interventional liver oncology procedures such as transcatheter arterial chemoembolisation (TACE). In this paper, a novel similarity metric for gradient-based image registration is proposed. The metric relies on the patch-based computation of histograms of oriented gradients (HOG) building the basis for a feature descriptor. 
The metric was implemented in a framework for rigid 3D-3D-registration of pre-interventional CT with intra-interventional CBCT data obtained during the workflow of a TACE. To evaluate the performance of the new metric, the capture range was estimated based on the calculation of the mean target registration error and compared to the results obtained with a normalized cross correlation metric. The results show that 3D HOG feature descriptors are suitable as image-similarity metric and that the novel metric can compete with established methods in terms of registration accuracy.}, author = {Trimborn, Barbara and Wolf, Ivo and Abu-Sammour, Denis and Henzler, Thomas and Schad, Lothar R. and Z{\"{o}}llner, Frank G.}, booktitle = {Medical Imaging 2017: Image-Guided Procedures, Robotic Interventions, and Modeling}, doi = {10.1117/12.2255601}, editor = {Webster, R J and Fei, B}, isbn = {9781510607156}, issn = {16057422}, pages = {101350C}, publisher = {SPIE}, title = {{Investigation of 3D histograms of oriented gradients for image-based registration of CT with interventional CBCT}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85020450535{\&}doi=10.1117{\%}2F12.2255601{\&}partnerID=40{\&}md5=d433d81da054c950f6c4f2534eda48af}, volume = {10135}, year = {2017} } @article{RN803, abstract = {We report here a multipurpose dynamic-interface-based segmentation tool, suitable for segmenting planar, cylindrical, and spherical surfaces in 3D. The method is fast enough to be used conveniently even for large images. Its implementation is straightforward and can be easily realized in many environments. Its memory consumption is low, and the set of parameters is small and easy to understand. The method is based on the Edwards-Wilkinson equation, which is traditionally used to model the equilibrium fluctuations of a propagating interface under the influence of temporally and spatially varying noise. We report here an adaptation of this equation into multidimensional image segmentation, and its efficient discretization.}, author = {Turpeinen, Tuomas and Myllys, Markko and Kekalainen, Pekka and Timonen, Jussi}, doi = {10.1109/TIP.2015.2484061}, issn = {10577149}, journal = {IEEE Transactions on Image Processing}, keywords = {Gray-scale,Image segmentation,Mathematical model,Noise,Surface morphology,Surface topography,Three-dimensional displays}, number = {12}, pages = {5696--5705}, title = {{Interface Detection Using a Quenched-Noise Version of the Edwards-Wilkinson Equation}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {24}, year = {2015} } @inproceedings{RN835, abstract = {Automatic segmentation of brain Magnetic Resonance Imaging (MRI) images is one of the vital steps for quantitative analysis of brain for further inspection. In this paper, NeuroNet has been adopted to segment the brain tissues (white matter (WM), grey matter (GM) and cerebrospinal fluid (CSF)) which uses Residual Network (ResNet) in encoder and Fully Convolution Network (FCN) in the decoder. To achieve the best performance, various hyper-parameters have been tuned, while, network parameters (kernel and bias) were initialized using the NeuroNet pre-trained model. Different pre-processing pipelines have also been introduced to get a robust trained model. The model has been trained and tested on IBSR18 data-set. 
To validate the research outcome, performance was measured quantitatively using Dice Similarity Coefficient (DSC) and is reported on average as 0.84 for CSF, 0.94 for GM, and 0.94 for WM. The outcome of the research indicates that for the IBSR18 data-set, pre-processing and proper tuning of hyper-parameters for NeuroNet model have improvement in DSC for the brain tissue segmentation.}, author = {Tushar, Fakrul Islam and Alyafi, Basel and Hasan, Md Kamrul and Dahal, Lavsen}, booktitle = {2019 Joint 8th International Conference on Informatics, Electronics and Vision, ICIEV 2019 and 3rd International Conference on Imaging, Vision and Pattern Recognition, icIVPR 2019 with International Conference on Activity and Behavior Computing, ABC 2019}, doi = {10.1109/ICIEV.2019.8858515}, isbn = {9781728107868}, keywords = {Brain tissue segmentation,Dice Similarity Coefficient (DSC),Fully Convolution Network (FCN),IBSR18,Magnetic resonance imaging (MRI),NeuroNet,Residual Network (ResNet)}, pages = {223--227}, publisher = {Institute of Electrical and Electronics Engineers Inc.}, title = {{Brain tissue segmentation using neuronet with different pre-processing techniques}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85074060440{\&}doi=10.1109{\%}2FICIEV.2019.8858515{\&}partnerID=40{\&}md5=8ce695907c0589166ae123e5259efcf0}, year = {2019} } @inproceedings{RN925, abstract = {Tumor contouring is a challenging task for physicians especially when functional as well as anatomical images are incorporated. This paper presents a semi-automated technique to solve this problem. The proposed method is a variant of the level-set segmentation where the initial region is based on the functional image and the speed function combines both images. The presented approach allows the user to balance between the information of two input images. The method was evaluated on registered head and neck PET-MRI image pairs using manually defined tumor contours as reference. The algorithm was tested for various types of tumors using different weights to combine the functional and the anatomical information. The best results showed good correlation with the reference (3{\%} volume difference and 80{\%} DICE similarity in average).}, author = {Urb{\'{a}}n, Szabolcs and Rusk{\'{o}}, L{\'{a}}szl{\'{o}} and Nagy, Antal}, booktitle = {Computational Vision and Medical Image Processing V - Proceedings of 5th Eccomas Thematic Conference on Computational Vision and Medical Image Processing, VipIMAGE 2015}, doi = {10.1201/b19241-35}, editor = {Tavares, J M R S and Jorge, R M N}, isbn = {9781138029262}, pages = {209--214}, publisher = {CRC Press/Balkema}, title = {{Semi-automatic tumor contouring method using PET and MRI medical images}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84959298536{\&}partnerID=40{\&}md5=54e2f8c3e13fa638156bc0e79854a686}, year = {2016} } @inproceedings{RN942, abstract = {The introduction of 4D image acquisition techniques has made it possible to analyse anatomical motion in vivo. With 4D computed tomography (CT), it is now possible to study the motion of joints leading to a deeper understanding of the role of morphology on joint motion and a better assessment of pathologies. Although 4D CT shows a lot of opportunities, the workload to process these 4D acquisitions has increased dramatically. A major part of this process is segmentation, the delineation of the objects of interest within the image volume. 
This paper presents an algorithm to accelerate this step by registering the segmentation of one frame onto the others. This results in a fast segmentation of the whole 4D dataset, all identical in shape. We show that the proposed algorithm is able to segment two carpal bones, the trapezoid and the scaphoid, with results close to a manual segmentation in less than 5{\%} of the processing time.}, author = {{Van Dijck}, Christophe and Kerkhof, Faes and Vereecke, Evie and Wirix-Speetjens, Roel and {Vander Sloten}, Jos}, booktitle = {Proceedings - International Symposium on Biomedical Imaging}, doi = {10.1109/ISBI.2015.7163950}, isbn = {9781479923748}, issn = {19458452}, keywords = {4D CT,Registration,Segmentation}, pages = {621--624}, publisher = {IEEE Computer Society}, title = {{Segmentation of 4D CT bone images by sequential registration}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84944313955{\&}doi=10.1109{\%}2FISBI.2015.7163950{\&}partnerID=40{\&}md5=7b264d4c8cefb0383cb157bd19741c43}, volume = {2015-July}, year = {2015} } @article{RN895, abstract = {Radiomics aims to quantify phenotypic characteristics on medical imaging through the use of automated algorithms. Radiomic artificial intelligence (AI) technology, either based on engineered hard-coded algorithms or deep learning methods, can be used to develop noninvasive imaging-based biomarkers. However, lack of standardized algorithm definitions and image processing severely hampers reproducibility and comparability of results. To address this issue, we developed PyRadiomics, a flexible open-source platform capable of extracting a large panel of engineered features from medical images. PyRadiomics is implemented in Python and can be used standalone or using 3D Slicer. Here, we discuss the workflow and architecture of PyRadiomics and demonstrate its application in characterizing lung lesions. Source code, documentation, and examples are publicly available at www.radiomics.io. With this platform, we aim to establish a reference standard for radiomic analyses, provide a tested and maintained resource, and to grow the community of radiomic developers addressing critical needs in cancer research. Cancer Res; 77(21); e104-7.}, author = {{Van Griethuysen}, Joost J.M. and Fedorov, Andriy and Parmar, Chintan and Hosny, Ahmed and Aucoin, Nicole and Narayan, Vivek and Beets-Tan, Regina G.H. and Fillion-Robin, Jean Christophe and Pieper, Steve and Aerts, Hugo J.W.L.}, doi = {10.1158/0008-5472.CAN-17-0339}, issn = {15387445}, journal = {Cancer Research}, number = {21}, pages = {e104--e107}, pmid = {29092951}, title = {{Computational radiomics system to decode the radiographic phenotype}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85035021353{\&}doi=10.1158{\%}2F0008-5472.CAN-17-0339{\&}partnerID=40{\&}md5=e2bbcfe2a2e6a8ec00b02cf00ef7f2fa}, volume = {77}, year = {2017} } @inproceedings{RN927, abstract = {In this paper we consider the problem of building parallel methods for three-dimensional human organs' geometrical reconstruction based on the active contour method. The idea of computing decomposition is based on the opportunity to visit all layers in both directions from the starting layer and to use several starting layers that have initial contours generated from the reference body geometrical model. Using the ITK library we have implemented parallel kidney segmentation algorithms.
The first parallel version gave acceleration of about 1.6 for 2 threads on CPU, and the second - a significant increase in efficiency with an increase in the number of points in the contour, but in the range 3.2 times for 8 threads on CPU. We are planning to transfer the algorithm on the GPU.}, author = {Vasil'ev, E. P. and Belokamenskaja, A. A. and Novozhilov, M. M. and Turlapov, V. E.}, booktitle = {CEUR Workshop Proceedings}, editor = {Starodubov, I and Sokolinsky, L}, isbn = {16130073 (ISSN)}, issn = {16130073}, keywords = {3D reconstruction,3D segmentation,Reference geometrical model,Segmentation,Tomography active contour method}, pages = {482--489}, publisher = {CEUR-WS}, title = {{A parallel algorithm for 3D reconstruction of internal organs according to imaging based on the active contour model}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84978488492{\&}partnerID=40{\&}md5=ce7b9f7668691df1ee707e7ab33ab66e}, volume = {1576}, year = {2016} } @inproceedings{RN855, abstract = {In this work, a semi-automatic computational strategy is proposed for brain tumor segmentation. The filtering (erosion + gaussian filters), segmentation (level set technique) and quantification (BT volume) stages are applied to magnetic resonance imaging in order to generate the three-dimensional morphology of brain tumors. The Jaccard's Similarity Index is considered to contrast manual segmentation with semi-automatic segmentations of brain tumor. In this sense, the highest Jaccard's Similarity Index provides the best parameters of the techniques that constitute the semi-automatic computational strategy. Results are promising, showing an excellent correlation between these segmentations. The volume is used for the brain tumors characterization.}, author = {Vera, M. and Hu{\'{e}}rfano, Y. and Gelvez, E. and Valbuena, O. and Salazar, J. and Molina, V. and Vera, M. I. and Salazar, W. and S{\'{a}}enz, F.}, booktitle = {Journal of Physics: Conference Series}, doi = {10.1088/1742-6596/1160/1/012002}, editor = {E.D, V -Nino and E.D, V -Nino and Almeida, E G}, isbn = {17426588 (ISSN)}, issn = {17426596}, number = {1}, publisher = {Institute of Physics Publishing}, title = {{Segmentation of brain tumors using a semi-automatic computational strategy}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85062482878{\&}doi=10.1088{\%}2F1742-6596{\%}2F1160{\%}2F1{\%}2F012002{\&}partnerID=40{\&}md5=03a70ac3ff75e964350486d67a57b008}, volume = {1160}, year = {2019} } @article{RN940, abstract = {To explore contrast (C) and homogeneity (H) gray-level co-occurrence matrix texture features on T2-weighted (T2w) Magnetic Resonance (MR) images and apparent diffusion coefficient (ADC) maps for predicting prostate cancer (PCa) aggressiveness, and to compare them with traditional ADC metrics for differentiating low- from intermediate/high-grade PCas. The local Ethics Committee approved this prospective study of 93 patients (median age, 65 years), who underwent 1.5 T multiparametric endorectal MR imaging before prostatectomy. Clinically significant (volume≥0.5 ml) peripheral tumours were outlined on histological sections, contoured on T2w and ADC images, and their pathological Gleason Score (pGS) was recorded. C, H, and traditional ADC metrics (mean, median, 10th and 25th percentile) were calculated on the largest lesion slice, and correlated with the pGS through the Spearman correlation coefficient. 
The area under the receiver operating characteristic curve (AUC) assessed how parameters differentiate pGS = 6 from pGS≥7. The dataset included 49 clinically significant PCas with a balanced distribution of pGS. The Spearman $\rho$ and AUC values on ADC were:-0.489, 0.823 (mean);-0.522, 0.821 (median);-0.569, 0.854 (10th percentile);-0.556, 0.854 (25th percentile);-0.386, 0.871 (C); 0.533, 0.923 (H); while on T2w they were:-0.654, 0.945 (C); 0.645, 0.962 (H). AUC of H on ADC and T2w, and C on T2w were significantly higher than that of the mean ADC (p = 0.05). H and C calculated on T2w images outperform ADC parameters in correlating with pGS and differentiating low- from intermediate/high-risk PCas, supporting the role of T2w MR imaging in assessing PCa biological aggressiveness.}, author = {Vignati, A. and Mazzetti, S. and Giannini, V. and Russo, F. and Bollito, E. and Porpiglia, F. and Stasi, M. and Regge, D.}, doi = {10.1088/0031-9155/60/7/2685}, issn = {13616560}, journal = {Physics in Medicine and Biology}, keywords = {ADC maps,GLCM texture feature,T2-weighted MR imaging,pathologic Gleason score correlation,prostate cancer aggressiveness}, number = {7}, pages = {2685--2701}, title = {{Texture features on T2-weighted magnetic resonance imaging: New potential biomarkers for prostate cancer aggressiveness}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84925609486{\&}doi=10.1088{\%}2F0031-9155{\%}2F60{\%}2F7{\%}2F2685{\&}partnerID=40{\&}md5=4b5ae2306deef2604c6595214b20549c}, volume = {60}, year = {2015} } @article{RN876, abstract = {Recent improvements in cardiac computed tomography (CCT) allow for whole-heart functional studies to be acquired at low radiation dose ({\textless}2mSv) and high-temporal resolution ({\textless}100ms) in a single heart beat. Although the extraction of regional functional information from these images is of great clinical interest, there is a paucity of research into the quantification of regional function from CCT, contrasting with the large body of work in echocardiography and cardiac MR. Here we present the Simultaneous Subdivision Surface Registration (SiSSR) method: a fast, semi-automated image analysis pipeline for quantifying regional function from contrast-enhanced CCT. For each of thirteen adult male canines, we construct an anatomical reference mesh representing the left ventricular (LV) endocardium, obviating the need for a template mesh to be manually sculpted and initialized. We treat this generated mesh as a Loop subdivision surface, and adapt a technique previously described in the context of 3-D echocardiography to register these surfaces to the endocardium efficiently across all cardiac frames simultaneously. Although previous work performs the registration at a single resolution, we observe that subdivision surfaces naturally suggest a multiresolution approach, leading to faster convergence and avoiding local minima. We additionally make two notable changes to the cost function of the optimization, explicitly encouraging plausible biological motion and high mesh quality. Finally, we calculate an accepted functional metric for CCT from the registered surfaces, and compare our results to an alternate state-of-the-art CCT method.}, author = {Vigneault, Davis M. and Pourmorteza, Amir and Thomas, Marvin L. and Bluemke, David A. and Noble, J. 
Alison}, doi = {10.1016/j.media.2018.03.009}, issn = {13618423}, journal = {Medical Image Analysis}, keywords = {Cardiac computed tomography,Loop subdivision surface,Personalized cardiac mesh generation,Regional cardiac function}, pages = {215--228}, title = {{SiSSR: Simultaneous subdivision surface registration for the quantification of cardiac function from computed tomography in canines}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85044919491{\&}doi=10.1016{\%}2Fj.media.2018.03.009{\&}partnerID=40{\&}md5=24f381dd27d1b120828f01ad803dafe5}, volume = {46}, year = {2018} } @article{RN920, abstract = {Purpose: Magnetic particle imaging (MPI) is a quantitative imaging modality that allows the distribution of superparamagnetic nanoparticles to be visualized. Compared to other imaging techniques like x-ray radiography, computed tomography (CT), and magnetic resonance imaging (MRI), MPI only provides a signal from the administered tracer, but no additional morphological information, which complicates geometry planning and the interpretation of MP images. The purpose of the authors' study was to develop bimodal fiducial markers that can be visualized by MPI and MRI in order to create MP-MR fusion images. Methods: A certain arrangement of three bimodal fiducial markers was developed and used in a combined MRI/MPI phantom and also during in vivo experiments in order to investigate its suitability for geometry planning and image fusion. An algorithm for automated marker extraction in both MR and MP images and rigid registration was established. Results: The developed bimodal fiducial markers can be visualized by MRI and MPI and allow for geometry planning as well as automated registration and fusion of MR-MP images. Conclusions: To date, exact positioning of the object to be imaged within the field of view (FOV) and the assignment of reconstructed MPI signals to corresponding morphological regions has been difficult. The developed bimodal fiducial markers and the automated image registration algorithm help to overcome these difficulties.}, author = {Werner, F. and Jung, C. and Hofmann, M. and Werner, R. and Salamon, J. and S{\"{a}}ring, D. and Kaul, M. G. and Them, K. and Weber, O. M. and Mummert, T. and Adam, G. and Ittrich, H. and Knopp, T.}, doi = {10.1118/1.4948998}, issn = {00942405}, journal = {Medical Physics}, keywords = {MPI,MRI,fiducial marker,geometry planning,registration}, number = {6}, pages = {2884--2893}, title = {{Geometry planning and image registration in magnetic particle imaging using bimodal fiducial markers}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84969765405{\&}doi=10.1118{\%}2F1.4948998{\&}partnerID=40{\&}md5=6ef63d0a878e48dd0de13440e94b2c70}, volume = {43}, year = {2016} } @article{RN949, abstract = {The lung parenchyma provides a maximal surface area of blood-containing capillaries that are in close contact with a large surface area of the air-containing alveoli. Volume and surface area of capillaries are the classic stereological parameters to characterize the alveolar capillary network (ACN) and have provided essential structure-function information of the lung. When loss (rarefaction) or gain (angiogenesis) of capillaries occurs, these parameters may not be sufficient to provide mechanistic insight. Therefore, it would be desirable to estimate the number of capillaries, as it contains more distinct and mechanistically oriented information.
Here, we present a new stereological method to estimate the number of capillary loops in the ACN. One advantage of this method is that it is independent of the shape, size, or distribution of the capillaries. We used consecutive, 1 $\mu$m thick sections from epoxy resin-embedded material as a physical disector. The Euler-Poincar{\'{e}} characteristic of capillary networks can be estimated by counting the easily recognizable topological constellations of “islands,” “bridges,” and “holes.” The total number of capillary loops in the ACN can then be calculated from the Euler-Poincar{\'{e}} characteristic. With the use of the established estimator of alveolar number, it is possible to obtain the mean number of capillary loops per alveolus. In conclusion, estimation of alveolar capillaries by design-based stereology is an efficient and unbiased method to characterize the ACN and may be particularly useful for studies on emphysema, pulmonary hypertension, or lung development.}, author = {Willf{\"{u}}hr, Alper and Brandenberger, Christina and Piatkowski, Tanja and Grothausmann, Roman and Nyengaard, Jens Randel and Ochs, Matthias and M{\"{u}}hlfeld, Christian}, doi = {10.1152/ajplung.00410.2014}, issn = {15221504}, journal = {American Journal of Physiology - Lung Cellular and Molecular Physiology}, keywords = {Capillary number,Euler number,Stereology}, number = {11}, pages = {L1286--L1293}, title = {{Estimation of the number of alveolar capillaries by the Euler number (Euler-Poincar{\'{e}} characteristic)}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84949526543{\&}doi=10.1152{\%}2Fajplung.00410.2014{\&}partnerID=40{\&}md5=56c6058ebfd732edb1588ab2e55b6ec9}, volume = {309}, year = {2015} } @inproceedings{RN870, abstract = {A rigid registration is a crucial initial step for a correct deformable medical image registration. In this work, we propose a rigid registration method resistant to large deformations and missing data. The proposed method is based on bone segmentation, feature matching and outlier elimination, inspired by a traditional computer vision approach. The method is compared to other state-of-the-art algorithms, the iterative closest point and intensity-based registration, using a widely available dataset. The proposed algorithm does not fall into local minima and reconstructs correct deformations for average vector length greater than 150 mm and data overlap ratio less than 50{\%}, where currently applied methods fail.
The algorithm is evaluated using angle and magnitude errors between corresponding deformation vectors, Hausdorff distance between bone segmentations and resistance to falling into local minima.}, author = {Wodzinski, Marek and Skalski, Andrzej}, booktitle = {International Conference on Systems, Signals, and Image Processing}, doi = {10.1109/IWSSIP.2018.8439679}, editor = {Zamuda, A and Planinsic, P and Gleich, D}, isbn = {9781538669792}, issn = {21578702}, keywords = {Image Registration,Initial Alignment,Medical Imaging,Rigid Registration}, publisher = {IEEE Computer Society}, title = {{Rigid Registration Method for Medical Volumes with Large Deformations and Missing Data}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85053151071{\&}doi=10.1109{\%}2FIWSSIP.2018.8439679{\&}partnerID=40{\&}md5=c8fc60e15ec9db845e0390f82b94df27}, volume = {2018-June}, year = {2018} } @article{RN913, abstract = {Prefailure microdamage in bone tissue is considered to be the most detrimental factor in defining its strength and toughness with respect to age and disease. To understand the influence of microcracks on bone mechanics it is necessary to assess their morphology and three-dimensional distribution. This requirement reaches beyond classic histology and stereology, and methods to obtain such information are currently missing. Therefore, the aim of the study was to develop a methodology that allows the characterization of three-dimensional microcrack distributions in bulk bone samples. Four dumbbell-shaped specimens of human cortical bone of a 77-year-old female donor were loaded beyond yield in either tension, compression or torsion (one control). Subsequently, synchrotron radiation micro-computed tomography (SR$\mu$CT) was used to obtain phase-contrast images of the damaged samples. A microcrack segmentation algorithm was developed and used to segment microcrack families for which microcrack orientation distribution functions were determined. Distinct microcrack families were observed for each load case that resulted in distinct orientation distribution functions. Microcracks had median areas of approximately 4.7 $\mu$m$^2$, 33.3 $\mu$m$^2$ and 64.0 $\mu$m$^2$ for tension, compression and torsion. Verifying the segmentation algorithm against a manually segmented ground truth showed good results when comparing the microcrack orientation distribution functions. A size dependence was noted when investigating the orientation distribution functions with respect to the size of the volume of interest used for their determination. Furthermore, a scale separation between tensile, compressive and torsional microcracks was noticeable. Visual comparison to classic histology indicated that microcrack families were successfully distinguished. We propose a methodology to analyse three-dimensional microcrack distributions in overloaded cortical bone. Such information could improve our understanding of bone microdamage and its impact on bone failure in relation to tissue age and disease.}, author = {Wolfram, U. and Schwiedrzik, J. J. and Mirzaali, M. J. and B{\"{u}}rki, A. and Varga, P. and Olivier, C. and Peyrin, F. and Zysset, P.
K.}, doi = {10.1111/jmi.12440}, issn = {13652818}, journal = {Journal of Microscopy}, keywords = {Cortical bone,X-ray phase micro-tomography,microcrack segmentation,microdamage,orientation distribution function,synchrotron radiation}, number = {3}, pages = {268--281}, title = {{Characterizing microcrack orientation distribution functions in osteonal bone samples}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84978744015{\&}doi=10.1111{\%}2Fjmi.12440{\&}partnerID=40{\&}md5=eaacef5f6889af31fbaf82da8a2c4344}, volume = {264}, year = {2016} } @article{RN796, abstract = {In large scale biological experiments, like high-throughput or high-content cellular screening, the amount and the complexity of images to be analyzed are steadily increasing. To handle and process these images, well defined image processing and analysis steps need to be performed by applying dedicated workflows. Multiple software tools have emerged with the aim to facilitate creation of such workflows by integrating existing methods, tools, and routines, and by adapting them to different applications and questions, as well as making them reusable and interchangeable. In this review, we describe workflow systems for the integration of microscopy image analysis techniques with focus on KNIME and Galaxy.}, author = {Wollmann, Thomas and Erfle, Holger and Eils, Roland and Rohr, Karl and Gunkel, Manuel}, doi = {10.1016/j.jbiotec.2017.07.019}, issn = {18734863}, journal = {Journal of Biotechnology}, keywords = {Galaxy,Image analysis,KNIME,Microscopy,Pipeline,Workflow,de.NBI}, pages = {70--75}, title = {{Workflows for microscopy image analysis and cellular phenotyping}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {261}, year = {2017} } @inproceedings{RN976, abstract = {Selecting regions of interest (ROI) of the medical images is an important task in medical image processing. Manual selection of ROIs serves as the main method for single images and it has a high accuracy. However, it will become infeasible to manually segment ROIs on a large number of images. Observing this problem, this paper proposes a fast and accurate segmentation method to obtain ROIs on a batch of medical images. Firstly, we segment the standard brain image St which has not been injected with tracer. Secondly, we use a B-Spline elastic registration method to get the inverse-registration parameters. Thirdly, we get the template image Te with the registration parameters. Finally, we search the target region by template matching. Experimental results show that the proposed method performs well on medical image segmentation.}, author = {Wu, Jiatao and Li, Yong and Peng, Yun and Fan, Chunxiao}, booktitle = {IS and T International Symposium on Electronic Imaging Science and Technology}, doi = {10.2352/ISSN.2470-1173.2017.2.VIPC-404}, editor = {Stevenson, R L and Delp, E}, isbn = {24701173 (ISSN)}, issn = {24701173}, pages = {38--43}, publisher = {Society for Imaging Science and Technology}, title = {{A fast and accurate segmentation method for medical images}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85041527605{\&}doi=10.2352{\%}2FISSN.2470-1173.2017.2.VIPC-404{\&}partnerID=40{\&}md5=611236c5db623b23e4984bfc71e20bb2}, year = {2017} } @article{RN784, abstract = {Typical packages used for coregistration in functional image analyses include automated image registration (AIR) and statistical parametric mapping (SPM). 
However, both methods have limited-dimension deformation models. A fully deformable model, which combines the piecewise linear registration for coarse alignment with demons algorithm for voxel-level refinement, allows a higher degree of spatial deformation. This leads to a more accurate colocalization of the functional signal from different subjects and therefore can produce a more reliable group average signal. We quantitatively compared the performance of the three different registration approaches through a series of experiments and we found that the fully deformable model consistently produces a more accurate structural segmentation and a more reliable functional signal colocalization than does AIR or SPM. {\textcopyright} 2006 Wiley-Liss, Inc.}, author = {Wu, Minjie and Carmichael, Owen and Lopez-Garcia, Pilar and Carter, Cameron S. and Aizenstein, Howard J.}, doi = {10.1002/hbm.20216}, issn = {10659471}, journal = {Human Brain Mapping}, keywords = {Atlas-based segmentation,Deformable model,Image registration,fMRI}, number = {9}, pages = {747--754}, title = {{Quantitative comparison of AIR, SPM, and the fully deformable model for atlas-based segmentation of functional and structural MR images}}, type = {Journal Article}, url = {https://onlinelibrary.wiley.com/doi/abs/10.1002/hbm.20216}, volume = {27}, year = {2006} } @article{RN909, abstract = {Ultrasound tomography (UST) image segmentation is fundamental in breast density estimation, medicine response analysis, and anatomical change quantification. Existing methods are time consuming and require massive manual interaction. To address these issues, an automatic algorithm based on GrabCut (AUGC) is proposed in this paper. The presented method designs automated GrabCut initialization for incomplete labeling and is sped up with multicore parallel programming. To verify performance, AUGC is applied to segment thirty-two in vivo UST volumetric images. The performance of AUGC is validated with breast overlapping metrics (Dice coefficient (D), Jaccard (J), and False positive (FP)) and time cost (TC). Furthermore, AUGC is compared to other methods, including Confidence Connected Region Growing (CCRG), watershed, and Active Contour based Curve Delineation (ACCD). Experimental results indicate that AUGC achieves the highest accuracy (D=0.9275 and J=0.8660 and FP=0.0077) and takes on average about 4 seconds to process a volumetric image. It was said that AUGC benefits large-scale studies by using UST images for breast cancer screening and pathological quantification.}, author = {Wu, Shibin and Yu, Shaode and Zhuang, Ling and Wei, Xinhua and Sak, Mark and Duric, Neb and Hu, Jiani and Xie, Yaoqin}, doi = {10.1155/2017/2059036}, issn = {23146141}, journal = {BioMed Research International}, title = {{Automatic Segmentation of Ultrasound Tomography Image}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85030642114{\&}doi=10.1155{\%}2F2017{\%}2F2059036{\&}partnerID=40{\&}md5=c0598599370d8f954d410519826406e2}, volume = {2017}, year = {2017} } @article{RN849, abstract = {Purpose: Diagnostic imaging procedures require optimization depending on the medical task at hand, the apparatus being used, and patient physical and anatomical characteristics. The assessment of the radiation dose and associated risks plays a key role in safety and quality management for radiation protection purposes. 
In this work, we aim at developing a methodology for personalized organ-level dose assessment in x-ray computed tomography (CT) imaging. Methods: Regional voxel models representing reference patient-specific computational phantoms were generated through image segmentation of CT images for four patients. The best-fitting anthropomorphic phantoms were selected from a previously developed comprehensive phantom library according to patient's anthropometric parameters, then registered to the anatomical masks (skeleton, lung, and body contour) of patients to produce a patient-specific whole-body phantom. Well-established image registration metrics including Jaccard's coefficients for each organ, organ mass, body perimeter, organ-surface distance, and effective diameter are compared between the reference patient model, registered model, and anchor phantoms. A previously validated Monte Carlo code is utilized to calculate the absorbed dose in target organs along with the effective dose delivered to patients. The calculated absorbed doses from the reference patient models are then compared with the produced personalized model, anchor phantom, and those reported by commercial dose monitoring systems. Results: The evaluated organ-surface distance and body effective diameter metrics show a mean absolute difference between patient regional voxel models, serving as reference, and patient-specific models around 4.4{\%} and 4.5{\%}, respectively. Organ-level radiation doses of patient-specific models are in good agreement with those of the corresponding patient regional voxel models with a mean absolute difference of 9.1{\%}. The mean absolute difference of organ doses for the best-fitting model extracted from the phantom library and Radimetrics™ commercial dose tracking software are 15.5{\%} and 41.1{\%}, respectively. Conclusion: The results suggest that the proposed methodology improves the accuracy of organ-level dose estimation in CT, especially for extreme cases [high body mass index (BMI) and large skeleton]. Patient-specific radiation dose calculation and risk assessment can be performed using the proposed methodology for both monitoring of cumulative radiation exposure of patients and epidemiological studies. Further validation using a larger database is warranted.}, author = {Xie, Tianwu and Akhavanallaf, Azadeh and Zaidi, Habib}, doi = {10.1002/mp.13471}, issn = {00942405}, journal = {Medical Physics}, keywords = {Monte Carlo simulations,computational models,radiation dose,radiological imaging}, number = {5}, pages = {2403--2411}, title = {{Construction of patient-specific computational models for organ dose estimation in radiological imaging}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85063261918{\&}doi=10.1002{\%}2Fmp.13471{\&}partnerID=40{\&}md5=7e6f21940c6d8f7bbd4b733b927a0667}, volume = {46}, year = {2019} } @article{RN867, abstract = {The radiation dose delivered to pregnant patients during radiologic imaging procedures raises health concerns because the developing embryo and fetus are considered to be highly radiosensitive. To appropriately weigh the diagnostic benefits against the radiation risks, the radiologist needs reasonably accurate and detailed estimates of the fetal dose. Expanding our previously developed series of computational phantoms for pregnant women, we here describe a personalized model for twin pregnancy, based on an actual clinical scan. 
Methods: The model is based on a standardized hybrid pregnant female and fetus phantom and on a clinical case of a patient who underwent an 18F-FDG PET/CT scan while expecting twins at 25 weeks' gestation. This model enabled us to produce a realistic physical representation of the pregnant patient and to estimate the maternal and fetal organ doses from the 18FFDG and CT components. The Monte Carlo N-Particle Extended general-purpose code was used for radiation transport simulation. Results: The 18F-FDG doses for the 2 fetuses were 3.78 and 3.99 mGy, and the CT doses were 0.76 and 0.70 mGy, respectively. Therefore, the relative contribution of 18F-FDG and CT to the total dose to the fetuses was about 84{\%} and 16{\%}, respectively. Meanwhile, for 18F-FDG, the calculated personalized absorbed dose was about 40{\%}-50{\%} higher than the doses reported by other dosimetry computer software tools. Conclusion: Our approach to constructing personalized computational models allows estimation of a patient- specific radiation dose, even in cases with unusual anatomic features such as a twin pregnancy. Our results also show that, even in twins, the fetal organ doses from both 18F-FDG and CT present a certain variability linked to the anatomic characteristics. The CT fetal dose is smaller than the 18F-FDG PET dose.}, author = {Xie, Tianwu and Zanotti-Fregonara, Paolo and Edet-Sanson, Agathe and Zaidi, Habib}, doi = {10.2967/jnumed.117.205286}, issn = {2159662X}, journal = {Journal of Nuclear Medicine}, keywords = {CT,Fetus,Monte Carlo simulation,Pregnant female models,Radiation dosimetry}, number = {9}, pages = {1451--1458}, title = {{Patient-specific computational model and dosimetry calculations for PET/CT of a patient pregnant with twins}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85053206243{\&}doi=10.2967{\%}2Fjnumed.117.205286{\&}partnerID=40{\&}md5=2ee14ea7a0f6a85f10442957bf4c6b2e}, volume = {59}, year = {2018} } @article{RN861, abstract = {Heterogeneity of echo-texture and lack of sharply delineated tissue boundaries in diagnostic ultrasound images make three-dimensional (3D) registration challenging, especially when the volumes to be registered are considerably different due to local changes. We implemented a novel computational method that optimally registers volumetric ultrasound image data containing significant and local anatomical differences. It is A Multi-stage, Multi-resolution, and Multi-volumes-of-interest Volume Registration Method. A single region registration is optimized first for a close initial alignment to avoid convergence to a locally optimal solution. Multiple sub-volumes of interest can then be selected as target alignment regions to achieve confident consistency across the volume. Finally, a multi-resolution rigid registration is performed on these sub-volumes associated with different weights in the cost function. We applied the method on 3D endovaginal ultrasound image data acquired from patients during biopsy procedure of the pelvic floor muscle. Systematic assessment of our proposed method through cross validation demonstrated its accuracy and robustness. The algorithm can also be applied on medical imaging data of other modalities for which the traditional rigid registration methods would fail.}, author = {Xing, Qi and Chitnis, Parag and Sikdar, Siddhartha and Alshiek, Jonia and {Abbas Shobeiri}, S. 
and Wei, Qi}, doi = {10.1371/journal.pone.0224583}, issn = {19326203}, journal = {PLoS ONE}, number = {11}, title = {{M3VR—A multi-stage, multi-resolution, and multi-volumes-of-interest volume registration method applied to 3D endovaginal ultrasound}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85075267830{\&}doi=10.1371{\%}2Fjournal.pone.0224583{\&}partnerID=40{\&}md5=18cd44cc5e14de2956edb1e316ba170a}, volume = {14}, year = {2019} } @article{RN810, abstract = {This paper had originally been published without open access, but has since been republished with open access.}, author = {Yaniv, Ziv and Lowekamp, Bradley C. and Johnson, Hans J. and Beare, Richard}, doi = {10.1007/s10278-018-0165-9}, issn = {1618727X}, journal = {Journal of Digital Imaging}, number = {6}, pages = {1118}, title = {{Correction to: SimpleITK Image-Analysis Notebooks: a Collaborative Environment for Education and Reproducible Research (Journal of Digital Imaging, (2018), 31, 3, (290-303), 10.1007/s10278-017-0037-8)}}, type = {Journal Article}, volume = {32}, year = {2019} } @article{RN872, abstract = {The stiffness of foam concrete depends primarily on the added porosity. Nevertheless, by performing 3D elastic numerical simulations on artificial unit cells in the frame of periodic homogenization, it is shown that describing foam concrete as a porous material is not sufficient to explain the experimental measurements of the Young modulus for added porosity higher than 40{\%}. Indeed, introducing sand as a third phase enables to recover accurate estimates of the Young modulus. Furthermore, for highly porous concrete foams, it is shown that the stress concentrates in thin members deprived of stiff sand particles, thus leading to a softer overall stiffness.}, author = {Youssef, M. Ben and Lavergne, F. and Sab, K. and Miled, K. and Neji, J.}, doi = {10.1016/j.cemconres.2018.04.021}, issn = {00088846}, journal = {Cement and Concrete Research}, keywords = {Foam concrete,Homogenization,Microstructure,Numerical simulations,Young modulus}, pages = {13--23}, title = {{Upscaling the elastic stiffness of foam concrete as a three-phase composite material}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85046623861{\&}doi=10.1016{\%}2Fj.cemconres.2018.04.021{\&}partnerID=40{\&}md5=2e128f4fc86a6df7726434940ab8ceb7}, volume = {110}, year = {2018} } @article{RN820, abstract = {Multi atlas based segmentation (MABS) uses a database of atlas images, and an atlas selection process is used to choose an atlas subset for registration and voting. In the current state of the art, atlases are chosen according to a similarity criterion between the target subject and each atlas in the database. In this paper, we propose a new concept for atlas selection that relies on selecting the best performing group of atlases rather than the group of highest scoring individual atlases. Experiments were performed using CT images of 50 patients, with contours of brainstem and parotid glands. The dataset was randomly split into two groups: 20 volumes were used as an atlas database and 30 served as target subjects for testing. Classic oracle selection, where atlases are chosen by the highest dice similarity coefficient (DSC) with the target, was performed. This was compared to oracle group selection, where all the combinations of atlas subgroups were considered and scored by computing DSC with the target subject.
Subsequently, convolutional neural networks were designed to predict the best group of atlases. The results were also compared with the selection strategy based on normalized mutual information (NMI). Oracle group was proven to be significantly better than classic oracle selection (p {\textless} 10-5). Atlas group selection led to a median ± interquartile DSC of 0.740 ± 0.084, 0.718 ± 0.086 and 0.670 ± 0.097 for brainstem and left/right parotid glands respectively, outperforming NMI selection 0.676 ± 0.113, 0.632 ± 0.104 and 0.606 ± 0.118 (p {\textless} 0.001) as well as classic oracle selection. The implemented methodology is a proof of principle that selecting the atlases by considering the performance of the entire group of atlases instead of each single atlas leads to higher segmentation accuracy, being even better than the current oracle strategy. This finding opens a new discussion about the most appropriate atlas selection criterion for MABS.}, author = {Zaffino, Paolo and Ciardo, Delia and Raudaschl, Patrik and Fritscher, Karl and Ricotti, Rosalinda and Alterio, Daniela and Marvaso, Giulia and Fodor, Cristiana and Baroni, Guido and Amato, Francesco and Orecchia, Roberto and Jereczek-Fossa, Barbara Alicja and Sharp, Gregory C. and Spadea, Maria Francesca}, doi = {10.1088/1361-6560/aac712}, issn = {13616560}, journal = {Physics in Medicine and Biology}, keywords = {atlas selection,convolutional neural network,medical image segmentation,multi atlas based segmentation,oracle selection}, number = {12}, pages = {9}, title = {{Multi atlas based segmentation: Should we prefer the best atlas group over the group of best atlases?}}, type = {Journal Article}, volume = {63}, year = {2018} } @article{RN955, abstract = {Background and objective: Modern microscopes can acquire multi-channel large histological data from tissues of human beings or animals, which contain rich biomedical information for disease diagnosis and biological feature analysis. However, due to the large size, fuzzy tissue structure, and complicated multiple elements integrated in the image color space, it is still a challenge for current software systems to effectively calculate histological data, show the inner tissue structures and unveil hidden biomedical information. Therefore, we developed new algorithms and a software platform to address this issue. Methods: This paper presents a multi-channel biomedical data computing and visualization system that can efficiently process large 3D histological images acquired from high-resolution microscopes. A novelty of our system is that it can dynamically display a volume of interest and extract tissue information using a layer-based data navigation scheme. During the data exploring process, the actual resolution of the loaded data can be dynamically determined and updated, and data rendering is synchronized in four display windows at each data layer, where 2D textures are extracted from the imaging volume and mapped onto the displayed clipping planes in 3D space. Results: To test the efficiency and scalability of this system, we performed extensive evaluations using several different hardware systems and large histological color datasets acquired from a CryoViz 3D digital system. The experimental results demonstrated that our system can deliver interactive data navigation speed and display detailed imaging information in real time, which is beyond the capability of commonly available biomedical data exploration software platforms.
Conclusion: Taking advantage of both CPU (central processing unit) main memory and GPU (graphics processing unit) graphics memory, the presented software platform can efficiently compute, process and visualize very large biomedical data and enhance data information. The performance of this system can satisfactorily address the challenges of navigating and interrogating volumetric multi-spectral large histological image at multiple resolution levels.}, author = {Zhang, Qi and Peters, Terry and Fenster, Aaron}, doi = {10.1016/j.compmedimag.2019.01.004}, issn = {18790771}, journal = {Computerized Medical Imaging and Graphics}, keywords = {Biomedical information,Large histological data,Layer-based data navigation,Texture extraction and mapping,Visualization,Volume of interest}, pages = {34--46}, title = {{Layer-based visualization and biomedical information exploration of multi-channel large histological data}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85061394368{\&}doi=10.1016{\%}2Fj.compmedimag.2019.01.004{\&}partnerID=40{\&}md5=da16d481a2565d81446681597a01b201}, volume = {72}, year = {2019} } @article{RN808, abstract = {Purpose: The study is to explore potential features and develop classification models for distinguishing benign and malignant lung lesions based on CT-radiomics features and PET metabolic parameters extracted from PET/CT images. Materials and methods: A retrospective study was conducted in baseline 18 F-flurodeoxyglucose positron emission tomography/ computed tomography (18 F-FDG PET/CT) images of 135 patients. The dataset was utilized for feature extraction of CT-radiomics features and PET metabolic parameters based on volume of interest, then went through feature selection and model development with strategy of five-fold cross-validation. Specifically, model development used support vector machine, PET metabolic parameters selection used Akaike's information criterion, and CT-radiomics were reduced by the least absolute shrinkage and selection operator method then forward selection approach. The diagnostic performances of CT-radiomics, PET metabolic parameters and combination of both were illustrated by receiver operating characteristic (ROC) curves, and compared by Delong test. Five groups of selected PET metabolic parameters and CT-radiomics were counted, and potential features were found and analyzed with Mann-Whitney U test. Results: The CT-radiomics, PET metabolic parameters, and combination of both among five subsets showed mean area under the curve (AUC) of 0.820 ± 0.053, 0.874 ± 0.081, and 0.887 ± 0.046, respectively. No significant differences in ROC among models were observed through pairwise comparison in each fold (P-value from 0.09 to 0.81, Delong test). The potential features were found to be SurfaceVolumeRatio and SUVpeak (P {\textless} 0.001 of both, U test). 
Conclusion: The classification models developed by CT-radiomics features and PET metabolic parameters based on PET/CT images have substantial diagnostic capacity on lung lesions.}, author = {Zhang, Ruiping and Zhu, Lei and Cai, Zhengting and Jiang, Wei and Li, Jian and Yang, Chengwen and Yu, Chunxu and Jiang, Bo and Wang, Wei and Xu, Wengui and Chai, Xiangfei and Zhang, Xiaodong and Tang, Yong}, doi = {10.1016/j.ejrad.2019.108735}, issn = {18727727}, journal = {European Journal of Radiology}, keywords = {CT-radiomics features,Lung lesion,PET metabolic parameters,Potential feature}, pages = {9}, title = {{Potential feature exploration and model development based on 18F-FDG PET/CT images for differentiating benign and malignant lung lesions}}, type = {Journal Article}, volume = {121}, year = {2019} } @article{RN844, abstract = {Linear registration is often the crucial first step for various types of image analysis. Although this is mathematically simple, failure is not uncommon. When investigating the brain by magnetic resonance imaging (MRI), the brain is the target organ for registration but the existence of other tissues, in addition to a variety of fields of view, different brain locations, orientations and anatomical features, poses some serious fundamental challenges. Consequently, a number of different algorithms have been put forward to minimize potential errors. In the present study, we tested a knowledge-based approach that can be combined with any form of registration algorithm. This approach consisted of a library of intermediate images (mediators) with known transformation to the target image. Test images were first registered to all mediators and the best mediator was selected to ensure optimum registration to the target. In order to select the best mediator, we evaluated two similarity criteria: the sum of squared differences and mutual information. This approach was applied to 48 mediators and 96 test images. In order to reduce one of the main drawbacks of the approach, increased computation time, we reduced the size of the library by clustering. Our results indicated clear improvement in registration accuracy.}, author = {Zhang, Xinyuan and Feng, Yanqiu and Chen, Wufan and Li, Xin and Faria, Andreia V. and Feng, Qianjin and Mori, Susumu}, doi = {10.3389/fnins.2019.00909}, issn = {1662453X}, journal = {Frontiers in Neuroscience}, keywords = {MNI space,T1-weighted brain image,dice value,linear registration,mediator selection}, title = {{Linear Registration of Brain MRI Using Knowledge-Based Multiple Intermediator Libraries}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85073020740{\&}doi=10.3389{\%}2Ffnins.2019.00909{\&}partnerID=40{\&}md5=eedcc60b9393d11a272628fc4bcf4e67}, volume = {13}, year = {2019} } @article{RN842, abstract = {In case of the complicated anatomical structure of the liver, landmark points on a three dimensional (3D) liver surface is hardly distinguished as corresponding pairs visually and automated landmark placing will be extremely time saving for liver registration. This paper presents a fully automated landmark detection method to register livers on multi-phase computed tomography (CT) images. Edge texture features and Support Vector Machine (SVM) are applied to detect the discriminated landmarks of the liver, including both surface and internal points.
Using the information of liver shape, 3D gray level co-occurrence matrix is calculated into texture features, from which the most informatics there features are selected by our optimization algorithm for choosing a sub-set of features from a high dimensional feature set. Then automated landmarks detection begins at scanning surface points on the pre-contrast and portal venous phase images, where positive outputs of the SVM classifier are regarded as initial candidates and final candidates are obtained by eliminating false positives (FPs). Finally, relied on the detected landmarks, thin plate splines (TPS) algorithm is used to register livers. Five surface landmarks, together with internal landmarks of the liver center from every 25 mm slice interval, can be detected automatically with sensitivity of 88.33{\%} and accuracy of 98.5{\%}. Surface-based mean error (SME) is decreased from 3.80 to 2.87 mm on average, while SME value has increased 32.4 and 8.0{\%} on average respectively when comparing with the rigid and B-spline methods. The results demonstrate that edge textures and SVM classifier are effective in the automated landmark detection. Together with TPS algorithm, fully automated liver registration is able to be achieved on multi-phase CT images.}, author = {Zhang, Xuejun and Tan, Xiaomin and Gao, Xin and Wu, Dongbo and Zhou, Xiangrong and Fujita, Hiroshi}, doi = {10.1007/s10586-018-2567-3}, issn = {15737543}, journal = {Cluster Computing}, keywords = {Edge textures,Landmark detection,Liver registration,TPS}, pages = {15305--15319}, title = {{Non-rigid registration of multi-phase liver CT data using fully automated landmark detection and TPS deformation}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85044478175{\&}doi=10.1007{\%}2Fs10586-018-2567-3{\&}partnerID=40{\&}md5=43af833a57941962e238b16f43fb845f}, volume = {22}, year = {2019} } @article{Li2018, abstract = {Purpose: The purpose of this study was to enhance the deformation range of demons-based deformable image registration (DIR) for large respiration-induced organ motion in the reconstruction of time-resolved four-dimensional magnetic resonance imaging (TR-4DMRI) for multi-breath motion simulation. Methods: A demons-based DIR algorithm was modified to enhance the deformation range for TR-4DMRI reconstruction using the super-resolution approach. A pseudo demons force was introduced to accelerate the coarse deformation in a multi-resolution (n = 3) DIR approach. The intensity gradient of a voxel was applied to its neighboring (5 × 5 × 5) voxels with a weight of Gaussian probability profile ($\sigma$ = 1 voxel) to extend the demons force, especially on those voxels that have little intensity gradience but high-intensity difference. A digital 4DMRI phantom with 3–8 cm diaphragmatic motions was used for DIR comparison. Six volunteers were scanned with two high-resolution (highR: 2 × 2 × 2 mm3) breath-hold (BH) 3DMR images at full inhalation (BHI) and full exhalation (BHE) and low-resolution (lowR: 5 × 5 × 5 mm3) free-breathing (FB) 3DMR cine images (2 Hz) under an IRB-approved protocol. A cross-consistency check (CCC) (BHI→FB←BHE), with voxel intensity correlation (VIC) and inverse consistency error (ICE), was introduced for cross-verification of TR-4DMRI reconstruction. Results: Using the digital phantom, the maximum deformable magnitude is doubled using the modified DIR from 3 to 6 cm at the diaphragm. 
In six human subjects, the first 15-iteration DIR using the pseudo force deforms 200 ± 150{\%} more than the original force, and succeeds in all 12 cases, whereas the original demons-based DIR failed in 67{\%} of tested cases. Using the pseudo force, high VIC ({\textgreater}0.9) and small ICE (1.6 ± 0.6 mm) values are observed for DIR of BHI{\&}BHE, BHI→FB, and BHE→FB. The CCC identifies four questionable cases, in which two cases need further DIR refinement, without missing true negative. Conclusions: The introduction of a pseudo demons force enhances the largest deformation magnitude up to 6 cm. The cross-consistency check ensures the quality of TR-4DMRI reconstruction. Further investigation is ongoing to fully characterize TR-4DMRI for potential multi-breathing-cycle radiotherapy simulation.}, author = {Li, Guang and Sun, August and Nie, Xingyu and Moody, Jason and Huang, Kirk and Zhang, Shirong and Sharma, Satyam and Deasy, Joseph}, doi = {10.1002/mp.13179}, issn = {00942405}, journal = {Medical Physics}, keywords = {deformable image registration (DIR),image-guided radiotherapy (IGRT),multi-breath motion assessment,time-resolved four-dimensional magnetic resonance }, month = {nov}, number = {11}, pages = {5197--5207}, publisher = {John Wiley and Sons Ltd.}, title = {{Introduction of a pseudo demons force to enhance deformation range for robust reconstruction of super-resolution time-resolved 4DMRI}}, volume = {45}, year = {2018} } @article{Klemm2017, abstract = {Purpose: Due to rapid developments in the research areas of medical imaging, medical image processing and robotics, computer-assisted interventions (CAI) are becoming an integral part of modern patient care. From a software engineering point of view, these systems are highly complex and research can benefit greatly from reusing software components. This is supported by a number of open-source toolkits for medical imaging and CAI such as the medical imaging interaction toolkit (MITK), the public software library for ultrasound imaging research (PLUS) and 3D Slicer. An independent inter-toolkit communication such as the open image-guided therapy link (OpenIGTLink) can be used to combine the advantages of these toolkits and enable an easier realization of a clinical CAI workflow. Methods: MITK-OpenIGTLink is presented as a network interface within MITK that allows easy to use, asynchronous two-way messaging between MITK and clinical devices or other toolkits. Performance and interoperability tests with MITK-OpenIGTLink were carried out considering the whole CAI workflow from data acquisition over processing to visualization. Results: We present how MITK-OpenIGTLink can be applied in different usage scenarios. In performance tests, tracking data were transmitted with a frame rate of up to 1000 Hz and a latency of 2.81 ms. Transmission of images with typical ultrasound (US) and greyscale high-definition (HD) resolutions of 640 × 480 and 1920 × 1080 is possible at up to 512 and 128 Hz, respectively. Conclusion: With the integration of OpenIGTLink into MITK, this protocol is now supported by all established open-source toolkits in the field. This eases interoperability between MITK and toolkits such as PLUS or 3D Slicer and facilitates cross-toolkit research collaborations. 
MITK and its submodule MITK-OpenIGTLink are provided open source under a BSD-style licence (http://mitk.org).}, author = {Klemm, Martin and Kirchner, Thomas and Gr{\"{o}}hl, Janek and Cheray, Dominique and Nolden, Marco and Seitel, Alexander and Hoppe, Harald and Maier-Hein, Lena and Franz, Alfred M.}, doi = {10.1007/s11548-016-1488-y}, issn = {18616429}, journal = {International Journal of Computer Assisted Radiology and Surgery}, keywords = {Computer-assisted interventions,Image-guided therapy,Interoperability,MITK,OpenIGTLink,Ultrasound}, month = {mar}, number = {3}, pages = {351--361}, publisher = {Springer Verlag}, title = {{MITK-OpenIGTLink for combining open-source toolkits in real-time computer-assisted interventions}}, volume = {12}, year = {2017} } @inproceedings{Yatabe2017, author = {Yatabe, Marilia and Ruellas, Antonio and Gomes, Liliane and Macron, Lucie and Lopinto, Julia and Paniagua, Beatriz and Budin, Francois and Cevidanes, Lucia}, booktitle = {2017 IADR/AADR/CADR General Session (San Francisco, California) }, title = {{Comparative Study of Three Methods to Compute 3D Craniofacial Angular Measurements}}, url = {https://iadr.abstractarchives.com/abstract/17iags-2636290/comparative-study-of-three-methods-to-compute-3d-craniofacial-angular-measurements}, year = {2017} } @article{Sebille2019, abstract = {Background: Deep brain stimulation of the pedunculopontine nucleus has been performed to treat dopamine-resistant gait and balance disorders in patients with degenerative diseases. The outcomes, however, are variable, which may be the result of the lack of a well-defined anatomical target. Objectives: The objectives of this study were to identify the main neuronal populations of the pedunculopontine and the cuneiform nuclei that compose the human mesencephalic locomotor region and to compare their 3-dimensional distribution with those found in patients with Parkinson's disease and progressive supranuclear palsy. Methods: We used high-field MRI, immunohistochemistry, and in situ hybridization to characterize the distribution of the different cell types, and we developed software to merge all data within a common 3-dimensional space. Results: We found that cholinergic, GABAergic, and glutamatergic neurons comprised the main cell types of the mesencephalic locomotor region, with the peak densities of cholinergic and GABAergic neurons similarly located within the rostral pedunculopontine nucleus. Cholinergic and noncholinergic neuronal losses were homogeneous in the mesencephalic locomotor region of patients, with the peak density of remaining neurons at the same location as in controls. The degree of denervation of the pedunculopontine nucleus was highest in patients with progressive supranuclear palsy, followed by Parkinson's disease patients with falls. Conclusions: The peak density of cholinergic and GABAergic neurons was located similarly within the rostral pedunculopontine nucleus not only in controls but also in pathological cases. The neuronal loss was homogeneously distributed and highest in the pedunculopontine nucleus of patients with falls, which suggests a potential pathophysiological link. {\textcopyright} 2018 International Parkinson and Movement Disorder Society.}, author = {S{\'{e}}bille, Sophie B. 
and Rolland, Anne Sophie and Faillot, Matthieu and Perez-Garcia, Fernando and Colomb-Clerc, Antoine and Lau, Brian and Dumas, Sylvie and Vidal, Sara Fernandez and Welter, Marie Laure and Francois, Chantal and Bardinet, Eric and Karachi, Carine}, doi = {10.1002/mds.27578}, issn = {15318257}, journal = {Movement Disorders}, keywords = {Gait disorders,Parkinson's disease,cuneiform nucleus,pedunculopontine nucleus,progressive supranuclear palsy}, month = {feb}, number = {2}, pages = {218--227}, title = {{Normal and pathological neuronal distribution of the human mesencephalic locomotor region}}, url = {https://onlinelibrary.wiley.com/doi/abs/10.1002/mds.27578}, volume = {34}, year = {2019} } @article{Looney2018, abstract = {We present a new technique to fully automate the segmentation of an organ from 3D ultrasound (3D-US) volumes, using the placenta as the target organ. Image analysis tools to estimate organ volume do exist but are too time consuming and operator dependant. Fully automating the segmentation process would potentially allow the use of placental volume to screen for increased risk of pregnancy complications. The placenta was segmented from 2,393 first trimester 3D-US volumes using a semiautomated technique. This was quality controlled by three operators to produce the "ground-truth" data set. A fully convolutional neural network (OxNNet) was trained using this ground-truth data set to automatically segment the placenta. OxNNet delivered state-of-the-art automatic segmentation. The effect of training set size on the performance of OxNNet demonstrated the need for large data sets. The clinical utility of placental volume was tested by looking at predictions of small-for-gestational-age babies at term. The receiver-operating characteristics curves demonstrated almost identical results between OxNNet and the ground-truth). Our results demonstrated good similarity to the ground-truth and almost identical clinical results for the prediction of SGA.}, author = {Looney, P{\'{a}}draig and Stevenson, Gordon N. and Nicolaides, Kypros H. and Plasencia, Walter and Molloholli, Malid and Natsis, Stavros and Collins, Sally L.}, doi = {10.1172/jci.insight.120178}, issn = {23793708}, journal = {JCI insight}, keywords = {Diagnostic imaging,Obstetrics/gynecology,Reproductive Biology}, month = {jun}, number = {11}, publisher = {NLM (Medline)}, title = {{Fully automated, real-time 3D ultrasound segmentation to estimate first trimester placental volume using deep learning}}, volume = {3}, year = {2018} } @article{Belmonte2018, abstract = {Spatial and spatio-temporal model checking techniques have a wide range of application domains, among which large scale distributed systems and signal and image analysis. We explore a new domain, namely (semi-)automatic contouring in Medical Imaging, introducing the tool VoxLogicA which merges the state-of-the-art library of computational imaging algorithms ITK with the unique combination of declarative specification and optimised execution provided by spatial logic model checking. The result is a rapid, logic based analysis development methodology. The analysis of an existing benchmark of medical images for segmentation of brain tumours shows that simple VoxLogicA analysis can reach state-of-the-art accuracy, competing with best-in-class algorithms, with the advantage of explainability and easy replicability. 
Furthermore, due to a two-orders-of-magnitude speedup compared to the existing general-purpose spatio-temporal model checker topochecker, VoxLogicA enables interactive development of analysis of 3D medical images, which can greatly facilitate the work of professionals in this domain.}, archivePrefix = {arXiv}, arxivId = {1811.05677}, author = {Belmonte, Gina and Ciancia, Vincenzo and Latella, Diego and Massink, Mieke}, doi = {10.1007/978-3-030-17462-0_16}, eprint = {1811.05677}, isbn = {9783030174613}, issn = {16113349}, journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, keywords = {Closure spaces,Medical Imaging,Model checking,Spatial logics}, month = {nov}, pages = {281--298}, title = {{Voxlogica: A spatial model checker for declarative image analysis}}, url = {http://arxiv.org/abs/1811.05677}, volume = {11427 LNCS}, year = {2019} } @article{Oliveira2015, abstract = {Objective. The aim of the present study was to develop a fully-automated computational solution for computer-aided diagnosis in Parkinson syndrome based on [123I]FP-CIT single photon emission computed tomography (SPECT) images. Approach. A dataset of 654 [123I]FP-CIT SPECT brain images from the Parkinson's Progression Markers Initiative were used. Of these, 445 images were of patients with Parkinson's disease at an early stage and the remainder formed a control group. The images were pre-processed using automated template-based registration followed by the computation of the binding potential at a voxel level. Then, the binding potential images were used for classification, based on the voxel-as-feature approach and using the support vector machines paradigm. Main results. The obtained estimated classification accuracy was 97.86{\%}, the sensitivity was 97.75{\%} and the specificity 98.09{\%}. Significance. The achieved classification accuracy was very high and, in fact, higher than accuracies found in previous studies reported in the literature. In addition, results were obtained on a large dataset of early Parkinson's disease subjects. In summation, the information provided by the developed computational solution potentially supports clinical decision-making in nuclear medicine, using important additional information beyond the commonly used uptake ratios and respective statistical comparisons. (ClinicalTrials.gov Identifier: NCT01141023)}, author = {Oliveira, Francisco P.M. and Castelo-Branco, Miguel}, doi = {10.1088/1741-2560/12/2/026008}, issn = {17412552}, journal = {Journal of Neural Engineering}, keywords = {DaTSCAN,automated image analysis,binding potential,classification}, month = {apr}, number = {2}, publisher = {Institute of Physics Publishing}, title = {{Computer-aided diagnosis of Parkinson's disease based on [123I]FP-CIT SPECT binding potential images, using the voxels-as-features approach and support vector machines}}, volume = {12}, year = {2015} } @inproceedings{Paniagua2018, abstract = {Studies show that cracked teeth are the third most common cause for tooth loss in industrialized countries. If detected early and accurately, patients can retain their teeth for a longer time. Most cracks are not detected early because of the discontinuous symptoms and lack of good diagnostic tools. Currently used imaging modalities like Cone Beam Computed Tomography (CBCT) and intraoral radiography often have low sensitivity and do not show cracks clearly. 
This paper introduces a novel method that can detect, quantify, and localize cracks automatically in high resolution CBCT (hr-CBCT) scans of teeth using steerable wavelets and learning methods. These initial results were created using hr-CBCT scans of a set of healthy teeth and of teeth with simulated longitudinal cracks. The cracks were simulated using multiple orientations. The crack detection was trained on the most significant wavelet coefficients at each scale using a bagged classifier of Support Vector Machines. Our results show high discriminative specificity and sensitivity of this method. The framework aims to be automatic, reproducible, and open-source. Future work will focus on the clinical validation of the proposed techniques on different types of cracks ex-vivo. We believe that this work will ultimately lead to improved tracking and detection of cracks allowing for longer lasting healthy teeth.}, author = {Paniagua, Beatriz and Shah, Hina and Hernandez-Cerdan, Pablo and Budin, Francois and Chittajallu, Deepak and Walter, Rick and Mol, Andre and Khan, Asma and Vimort, Jean-Baptiste}, booktitle = {Medical Imaging 2018: Biomedical Applications in Molecular, Structural, and Functional Imaging}, doi = {10.1117/12.2293603}, editor = {Gimi, Barjor and Krol, Andrzej}, isbn = {9781510616455}, issn = {0277-786X}, month = {mar}, pages = {55}, publisher = {SPIE}, title = {{Automatic quantification framework to detect cracks in teeth}}, url = {https://www.spiedigitallibrary.org/conference-proceedings-of-spie/10578/2293603/Automatic-quantification-framework-to-detect-cracks-in-teeth/10.1117/12.2293603.full}, volume = {10578}, year = {2018} } @inproceedings{Paniagua2019, author = {Paniagua, Beatriz and Prothero, Jack and Vimort, Jean-Baptiste and Ruellas, Antonio Carlos O. and Marron, James S. and Cevidanes, Lucia and Benavides, Erika and McCormick, Matthew M. and Hernandez-Cerdan, Pablo}, doi = {10.1117/12.2507978}, isbn = {9781510625532}, issn = {0277-786X}, month = {mar}, pages = {42}, publisher = {SPIE-Intl Soc Optical Eng}, title = {{Advanced statistical analysis to classify high dimensionality textural probability-distribution matrices}}, year = {2019} } @article{Oliveira2018, abstract = {Positron emission tomography (PET) neuroimaging with the Pittsburgh Compound{\_}B (PiB) is widely used to assess amyloid plaque burden. Standard quantification approaches normalize PiB-PET by mean cerebellar gray matter uptake. Previous studies suggested similar pons and white-matter uptake in Alzheimer's disease (AD) and healthy controls (HC), but lack exhaustive comparison of normalization across the three regions, with data-driven diagnostic classification. We aimed to compare the impact of distinct reference regions in normalization, measured by data-driven statistical analysis, and correlation with cerebrospinal fluid (CSF) amyloid $\beta$ (A$\beta$) species concentrations. 243 individuals with clinical diagnosis of AD, HC, mild cognitive impairment (MCI) and other dementias, from the Biomarkers for Alzheimer's/Parkinson's Disease (BIOMARKAPD) initiative were included. PiB-PET images and CSF concentrations of A$\beta$38, A$\beta$40 and A$\beta$42 were submitted to classification using support vector machines. Voxel-wise group differences and correlations between normalized PiB-PET images and CSF A$\beta$ concentrations were calculated. 
Normalization by cerebellar gray matter and pons yielded identical classification accuracy of AD (accuracy-96{\%}, sensitivity-96{\%}, specificity-95{\%}), and significantly higher than A$\beta$ concentrations (best accuracy 91{\%}). Normalization by the white-matter showed decreased extent of statistically significant multivoxel patterns and was the only method not outperforming CSF biomarkers, suggesting statistical inferiority. A$\beta$38 and A$\beta$40 correlated negatively with PiB-PET images normalized by the white-matter, corroborating previous observations of correlations with non-AD-specific subcortical changes in white-matter. In general, when using the pons as reference region, higher voxel-wise group differences and stronger correlation with A$\beta$42, the A$\beta$42/A$\beta$40 or A$\beta$42/A$\beta$38 ratios were found compared to normalization based on cerebellar gray matter.}, author = {Oliveira, Francisco and Leuzy, Antoine and Castelhano, Jo{\~{a}}o and Chiotis, Konstantinos and Hasselbalch, Steen Gregers and Rinne, Juha and Mendon{\c{c}}a, Alexandre and Otto, Markus and Lle{\'{o}}, Alberto and Santana, Isabel and Johansson, Jarkko and Anderl-Straub, Sarah and Arnim, Christine and Beer, Ambros and Blesa, Rafael and Fortea, Juan and Sanna-Kaisa, Herukka and Portelius, Erik and Pannee, Josef and Zetterberg, Henrik and Blennow, Kaj and Moreira, Ana P. and Abrunhosa, Antero and Nordberg, Agneta and Castelo-Branco, Miguel}, doi = {10.1016/j.nicl.2018.08.023}, issn = {22131582}, journal = {NeuroImage: Clinical}, month = {jan}, pages = {603--610}, publisher = {Elsevier Inc.}, title = {{Data driven diagnostic classification in Alzheimer's disease based on different reference regions for normalization of PiB-PET images and correlation with CSF concentrations of A$\beta$ species}}, volume = {20}, year = {2018} } @article{Oliveira2018a, abstract = {Background: Pittsburgh Compound B (PiB) positron emission tomography (PET) is used to visualize in vivo amyloid plaques in the brain. Frequently the PiB examinations are complemented with a fluorodeoxyglucose (FDG) PET scan to further assess neurodegeneration. Objective: Our goal is to identify alternative correlates of FDG images by assessing which kinetic methods originate PiB derived relative delivery ratio (R 1) images that can be correlated with the FDG images, and to compare them with PiB perfusion (pPiB) images obtained from the early-phase of PiB acquisition. Methods: We selected 52 patients with cognitive impairment who underwent a dynamic PiB and FDG acquisitions. To compute the R 1 images, two simplified reference tissue models (SRTM and SRTM2) and two multi-linear reference tissue models (MRTM and MRTM2) were used. The pPiB images were obtained in two different time intervals. Results: All six types of images were of good quality and highly correlated with the FDG images (mean voxelwise within-subjects r {\textgreater} 0.92). The higher correlation was found for FDG-R 1 (MRTM). Regarding the voxelwise regional correlation, the higher mean all brain correlations was r = 0.825 for FDG-R 1 (MRTM) and statistically significant in the whole brain analysis. Conclusion: All R 1 and pPiB images here tested have potential to assess the metabolic impact of neurodegeneration almost as reliably as the FDG images. 
However, this is not enough to validate these images for a single-subject analysis compared with the FDG image, and thus they cannot yet be used clinically to replace the FDG image before such evaluation.}, author = {Oliveira, Francisco P.M. and Moreira, Ana Paula and {De Mendon{\c{c}}a}, Alexandre and Verdelho, Ana and Xavier, Carolina and Barroca, Dalila and Rio, Joana and Cardoso, Eva and Cruz, {\^{A}}ngela and Abrunhosa, Antero and Castelo-Branco, Miguel}, doi = {10.3233/JAD-180274}, issn = {18758908}, journal = {Journal of Alzheimer's Disease}, keywords = {11 C-PIB,18 F-FDG,Alzheimer's disease,compartmental models,neurodegeneration,perfusion}, number = {1}, pages = {89--97}, publisher = {IOS Press}, title = {{Can 11 C-PiB-PET relative delivery R 1 or 11 C-PiB-PET perfusion replace 18 F-FDG-PET in the assessment of brain neurodegeneration?}}, volume = {65}, year = {2018} } @article{Grothausmann2017, abstract = {The alveolar capillary network (ACN) provides an enormously large surface area that is necessary for pulmonary gas exchange. Changes of the ACN during normal or pathological development or in pulmonary diseases are of great functional impact and warrant further analysis. Due to the complexity of the three-dimensional (3D) architecture of the ACN, 2D approaches are limited in providing a comprehensive impression of the characteristics of the normal ACN or the nature of its alterations. Stereological methods offer a quantitative way to assess the ACN in 3D in terms of capillary volume, surface area, or number but lack a 3D visualization to interpret the data. Hence, the necessity to visualize the ACN in 3D and to correlate this with data from the same set of data arises. Such an approach requires a large sample volume combined with a high resolution. Here, we present a technically simple and cost-efficient approach to create 3D representations of lung tissue ranging from bronchioles over alveolar ducts and alveoli up to the ACN from more than 1 mm sample extent to a resolution of less than 1 $\mu$m. The method is based on automated image acquisition of serially sectioned epoxy resin-embedded lung tissue fixed by vascular perfusion and subsequent automated digital reconstruction and analysis of the 3D data. This efficient method may help to better understand mechanisms of vascular development and pathology of the lung.}, author = {Grothausmann, Roman and Knudsen, Lars and Ochs, Matthias and M{\"{u}}hlfeld, Christian}, doi = {10.1152/ajplung.00326.2016}, issn = {15221504}, journal = {American Journal of Physiology - Lung Cellular and Molecular Physiology}, keywords = {3D reconstruction,Alveolar capillary network (ACN),Stacked histological slices,Virtual endoscopy}, month = {feb}, number = {2}, pages = {L243--L257}, title = {{Digital 3D reconstructions using histological serial sections of lung tissue including the alveolar capillary network}}, url = {https://www.physiology.org/doi/10.1152/ajplung.00326.2016}, volume = {312}, year = {2017} } @inproceedings{Grothausmann2018, abstract = {Changes in lung volume during the breathing cycle and also lung diseases are likely to deform even the smallest airspace units, the alveoli. This study reports general ideas to investigate such changes with 3D digital image processing. It comprises morphological characterizations like volume and surface, an evaluation of the angle distribution between facets formed by the septal walls, the number of neighboring alveoli and a shape analysis of the alveolar airspace. 
The software used is open-source and custom programs are available at: http://github.com/romangrothausmann/.}, author = {Grothausmann, Roman and M{\"{u}}hlfeld, Christian and Ochs, Matthias and Knudsen, Lars}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, doi = {10.1007/978-3-030-04747-4_5}, isbn = {9783030047467}, issn = {16113349}, pages = {49--64}, publisher = {Springer Verlag}, title = {{Shape and Facet Analyses of Alveolar Airspaces of the Lung}}, volume = {11167 LNCS}, year = {2018} } @article{Herrmann2018, abstract = {Background: Digital Imaging and Communications in Medicine (DICOM{\textregistered}) is the standard for the representation, storage, and communication of medical images and related information. A DICOM file format and communication protocol for pathology have been defined; however, adoption by vendors and in the field is pending. Here, we implemented the essential aspects of the standard and assessed its capabilities and limitations in a multisite, multivendor healthcare network. Methods: We selected relevant DICOM attributes, developed a program that extracts pixel data and pixel-related metadata, integrated patient and specimen-related metadata, populated and encoded DICOM attributes, and stored DICOM files. We generated the files using image data from four vendor-specific image file formats and clinical metadata from two departments with different laboratory information systems. We validated the generated DICOM files using recognized DICOM validation tools and measured encoding, storage, and access efficiency for three image compression methods. Finally, we evaluated storing, querying, and retrieving data over the web using existing DICOM archive software. Results: Whole slide image data can be encoded together with relevant patient and specimen-related metadata as DICOM objects. These objects can be accessed efficiently from files or through RESTful web services using existing software implementations. Performance measurements show that the choice of image compression method has a major impact on data access efficiency. For lossy compression, JPEG achieves the fastest compression/decompression rates. For lossless compression, JPEG-LS significantly outperforms JPEG 2000 with respect to data encoding and decoding speed. Conclusion: Implementation of DICOM allows efficient access to image data as well as associated metadata. By leveraging a wealth of existing infrastructure solutions, the use of DICOM facilitates enterprise integration and data exchange for digital pathology.}, author = {Herrmann, Markus D. and Clunie, David A. and Fedorov, Andriy and Doyle, Sean W. and Pieper, Steven and Klepeis, Veronica and Le, Long P. and Mutter, George L. and Milstone, David S. and Schultz, Thomas J. and Kikinis, Ron and Kotecha, Gopal K. and Hwang, David H. and Andriole, Katherine P. and {John Iafrate}, A. and Brink, James A. and Boland, Giles W. and Dreyer, Keith J. and Michalski, Mark and Golden, Jeffrey A. and Louis, David N. 
and Lennerz, Jochen K.}, doi = {10.4103/jpi.jpi_42_18}, issn = {21533539}, journal = {Journal of Pathology Informatics}, keywords = {Computational pathology,DICOMweb,Image compression,Slide scanning,Whole slide imaging}, month = {jan}, number = {1}, pmid = {30533276}, publisher = {Wolters Kluwer Medknow Publications}, title = {{Implementing the DICOM standard for digital pathology}}, volume = {9}, year = {2018} } @article{Malkasian2018, abstract = {Background: As combined morphological and physiological assessment of coronary artery disease (CAD) is necessary to reliably resolve CAD severity, the objective of this study was to validate an automated minimum-cost path assignment (MCP) technique which enables accurate, vessel-specific assignment of the left (LCA) and right (RCA) coronary perfusion territories using computed tomography (CT) angiography data for both left and right ventricles. Methods: Six swine were used to validate the MCP technique. In each swine, a dynamic acquisition comprised of twenty consecutive volume scans was acquired with a 320-slice CT scanner following peripheral injection of contrast material. From this acquisition the MCP technique was used to automatically assign LCA and RCA perfusion territories for the left and right ventricles, independently. Each animal underwent another dynamic CT acquisition following direct injection of contrast material into the LCA or RCA. Using this acquisition, reference standard LCA and RCA perfusion territories were isolated from the myocardial blush. The accuracy of the MCP technique was evaluated by quantitatively comparing the MCP-derived LCA and RCA perfusion territories to these reference standard territories. Results: All MCP perfusion territory masses (MassMCP) and all reference standard perfusion territory masses (MassRS) in the left ventricle were related by MassMCP = 0.99MassRS+0.35 g (r = 1.00). MassMCP and MassRS in the right ventricle were related by MassMCP = 0.94MassRS+0.39 g (r = 0.96). Conclusion: The MCP technique was validated in a swine animal model and has the potential to be used for accurate, vessel-specific assignment of LCA and RCA perfusion territories in both the left and right ventricular myocardium using CT angiography data. In order to provide a comprehensive morphological and physiological cardiac imaging datasets, the minimum-cost path assignment (MCP) method has been validated, using a swine animal model and computed tomography (CT) imaging. MCP has been shown to accurately quantify left and right coronary artery perfusion territories in the left ventricle, right ventricle and whole heart. 
The MCP technique provides clinicians and researchers in cardiovascular imaging with a means to accurately and automatically determine coronary-specific perfusion territories in the left and right heart, and could also be used to assess myocardium at-risk distal to a stenosis.}, author = {Malkasian, Shant and Hubbard, Logan and Dertli, Brian and Kwon, Jungnam and Molloi, Sabee}, doi = {10.1016/j.jcct.2018.06.006}, issn = {1876861X}, journal = {Journal of Cardiovascular Computed Tomography}, keywords = {Angiography,Cardiovascular disease,Computerized tomography,Coronary artery disease,Imaging,Myocardium}, month = {sep}, number = {5}, pages = {425--435}, publisher = {Elsevier Inc.}, title = {{Quantification of vessel-specific coronary perfusion territories using minimum-cost path assignment and computed tomography angiography: Validation in a swine model}}, volume = {12}, year = {2018} } @article{HadjHamou2016, abstract = {We propose and detail a deformation-based morphometry computational framework, called Longitudinal Log-Demons Framework (LLDF), to estimate the longitudinal brain deformations from image data series, transport them in a common space and perform statistical group-wise analyses. It is based on freely available software and tools, and consists of three main steps: (i) Pre-processing, (ii) Position correction, and (iii) Non-linear deformation analysis. It is based on the LCC log-Demons non-linear symmetric diffeomorphic registration algorithm with an additional modulation of the similarity term using a confidence mask to increase the robustness with respect to brain boundary intensity artifacts. The pipeline is exemplified on the longitudinal Open Access Series of Imaging Studies (OASIS) database and all the parameters values are given so that the study can be reproduced. We investigate the group-wise differences between the patients with Alzheimer's disease and the healthy control group, and show that the proposed pipeline increases the sensitivity with no decrease in the specificity of the statistical study done on the longitudinal deformations.}, author = {Hadj-Hamou, Mehdi and Lorenzi, Marco and Ayache, Nicholas and Pennec, Xavier}, doi = {10.3389/fnins.2016.00236}, issn = {1662453X}, journal = {Frontiers in Neuroscience}, keywords = {Deformation-based morphometry,Diffeomorphism parametrized by stationary velocity,Longitudinal study,Non-linear registration,Reproducible research,Statistical analysis}, month = {jun}, number = {JUN}, publisher = {Frontiers Research Foundation}, title = {{Longitudinal analysis of image time series with diffeomorphic deformations: A computational framework based on stationary velocity fields}}, volume = {10}, year = {2016} } @article{Abbasi2017, abstract = {Brain tumor pathology is one of the most common mortality issues considered as an essential priority for health care societies. Accurate diagnosis of the type of disorder is crucial to make a plan for remedy that can minimize the deadly results. The main purpose of segmentation and detection is to make distinction between different regions of the brain. Besides accuracy, these techniques should be implemented quickly. In this paper an automatic method for brain tumor detection in 3D images has been proposed. In the first step, the bias field correction and histogram matching are used for pre-processing of the images. In the next step, the region of interest is identified and separated from the background of the Flair image. 
Local binary pattern in three orthogonal planes (LBP-TOP) and histogram of orientation gradients (HOG-TOP) are used as the learning features. Since 3D images are used in this research we use the idea of local binary patterns in three orthogonal planes in order to extend histogram orientation gradients for 3D images. The random forest is then used to segment tumorous regions. We evaluate the performance of our algorithm on glioma images from BRATS 2013. Our experimental results and analyses indicate that our proposed framework is superior in detecting brain tumors in comparison with other techniques.}, author = {Abbasi, Solmaz and Tajeripour, Farshad}, doi = {10.1016/j.neucom.2016.09.051}, issn = {18728286}, journal = {Neurocomputing}, keywords = {Histogram orientation gradient,Local binary patterns,MRI images,Medical image processing,Tumor detection}, pages = {526--535}, title = {{Detection of brain tumor in 3D MRI images using local binary patterns and histogram orientation gradient}}, type = {Journal Article}, volume = {219}, year = {2017} } @incollection{Achim2018, abstract = {The method described here aims at the construction of a single-cell resolution gene expression atlas for an animal or tissue, combining in situ hybridization (ISH) and single-cell mRNA-sequencing (scRNAseq). A high resolution and medium-coverage gene expression atlas of an animal or tissue of interest can be obtained by performing a series of ISH experiments, followed by a process of image registration and gene expression averaging. Using the overlapping fraction of the genes, concomitantly obtained scRNAseq data can be fitted into the spatial context of the gene expression atlas, complementing the coverage by genes.}, author = {Achim, Kaia and Vergara, Hernando Mart{\'{i}}nez and Pettit, Jean Baptiste}, booktitle = {Methods in Molecular Biology}, doi = {10.1007/978-1-4939-7213-5_7}, issn = {10643745}, keywords = {Gene expression,Image registration,Single-cell mRNA-seq,Spatial transcriptomics}, pages = {111--125}, title = {{Spatial transcriptomics: Constructing a single-cell resolution transcriptome-wide expression atlas}}, type = {Serial}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85033693516{\&}doi=10.1007{\%}2F978-1-4939-7213-5{\_}7{\&}partnerID=40{\&}md5=697ad0e6f4701bd81a55352e26ae2fbb}, volume = {1649}, year = {2018} } @article{Achterberg2016, abstract = {With the increasing number of datasets encountered in imaging studies, the increasing complexity of processing workflows, and a growing awareness for data stewardship, there is a need for managed, automated workflows. In this paper, we introduce Fastr, an automated workflow engine with support for advanced data flows. Fastr has built-in data provenance for recording processing trails and ensuring reproducible results. The extensible plugin-based design allows the system to interface with virtually any image archive and processing infrastructure. This workflow engine is designed to consolidate quantitative imaging biomarker pipelines in order to enable easy application to new data.}, author = {Achterberg, Hakim C.
and Koek, Marcel and Niessen, Wiro J.}, doi = {10.3389/fict.2016.00015}, issn = {2297198X}, journal = {Frontiers in ICT}, keywords = {Data flow,Data processing,Distributed computing,Pipeline,Provenance,Python,Reproducible research,Workflow}, number = {AUG}, title = {{Fastr: A Workflow engine for advanced data flows in medical image analysis}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85048206888{\&}doi=10.3389{\%}2Ffict.2016.00015{\&}partnerID=40{\&}md5=453da670107b4919c49586ea9b393a99}, volume = {3}, year = {2016} } @article{Acosta2017, abstract = {Background and purpose Segmentation of intra-prostatic urethra for dose assessment from planning CT may help explaining urinary toxicity in prostate cancer radiotherapy. This work sought to: i) propose an automatic method for urethra segmentation in CT, ii) compare it with previously proposed surrogate models and iii) quantify the dose received by the urethra in patients treated with IMRT. Materials and methods A weighted multi-atlas-based urethra segmentation method was devised from a training data set of 55 CT scans of patients receiving brachytherapy with visible urinary catheters. Leave-one-out cross validation was performed to quantify the error between the urethra segmentation and the catheter ground truth with two scores: the centerlines distance (CLD) and the percentage of centerline within a certain distance from the catheter (PWR). The segmentation method was then applied to a second test data set of 95 prostate cancer patients having received 78 Gy IMRT to quantify dose to the urethra. Results Mean CLD was 3.25 ± 1.2 mm for the whole urethra and 3.7 ± 1.7 mm, 2.52 ± 1.5 mm, and 3.01 ± 1.7 mm for the top, middle, and bottom thirds, respectively. In average, 53{\%} of the segmented centerlines were within a radius {\textless} 3.5 mm from the centerline ground truth and 83{\%} in a radius {\textless} 5 mm. The proposed method outperformed existing surrogate models. In IMRT, urethra DVH was significantly higher than prostate DVH from V74 Gy to V79 Gy. Conclusion A multi-atlas-based segmentation method was proposed enabling assessment of the dose within the prostatic urethra.}, author = {Acosta, Oscar and Mylona, Eugenia and {Le Dain}, Mathieu and Voisin, Camille and Lizee, Thibaut and Rigaud, Bastien and Lafond, Carolina and Gnep, Khemara and de Crevoisier, Renaud}, doi = {10.1016/j.radonc.2017.09.015}, issn = {18790887}, journal = {Radiotherapy and Oncology}, keywords = {Atlas-based segmentation,Dose computation,Prostate cancer radiotherapy,Urethra segmentation,Urinary toxicity}, number = {3}, pages = {492--499}, title = {{Multi-atlas-based segmentation of prostatic urethra from planning CT imaging to quantify dose distribution in prostate cancer radiotherapy}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {125}, year = {2017} } @article{Adebar2014, abstract = {Robotic needle steering systems have the potential to greatly improve medical interventions, but they require new methods for medical image guidance. Three-dimensional (3-D) ultrasound is a widely available, low-cost imaging modality that may be used to provide real-time feedback to needle steering robots. Unfortunately, the poor visibility of steerable needles in standard grayscale ultrasound makes automatic segmentation of the needles impractical. A new imaging approach is proposed, in which high-frequency vibration of a steerable needle makes it visible in ultrasound Doppler images. 
Experiments demonstrate that segmentation from this Doppler data is accurate to within 1-2 mm. An image-guided control algorithm that incorporates the segmentation data as feedback is also described. In experimental tests in ex vivo bovine liver tissue, a robotic needle steering system implementing this control scheme was able to consistently steer a needle tip to a simulated target with an average error of 1.57 mm. Implementation of 3-D ultrasound-guided needle steering in biological tissue represents a significant step toward the clinical application of robotic needle steering.}, author = {Adebar, Troy K. and Fletcher, Ashley E. and Okamura, Allison M.}, doi = {10.1109/TBME.2014.2334309}, issn = {15582531}, journal = {IEEE Transactions on Biomedical Engineering}, keywords = {Image-guided intervention,robotic needle steering,ultrasound Doppler,ultrasound imaging}, number = {12}, pages = {2899--2910}, title = {{3-D ultrasound-guided robotic needle steering in biological tissue}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {61}, year = {2014} } @article{Adluru2007, abstract = {Dynamic contrast-enhanced (DCE) MRI is a powerful technique to probe an area of interest in the body. Here a temporally constrained reconstruction (TCR) technique that requires less k-space data over time to obtain good-quality reconstructed images is proposed. This approach can be used to improve the spatial or temporal resolution, or increase the coverage of the object of interest. The method jointly reconstructs the space-time data iteratively with a temporal constraint in order to resolve aliasing. The method was implemented and its feasibility tested on DCE myocardial perfusion data with little or no motion. The results obtained from sparse k-space data using the TCR method were compared with results obtained with a sliding-window (SW) method and from full data using the standard inverse Fourier transform (IFT) reconstruction. Acceleration factors of 5 (R = 5) were achieved without a significant loss in image quality. Mean improvements of 28 ± 4{\%} in the signal-to-noise ratio (SNR) and 14 ± 4{\%} in the contrast-to-noise ratio (CNR) were observed in the images reconstructed using the TCR method on sparse data (R = 5) compared to the standard IFT reconstructions from full data for the perfusion datasets. The method has the potential to improve dynamic myocardial perfusion imaging and also to reconstruct other sparse dynamic MR acquisitions. {\textcopyright} 2007 Wiley-Liss, Inc.}, author = {Adluru, Ganesh and Awate, Suyash P. and Tasdizen, Tolga and Whitaker, Ross T. and DiBella, Edward V.R.}, doi = {10.1002/mrm.21248}, issn = {07403194}, journal = {Magnetic Resonance in Medicine}, keywords = {Cardiac perfusion,Dynamic contrast-enhanced MR,L-curve,Regularization,Regularization parameter}, number = {6}, pages = {1027--1036}, title = {{Temporally constrained reconstruction of dynamic cardiac perfusion MRI}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {57}, year = {2007} } @article{Ai2019, abstract = {Since the honeybee possesses eusociality, advanced learning, memory ability, and information sharing through the use of various pheromones and sophisticated symbol communication (i.e., the “waggle dance”), this remarkable social animal has been one of the model symbolic animals for biological studies, animal ecology, ethology, and neuroethology.
Karl von Frisch discovered the meanings of the waggle dance and called the communication a “dance language.” Subsequent to this discovery, it has been extensively studied how effectively recruits translate the code in the dance to reach the advertised destination and how the waggle dance information conflicts with the information based on their own foraging experience. The dance followers, mostly foragers, detect and interact with the waggle dancer, and are finally recruited to the food source. In this review, we summarize the current state of knowledge on the neural processing underlying this fascinating behavior.}, author = {Ai, Hiroyuki and Okada, Ryuichi and Sakura, Midori and Wachtler, Thomas and Ikeno, Hidetoshi}, doi = {10.3390/insects10100336}, issn = {20754450}, journal = {Insects}, keywords = {Antenna-mechanosensory center,Brain,Computational analysis,Distance information,Honeybee,Polarized light processing,Sensory processing,Standard brain,Vibration,Waggle dance}, number = {10}, pages = {16}, title = {{Neuroethology of the waggle dance: How followers interact with the waggle dancer and detect spatial information}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {10}, year = {2019} } @article{Akbarzadeh2013, abstract = {Objective: Hybrid PET/MRI presents many advantages in comparison with its counterpart PET/CT in terms of improved soft-tissue contrast, decrease in radiation exposure, and truly simultaneous and multi-parametric imaging capabilities. However, the lack of well-established methodology for MR-based attenuation correction is hampering further development and wider acceptance of this technology. We assess the impact of ignoring bone attenuation and using different tissue classes for generation of the attenuation map on the accuracy of attenuation correction of PET data. Methods: This work was performed using simulation studies based on the XCAT phantom and clinical input data. For the latter, PET and CT images of patients were used as input for the analytic simulation model using realistic activity distributions where CT-based attenuation correction was utilized as reference for comparison. For both phantom and clinical studies, the reference attenuation map was classified into various numbers of tissue classes to produce three (air, soft tissue and lung), four (air, lungs, soft tissue and cortical bones) and five (air, lungs, soft tissue, cortical bones and spongeous bones) class attenuation maps. Results: The phantom studies demonstrated that ignoring bone increases the relative error by up to 6.8 {\%} in the body and up to 31.0 {\%} for bony regions. Likewise, the simulated clinical studies showed that the mean relative error reached 15 {\%} for lesions located in the body and 30.7 {\%} for lesions located in bones, when neglecting bones. These results demonstrate an underestimation of about 30 {\%} of tracer uptake when neglecting bone, which in turn imposes substantial loss of quantitative accuracy for PET images produced by hybrid PET/MRI systems. Conclusion: Considering bones in the attenuation map will considerably improve the accuracy of MR-guided attenuation correction in hybrid PET/MR to enable quantitative PET imaging on hybrid PET/MR technologies. {\textcopyright} 2012 The Japanese Society of Nuclear Medicine.}, author = {Akbarzadeh, A. and Ay, M. R. and Ahmadian, A. and {Riahi Alam}, N. 
and Zaidi, H.}, doi = {10.1007/s12149-012-0667-3}, issn = {09147187}, journal = {Annals of Nuclear Medicine}, keywords = {Attenuation correction,PET/CT,PET/MRI,Quantification,Tissue classification}, number = {2}, pages = {152--162}, title = {{MRI-guided attenuation correction in whole-body PET/MR: Assessment of the effect of bone attenuation}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {27}, year = {2013} } @article{Akbarzadeh2013a, abstract = {Multimodality image registration plays a crucial role in various clinical and research applications. The aim of this study is to present an optimized MR to CT whole-body deformable image registration algorithm and its validation using clinical studies. A 3D intermodality registration technique based on B-spline transformation was performed using optimized parameters of the elastix package based on the Insight Toolkit (ITK) framework. Twenty-eight (17 male and 11 female) clinical studies were used in this work. The registration was evaluated using anatomical landmarks and segmented organs. In addition to 16 anatomical landmarks, three key organs (brain, lungs, and kidneys) and the entire body volume were segmented for evaluation. Several parameters - such as the Euclidean distance between anatomical landmarks, target overlap, Dice and Jaccard coefficients, false positives and false negatives, volume similarity, distance error, and Hausdorff distance - were calculated to quantify the quality of the registration algorithm. Dice coefficients for the majority of patients ({\textgreater} 75{\%}) were in the 0.8-1 range for the whole body, brain, and lungs, which satisfies the criteria to achieve excellent alignment. On the other hand, for kidneys, Dice coefficients for volumes of 25{\%} of the patients meet excellent volume agreement requirement, while the majority of patients satisfy good agreement criteria ({\textgreater} 0.6). For all patients, the distance error was in 0-10 mm range for all segmented organs. In summary, we optimized and evaluated the accuracy of an MR to CT deformable registration algorithm. The registered images constitute a useful 3D whole-body MR-CT atlas suitable for the development and evaluation of novel MR-guided attenuation correction procedures on hybrid PET-MR systems.}, author = {Akbarzadeh, A. and Gutierrez, D. and Baskin, A. and Ay, M. R. and Ahmadian, A. and {Riahi Alam}, N. and L{\"{o}}vblad, K. O. and Zaidi, H.}, doi = {10.1120/jacmp.v14i4.4163}, issn = {15269914}, journal = {Journal of Applied Clinical Medical Physics}, keywords = {Attenuationcorrection,Deformable model,Image registration,PET/CT,PET/MRI}, number = {4}, pages = {238--253}, title = {{Evaluation of whole-body mr to ct deformable image registration}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {14}, year = {2013} } @article{Aksenov2014, abstract = {Purpose: The local injection of neurotransmitter agonists and antagonists to modulate recorded neurons in awake animals has long been an important and widely used technique in neuroscience. Combined with functional magnetic resonance imaging (fMRI) and simultaneous electrophysiology, local injection enables the study of specific brain regions under precise modulations of their neuronal activity. However, localized injections are often accompanied by mechanical displacement of the tissue, known as volume effect (VE), which can induce changes in electrophysiological recordings as well as artifacts that are particular to fMRI studies. 
Methods: We characterize the changes produced by VE in an agarose phantom as well as during stimulus-evoked and resting-state fMRI and simultaneously acquired electrophysiology in awake rabbits. Results: Our results demonstrate that localized injection can produce significant intensity changes in fMRI data, even while effects on electrophysiological recordings are minimized. These changes are localized to the vicinity of the injection needle and diminish over time due to diffusion of the injected volume. Conclusion: Sufficient time should be allowed for drug diffusion to ensure stable results, particularly for resting-state fMRI experiments.}, author = {Aksenov, Daniil P. and Li, Limin and Iordanescu, Gheorghe and Miller, Michael J. and Wyrwicz, Alice M.}, doi = {10.1002/mrm.24996}, issn = {15222594}, journal = {Magnetic Resonance in Medicine}, keywords = {ACSF,Brain,Injection,Single unit,Volume effect,fMRI}, number = {4}, pages = {1170--1175}, pmid = {24273205}, title = {{Volume effect of localized injection in functional MRI and electrophysiology}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {72}, year = {2014} } @article{Aksenov2016, abstract = {The adaptation of neuronal responses to stimulation, in which a peak transient response is followed by a sustained plateau, has been well-studied. The blood oxygenation level dependent (BOLD) functional magnetic resonance imaging (fMRI) signal has also been shown to exhibit adaptation on a longer time scale. However, some regions such as the visual and auditory cortices exhibit significant BOLD adaptation, whereas other such as the whisker barrel cortex may not adapt. In the sensory cortex a combination of thalamic inputs and intracortical activity drives hemodynamic changes, although the relative contributions of these components are not entirely understood. The aim of this study is to assess the role of thalamic inputs vs. intracortical processing in shaping BOLD adaptation during stimulation in the somatosensory cortex. Using simultaneous fMRI and electrophysiology in awake rabbits, we measured BOLD, local field potentials (LFPs), single- and multi-unit activity in the cortex during whisker and optogenetic stimulation. This design allowed us to compare BOLD and haemodynamic responses during activation of the normal thalamocortical sensory pathway (i.e., both inputs and intracortical activity) vs. the direct optical activation of intracortical circuitry alone. Our findings show that whereas LFP and multi-unit (MUA) responses adapted, neither optogenetic nor sensory stimulation produced significant BOLD adaptation. We observed for both paradigms a variety of excitatory and inhibitory single unit responses. We conclude that sensory feed-forward thalamic inputs are not primarily responsible for shaping BOLD adaptation to stimuli; but the single-unit results point to a role in this behaviour for specific excitatory and inhibitory neuronal sub-populations, which may not correlate with aggregate neuronal activity.}, author = {Aksenov, Daniil P. and Li, Limin and Miller, Michael J. 
and Wyrwicz, Alice M.}, doi = {10.1111/ejn.13384}, issn = {14609568}, journal = {European Journal of Neuroscience}, keywords = {functional magnetic resonance imaging,neural activity,rabbit,whisker barrel cortex}, number = {9}, pages = {2722--2729}, title = {{Blood oxygenation level dependent signal and neuronal adaptation to optogenetic and sensory stimulation in somatosensory cortex in awake animals}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {44}, year = {2016} } @article{Aksenov2016a, abstract = {Millions of children undergo general anesthesia each year in the USA alone, and a growing body of literature from animals and humans suggests that exposure to anesthesia at an early age can impact neuronal development, leading to learning and memory impairments later in childhood. Although a number of studies have reported behavioral and structural effects of anesthesia exposure during infancy, the functional manifestation of these changes has not been previous examined. In this study we used BOLD fMRI to measure the functional response to stimulation in the whisker barrel cortex of awake rabbits before and after learning a trace eyeblink classical conditioning paradigm. The functional changes, in terms of activated volume and time course, in rabbits exposed to isoflurane anesthesia during infancy was compared to unanesthetized controls when both groups reached young adulthood. Our findings show that whereas both groups exhibited decreased BOLD response duration after learning, the anesthesia-exposed group also showed a decrease in BOLD response volume in the whisker barrel cortex, particularly in the deeper infragranular layer. These results suggest that anesthesia exposure during infancy may affect the intracortical processes that mediate learning-related plasticity.}, author = {Aksenov, Daniil P. and Miller, Michael J. and Li, Limin and Wyrwicz, Alice M.}, doi = {10.1016/j.physbeh.2016.08.030}, issn = {1873507X}, journal = {Physiology and Behavior}, keywords = {Anesthesia,Infant,Learning}, pages = {10--15}, title = {{Eyeblink classical conditioning and BOLD fMRI of anesthesia-induced changes in the developing brain}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {167}, year = {2016} } @article{Allan2012, abstract = {Data-intensive research depends on tools that manage multidimensional, heterogeneous datasets. We built OME Remote Objects (OMERO), a software platform that enables access to and use of a wide range of biological data. OMERO uses a server-based middleware application to provide a unified interface for images, matrices and tables. OMERO's design and flexibility have enabled its use for light-microscopy, high-content-screening, electron-microscopy and even non-image-genotype data. OMERO is open-source software, available at http://openmicroscopy.org/. {\textcopyright} 2012 Nature America, Inc. All rights reserved.}, author = {Allan, Chris and Burel, Jean Marie and Moore, Josh and Blackburn, Colin and Linkert, Melissa and Loynton, Scott and MacDonald, Donald and Moore, William J. and Neves, Carlos and Patterson, Andrew and Porter, Michael and Tarkowska, Aleksandra and Loranger, Brian and Avondo, Jerome and Lagerstedt, Ingvar and Lianas, Luca and Leo, Simone and Hands, Katherine and Hay, Ron T. and Patwardhan, Ardan and Best, Christoph and Kleywegt, Gerard J. 
and Zanetti, Gianluigi and Swedlow, Jason R.}, doi = {10.1038/nmeth.1896}, issn = {15487091}, journal = {Nature Methods}, number = {3}, pages = {245--253}, title = {{OMERO: Flexible, model-driven data management for experimental biology}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {9}, year = {2012} } @inproceedings{Alvarez, abstract = {We introduce a morphological approach to curve evolution. The differential operators used in the standard PDE snake models can be approached using morphological operations on a binary level set. By combining the morphological operators associated to the PDE components we achieve a new snakes evolution algorithm. This new solution is based on numerical methods which are very simple, fast and stable. Moreover, since the level set is just a binary piecewise constant function, this approach does not require to estimate a contour distance function. To illustrate the results obtained we present some numerical experiments on real images. {\textcopyright}2010 IEEE.}, author = {{\'{A}}lvarez, Luis and Baumela, Luis and Henr{\'{i}}quez, Pedro and M{\'{a}}rquez-Neila, Pablo}, booktitle = {Proceedings of the IEEE Computer Society Conference on Computer Vision and Pattern Recognition}, doi = {10.1109/CVPR.2010.5539900}, isbn = {9781424469840}, issn = {10636919}, pages = {2197--2202}, title = {{Morphological snakes}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-77955987357{\&}doi=10.1109{\%}2FCVPR.2010.5539900{\&}partnerID=40{\&}md5=15e7390806889a59ec5c0fa284c87850}, year = {2010} } @article{Anderson2010, abstract = {Applying Python to a neuroscience project let developers put complex data processing and advanced visualization techniques together in a coherent framework. {\textcopyright} 2006 IEEE.}, author = {Anderson, Erik W. and Preston, Gilbert A. and Silva, Claudio T.}, doi = {10.1109/MCSE.2010.91}, issn = {15219615}, journal = {Computing in Science and Engineering}, keywords = {Brain,Data visualization,Electroencephalography,Libraries,Magnetic resonance imaging,Python,Sensors,Signal processing,Time frequency analysis,Visualization}, number = {4}, pages = {90--95}, title = {{Using python for signal processing and visualization}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {12}, year = {2010} } @article{Arabi2016, abstract = {Quantitative whole-body PET/MR imaging is challenged by the lack of accurate and robust strategies for attenuation correction. In this work, a new pseudo-CT generation approach, referred to as sorted atlas pseudo-CT (SAP), is proposed for accurate extraction of bones and estimation of lung attenuation properties. This approach improves the Gaussian process regression (GPR) kernel proposed by Hofmann et al. which relies on the information provided by a co-registered atlas (CT and MRI) using a GPR kernel to predict the distribution of attenuation coefficients. Our approach uses two separate GPR kernels for lung and non-lung tissues. For non-lung tissues, the co-registered atlas dataset was sorted on the basis of local normalized cross-correlation similarity to the target MR image to select the most similar image in the atlas for each voxel. For lung tissue, the lung volume was incorporated in the GPR kernel taking advantage of the correlation between lung volume and corresponding attenuation properties to predict the attenuation coefficients of the lung. 
In the presence of pathological tissues in the lungs, the lesions are segmented on PET images corrected for attenuation using MRI-derived three-class attenuation map followed by assignment of soft-tissue attenuation coefficient. The proposed algorithm was compared to other techniques reported in the literature including Hofmann's approach and the three-class attenuation correction technique implemented on the Philips Ingenuity TF PET/MR where CT-based attenuation correction served as reference. Fourteen patients with head and neck cancer undergoing PET/CT and PET/MR examinations were used for quantitative analysis. SUV measurements were performed on 12 normal uptake regions as well as high uptake malignant regions. Moreover, a number of similarity measures were used to evaluate the accuracy of extracted bones. The Dice similarity metric revealed that the extracted bone improved from 0.58±0.09 to 0.65±0.07 when using the SAP technique compared to Hofmann's approach. This enabled to reduce the SUV mean bias in bony structures for the SAP approach to -1.7±4.8{\%} as compared to -7.3±6.0{\%} and -27.4±10.1{\%} when using Hofmann's approach and the three-class attenuation map, respectively. Likewise, the three-class attenuation map produces a relative absolute error of 21.7±11.8{\%} in the lungs. This was reduced on average to 15.8±8.6{\%} and 8.0±3.8{\%} when using Hofmann's and SAP techniques, respectively. The SAP technique resulted in better overall PET quantification accuracy than both Hofmann's and the three-class approaches owing to the more accurate extraction of bones and better prediction of lung attenuation coefficients. Further improvement of the technique and reduction of the computational time are still required.}, author = {Arabi, Hossein and Zaidi, Habib}, doi = {10.1016/j.media.2016.02.002}, issn = {13618423}, journal = {Medical Image Analysis}, keywords = {Atlas,Attenuation correction,PET/MRI,Pseudo-CT generation,Quantification}, pages = {1--15}, title = {{Magnetic resonance imaging-guided attenuation correction in whole-body PET/MRI using a sorted atlas approach}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {31}, year = {2016} } @article{Asadulina2012, abstract = {Background: Digital anatomical atlases are increasingly used in order to depict different gene expression patterns and neuronal morphologies within a standardized reference template. In evo-devo, a discipline in which the comparison of gene expression patterns is a widely used approach, such standardized anatomical atlases would allow a more rigorous assessment of the conservation of and changes in gene expression patterns during micro- and macroevolutionary time scales. Due to its small size and invariant early development, the annelid Platynereis dumerilii is particularly well suited for such studies. Recently a reference template with registered gene expression patterns has been generated for the anterior part (episphere) of the Platynereis trochophore larva and used for the detailed study of neuronal development.Results: Here we introduce and evaluate a method for whole-body gene expression pattern registration for Platynereis trochophore and nectochaete larvae based on whole-mount in situ hybridization, confocal microscopy, and image registration. 
We achieved high-resolution whole-body scanning using the mounting medium 2,2'-thiodiethanol (TDE), which allows the matching of the refractive index of the sample to that of glass and immersion oil thereby reducing spherical aberration and improving depth penetration. This approach allowed us to scan entire whole-mount larvae stained with nitroblue tetrazolium/5-bromo-4-chloro-3-indolyl phosphate (NBT/BCIP) in situ hybridization and counterstained fluorescently with an acetylated-tubulin antibody and the nuclear stain 4'6-diamidino-2-phenylindole (DAPI). Due to the submicron isotropic voxel size whole-mount larvae could be scanned in any orientation. Based on the whole-body scans, we generated four different reference templates by the iterative registration and averaging of 40 individual image stacks using either the acetylated-tubulin or the nuclear-stain signal for each developmental stage. We then registered to these templates the expression patterns of cell-type specific genes. In order to evaluate the gene expression pattern registration, we analyzed the absolute deviation of cell-center positions. Both the acetylated-tubulin- and the nuclear-stain-based templates allowed near-cellular-resolution gene expression registration. Nuclear-stain-based templates often performed significantly better than acetylated-tubulin-based templates. We provide detailed guidelines and scripts for the use and further expansion of the Platynereis gene expression atlas.Conclusions: We established whole-body reference templates for the generation of gene expression atlases for Platynereis trochophore and nectochaete larvae. We anticipate that nuclear-staining-based image registration will be applicable for whole-body alignment of the embryonic and larval stages of other organisms in a similar size range. {\textcopyright} 2012 Asadulina et al.; licensee BioMed Central Ltd.}, author = {Asadulina, Albina and Panzera, Aurora and Veraszt{\'{o}}, Csaba and Liebig, Christian and J{\'{e}}kely, G{\'{a}}sp{\'{a}}r}, doi = {10.1186/2041-9139-3-27}, issn = {20419139}, journal = {EvoDevo}, number = {1}, pages = {12}, title = {{Whole-body gene expression pattern registration in Platynereis larvae}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {3}, year = {2012} } @article{Avants2014, abstract = {Publicly available scientific resources help establish evaluation standards, provide a platform for teaching and improve reproducibility. Version 4 of the Insight ToolKit (ITK4) seeks to establish new standards in publicly available image registration methodology. ITK4 makes several advances in comparison to previous versions of ITK. ITK4 supports both multivariate images and objective functions; it also unifies high-dimensional (deformation field) and low-dimensional (affine) transformations with metrics that are reusable across transform types and with composite transforms that allow arbitrary series of geometric mappings to be chained together seamlessly. Metrics and optimizers take advantage of multi-core resources, when available. Furthermore, ITK4 reduces the parameter optimization burden via principled heuristics that automatically set scaling across disparate parameter types (rotations vs. translations). A related approach also constrains steps sizes for gradient-based optimizers. The result is that tuning for different metrics and/or image pairs is rarely necessary allowing the researcher to more easily focus on design/comparison of registration strategies. 
In total, the ITK4 contribution is intended as a structure to support reproducible research practices, will provide a more extensive foundation against which to evaluate new work in image registration and also enable application level programmers a broad suite of tools on which to build. Finally, we contextualize this work with a reference registration evaluation study with application to pediatric brain labeling. {\textcopyright} 2014 Avants, Tustison, Stauffer, Song, Wu and Gee.}, author = {Avants, Brian B. and Tustison, Nicholas J. and Stauffer, Michael and Song, Gang and Wu, Baohua and Gee, James C.}, doi = {10.3389/fninf.2014.00044}, issn = {16625196}, journal = {Frontiers in Neuroinformatics}, keywords = {Brain,Death,MRI,Open-source,Registration}, number = {APR}, pages = {13}, title = {{The Insight ToolKit image registration framework}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {8}, year = {2014} } @article{Bardosi2020, abstract = {Purpose : A robotic intraoperative laser guidance system with hybrid optic-magnetic tracking for skull base surgery is presented. It provides in situ augmented reality guidance for microscopic interventions at the lateral skull base with minimal mental and workload overhead on surgeons working without a monitor and dedicated pointing tools. Methods : Three components were developed: a registration tool (Rhinospider), a hybrid magneto-optic-tracked robotic feedback control scheme and a modified robotic end-effector. Rhinospider optimizes registration of patient and preoperative CT data by excluding user errors in fiducial localization with magnetic tracking. The hybrid controller uses an integrated microscope HD camera for robotic control with a guidance beam shining on a dual plate setup avoiding magnetic field distortions. A robotic needle insertion platform (iSYS Medizintechnik GmbH, Austria) was modified to position a laser beam with high precision in a surgical scene compatible to microscopic surgery. Results : System accuracy was evaluated quantitatively at various target positions on a phantom. The accuracy found is 1.2 mm ± 0.5 mm. Errors are primarily due to magnetic tracking. This application accuracy seems suitable for most surgical procedures in the lateral skull base. The system was evaluated quantitatively during a mastoidectomy of an anatomic head specimen and was judged useful by the surgeon. Conclusion : A hybrid robotic laser guidance system with direct visual feedback is proposed for navigated drilling and intraoperative structure localization. The system provides visual cues directly on/in the patient anatomy, reducing the standard limitations of AR visualizations like depth perception. The custom-built end-effector for the iSYS robot is transparent to using surgical microscopes and compatible with magnetic tracking. The cadaver experiment showed that guidance was accurate and that the end-effector is unobtrusive.
This laser guidance has potential to aid the surgeon in finding the optimal mastoidectomy trajectory in more difficult interventions.}, author = {B{\'{a}}rdosi, Zolt{\'{a}}n and Plattner, Christian and {\"{O}}zbek, Yusuf and Hofmann, Thomas and Milosavljevic, Srdjan and Schartinger, Volker and Freysinger, Wolfgang}, doi = {10.1007/s11548-019-02066-1}, issn = {18616429}, journal = {International Journal of Computer Assisted Radiology and Surgery}, keywords = {Augmented reality,Laser guidance,Magnetic tracking,Microscope,Navigated surgery,Optical tracking,Robotic control}, number = {1}, pages = {49--57}, title = {{CIGuide: in situ augmented reality laser guidance}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {15}, year = {2020} } @inbook{Baglaeva2016, address = {Paris}, author = {Baglaeva, Elena and Tsapko, Sergey and Tsapko, Irina and Ershov, Aleksey}, booktitle = {Proceedings of the 2016 Conference on Information Technologies in Science, Management, Social Sphere and Medicine}, doi = {10.2991/itsmssm-16.2016.95}, editor = {Berestneva, O and Tikhomirov, A and Trufanov, A}, isbn = {978-94-6252-196-4}, pages = {354--358}, publisher = {Atlantis Press}, series = {ACSR-Advances in Computer Science Research}, title = {{Modelling of Cellular Structures Obtained By X-Ray Phase Contrast Imaging}}, type = {Book Section}, url = {{\%}3CGo to}, volume = {51}, year = {2016} } @article{Bailey2017, abstract = {The diffusion signal in breast tissue has primarily been modelled using apparent diffusion coefficient (ADC), intravoxel incoherent motion (IVIM) and diffusion tensor (DT) models, which may be too simplistic to describe the underlying tissue microstructure. Formalin-fixed breast cancer samples were scanned using a wide range of gradient strengths, durations, separations and orientations. A variety of one- and two-compartment models were tested to determine which best described the data. Models with restricted diffusion components and anisotropy were selected in most cancerous regions and there were no regions in which conventional ADC or DT models were selected. Maps of ADC generally related to cellularity on histology, but maps of parameters from more complex models suggest that both overall cell volume fraction and individual cell size can contribute to the diffusion signal, affecting the specificity of ADC to the tissue microstructure. The areas of coherence in diffusion anisotropy images were small, approximately 1 mm, but the orientation corresponded to stromal orientation patterns on histology.}, author = {Bailey, Colleen and Siow, Bernard and Panagiotaki, Eleftheria and Hipwell, John H. and Mertzanidou, Thomy and Owen, Julie and Gazinska, Patrycja and Pinder, Sarah E. and Alexander, Daniel C. and Hawkes, David J.}, doi = {10.1002/nbm.3679}, issn = {10991492}, journal = {NMR in Biomedicine}, keywords = {DTI,MRI,anisotropy,breast cancer,diffusion,ex vivo,restriction}, number = {2}, pages = {13}, title = {{Microstructural models for diffusion MRI in breast cancer and surrounding stroma: an ex vivo study}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {30}, year = {2017} } @article{Bajcsy2015, abstract = {Background: The goal of this survey paper is to overview cellular measurements using optical microscopy imaging followed by automated image segmentation. The cellular measurements of primary interest are taken from mammalian cells and their components. They are denoted as two- or three-dimensional (2D or 3D) image objects of biological interest.
In our applications, such cellular measurements are important for understanding cell phenomena, such as cell counts, cell-scaffold interactions, cell colony growth rates, or cell pluripotency stability, as well as for establishing quality metrics for stem cell therapies. In this context, this survey paper is focused on automated segmentation as a software-based measurement leading to quantitative cellular measurements. Methods: We define the scope of this survey and a classification schema first. Next, all found and manually filtered publications are classified according to the main categories: (1) objects of interests (or objects to be segmented), (2) imaging modalities, (3) digital data axes, (4) segmentation algorithms, (5) segmentation evaluations, (6) computational hardware platforms used for segmentation acceleration, and (7) object (cellular) measurements. Finally, all classified papers are converted programmatically into a set of hyperlinked web pages with occurrence and co-occurrence statistics of assigned categories. Results: The survey paper presents to a reader: (a) the state-of-the-art overview of published papers about automated segmentation applied to optical microscopy imaging of mammalian cells, (b) a classification of segmentation aspects in the context of cell optical imaging, (c) histogram and co-occurrence summary statistics about cellular measurements, segmentations, segmented objects, segmentation evaluations, and the use of computational platforms for accelerating segmentation execution, and (d) open research problems to pursue. Conclusions: The novel contributions of this survey paper are: (1) a new type of classification of cellular measurements and automated segmentation, (2) statistics about the published literature, and (3) a web hyperlinked interface to classification statistics of the surveyed papers at https://isg.nist.gov/deepzoomweb/resources/survey/index.html.}, author = {Bajcsy, Peter and Cardone, Antonio and Chalfoun, Joe and Halter, Michael and Juba, Derek and Kociolek, Marcin and Majurski, Michael and Peskin, Adele and Simon, Carl and Simon, Mylene and Vandecreme, Antoine and Brady, Mary}, doi = {10.1186/s12859-015-0762-2}, issn = {14712105}, journal = {BMC Bioinformatics}, keywords = {Accelerated execution of segmentation for high-thr,Cell segmentation,Cellular measurements,Segmentation evaluation,Segmented objects}, number = {1}, pages = {28}, title = {{Survey statistics of automated segmentations applied to optical imaging of mammalian cells}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {16}, year = {2015} } @article{Balasubramanian2019, abstract = {Retinal image analysis relies on the effectiveness of computational techniques to discriminate various abnormalities in the eye like diabetic retinopathy, macular degeneration and glaucoma. The onset of the disease is often unnoticed in case of glaucoma, the effect of which is felt only at a later stage. Diagnosis of such degenerative diseases warrants early diagnosis and treatment. In this work, performance of statistical and textural features in retinal vessel segmentation is evaluated through classifiers like extreme learning machine, support vector machine and Random Forest. The fundus images are initially preprocessed for any noise reduction, image enhancement and contrast adjustment. The two-dimensional Gabor Wavelets and Partition Clustering is employed on the preprocessed image to extract the blood vessels.
Finally, the combined hybrid features comprising statistical textural, intensity and vessel morphological features, extracted from the image, are used to detect glaucomatous abnormality through the classifiers. A crisp decision can be taken depending on the classifying rates of the classifiers. Public databases RIM-ONE and high-resolution fundus and local datasets are used for evaluation with threefold cross validation. The evaluation is based on performance metrics through accuracy, sensitivity and specificity. The evaluation of hybrid features obtained an overall accuracy of 97{\%} when tested using classifiers. The support vector machine classifier is able to achieve an accuracy of 93.33{\%} on high-resolution fundus, 93.8{\%} on RIM-ONE dataset and 95.3{\%} on local dataset. For extreme learning machine classifier, the accuracy is 95.1{\%} on high-resolution fundus, 97.8{\%} on RIM-ONE and 96.8{\%} on local dataset. An accuracy of 94.5{\%} on high-resolution fundus 92.5{\%} on RIM-ONE and 94.2{\%} on local dataset is obtained for the random forest classifier. Validation of the experiment results indicate that the hybrid features can be deployed in supervised classifiers to discriminate retinal abnormalities effectively.}, author = {Balasubramanian, Kishore and Ananthamoorthy, N. P.}, doi = {10.1177/0954411919835856}, issn = {20413033}, journal = {Proceedings of the Institution of Mechanical Engineers, Part H: Journal of Engineering in Medicine}, keywords = {Glaucoma,blood vessel,classifier,clustering,feature selection,fundus image,thresholding}, number = {5}, pages = {506--514}, title = {{Analysis of hybrid statistical textural and intensity features to discriminate retinal abnormalities through classifiers}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {233}, year = {2019} } @article{Barnes2010, abstract = {Introduction We aimed to assess the value of a second MR scan in the radiological diagnosis of dementia. Methods One hundred twenty subjects with clinical followup of at least 1 year with two scans were selected from a cognitive disorders clinic. Scans were reviewed as a single first scan (method A), two unregistered scans presented side-by-side (method B) and a registered pair (method C). Scans were presented to two neuroradiologists and a clinician together with approximate scan interval (if applicable) and age. Raters decided on a main and subtype diagnosis. Results There was no evidence that differences between methods (expressed as relative odds of a correct response) differed between reviewers (p=0.17 for degenerative condition or not, p=0.5 for main diagnosis, p=0.16 for subtype). Accordingly, results were pooled over reviewers. For distinguishing normal/non-progressors from degenerative conditions, the proportions correctly diagnosed were higher with methods B and C than with A (p=0.001, both tests). The difference between method B and C was not statistically significant (p=0.18). For main diagnosis, the proportion of correct diagnoses were highest with method C for all three reviewers; however, this was not statistically significant comparing with method A (p=0.23) or with method B (p=0.16). For subtype diagnosis, there was some evidence that method C was better than method A (p=0.01) and B (p=0.048). Conclusions Serial MRI and registration may improve visual diagnosis in dementia. {\textcopyright} Springer-Verlag 2010.}, author = {Barnes, Josephine and Mitchell, L. Anne and Kennedy, Jonathan and Bastos-Leite, Antonio J.
and Barker, Suzie and Lehmann, Manja and Nordstrom, R. Chris and Frost, Chris and Smith, Joseph R. and Garde, Ellen and Rossor, Martin N. and Fox, Nick C.}, doi = {10.1007/s00234-010-0665-x}, issn = {00283940}, journal = {Neuroradiology}, keywords = {Dementia,Diagnosis,Registration,Serial MRI,Visual assessment}, number = {11}, pages = {987--995}, title = {{Does registration of serial MRI improve diagnosis of dementia?}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-78650210726{\&}doi=10.1007{\%}2Fs00234-010-0665-x{\&}partnerID=40{\&}md5=52c55fbcb14f73d705d6a72ebba15776}, volume = {52}, year = {2010} } @article{BastidaJumilla2011, abstract = {Image processing turns out to be essential in the planning and verification of radiotherapy treatments. Before applying a radiotherapy treatment, a dosimetry planning must be performed. Usually, the planning is done by means of an X-ray volumetric analysis using computerized tomography, where the area to be radiated is marked out. During the treatment phase, it is necessary to place the patient under the particle accelerator exactly as considered in the dosimetry stage. Coarse alignment is achieved using fiduciary markers placed over the patient's skin as external references. Later, fine alignment is provided by comparing a digitally reconstructed radiography (DRR) from the planning stage and a portal image captured by the accelerator in the treatment stage. The preprocessing of DRR and portal images, as well as the minimization of the non-shared information between both kinds of images, is mandatory for the correct operation of the image registration algorithm. With this purpose, mathematical morphology and image processing techniques have been used. The present work describes a fully automatic method to calculate more accurately the necessary displacement of the couch to place the patient exactly at the planned position. The proposed method to achieve the correct positioning of the patient is based on advanced image registration techniques. Preliminary results show a perfect match with the displacement estimated by the physician. {\textcopyright} Society for Imaging Informatics in Medicine 2011.}, author = {Bastida-Jumilla, Ma Consuelo and Larrey-Ruiz, Jorge and Verd{\'{u}}-Monedero, Rafael and Morales-S{\'{a}}nchez, Juan and Sancho-G{\'{o}}mez, Jos{\'{e}} Luis}, doi = {10.1007/s10278-011-9376-z}, issn = {1618727X}, journal = {Journal of Digital Imaging}, keywords = {Biomedical image analysis,Image feature enhancement,Image registration,Radiotherapy}, number = {6}, pages = {999--1009}, title = {{DRR and portal image registration for automatic patient positioning in radiotherapy treatment}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {24}, year = {2011} } @article{Beare2016, abstract = {Measuring the distribution of brain tissue types (tissue classification) in neonates is necessary for studying typical and atypical brain development, such as that associated with preterm birth, and may provide biomarkers for neurodevelopmental outcomes. Compared with magnetic resonance images of adults, neonatal images present specific challenges that require the development of specialized, population-specific methods. This paper introduces MANTiS (Morphologically Adaptive Neonatal Tissue Segmentation), which extends the unified segmentation approach to tissue classification implemented in Statistical Parametric Mapping (SPM) software to neonates. 
MANTiS utilizes a combination of unified segmentation, template adaptation via morphological segmentation tools and topological filtering, to segment the neonatal brain into eight tissue classes: cortical gray matter, white matter, deep nuclear gray matter, cerebellum, brainstem, cerebrospinal fluid (CSF), hippocampus and amygdala. We evaluated the performance of MANTiS using two independent datasets. The first dataset, provided by the NeoBrainS12 challenge, consisted of coronal T2-weighted images of preterm infants (born ≤30 weeks' gestation) acquired at 30 weeks' corrected gestational age (n = 5), coronal T2-weighted images of preterm infants acquired at 40 weeks' corrected gestational age (n = 5) and axial T2-weighted images of preterm infants acquired at 40 weeks' corrected gestational age (n = 5). The second dataset, provided by the Washington University NeuroDevelopmental Research (WUNDeR) group, consisted of T2-weighted images of preterm infants (born {\textless}30 weeks' gestation) acquired shortly after birth (n = 12), preterm infants acquired at term-equivalent age (n = 12), and healthy term-born infants (born ≥38 weeks' gestation) acquired within the first 9 days of life (n = 12). For the NeoBrainS12 dataset, mean Dice scores comparing MANTiS with manual segmentations were all above 0.7, except for the cortical gray matter for coronal images acquired at 30 weeks. This demonstrates that MANTiS' performance is competitive with existing techniques. For the WUNDeR dataset, mean Dice scores comparing MANTiS with manually edited segmentations demonstrated good agreement, where all scores were above 0.75, except for the hippocampus and amygdala. The results show that MANTiS is able to segment neonatal brain tissues well, even in images that have brain abnormalities common in preterm infants. MANTiS is available for download as an SPM toolbox from http://developmentalimagingmcri.github.io/mantis}, author = {Beare, Richard J. and Chen, Jian and Kelly, Claire E. and Alexopoulos, Dimitrios and Smyser, Christopher D. and Rogers, Cynthia E. and Loh, Wai Y. and Matthews, Lillian G. and Cheong, Jeanie L.Y. and Spittle, Alicia J. and Anderson, Peter J. and Doyle, Lex W. and Inder, Terrie E. and Seal, Marc L. and Thompson, Deanne K.}, doi = {10.3389/fninf.2016.00012}, issn = {16625196}, journal = {Frontiers in Neuroinformatics}, keywords = {Magnetic resonance imaging,Neonate,Preterm birth,Statistical parametric mapping,Tissue classification}, number = {MAR}, pages = {17}, title = {{Neonatal brain tissue classification with morphological adaptation and unified segmentation}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {10}, year = {2016} } @article{Becker2016, abstract = {Purpose: With higher resolutions, medical image processing operations like segmentation take more time to calculate per step. The pyramid technique is a common approach to solving this problem. Starting with a low resolution, a stepwise refinement is applied until the original resolution is reached. Methods: Our work proposes a method for deformable model segmentation that generally utilizes the common pyramid technique with our improvement, to calculate and keep synchronized all mesh resolution levels in parallel. The models are coupled to propagate their changes. It presents coupling techniques and shows approaches for synchronization. The interaction with the models is realized using springs and volcanoes, and it is evaluated for the semantics of the operation to share them across the different levels. 
Results: The locking overhead has been evaluated for different synchronization techniques with meshes of individual resolutions. The partial update strategy has been found to have the least locking overhead. Conclusion: Running multiple models with individual resolutions in parallel is feasible. The synchronization approach has to be chosen carefully, so that an interactive modification of the segmentation remains possible. The proposed technique is aimed at making medical image segmentation more usable while delivering high performance.}, author = {Becker, Matthias and Nijdam, Niels and Magnenat-Thalmann, Nadia}, doi = {10.1007/s11548-015-1241-y}, issn = {18616429}, journal = {International Journal of Computer Assisted Radiology and Surgery}, keywords = {Deformable models,Mesh coupling,Multi-resolution,Synchronization}, number = {5}, pages = {695--705}, title = {{Coupling strategies for multi-resolution deformable meshes: expanding the pyramid approach beyond its one-way nature}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {11}, year = {2016} } @article{Beriault2012, abstract = {Purpose: Both frame-based and frameless approaches to deep brain stimulation (DBS) require planning of insertion trajectories that mitigate hemorrhagic risk and loss of neurological function. Currently, this is done by manual inspection of multiple potential electrode trajectories on MR-imaging data. We propose and validate a method for computer-assisted DBS trajectory planning. Method: Our framework integrates multi-modal MRI analysis (T1w, SWI, TOF-MRA) to compute suitable DBS trajectories that optimize the avoidance of specific critical brain structures. A cylinder model is used to process each trajectory and to evaluate complex surgical constraints described via a combination of binary and fuzzy segmented datasets. The framework automatically aggregates the multiple constraints into a unique ranking of recommended low-risk trajectories. Candidate trajectories are represented as a few well-defined cortical entry patches of best-ranked trajectories and presented to the neurosurgeon for final trajectory selection. Results: The proposed algorithm permits a search space containing over 8,000 possible trajectories to be processed in less than 20 s. A retrospective analysis on 14 DBS cases of patients with severe Parkinson's disease reveals that our framework can improve the simultaneous optimization of many pre-formulated surgical constraints. Furthermore, all automatically computed trajectories were evaluated by two neurosurgeons, were judged suitable for surgery and, in many cases, were judged preferable or equivalent to the manually planned trajectories used during the operation. Conclusions: This work provides neurosurgeons with an intuitive and flexible decision-support system that allows objective and patient-specific optimization of DBS lead trajectories, which should improve insertion safety and reduce surgical time. {\textcopyright} 2012 CARS.}, author = {B{\'{e}}riault, Silvain and Subaie, Fahd Al and Collins, D. Louis and Sadikot, Abbas F. and Pike, G. 
Bruce}, doi = {10.1007/s11548-012-0768-4}, issn = {18616429}, journal = {International Journal of Computer Assisted Radiology and Surgery}, keywords = {Decision-support system,Deep brain stimulation,Image-guided neurosurgery,Parkinson's disease,Preoperative planning}, number = {5}, pages = {687--704}, title = {{A multi-modal approach to computer-assisted deep brain stimulation trajectory planning}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {7}, year = {2012} } @inproceedings{Bernardini, abstract = {Molecular imaging based on Fluorescence Resonance Energy Transfer (FRET) is widely used in cellular physiology both for protein-protein interaction analysis and detecting conformational changes of single proteins, e.g. during activation of signaling cascades. However, getting reliable results from FRET measurements is still hampered by methodological problems such as spectral bleed through, chromatic aberration, focal plane shifts and false positive FRET. Particularly false positive FRET signals caused by random interaction of the fluorescent dyes can easily lead to misinterpretation of the data. This work introduces a Nipkow Disc based FRET microscopy system, that is easy to operate without expert knowledge of FRET. The system automatically accounts for all relevant sources of errors and provides various result presentations of two, three and four dimensional FRET data. Two examples are given to demonstrate the scope of application. An interaction analysis of the two subunits of the hypoxia-inducible transcription factor 1 demonstrates the use of the system as a tool for protein-protein interaction analysis. As an example for time lapse observations, the conformational change of the fluorophore labeled heat shock protein 33 in the presence of oxidant stress is shown.}, author = {Bernardini, Andr{\'{e}} and Wotzlaw, Christoph and Lipinski, Hans-Gerd and Fandrey, Joachim}, booktitle = {Optics, Photonics, and Digital Technologies for Multimedia Applications}, doi = {10.1117/12.854027}, isbn = {9780819481962}, issn = {0277786X}, pages = {772311}, title = {{An automated real-time microscopy system for analysis of fluorescence resonance energy transfer}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-77954408709{\&}doi=10.1117{\%}2F12.854027{\&}partnerID=40{\&}md5=62af8ffb1680932adc4db6fa28a2b4f1}, volume = {7723}, year = {2010} } @article{Bernus2015, abstract = {Background: Cardiovascular magnetic resonance (CMR) can through the two methods 3D FLASH and diffusion tensor imaging (DTI) give complementary information on the local orientations of cardiomyocytes and their laminar arrays. Methods: Eight explanted rat hearts were perfused with Gd-DTPA contrast agent and fixative and imaged in a 9.4T magnet by two types of acquisition: 3D fast low angle shot (FLASH) imaging, voxels 50 × 50 × 50 $\mu$m, and 3D spin echo DTI with monopolar diffusion gradients of 3.6 ms duration at 11.5 ms separation, voxels 200 × 200 × 200 $\mu$m. The sensitivity of each approach to imaging parameters was explored. Results: The FLASH data showed laminar alignments of voxels with high signal, in keeping with the presumed predominance of contrast in the interstices between sheetlets. It was analysed, using structure-tensor (ST) analysis, to determine the most (v1ST), intermediate (v2ST) and least (v3ST) extended orthogonal directions of signal continuity.
The DTI data was analysed to determine the most (e 1DTI ), intermediate (e 2DTI ) and least (e 3DTI ) orthogonal eigenvectors of extent of diffusion. The correspondence between the FLASH and DTI methods was measured and appraised. The most extended direction of FLASH signal (v 1ST ) agreed well with that of diffusion (e 1DTI ) throughout the left ventricle (representative discrepancy in the septum of 13.3∈±∈6.7°: median∈±∈absolute deviation) and both were in keeping with the expected local orientations of the long-axis of cardiomyocytes. However, the orientation of the least directions of FLASH signal continuity (v 3ST ) and diffusion (e 3ST ) showed greater discrepancies of up to 27.9∈±∈17.4°. Both FLASH (v 3ST ) and DTI (e 3DTI ) where compared to directly measured laminar arrays in the FLASH images. For FLASH the discrepancy between the structure-tensor calculated v 3ST and the directly measured FLASH laminar array normal was of 9∈±∈7° for the lateral wall and 7∈±∈9°for the septum (median∈±∈inter quartile range), and for DTI the discrepancy between the calculated v 3DTI and the directly measured FLASH laminar array normal was 22∈±∈14°and 61∈±∈53.4°. DTI was relatively insensitive to the number of diffusion directions and to time up to 72 hours post fixation, but was moderately affected by b-value (which was scaled by modifying diffusion gradient pulse strength with fixed gradient pulse separation). Optimal DTI parameters were b∈=∈1000 mm/s2 and 12 diffusion directions. FLASH acquisitions were relatively insensitive to the image processing parameters explored. Conclusions: We show that ST analysis of FLASH is a useful and accurate tool in the measurement of cardiac microstructure. While both FLASH and the DTI approaches appear promising for mapping of the alignments of myocytes throughout myocardium, marked discrepancies between the cross myocyte anisotropies deduced from each method call for consideration of their respective limitations.}, author = {Bernus, Olivier and Radjenovic, Aleksandra and Trew, Mark L. and Legrice, Ian J. and Sands, Gregory B. and Magee, Derek R. and Smaill, Bruce H. and Gilbert, Stephen H.}, doi = {10.1186/s12968-015-0129-x}, issn = {1532429X}, journal = {Journal of Cardiovascular Magnetic Resonance}, keywords = {Cardiovascular magnetic resonance,Diffusion tensor imaging,Myocardium,Myolaminar}, number = {1}, pages = {27}, title = {{Comparison of diffusion tensor imaging by cardiovascular magnetic resonance and gadolinium enhanced 3D image intensity approaches to investigation of structural anisotropy in explanted rat hearts}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {17}, year = {2015} } @inproceedings{Blackburn, abstract = {Despite significant advances in biological imaging and analysis, major informatics challenges remain unsolved: file formats are proprietary, storage and analysis facilities are lacking, as are standards for sharing image data and results. While the open FITS file format is ubiquitous in astronomy, astronomical imaging shares many challenges with biological imaging, including the need to share large image sets using secure, cross-platform APIs, and the need for scalable applications for processing and visualization. The Open Microscopy Environment (OME) is an open-source software framework developed to address these challenges. 
OME tools include: an open data model for multidimensional imaging (OME Data Model); an open file format (OME-TIFF) and library (Bio-Formats) enabling free access to images (5D+) written in more than 145 formats from many imaging domains, including FITS; and a data management server (OMERO). The Java-based OMERO client-server platform comprises an image metadata store, an image repository, visualization and analysis by remote access, allowing sharing and publishing of image data. OMERO provides a means to manage the data through a multi-platform API. OMERO's model-based architecture has enabled its extension into a range of imaging domains, including light and electron microscopy, high content screening, digital pathology and recently into applications using non-image data from clinical and genomic studies. This is made possible using the Bio-Formats library. The current release includes a single mechanism for accessing image data of all types, regardless of original file format, via Java, C/C++ and Python and a variety of applications and environments (e.g. ImageJ, Matlab and R).}, author = {Blackburn, Colin and Allan, Chris and Besson, S{\'{e}}bastien and Burel, Jean-Marie and Carroll, Mark and Ferguson, Richard K. and Flynn, Helen and Gault, David and Gillen, Kenneth and Leigh, Roger and Leo, Simone and Li, Simon and Lindner, Dominik and Linkert, Melissa and Moore, Josh and Moore, William J. and Ramalingam, Balaji and Rozbicki, Emil and Rustici, Gabriella and Tarkowska, Aleksandra and Walczysko, Petr and Williams, Eleanor and Swedlow, Jason R.}, booktitle = {Software and Cyberinfrastructure for Astronomy IV}, doi = {10.1117/12.2232291}, isbn = {9781510602052}, issn = {1996756X}, pages = {991324}, title = {{The Open Microscopy Environment: open image informatics for the biological sciences}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85006380046{\&}doi=10.1117{\%}2F12.2232291{\&}partnerID=40{\&}md5=e050c03cd02d1a387a82a993b146e1d1}, volume = {9913}, year = {2016} } @article{Blackledge2016, abstract = {We present pyOsiriX, a plugin built for the already popular dicom viewer OsiriX that provides users the ability to extend the functionality of OsiriX through simple Python scripts. This approach allows users to integrate the many cutting-edge scientific/image-processing libraries created for Python into a powerful DICOM visualisation package that is intuitive to use and already familiar to many clinical researchers. Using pyOsiriX we hope to bridge the apparent gap between basic imaging scientists and clinical practice in a research setting and thus accelerate the development of advanced clinical image processing. We provide arguments for the use of Python as a robust scripting language for incorporation into larger software solutions, outline the structure of pyOsiriX and how it may be used to extend the functionality of OsiriX, and we provide three case studies that exemplify its utility.For our first case study we use pyOsiriX to provide a tool for smooth histogram display of voxel values within a user-defined region of interest (ROI) in OsiriX. We used a kernel density estimation (KDE) method available in Python using the scikit-learn library, where the total number of lines of Python code required to generate this tool was 22. Our second example presents a scheme for segmentation of the skeleton from CT datasets. 
We have demonstrated that good segmentation can be achieved for two example CT studies by using a combination of Python libraries including scikit-learn, scikit-image, SimpleITK and matplotlib. Furthermore, this segmentation method was incorporated into an automatic analysis of quantitative PET-CT in a patient with bone metastases from primary prostate cancer. This enabled repeatable statistical evaluation of PET uptake values for each lesion, before and after treatment, providing estimates of maximum and median standardised uptake values (SUVmax and SUVmed respectively). Following treatment we observed a reduction in lesion volume, SUVmax and SUVmed for all lesions, in agreement with a reduction in concurrent measures of serum prostate-specific antigen (PSA).}, author = {Blackledge, Matthew D. and Collins, David J. and Koh, Dow Mu and Leach, Martin O.}, doi = {10.1016/j.compbiomed.2015.12.002}, issn = {18790534}, journal = {Computers in Biology and Medicine}, keywords = {Computed tomography,Dicom management,Dicom visualisation,Medical imaging,OsiriX,Python,Radiology}, pages = {203--212}, title = {{Rapid development of image analysis research tools: Bridging the gap between researcher and clinician with pyOsiriX}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {69}, year = {2016} } @article{BonnetGarnier2018, abstract = {Changes to the spatial organization of specific chromatin domains such as constitutive heterochromatin have been studied extensively in somatic cells. During early embryonic development, drastic epigenetic reprogramming of both the maternal and paternal genomes, followed by chromatin remodeling at the time of embryonic genome activation (EGA), have been observed in the mouse. Very few studies have been performed in other mammalian species (human, bovine, or rabbit) and the data are far from complete. During this work, we studied the three-dimensional organization of pericentromeric regions during the preimplantation period in the rabbit using specific techniques (3D-FISH) and tools (semi-automated image analysis). We observed that the pericentromeric regions (identified with specific probes for Rsat I and Rsat II genomic sequences) changed their shapes (from pearl necklaces to clusters), their nuclear localizations (from central to peripheral), as from the 4-cell stage. This reorganization goes along with histone modification changes and reduced amount of interactions with nucleolar precursor body surface. Altogether, our results suggest that the 4-cell stage may be a crucial window for events necessary before major EGA, which occurs during the 8-cell stage in the rabbit.}, author = {Bonnet-Garnier, Am{\'{e}}lie and Ki{\^{e}}u, Ki{\^{e}}n and Aguirre-Lavin, Tiphaine and Tar, Krisztina and Flores, Pierre and Liu, Zichuan and Peynot, Nathalie and Chebrout, Martine and Dinny{\'{e}}s, Andr{\'{a}}s and Duranthon, V{\'{e}}ronique and Beaujean, Nathalie}, doi = {10.1007/s00412-018-0671-z}, issn = {14320886}, journal = {Chromosoma}, keywords = {3D-FISH,Centromeres,Embryos,Epigenetic modifications,Satellite sequences}, number = {3}, pages = {387--403}, title = {{Three-dimensional analysis of nuclear heterochromatin distribution during early development in the rabbit}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {127}, year = {2018} } @article{Boyd2006, abstract = {The advent of in vivo micro-computed tomography (micro-CT) provides a novel approach to measure the temporal adaptation of bone micro-architecture within an individual.
Spatial alignment in the scanner between serial scans is challenging, but three-dimensional image registration can be used to superimpose the resulting image data, thus ensuring consistent regions of interest (ROI) for analysis. There have been several approaches to image registration developed, yet little is known about their application to high resolution micro-CT data. The purpose of this study was to explore combinations of three image registration similarity measures and three image interpolators, in addition to multiresolution registration configurations, for assessment of computational efficiency and accuracy on both in vitro and in vivo micro-CT data. Accuracy measures were assessed by comparison with a gold-standard reference transform based on attached fiducial markers. It was concluded that a mutual information registration similarity measure with a linear image interpolator, applied at steps of increasing image resolution, provided the best compromise between accurate and efficient results. In vivo registration of tibial bone microstructure measured in an ovariectomized rat model provided consistent ROI thus demonstrating the usefulness of three-dimensional image registration for in vivo experimental and clinical micro-CT research. It is a technique that is poised to become commonly utilized for analysis of micro-CT data to diagnose and monitor efficacy of therapy in bone diseases. {\textcopyright} 2006 Biomedical Engineering Society.}, author = {Boyd, Steven K. and Moser, Stephan and Kuhn, Michael and Klinck, Robert J. and Krauze, Peter L. and M{\"{u}}ller, Ralph and Gasser, J{\"{u}}rg A.}, doi = {10.1007/s10439-006-9168-7}, issn = {00906964}, journal = {Annals of Biomedical Engineering}, keywords = {Image registration,Micro-computed tomography,Osteoporosis,Rat models,Tibial bone micro-structure}, number = {10}, pages = {1587--1599}, title = {{Evaluation of three-dimensional image registration methodologies for in vivo micro-computed tomography}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {34}, year = {2006} } @article{Breighner2019, abstract = {Background: Femoroacetabular impingement syndrome (FAIS) is a common disorder of the hip resulting in groin pain and ultimately osteoarthritis. Radiologic assessment of FAI morphologies, which may present with overlapping radiologic features of hip dysplasia, often requires the use of computed tomography (CT) for evaluation of osseous abnormality, owing to the difficulty of direct visualization of cortical and subchondral bone with conventional magnetic resonance imaging (MRI). The use of a zero echo time (ZTE) MRI pulse sequence may obviate the need for CT by rendering bone directly from MRI. Purpose/Hypothesis: The purpose was to explore the application of ZTE MRI to the assessment of osseous FAI and dysplasia morphologies of the hip. It was hypothesized that angular measurements from ZTE images would show significant agreement with measurements obtained from CT images. Study Design: Cohort study (diagnosis); Level of evidence, 2. Methods: Thirty-eight hips from 23 patients were imaged with ZTE MRI and CT. Clinically relevant angular measurements of hip morphology were made in both modalities and compared to assess agreement. Measurements included coronal and sagittal center-edge angles, femoral neck-shaft angle, acetabular version (at 1-, 2-, and 3-o'clock positions), T{\"{o}}nnis angle, alpha angle, and modified-beta angle. Interrater agreement was assessed for a subset of 10 hips by 2 raters. 
Intermodal agreement was assessed on the complete cohort and a single rater. Results: Interrater agreement was demonstrated in both CT and ZTE, with intraclass correlation coefficient values ranging from 0.636 to 0.990 for ZTE and 0.747 to 0.983 for CT, indicating “good” to “excellent” agreement. Intermodal agreement was also shown to be significant, with intraclass correlation coefficients ranging from 0.618 to 0.904. Conclusion: Significant agreement of angular measurements for hip morphology exists between ZTE MRI and CT imaging. ZTE MRI may be an effective method to quantitatively evaluate osseous hip morphology.}, author = {Breighner, Ryan E. and Bogner, Eric A. and Lee, Susan C. and Koff, Matthew F. and Potter, Hollis G.}, doi = {10.1177/0363546519878170}, issn = {15523365}, journal = {American Journal of Sports Medicine}, keywords = {FAI,MRI,femoroacetabular impingement,imaging}, number = {14}, pages = {3460--3468}, title = {{Evaluation of Osseous Morphology of the Hip Using Zero Echo Time Magnetic Resonance Imaging}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {47}, year = {2019} } @article{Brown2012, abstract = {OBJECTIVE: The development and evaluation of a computer-aided bone scan analysis technique to quantify changes in tumor burden and assess treatment effects in prostate cancer clinical trials. METHODS: We have developed and report on a commercial fully automated computer-aided detection (CAD) system. Using this system, scan images were intensity normalized, and then lesions were identified and segmented by anatomic region-specific intensity thresholding. Detected lesions were compared against expert markings to assess the accuracy of the CAD system. The metrics Bone Scan Lesion Area, Bone Scan Lesion Intensity, and Bone Scan Lesion Count were calculated from identified lesions, and their utility in assessing treatment effects was evaluated by analyzing before and after scans from metastatic castration-resistant prostate cancer patients: 10 treated and 10 untreated. In this study, patients were treated with cabozantinib, a MET/vascular endothelial growth factor inhibitor resulting in high rates of resolution of bone scan abnormalities. RESULTS: Our automated CAD system identified bone lesion pixels with 94{\%} sensitivity, 89{\%} specificity, and 89{\%} accuracy. Significant differences in changes from baseline were found between treated and untreated groups in all assessed measurements derived by our system. The most significant measure, Bone Scan Lesion Area, showed a median (interquartile range) change from baseline at week 6 of 7.13{\%} (27.61) in the untreated group compared with -73.76{\%} (45.38) in the cabozantinib-treated group (P=0.0003). CONCLUSION: Our system accurately and objectively identified and quantified metastases in bone scans, allowing for interpatient and intrapatient comparison. It demonstrates potential as an objective measurement of treatment effects, laying the foundation for validation against other clinically relevant outcome measures. Copyright {\textcopyright} Lippincott Williams {\&} Wilkins.}, author = {Brown, Matthew S. and Chu, Gregory H. and Kim, Hyun J. and Allen-Auerbach, Martin and Poon, Cheryce and Bridges, Juliette and Vidovic, Adria and Ramakrishna, Bharath and Ho, Judy and Morris, Michael J. and Larson, Steven M. and Scher, Howard I. 
and Goldin, Jonathan G.}, doi = {10.1097/MNM.0b013e3283503ebf}, issn = {01433636}, journal = {Nuclear Medicine Communications}, keywords = {bone neoplasms,bone scan,computer-assisted detection,computer-assisted image processing,prostate cancer,radionuclide imaging}, number = {4}, pages = {384--394}, title = {{Computer-aided quantitative bone scan assessment of prostate cancer treatment response}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {33}, year = {2012} } @article{Bui2013, abstract = {Imaging has become a prevalent tool in the diagnosis and treatment of many diseases, providing a unique in vivo, multi-scale view of anatomic and physiologic processes. With the increased use of imaging and its progressive technical advances, the role of imaging informatics is now evolving-from one of managing images, to one of integrating the full scope of clinical information needed to contextualize and link observations across phenotypic and genotypic scales. Several challenges exist for imaging informatics, including the need for methods to transform clinical imaging studies and associated data into structured information that can be organized and analyzed. We examine some of these challenges in establishing imaging-based observational databases that can support the creation of comprehensive disease models. The development of these databases and ensuing models can aid in medical decision making and knowledge discovery and ultimately, transform the use of imaging to support individually-tailored patient care.}, author = {Bui, Alex A.T. and Hsu, William and Arnold, Corey and El-Saden, Suzie and Aberle, Denise R. and Taira, Ricky K.}, doi = {10.1136/amiajnl-2012-001340}, issn = {10675027}, journal = {Journal of the American Medical Informatics Association}, number = {6}, pages = {1053--1058}, title = {{Imaging-based observational databases for clinical problem solving: The role of informatics}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {20}, year = {2013} } @article{Bultman2014, abstract = {Purpose To develop and demonstrate the feasibility of a new formulation for quantitative perfusion modeling in the liver using interrupted DCE-MRI data acquired during multiple sequential breathholds. Materials and Methods A new mathematical formulation to estimate quantitative perfusion parameters using interrupted data was developed. Using this method, we investigated whether a second degree-of-freedom in the tissue residue function (TRF) improves quality-of-fit criteria when applied to a dual-input single-compartment perfusion model. We subsequently estimated hepatic perfusion parameters using DCE-MRI data from 12 healthy volunteers and 9 cirrhotic patients with a history of hepatocellular carcinoma (HCC); and examined the utility of these estimates in differentiating between healthy liver, cirrhotic liver, and HCC. Results Quality-of-fit criteria in all groups were improved using a Weibull TRF (2 degrees-of-freedom) versus an exponential TRF (1 degree-of-freedom), indicating nearer concordance of source DCE-MRI data with the Weibull model. Using the Weibull TRF, arterial fraction was greater in cirrhotic versus normal liver (39 ± 23{\%} versus 15 ± 14{\%}, P = 0.07). Mean transit time (20.6 ± 4.1 s versus 9.8 ± 3.5 s, P = 0.01) and arterial fraction (39 ± 23{\%} versus 73 ± 14{\%}, P = 0.04) were both significantly different between cirrhotic liver and HCC, while differences in total perfusion approached significance. 
Conclusion This work demonstrates the feasibility of estimating hepatic perfusion parameters using interrupted data acquired during sequential breathholds. {\textcopyright} 2013 Wiley Periodicals, Inc.}, author = {Bultman, Eric M. and Brodsky, Ethan K. and Horng, Debra E. and Irarrazaval, Pablo and Schelman, William R. and Block, Walter F. and Reeder, Scott B.}, doi = {10.1002/jmri.24238}, issn = {15222586}, journal = {Journal of Magnetic Resonance Imaging}, keywords = {DCE-MRI,hepatic perfusion modeling,hepatocellular carcinoma,quantitative perfusion MRI,tumor perfusion modeling}, number = {4}, pages = {853--865}, pmid = {24395144}, title = {{Quantitative hepatic perfusion modeling using DCE-MRI with sequential breathholds}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {39}, year = {2014} } @article{Busch2012, abstract = {To fully describe gene expression dynamics requires the ability to quantitatively capture expression in individual cells over time. Automated systems for acquiring and analyzing real-time images are needed to obtain unbiased data across many samples and conditions. We developed a microfluidics device, the RootArray, in which 64 Arabidopsis thaliana seedlings can be grown and their roots imaged by confocal microscopy over several days without manual intervention. To achieve high throughput, we decoupled acquisition from analysis. In the acquisition phase, we obtain images at low resolution and segment to identify regions of interest. Coordinates are communicated to the microscope to record the regions of interest at high resolution. In the analysis phase, we reconstruct three-dimensional objects from stitched high-resolution images and extract quantitative measurements from a virtual medial section of the root. We tracked hundreds of roots to capture detailed expression patterns of 12 transgenic reporter lines under different conditions. {\textcopyright} 2012 Nature America, Inc. All rights reserved.}, author = {Busch, Wolfgang and Moore, Brad T. and Martsberger, Bradley and MacE, Daniel L. and Twigg, Richard W. and Jung, Jee and Pruteanu-Malinici, Iulian and Kennedy, Scott J. and Fricke, Gregory K. and Clark, Robert L. and Ohler, Uwe and Benfey, Philip N.}, doi = {10.1038/nmeth.2185}, issn = {15487091}, journal = {Nature Methods}, number = {11}, pages = {1101--1106}, title = {{A microfluidic device and computational platform for high-throughput live imaging of gene expression}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {9}, year = {2012} } @article{Buzzatti2019, abstract = {The objective of the current study was to explore the potential of dynamic computed tomography to detect kinematic changes, induced by sequential sectioning of the lateral collateral ligaments of the ankle, during full motion sequence of the talocrural joint. A custom-made device was used to induce cyclic controlled ankle inversion movement in one fresh frozen cadaver leg. A 256-slice CT scanner was used to investigate four different scenarios. Scenario 1 with all ligaments intact was first investigated followed by sequential section of the anterior talo-fibular ligament (Scenario 2), the calcaneo-fibular ligament (Scenario 3) and posterior talo-fibular ligament (Scenario 4). Off-line image processing based on semi-automatic segmentation and bone rigid registration was performed. Motion parameters such as translation, rotational angles and orientation and position of the axis of rotation were calculated. Differences between scenarios were calculated. 
Progressive increase of cranio-caudal displacement up to 3.9 mm and flexion up to 10° compared to Scenario 1 were reported. Progressive changes in orientation (up to 20.6°) and position (up to 4.1 mm) of the axis of rotation were also shown. Estimated effective dose of 0.005 mSv (1.9 mGy CTDIvol) was reported. This study demonstrated that kinematic changes due to the absence of ligament integrity can be detected with 4DCT with minimal radiation exposure. Identifying abnormal kinematic patterns could have future application in helping clinicians to choose patients' optimal treatment. Therefore, further studies with bigger in vitro sample sizes and consequent investigations in vivo are recommended to confirm the current findings.}, author = {Buzzatti, Luca and Keelson, Benyameen and Apperloo, Jildert and Scheerlinck, Thierry and Baeyens, Jean Pierre and {Van Gompel}, Gert and Vandemeulebroucke, Jef and de Maeseneer, Michel and de Mey, Johan and Buls, Nico and Cattrysse, Erik}, doi = {10.1038/s41598-018-38101-5}, issn = {20452322}, journal = {Scientific Reports}, number = {1}, pages = {9}, title = {{Four-dimensional CT as a valid approach to detect and quantify kinematic changes after selective ankle ligament sectioning}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {9}, year = {2019} } @inproceedings{Calvini, abstract = {Atrophy and other brain changes, which are typical of aging, generate wide inter-individual variability of morphology in the medial temporal lobe (MTL), including the hippocampal formation. Starting from a sample population of 133 MR images we developed a procedure that extracts from each MR two sub images, containing the hippocampal formations plus a portion of the adjacent tissues and cavities. Then, a small number of templates is selected among the previously obtained sub images, able to describe the morphological variability present in the whole population. Finally an automatic procedure is prepared which, on the basis of the given set of templates, is able to find both hippocampal formations in any new MR image. MR images ranging from normalcy to extreme atrophy can be successfully processed. The proposed approach, besides being a preliminary step towards the unsupervised segmentation of the hippocampus, extracts from the MR image information useful for diagnostic purposes and, in particular, could give the possibility of performing morphometric studies on the medial temporal lobe in an automated way. The automated analysis of MTL atrophy in the segmented volume is readily applied to the early assessment of Alzheimer Disease (AD), leading to discriminating converters from Mild Cognitive Impairment (MCI) to AD with an average three years follow-up. This procedure can quickly and reliably provide additional information in early diagnosis of AD. 
{\textcopyright} 2008 IEEE.}, author = {Calvini, Piero and Chincarini, Andrea and Donadio, Stefania and Gemme, Gianluca and Squarcia, Sandro and Nobili, Flavio and Rodriguez, Guido and Bellotti, Roberto and Catanzariti, Ezio and Cerello, Piergiorgio and {De Mitri}, Ivan and Fantacci, Maria Evelina}, booktitle = {IEEE Nuclear Science Symposium Conference Record}, doi = {10.1109/NSSMIC.2008.4774245}, isbn = {9781424427154}, issn = {10957863}, pages = {4348--4354}, title = {{Automatic localization of the hippocampal region in MR images to assess early diagnosis of Alzheimer's disease in MCI patients}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-67649172535{\&}doi=10.1109{\%}2FNSSMIC.2008.4774245{\&}partnerID=40{\&}md5=0876425a595d10f8641904244780ec81}, year = {2008} } @article{Captur2015, abstract = {Many of the structures and parameters that are detected, measured and reported in cardiovascular magnetic resonance (CMR) have at least some properties that are fractal, meaning complex and self-similar at different scales. To date however, there has been little use of fractal geometry in CMR; by comparison, many more applications of fractal analysis have been published in MR imaging of the brain. This review explains the fundamental principles of fractal geometry, places the fractal dimension into a meaningful context within the realms of Euclidean and topological space, and defines its role in digital image processing. It summarises the basic mathematics, highlights strengths and potential limitations of its application to biomedical imaging, shows key current examples and suggests a simple route for its successful clinical implementation by the CMR community. By simplifying some of the more abstract concepts of deterministic fractals, this review invites CMR scientists (clinicians, technologists, physicists) to experiment with fractal analysis as a means of developing the next generation of intelligent quantitative cardiac imaging tools.}, author = {Captur, Gabriella and Karperien, Audrey L. and Li, Chunming and Zemrak, Filip and Tobon-Gomez, Catalina and Gao, Xuexin and Bluemke, David A. and Elliott, Perry M. and Petersen, Steffen E. and Moon, James C.}, doi = {10.1186/s12968-015-0179-0}, issn = {1532429X}, journal = {Journal of Cardiovascular Magnetic Resonance}, keywords = {Cardiovascular magnetic resonance,Image processing,Segmentation}, number = {1}, pages = {10}, title = {{Fractal frontiers in cardiovascular magnetic resonance: Towards clinical implementation}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {17}, year = {2015} } @article{Caruso2018, abstract = {Purpose: Texture analysis based on Haralick features is a recent oncologic imaging biomarker used to assess quantitatively the heterogeneity within a tumor. The aim of this study is to evaluate which Haralick's features are the most feasible in predicting tumor response to neoadjuvant chemoradiotherapy (CRT) in colorectal cancer. Materials and Methods: After MRI and histological assessment, eight patients were enrolled and divided into two groups based on response to neoadjuvant CRT in complete responders (CR) and non-responders (NR). Oblique Axial T2-weighted MRI sequences before CRT were analyzed by two radiologists in consensus drawing a ROI around the tumor. 14 over 192 Haralick's features were extrapolated from normalized gray-level co-occurrence matrix in four different directions.
A dedicated statistical analysis was performed to evaluate distribution of the extracted Haralick's features computing mean and standard deviation. Results: Pretreatment MRI examination showed significant value (p {\textless} 0.05) of 5 over 14 computed Haralick texture. In particular, the significant features are the following: concerning energy, contrast, correlation, entropy and inverse difference moment. Conclusions: Five Haralick's features showed significant relevance in the prediction of response to therapy in colorectal cancer and might be used as additional imaging biomarker in the oncologic management of colorectal patients.}, author = {Caruso, Damiano and Zerunian, Marta and Ciolina, Maria and de Santis, Domenico and Rengo, Marco and Soomro, Mumtaz H. and Giunta, Gaetano and Conforto, Silvia and Schmid, Maurizio and Neri, Emanuele and Laghi, Andrea}, doi = {10.1007/s11547-017-0833-8}, issn = {18266983}, journal = {Radiologia Medica}, keywords = {Colorectal cancer,Haralick's texture analysis,Response to therapy,T2-weighted MRI}, number = {3}, pages = {161--167}, title = {{Haralick's texture features for the prediction of response to therapy in colorectal cancer: a preliminary study}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85033363764{\&}doi=10.1007{\%}2Fs11547-017-0833-8{\&}partnerID=40{\&}md5=507a4f33003fadbdd5ac25f4d11e4428}, volume = {123}, year = {2018} } @article{Chabriais2005, author = {Chabriais, J.}, doi = {10.1016/s0221-0363(05)81460-x}, issn = {02210363}, journal = {Journal de Radiologie}, number = {7-8}, pages = {864--867}, title = {{Informatique et imagerie m{\'{e}}dicale: Les tendances d{\'{e}}celables {\`{a}} Chicago en 2004}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {86}, year = {2005} } @article{Chen2019, abstract = {PURPOSE: In prostate focal therapy, it is important to accurately localize malignant lesions in order to increase biological effect of the tumor region while achieving a reduction in dose to noncancerous tissue. In this work, we proposed a transfer learning-based deep learning approach, for classification of prostate lesions in multiparametric magnetic resonance imaging images. METHODS: Magnetic resonance imaging images were preprocessed to remove bias artifact and normalize the data. Two state-of-the-art deep convolutional neural network models, InceptionV3 and VGG-16, were pretrained on ImageNet data set and retuned on the multiparametric magnetic resonance imaging data set. As lesion appearances differ by the prostate zone that it resides in, separate models were trained. Ensembling was performed on each prostate zone to improve area under the curve. In addition, the predictions from lesions on each prostate zone were scaled separately to increase the area under the curve for all lesions combined. RESULTS: The models were tuned to produce the highest area under the curve on validation data set. When it was applied to the unseen test data set, the transferred InceptionV3 model achieved an area under the curve of 0.81 and the transferred VGG-16 model achieved an area under the curve of 0.83. This was the third best score among the 72 methods from 33 participating groups in ProstateX competition. CONCLUSION: The transfer learning approach is a promising method for prostate cancer detection on multiparametric magnetic resonance imaging images. 
Features learned from ImageNet data set can be useful for medical images.}, author = {Chen, Quan and Hu, Shiliang and Long, Peiran and Lu, Fang and Shi, Yujie and Li, Yunpeng}, doi = {10.1177/1533033819858363}, issn = {15330338}, journal = {Technology in cancer research {\&} treatment}, keywords = {AI,convolutional neural network,focal therapy,mpMRI,prostate lesion,transfer learning}, pages = {9}, pmid = {31221034}, title = {{A Transfer Learning Approach for Malignant Prostate Lesion Detection on Multiparametric MRI}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {18}, year = {2019} } @article{Chen2018, abstract = {Multiparametric magnetic resonance imaging (mpMRI) has been established as the state-of-the-art examination for the detection and localization of prostate cancer lesions. Prostate Imaging-Reporting and Data System (PI-RADS) has been established as a scheme to standardize the reporting of mpMRI findings. Although lesion delineation and PI-RADS ratings could be performed manually, human delineation and ratings are subjective and time-consuming. In this article, we developed and validated a self-tuned graph-based model for PI-RADS rating prediction. 34 features were obtained at the pixel level from T2-weighted (T2W), apparent diffusion coefficient (ADC) and dynamic contrast enhanced (DCE) images, from which PI-RADS scores were predicted. Two major innovations were involved in this self-tuned graph-based model. First, graph-based approaches are sensitive to the choice of the edge weight. The proposed model tuned the edge weights automatically based on the structure of the data, thereby obviating empirical edge weight selection. Second, the feature weights were tuned automatically to give heavier weights to features important for PI-RADS rating estimation. The proposed framework was evaluated for its lesion localization performance in mpMRI datasets of 12 patients. In the evaluation, the PI-RADS score distribution map generated by the algorithm and from the observers' ratings were binarized by thresholds of 3 and 4. The sensitivity, specificity and accuracy obtained in these two threshold settings ranged from 65 to 77{\%}, 86 to 93{\%} and 85 to 88{\%} respectively, which are comparable to results obtained in previous studies in which non-clinical T2 maps were available. The proposed algorithm took 10s to estimate the PI-RADS score distribution in an axial image. The efficiency achievable suggests that this technique can be developed into a prostate MR analysis system suitable for clinical use after a thorough validation involving more patients.}, author = {Chen, Weifu and Lin, Mingquan and Gibson, Eli and Bastian-Jordan, Matthew and Cool, Derek W. and Kassam, Zahra and Liang, Huageng and Feng, Guocan and Ward, Aaron D. and Chiu, Bernard}, doi = {10.1016/j.compbiomed.2018.03.017}, issn = {18790534}, journal = {Computers in Biology and Medicine}, keywords = {Laplacian regularized regression model,Multiparametric MRI (mpMRI),Multiple kernel learning,Prostate Imaging and Reporting Data System (PI-RAD,Prostate cancer}, pages = {252--265}, title = {{A self-tuned graph-based framework for localization and grading prostate cancer lesions: An initial evaluation based on multiparametric magnetic resonance imaging}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {96}, year = {2018} } @article{Chi2011, abstract = {A novel vessel context-based voting is proposed for automatic liver vasculature segmentation in CT images. 
It is able to conduct full vessel segmentation and recognition of multiple vasculatures effectively. The vessel context describes context information of a voxel related to vessel properties, such as intensity, saliency, direction, and connectivity. Voxels are grouped to liver vasculatures hierarchically based on vessel context. They are first grouped locally into vessel branches with the advantage of a vessel junction measurement and then grouped globally into vasculatures, which is implemented using a multiple feature point voting mechanism. The proposed method has been evaluated on ten clinical CT datasets. Segmentation of third-order vessel trees from CT images (0.76 × 0.76 × 2.0 mm) of the portal venous phase takes less than 3 min on a PC with 2.0 GHz dual core processor and the average segmentation accuracy is up to 98{\%}. {\textcopyright} 2011 IEEE.}, author = {Chi, Yanling and Liu, Jimin and Venkatesh, Sudhakar K. and Huang, Su and Zhou, Jiayin and Tian, Qi and Nowinski, Wieslaw L.}, doi = {10.1109/TBME.2010.2093523}, issn = {00189294}, journal = {IEEE Transactions on Biomedical Engineering}, keywords = {Liver vasculature segmentation,multiple feature point voting,vessel context,vessel junction measure}, number = {8}, pages = {2144--2153}, title = {{Segmentation of liver vasculature from contrast enhanced CT images using context-based voting}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {58}, year = {2011} } @article{Chincarini2011, abstract = {Background: Medial temporal lobe (MTL) atrophy is one of the key biomarkers to detect early neurodegenerative changes in the course of Alzheimer's disease (AD). There is active research aimed at identifying automated methodologies able to extract accurate classification indexes from T1-weighted magnetic resonance images (MRI). Such indexes should be fit for identifying AD patients as early as possible. Subjects: A reference group composed of 144 AD patients and 189 age-matched controls was used to train and test the procedure. It was then applied on a study group composed of 302 MCI subjects, 136 having progressed to clinically probable AD (MCI-converters) and 166 having remained stable or recovered to normal condition after a 24-month follow-up (MCI-non converters). All subjects came from the ADNI database. Methods: We sampled the brain with 7 relatively small volumes, mainly centered on the MTL, and 2 control regions. These volumes were filtered to give intensity and textural MRI-based features. Each filtered region was analyzed with a Random Forest (RF) classifier to extract relevant features, which were subsequently processed with a Support Vector Machine (SVM) classifier. Once a prediction model was trained and tested on the reference group, it was used to compute a classification index (CI) on the MCI cohort and to assess its accuracy in predicting AD conversion in MCI patients. The performance of the classification based on the features extracted by the whole 9 volumes is compared with that derived from each single volume. All experiments were performed using a bootstrap sampling estimation, and classifier performance was cross-validated with a 20-fold paradigm. Results: We identified a restricted set of image features correlated with the conversion to AD. It is shown that most information originates from a small subset of the total available features, and that it is enough to give a reliable assessment.
We found multiple, highly localized image-based features which alone are responsible for the overall clinical diagnosis and prognosis. The classification index is able to discriminate Controls from AD with an Area Under Curve (AUC) = 0.97 (sensitivity ≃ 89{\%} at specificity ≃ 94{\%}) and Controls from MCI-converters with an AUC = 0.92 (sensitivity ≃ 89{\%} at specificity ≃ 80{\%}). MCI-converters are separated from MCI-non converters with AUC = 0.74(sensitivity ≃ 72{\%} at specificity ≃ 65{\%}). Findings: The present automated MRI-based technique revealed a strong relationship between highly localized baseline-MRI features and the baseline clinical assessment. In addition, the classification index was also used to predict the probability of AD conversion within a time frame of two years. The definition of a single index combining local analysis of several regions can be useful to detect AD neurodegeneration in a typical MCI population. {\textcopyright} 2011 Elsevier Inc.}, author = {Chincarini, Andrea and Bosco, Paolo and Calvini, Piero and Gemme, Gianluca and Esposito, Mario and Olivieri, Chiara and Rei, Luca and Squarcia, Sandro and Rodriguez, Guido and Bellotti, Roberto and Cerello, Piergiorgio and {De Mitri}, Ivan and Retico, Alessandra and Nobili, Flavio}, doi = {10.1016/j.neuroimage.2011.05.083}, issn = {10538119}, journal = {NeuroImage}, keywords = {Alzheimer's disease,Hippocampus,Image analysis,MRI,Medial temporal lobe}, number = {2}, pages = {469--480}, title = {{Local MRI analysis approach in the diagnosis of early and prodromal Alzheimer's disease}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {58}, year = {2011} } @article{Choi2019, abstract = {Objective: A developmental venous anomaly (DVA) is a vascular malformation of ambiguous clinical significance. We aimed to quantify the susceptibility of draining veins ($\chi$vein) in DVA and determine its significance with respect to oxygen metabolism using quantitative susceptibility mapping (QSM). Materials and Methods: Brain magnetic resonance imaging of 27 consecutive patients with incidentally detected DVAs were retrospectively reviewed. Based on the presence of abnormal hyperintensity on T2-weighted images (T2WI) in the brain parenchyma adjacent to DVA, the patients were grouped into edema (E+, n = 9) and non-edema (E-, n = 18) groups. A 3T MR scanner was used to obtain fully flow-compensated gradient echo images for susceptibility-weighted imaging with source images used for QSM processing. The $\chi$vein was measured semi-automatically using QSM. The normalized $\chi$vein was also estimated. Clinical and MR measurements were compared between the E+ and E- groups using Student's t-test or Mann-Whitney U test. Correlations between the $\chi$vein and area of hyperintensity on T2WI and between $\chi$vein and diameter of the collecting veins were assessed. The correlation coefficient was also calculated using normalized veins. Results: The DVAs of the E+ group had significantly higher $\chi$vein (196.5 ± 27.9 vs. 167.7 ± 33.6, p = 0.036) and larger diameter of the draining veins (p = 0.006), and patients were older (p = 0.006) than those in the E- group. The $\chi$vein was also linearly correlated with the hyperintense area on T2WI (r = 0.633, 95{\%} confidence interval 0.333-0.817, p {\textless} 0.001). 
Conclusion: DVAs with abnormal hyperintensity on T2WI have higher susceptibility values for draining veins, indicating an increased oxygen extraction fraction that might be associated with venous congestion.}, author = {Choi, Yangsean and Jang, Jinhee and Nam, Yoonho and Shin, Na Young and Choi, Hyun Seok and Jung, So Lyung and Ahn, Kook Jin and Kim, Bum Soo}, doi = {10.3348/kjr.2018.0685}, issn = {12296929}, journal = {Korean Journal of Radiology}, keywords = {Developmental venous anomaly,Magnetic resonance imaging,Quantitative susceptibility mapping,Vascular malformation}, number = {4}, pages = {662--670}, title = {{Relationship between abnormal hyperintensity on T2-weighted images around developmental venous anomalies and magnetic susceptibility of their collecting veins: In-vivo quantitative susceptibility mapping study}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {20}, year = {2019} } @article{Ciardo2017, abstract = {Objectives Atlas-based automatic segmentation (ABAS) addresses the challenges of accuracy and reliability in manual segmentation. We aim to evaluate the contribution of specific-purpose in ABAS of breast cancer (BC) patients with respect to generic-purpose libraries. Materials and methods One generic-purpose and 9 specific-purpose libraries, stratified according to type of surgery and size of thorax circumference, were obtained from the computed tomography of 200 BC patients. Keywords about contralateral breast volume and presence of breast expander/prostheses were recorded. ABAS was validated on 47 independent patients, considering manual segmentation from scratch as reference. Five ABAS datasets were obtained, testing single-ABAS and multi-ABAS with simultaneous truth and performance level estimation (STAPLE). Center of mass distance (CMD), average Hausdorff distance (AHD) and Dice similarity coefficient (DSC) between corresponding ABAS and manual structures were evaluated and statistically significant differences between different surgeries, structures and ABAS strategies were investigated. Results Statistically significant differences between patients who underwent different surgery were found, with superior results for conservative-surgery group, and between different structures were observed: ABAS of heart, lungs, kidneys and liver was satisfactory (median values: CMD{\textless}2 mm, DSC≥0.80, AHD{\textless}1.5 mm), whereas chest wall, breast and spinal cord obtained moderate performance (median values: 2 mm ≤ CMD{\textless}5 mm, 0.60 ≤ DSC{\textless}0.80, 1.5 mm ≤ AHD{\textless}4 mm) and esophagus, stomach, brachial plexus and supraclavicular nodes obtained poor performance (median CMD≥5 mm, DSC{\textless}0.60, AHD≥4 mm). The application of STAPLE algorithm generally yields higher performance and the use of keywords improves results for breast ABAS. 
Conclusion The homogeneity in the selection of atlases based on multiple anatomical and clinical features and the use of specific-purpose libraries can improve ABAS performance with respect to generic-purpose libraries.}, author = {Ciardo, Delia and Gerardi, Marianna Alessandra and Vigorito, Sabrina and Morra, Anna and Dell'acqua, Veronica and Diaz, Federico Javier and Cattani, Federica and Zaffino, Paolo and Ricotti, Rosalinda and Spadea, Maria Francesca and Riboldi, Marco and Orecchia, Roberto and Baroni, Guido and Leonardi, Maria Cristina and Jereczek-Fossa, Barbara Alicja}, doi = {10.1016/j.breast.2016.12.010}, issn = {15323080}, journal = {Breast}, keywords = {Atlas-based segmentation,Automatic contouring,Breast cancer radiotherapy,STAPLE contours}, pages = {44--52}, title = {{Atlas-based segmentation in breast cancer radiotherapy: Evaluation of specific and generic-purpose atlases}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {32}, year = {2017} } @article{Constanzo2017, abstract = {With the improvement of external radiotherapy delivery accuracy, such as intensity-modulated and stereotactic body radiation therapy, radiation oncology has recently entered the era of precision medicine. Despite these precise irradiation modalities, lung cancers remain one of the most aggressive human cancers worldwide, possibly because of diverse genotypic alterations that drive and maintain lung tumorigenesis. It has been long recognized that imaging could aid in the diagnosis, tumor delineation, and monitoring of lung cancer. Moreover, accumulating evidence suggests that imaging information could be further used to tailor treatment type and intensity, as well as predict treatment outcomes in radiotherapy. However, these imaging tasks have been carried out either qualitatively or using simplistic metrics that do not take advantage of the full scale of imaging knowledge. Radiomics is a recent field of research that aims to provide a more quantitative representation of imaging information, relating tumor phenotypes to clinical and genotypic endpoints by embedding extracted image features into predictive mathematical models. These predictive models can be a key component in clinician decision making and treatment personalization. This review provides an overview of the radiomics application and its methodology for radiation oncology studies in lung cancer.}, author = {Constanzo, Julie and Wei, Lise and Tseng, Huan Hsin and {El Naqa}, Issam}, doi = {10.21037/tlcr.2017.09.07}, issn = {22264477}, journal = {Translational Lung Cancer Research}, keywords = {Biomarkers,Lung cancer,Quantitative imaging,Radiomics}, number = {6}, pages = {635--647}, title = {{Radiomics in precision medicine for lung cancer}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {6}, year = {2017} } @article{Conzelmann2011, abstract = {Cilia-based locomotion is the major form of locomotion for microscopic planktonic organisms in the ocean. Given their negative buoyancy, these organisms must control ciliary activity to maintain an appropriate depth. The neuronal bases of depth regulation in ciliary swimmers are unknown. To gain insights into depth regulation we studied ciliary locomotor control in the planktonic larva of the marine annelid, Platynereis. We found several neuropeptides expressed in distinct sensory neurons that innervate locomotor cilia. Neuropeptides altered ciliary beat frequency and the rate of calcium-evoked ciliary arrests.
These changes influenced larval orientation, vertical swimming, and sinking, resulting in upward or downward shifts in the steady-state vertical distribution of larvae. Our findings indicate that Platynereis larvae have depth-regulating peptidergic neurons that directly translate sensory inputs into locomotor output on effector cilia. We propose that the simple circuitry found in these ciliated larvae represents an ancestral state in nervous system evolution.}, author = {Conzelmann, Markus and Offenburger, Sarah Lena and Asadulina, Albina and Keller, Timea and M{\"{u}}nch, Thomas A. and J{\'{e}}kely, G{\'{a}}sp{\'{a}}r}, doi = {10.1073/pnas.1109085108}, issn = {00278424}, journal = {Proceedings of the National Academy of Sciences of the United States of America}, keywords = {FMRFamide-related peptides,Neural circuit,Sensory-motor neuron,Zooplankton}, number = {46}, pages = {E1174--E1183}, title = {{Neuropeptides regulate swimming depth of Platynereis larvae}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {108}, year = {2011} } @article{Cordova2016, abstract = {The diagnosis, prognosis, and management of patients with gliomas are largely dictated by the pathological analysis of tissue biopsied from a selected region within the lesion. However, due to the heterogeneous and infiltrative nature of gliomas, identifying the optimal region for biopsy with conventional magnetic resonance imaging (MRI) can be quite difficult. This is especially true for low grade gliomas, which often are non-enhancing tumors. To improve the management of patients with these tumors, the field of neuro-oncology requires an imaging modality that can specifically identify a tumor's most anaplastic/aggressive region(s) for biopsy targeting. The addition of metabolic mapping using spectroscopic MRI (sMRI) to supplement conventional MRI could improve biopsy targeting and, ultimately, diagnostic accuracy. Here, we describe a pipeline for the integration of state-of-the-art, high-resolution whole-brain 3D sMRI maps into a stereotactic neuronavigation system for guiding biopsies in gliomas with nonenhancing components. We also outline a machine-learning method for automated histology analysis that generates normalized, quantitative metrics describing tumor infiltration in immunohistochemically-stained tissue specimens. As a proof of concept, we describe the combination of these two techniques in a small cohort of grade III glioma patients. In this work, we aim to set forth a systematic pipeline to stimulate histopathology-image validation of advanced MRI techniques, such as sMRI.}, author = {Cordova, J S and Gurbani, S S and Olson, J J and Liang, Z X and Cooper, L A D and Shu, H K G and Schreibmann, E and Neill, S G and Hadjipanayis, C G and Holder, C A and Shim, H}, doi = {10.18383/j.tom.2016.00136}, issn = {2379-1381}, journal = {Tomography}, number = {2}, pages = {106--116}, title = {{A Systematic Pipeline for the Objective Comparison of Whole-Brain Spectroscopic MRI with Histology in Biopsy Specimens from Grade 3 Glioma}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {2}, year = {2016} } @article{Cresson2019, abstract = {Deep learning (DL) techniques are becoming increasingly important to solve a number of image processing tasks. Among common algorithms, convolutional neural network- and recurrent neural network-based systems achieve state-of-the-art results on satellite and aerial imagery in many applications. 
While these approaches are subject to scientific interest, there is currently no operational and generic implementation available at the user level for the remote sensing (RS) community. In this letter, we present a framework enabling the use of DL techniques with RS images and geospatial data. Our solution takes roots in two extensively used open-source libraries, the RS image processing library Orfeo ToolBox and the high-performance numerical computation library TensorFlow. It can apply deep nets without restriction on image size and is computationally efficient, regardless of hardware configuration.}, archivePrefix = {arXiv}, arxivId = {1807.06535}, author = {Cresson, Remi}, doi = {10.1109/LGRS.2018.2867949}, eprint = {1807.06535}, issn = {15580571}, journal = {IEEE Geoscience and Remote Sensing Letters}, keywords = {Aerial images,Orfeo Toolbox (OTB),TensorFlow (TF),deep learning (DL),neural networks,remote sensing (RS)}, number = {1}, pages = {25--29}, title = {{A framework for remote sensing images processing using deep learning techniques}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {16}, year = {2019} } @article{Cresson2016, abstract = {The amount of remote sensing (RS) data available for applications is constantly growing due to the rise of very high resolution sensors and short-repeat-cycle satellites. Consequently, tackling the computational complexity in Earth observation information extraction is rising as a major challenge. Resorting to high-performance computing (HPC) is becoming a common practice, since this provides environments and programming facilities that are able to speed up processes. In particular, clusters are flexible cost-effective systems that are able to perform data-intensive tasks ideally fulfilling any computational requirement. However, their use typically implies a significant coding effort to build proper implementations of specific processing pipelines. This letter presents a generic framework for the development of RS images processing applications targeting cluster computing. It is based on common open-source libraries and leverages the parallelization of a wide variety of image processing pipelines in a transparent way. Performances on typical RS tasks implemented using the proposed framework demonstrate a great potential for the effective and timely processing of large amount of data.}, archivePrefix = {arXiv}, arxivId = {1609.08893}, author = {Cresson, Remi and Hautreux, Gabriel}, doi = {10.1109/LGRS.2016.2605138}, eprint = {1609.08893}, issn = {1545598X}, journal = {IEEE Geoscience and Remote Sensing Letters}, keywords = {Clusters,Message Passing Interface (MPI),Orfeo ToolBox (OTB),high-performance computing (HPC),parallel computing,remote sensing (RS) image processing}, number = {11}, pages = {1706--1710}, title = {{A generic framework for the development of geospatial processing pipelines on clusters}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84988653888{\&}doi=10.1109{\%}2FLGRS.2016.2605138{\&}partnerID=40{\&}md5=c94a89e4590d599fb89db287d43a4c88}, volume = {13}, year = {2016} } @article{2005, abstract = {We describe a new algorithm for non-rigid registration capable of estimating a constrained dense displacement field from multi-modal image data. 
We applied this algorithm to capture non-rigid deformation between digital images of histological slides and digital flat-bed scanned images of cryotomed sections of the larynx, and carried out validation experiments to measure the effectiveness of the algorithm. The implementation was carried out by extending the open-source Insight ToolKit software. In diagnostic imaging of cancer of the larynx, imaging modalities sensitive to both anatomy (such as MRI and CT) and function (PET) are valuable. However, these modalities differ in their capability to discriminate the margins of tumor. Gold standard tumor margins can be obtained from histological images from cryotomed sections of the larynx. Unfortunately, the process of freezing, fixation, cryotoming and staining the tissue to create histological images introduces non-rigid deformations and significant contrast changes. We demonstrate that the non-rigid registration algorithm we present is able to capture these deformations and the algorithm allows us to align histological images with scanned images of the larynx. Our non-rigid registration algorithm constructs a deformation field to warp one image onto another. The algorithm measures image similarity using a mutual information similarity criterion, and avoids spurious deformations due to noise by constraining the estimated deformation field with a linear elastic regularization term. The finite element method is used to represent the deformation field, and our implementation enables us to assign inhomogeneous material characteristics so that hard regions resist internal deformation whereas soft regions are more pliant. A gradient descent optimization strategy is used and this has enabled rapid and accurate convergence to the desired estimate of the deformation field. A further acceleration in speed without cost of accuracy is achieved by using an adaptive mesh refinement strategy. {\textcopyright} 2005 Elsevier B.V. All rights reserved.}, author = {du Bois d'Aische, Aloys and Craene, Mathieu De and Geets, Xavier and Gregoire, Vincent and Macq, Benoit and Warfield, Simon K.}, doi = {10.1016/j.media.2005.04.003}, issn = {13618415}, journal = {Medical Image Analysis}, keywords = {Elastic regularization,ITK,Laryngectomy,Mutual information,Non-rigid registration}, number = {6}, pages = {538--546}, title = {{Efficient multi-modal dense field non-rigid registration: Alignment of histological and section images}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {9}, year = {2005} } @article{Danelakis2018, abstract = {Multiple sclerosis (MS) is a chronic disease. It affects the central nervous system and its clinical manifestation can variate. Magnetic Resonance Imaging (MRI) is often used to detect, characterize and quantify MS lesions in the brain, due to the detailed structural information that it can provide. Manual detection and measurement of MS lesions in MRI data is time-consuming, subjective and prone to errors. Therefore, multiple automated methodologies for MRI-based MS lesion segmentation have been proposed. Here, a review of the state-of-the-art of automatic methods available in the literature is presented. The current survey provides a categorization of the methodologies in existence in terms of their input data handling, their main strategy of segmentation and their type of supervision. The strengths and weaknesses of each category are analyzed and explicitly discussed.
The positive and negative aspects of the methods are highlighted, pointing out the future trends and, thus, leading to possible promising directions for future research. In addition, a further clustering of the methods, based on the databases used for their evaluation, is provided. The aforementioned clustering achieves a reliable comparison among methods evaluated on the same databases. Despite the large number of methods that have emerged in the field, there is as yet no commonly accepted methodology that has been established in clinical practice. Future challenges such as the simultaneous exploitation of more sophisticated MRI protocols and the hybridization of the most promising methods are expected to further improve the performance of the segmentation.}, author = {Danelakis, Antonios and Theoharis, Theoharis and Verganelakis, Dimitrios A.}, doi = {10.1016/j.compmedimag.2018.10.002}, issn = {18790771}, journal = {Computerized Medical Imaging and Graphics}, keywords = {Automated segmentation,Brain MRI,Multiple sclerosis,Survey}, pages = {83--100}, title = {{Survey of automated multiple sclerosis lesion segmentation techniques on magnetic resonance imaging}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {70}, year = {2018} } @inproceedings{Danilov, abstract = {The work addresses segmentation techniques for generation of individualized computational domains on the basis of medical imaging dataset. The computational domains will be used in 3D electrophysiology models and 3D-1D coupled hemodynamics models. Several techniques for user-guided and automated segmentation of soft tissues, segmentation of vascular and tubular structures, generation of centerlines, 1D network reconstruction, correction and local adaptation are examined. We propose two algorithms for automatic vascular network segmentation and user-guided cardiac segmentation.}, author = {Danilov, Alexander A. and Pryamonosov, Roman A. and Yurova, Alexandra S.}, booktitle = {ECCOMAS Congress 2016 - Proceedings of the 7th European Congress on Computational Methods in Applied Sciences and Engineering}, doi = {10.7712/100016.1827.10770}, isbn = {9786188284401}, keywords = {Electrophysiology,Hemodynamics,Image segmentation,Medical images,Mesh generation}, pages = {454--461}, title = {{Image segmentation techniques for biomedical modeling: Electrophysiology and hemodynamics}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84995513594{\&}doi=10.7712{\%}2F100016.1827.10770{\&}partnerID=40{\&}md5=3954727a98c6ce96faf2f9cd25fba8f7}, volume = {1}, year = {2016} } @article{Datta2018, abstract = {An increasing demand for directed assembly of biomaterials has inspired the development of bioprinting, which facilitates the assembling of both cellular and acellular inks into well-arranged three-dimensional (3D) structures for tissue fabrication. Although great advances have been achieved in the recent decade, there still exist issues to be addressed. Herein, a review has been systematically performed to discuss the considerations in the entire procedure of bioprinting. Though bioprinting is advancing at a rapid pace, it is seen that the whole process of obtaining tissue constructs from this technique involves multiple-stages, cutting across various technology domains. These stages can be divided into three broad categories: pre-bioprinting, bioprinting and post-bioprinting. Each stage can influence others and has a bearing on the performance of fabricated constructs. 
For example, in pre-bioprinting, tissue biopsy and cell expansion techniques are essential to ensure a large number of cells are available for mass organ production. Similarly, medical imaging is needed to provide high resolution designs, which can be faithfully bioprinted. In the bioprinting stage, compatibility of biomaterials is needed to be matched with solidification kinetics to ensure constructs with high cell viability and fidelity are obtained. On the other hand, there is a need to develop bioprinters, which have high degrees of freedom of movement, perform without failure concerns for several hours and are compact, and affordable. Finally, maturation of bioprinted cells are governed by conditions provided during the post-bioprinting process. This review, for the first time, puts all the bioprinting stages in perspective of the whole process of bioprinting, and analyzes their current state-of-the art. It is concluded that bioprinting community will recognize the relative importance and optimize the parameter of each stage to obtain the desired outcomes.}, author = {Datta, Pallab and Barui, Ananya and Wu, Yang and Ozbolat, Veli and Moncal, Kazim K. and Ozbolat, Ibrahim T.}, doi = {10.1016/j.biotechadv.2018.06.003}, issn = {07349750}, journal = {Biotechnology Advances}, keywords = {Biofabrication,Bioink,Bioprinter,Bioprinting,Droplet-based bioprinting,Extrusion-based bioprinting,Laser-based bioprinting}, number = {5}, pages = {1481--1504}, pmid = {29909085}, title = {{Essential steps in bioprinting: From pre- to post-bioprinting}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {36}, year = {2018} } @article{DeBellis2015, abstract = {Magnetic resonance imaging studies of maltreated children with posttraumatic stress disorder (PTSD) suggest that maltreatment-related PTSD is associated with adverse brain development. Maltreated youth resilient to chronic PTSD were not previously investigated and may elucidate neuromechanisms of the stress diathesis that leads to resilience to chronic PTSD. In this cross-sectional study, anatomical volumetric and corpus callosum diffusion tensor imaging measures were examined using magnetic resonance imaging in maltreated youth with chronic PTSD (N = 38), without PTSD (N = 35), and nonmaltreated participants (n = 59). Groups were sociodemographically similar. Participants underwent assessments for strict inclusion/exclusion criteria and psychopathology. Maltreated youth with PTSD were psychobiologically different from maltreated youth without PTSD and nonmaltreated controls. Maltreated youth with PTSD had smaller posterior cerebral and cerebellar gray matter volumes than did maltreated youth without PTSD and nonmaltreated participants. Cerebral and cerebellar gray matter volumes inversely correlated with PTSD symptoms. Posterior corpus callosum microstructure in pediatric maltreatment-related PTSD differed compared to maltreated youth without PTSD and controls. The group differences remained significant when controlling for psychopathology, numbers of Axis I disorders, and trauma load. Alterations of these posterior brain structures may result from a shared trauma-related mechanism or an inherent vulnerability that mediates the pathway from chronic PTSD to comorbidity.}, author = {{De Bellis}, Michael D. and Hooper, Stephen R. and Chen, Steven D. and Provenzale, James M. and Boyd, Brian D. and Glessner, Christopher E. and Macfall, James R. and Payne, Martha E. 
and Rybczynski, Robert and Woolley, Donald P.}, doi = {10.1017/S0954579415000942}, issn = {14692198}, journal = {Development and Psychopathology}, number = {4}, pages = {1555--1576}, title = {{Posterior structural brain volumes differ in maltreated youth with and without chronic posttraumatic stress disorder}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {27}, year = {2015} } @article{DeLeener2015, abstract = {Quantifying spinal cord (SC) atrophy in neurodegenerative and traumatic diseases brings important diagnosis and prognosis information for the clinician. We recently developed the PropSeg method, which allows for fast, accurate and automatic segmentation of the SC on different types of MRI contrast (e.g., T1-, T2- and T2*-weighted sequences) and any field of view. However, comparing measurements from the SC between subjects is hindered by the lack of a generic coordinate system for the SC. In this paper, we present a new framework combining PropSeg and a vertebral level identification method, thereby enabling direct inter- and intra-subject comparison of SC measurements for large cohort studies as well as for longitudinal studies. Our segmentation method is based on the multi-resolution propagation of tubular deformable models. Coupled with an automatic intervertebral disk identification method, our segmentation pipeline provides quantitative metrics of the SC and spinal canal such as cross-sectional areas and volumes in a generic coordinate system based on vertebral levels. This framework was validated on 17 healthy subjects and on one patient with SC injury against manual segmentation. Results have been compared with an existing active surface method and show high local and global accuracy for both SC and spinal canal (Dice coefficients = 0.91 ± 0.02) segmentation. Having a robust and automatic framework for SC segmentation and vertebral-based normalization opens the door to bias-free measurement of SC atrophy in large cohorts.}, author = {{De Leener}, Benjamin and Cohen-Adad, Julien and Kadoury, Samuel}, doi = {10.1109/TMI.2015.2437192}, issn = {1558254X}, journal = {IEEE Transactions on Medical Imaging}, keywords = {Automatic segmentation,CSF,MRI,Vertebral labeling,deformable model,spinal canal,spinal cord}, number = {8}, pages = {1705--1718}, title = {{Automatic Segmentation of the Spinal Cord and Spinal Canal Coupled with Vertebral Labeling}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {34}, year = {2015} } @article{DeLeener2014, abstract = {Spinal cord segmentation provides measures of atrophy and facilitates group analysis via inter-subject correspondence. Automatizing this procedure enables studies with large throughput and minimizes user bias. Although several automatic segmentation methods exist, they are often restricted in terms of image contrast and field-of-view. This paper presents a new automatic segmentation method (PropSeg) optimized for robustness, accuracy and speed. The algorithm is based on the propagation of a deformable model and is divided into three parts: firstly, an initialization step detects the spinal cord position and orientation using a circular Hough transform on multiple axial slices rostral and caudal to the starting plane and builds an initial elliptical tubular mesh. Secondly, a low-resolution deformable model is propagated along the spinal cord.
To deal with highly variable contrast levels between the spinal cord and the cerebrospinal fluid, the deformation is coupled with a local contrast-to-noise adaptation at each iteration. Thirdly, a refinement process and a global deformation are applied on the propagated mesh to provide an accurate segmentation of the spinal cord. Validation was performed in 15 healthy subjects and two patients with spinal cord injury, using T1- and T2-weighted images of the entire spinal cord and on multiecho T2*-weighted images. Our method was compared against manual segmentation and against an active surface method. Results show high precision for all the MR sequences. Dice coefficients were 0.9 for the T1- and T2-weighted cohorts and 0.86 for the T2*-weighted images. The proposed method runs in less than 1min on a normal computer and can be used to quantify morphological features such as cross-sectional area along the whole spinal cord. {\textcopyright} 2014 Elsevier Inc.}, author = {{De Leener}, Benjamin and Kadoury, Samuel and Cohen-Adad, Julien}, doi = {10.1016/j.neuroimage.2014.04.051}, issn = {10959572}, journal = {NeuroImage}, keywords = {Automatic,Deformable model,MRI,Propagation,Spinal cord segmentation}, pages = {528--536}, title = {{Robust, accurate and fast automatic segmentation of the spinal cord}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {98}, year = {2014} } @article{DeLeener2017, abstract = {For the past 25 years, the field of neuroimaging has witnessed the development of several software packages for processing multi-parametric magnetic resonance imaging (mpMRI) to study the brain. These software packages are now routinely used by researchers and clinicians, and have contributed to important breakthroughs for the understanding of brain anatomy and function. However, no software package exists to process mpMRI data of the spinal cord. Despite the numerous clinical needs for such advanced mpMRI protocols (multiple sclerosis, spinal cord injury, cervical spondylotic myelopathy, etc.), researchers have been developing specific tools that, while necessary, do not provide an integrative framework that is compatible with most usages and that is capable of reaching the community at large. This hinders cross-validation and the possibility to perform multi-center studies. In this study we introduce the Spinal Cord Toolbox (SCT), a comprehensive software dedicated to the processing of spinal cord MRI data. SCT builds on previously-validated methods and includes state-of-the-art MRI templates and atlases of the spinal cord, algorithms to segment and register new data to the templates, and motion correction methods for diffusion and functional time series. SCT is tailored towards standardization and automation of the processing pipeline, versatility, modularity, and it follows guidelines of software development and distribution. Preliminary applications of SCT cover a variety of studies, from cross-sectional area measures in large databases of patients, to the precise quantification of mpMRI metrics in specific spinal pathways. We anticipate that SCT will bring together the spinal cord neuroimaging community by establishing standard templates and analysis procedures.}, author = {{De Leener}, Benjamin and L{\'{e}}vy, Simon and Dupont, Sara M. and Fonov, Vladimir S. and Stikov, Nikola and {Louis Collins}, D. 
and Callot, Virginie and Cohen-Adad, Julien}, doi = {10.1016/j.neuroimage.2016.10.009}, issn = {10959572}, journal = {NeuroImage}, keywords = {Atlas,MRI,Open-source,Software,Spinal cord,Template}, pages = {24--43}, title = {{SCT: Spinal Cord Toolbox, an open-source software for processing spinal cord MRI data}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {145}, year = {2017} } @article{Reuille2015, abstract = {Morphogenesis emerges from complex multiscale interactions between genetic and mechanical processes. To understand these processes, the evolution of cell shape, proliferation and gene expression must be quantified. This quantification is usually performed either in full 3D, which is computationally expensive and technically challenging, or on 2D planar projections, which introduces geometrical artifacts on highly curved organs. Here we present MorphoGraphX (www.MorphoGraphX.org), a software that bridges this gap by working directly with curved surface images extracted from 3D data. In addition to traditional 3D image analysis, we have developed algorithms to operate on curved surfaces, such as cell segmentation, lineage tracking and fluorescence signal quantification. The software's modular design makes it easy to include existing libraries, or to implement new algorithms. Cell geometries extracted with MorphoGraphX can be exported and used as templates for simulation models, providing a powerful platform to investigate the interactions between shape, genes and growth.}, author = {de Reuille, Pierre Barbier and Routier-Kierzkowska, Anne Lise and Kierzkowski, Daniel and Bassel, George W. and Sch{\"{u}}pbach, Thierry and Tauriello, Gerardo and Bajpai, Namrata and Strauss, S{\"{o}}ren and Weber, Alain and Kiss, Annamaria and Burian, Agata and Hofhuis, Hugo and Sapala, Aleksandra and Lipowczan, Marcin and Heimlicher, Maria B. and Robinson, Sarah and Bayer, Emmanuelle M. and Basler, Konrad and Koumoutsakos, Petros and Roeder, Adrienne H.K. and Aegerter-Wilmsen, Tinri and Nakayama, Naomi and Tsiantis, Miltos and Hay, Angela and Kwiatkowska, Dorota and Xenarios, Ioannis and Kuhlemeier, Cris and Smith, Richard S.}, doi = {10.7554/eLife.05864}, issn = {2050084X}, journal = {eLife}, number = {MAY}, pages = {1--20}, pmid = {25946108}, title = {{MorphoGraphX: A platform for quantifying morphogenesis in 4D}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {4}, year = {2015} } @article{DeSchryver2018, abstract = {This work presents a framework to exploit the synergy between Digital Volume Correlation (DVC) and iterative CT reconstruction to enhance the quality of high-resolution dynamic X-ray CT (4D-$\mu$CT) and obtain quantitative results from the acquired dataset in the form of 3D strain maps which can be directly correlated to the material properties. Furthermore, we show that the developed framework is capable of strongly reducing motion artifacts even in a dataset containing a single 360° rotation.}, author = {{De Schryver}, Thomas and Dierick, Manuel and Heyndrickx, Marjolein and {Van Stappen}, Jeroen and Boone, Marijn A. 
and {Van Hoorebeke}, Luc and Boone, Matthieu N.}, doi = {10.1038/s41598-018-25916-5}, issn = {20452322}, journal = {Scientific Reports}, number = {1}, pages = {10}, title = {{Motion compensated micro-CT reconstruction for in-situ analysis of dynamic processes}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {8}, year = {2018} } @article{Fresno2009, abstract = {Image segmentation of 3D medical images is a challenging problem with several still not totally solved practical issues, such as noise interference, variable object structures and image artifacts. This paper describes a hybrid 3D image segmentation method which combines region growing and deformable models to obtain accurate and topologically preserving surface structures of anatomical objects of interest. The proposed strategy starts by determining a rough but robust approximation of the objects using a region-growing algorithm. Then, the closed surface mesh that encloses the region is constructed and used as the initial geometry of a deformable model for the final refinement. This integrated strategy provides an alternative solution to one of the flaws of traditional deformable models, achieving good refinements of internal surfaces in few steps. Experimental segmentation results of complex anatomical structures on both simulated and real data from MRI scans are presented, and the method is assessed by comparing with standard reference segmentations of head MRI. The evaluation was mainly based on the average overlap measure, which was tested on the segmentation of white matter, corresponding to a simulated brain data set, showing excellent performance exceeding 90{\%} accuracy. In addition, the algorithm was applied to the detection of anatomical head structures on two real MRI and one CT data set. The final reconstructions resulting from the deformable models produce high quality meshes suitable for 3D visualization and further numerical analysis. The obtained results show that the approach achieves high quality segmentations with low computational complexity. {\textcopyright} 2009 Elsevier Ltd. All rights reserved.}, author = {del Fresno, M. and V{\'{e}}nere, M. and Clausse, A.}, doi = {10.1016/j.compmedimag.2009.03.002}, issn = {08956111}, journal = {Computerized Medical Imaging and Graphics}, keywords = {Deformable surface models,Hybrid methods,Image segmentation,MRI,Region growing}, number = {5}, pages = {369--376}, title = {{A combined region growing and deformable model method for extraction of closed surfaces in 3D CT and MRI scans}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-67349195754{\&}doi=10.1016{\%}2Fj.compmedimag.2009.03.002{\&}partnerID=40{\&}md5=93b30aa2fbeb65fa285de2c2c00f07fc}, volume = {33}, year = {2009} } @article{Deng2016, abstract = {Purpose: Segmentation of brain magnetic resonance (MR) images into white matter (WM), gray matter (GM), and cerebrospinal fluid (CSF) is crucial for brain structural measurement and disease diagnosis. Learning-based segmentation methods depend largely on the availability of good training ground truth. However, the commonly used 3T MR images are of insufficient image quality and often exhibit poor intensity contrast between WM, GM, and CSF. Therefore, they are not ideal for providing good ground truth label data for training learning-based methods. Recent advances in ultrahigh field 7T imaging make it possible to acquire images with excellent intensity contrast and signal-to-noise ratio. 
Methods: In this paper, the authors propose an algorithm based on random forest for segmenting 3T MR images by training a series of classifiers based on reliable labels obtained semiautomatically from 7T MR images. The proposed algorithm iteratively refines the probability maps of WM, GM, and CSF via a cascade of random forest classifiers for improved tissue segmentation. Results: The proposed method was validated on two datasets, i.e., 10 subjects collected at their institution and 797 3T MR images from the Alzheimer's Disease Neuroimaging Initiative (ADNI) dataset. Specifically, for the mean Dice ratio of all 10 subjects, the proposed method achieved 94.52{\%} ± 0.9{\%}, 89.49{\%} ± 1.83{\%}, and 79.97{\%} ± 4.32{\%} for WM, GM, and CSF, respectively, which are significantly better than the state-of-the-art methods (p-values {\textless} 0.021). For the ADNI dataset, the group difference comparisons indicate that the proposed algorithm outperforms state-of-the-art segmentation methods. Conclusions: The authors have developed and validated a novel fully automated method for 3T brain MR image segmentation.}, author = {Deng, Minghui and Yu, Renping and Wang, Li and Shi, Feng and Yap, Pew Thian and Shen, Dinggang}, doi = {10.1118/1.4967487}, issn = {00942405}, journal = {Medical Physics}, number = {12}, pages = {6588--6597}, title = {{Learning-based 3T brain MRI segmentation with guidance from 7T MRI labeling}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {43}, year = {2016} } @article{Dietrich2014, abstract = {Composite sandwich structures with honeycomb cores show varying properties in geometry and mechanical behavior depending on the studied scale. Herein a new test and evaluation method for sub-surface core damage in the indentation area of honeycomb sandwich structures using computed tomography is presented. The combination of X-ray micro-computed tomography (X-$\mu$CT) and an image analysis procedure adjusted to the detection of core deformation mechanisms allows the extraction and quantification of externally invisible, sub-surface damage in the sandwich composite. For this specific contact or indentation loading case on the sandwich face sheet an in-situ device is introduced, enabling a 3D analysis of the structural change during progressing indentation depth.}, author = {Dietrich, S. and Koch, M. and Elsner, P. and Weidenmann, K.}, doi = {10.1007/s11340-014-9902-2}, issn = {17412765}, journal = {Experimental Mechanics}, keywords = {Contact modelling,GFRP honeycomb sandwich,In-situ loading,Indentation,Micro-computed tomography}, number = {8}, pages = {1385--1393}, title = {{Measurement of Sub-Surface Core Damage in Sandwich Structures Using In-situ Hertzian Indentation During X-ray Computed Tomography}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {54}, year = {2014} } @article{Dolly2018, abstract = {It is widely known that the optimization of imaging systems based on objective, task-based measures of image quality via computer-simulation requires the use of a stochastic object model (SOM). However, the development of computationally tractable SOMs that can accurately model the statistical variations in human anatomy within a specified ensemble of patients remains a challenging task. 
Previously reported numerical anatomic models lack the ability to accurately model inter-patient and inter-organ variations in human anatomy among a broad patient population, mainly because they are established on image data corresponding to a few of patients and individual anatomic organs. This may introduce phantom-specific bias into computer-simulation studies, where the study result is heavily dependent on which phantom is used. In certain applications, however, databases of high-quality volumetric images and organ contours are available that can facilitate this SOM development. In this work, a novel and tractable methodology for learning a SOM and generating numerical phantoms from a set of volumetric training images is developed. The proposed methodology learns geometric attribute distributions (GAD) of human anatomic organs from a broad patient population, which characterize both centroid relationships between neighboring organs and anatomic shape similarity of individual organs among patients. By randomly sampling the learned centroid and shape GADs with the constraints of the respective principal attribute variations learned from the training data, an ensemble of stochastic objects can be created. The randomness in organ shape and position reflects the learned variability of human anatomy. To demonstrate the methodology, a SOM of an adult male pelvis is computed and examples of corresponding numerical phantoms are created.}, author = {Dolly, Steven R. and Lou, Yang and Anastasio, Mark A. and Li, Hua}, doi = {10.1088/1361-6560/aab000}, issn = {13616560}, journal = {Physics in Medicine and Biology}, number = {6}, pages = {18}, title = {{Learning-based stochastic object models for characterizing anatomical variations}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {63}, year = {2018} } @article{Dolly2019, abstract = {In the majority of current radiation therapy (RT) applications, image quality is still assessed subjectively or by utilizing physical measures. A novel theory that applies objective task-based image quality assessment in radiation therapy (IQA-in-RT) was recently proposed, in which the area under the therapeutic operating characteristic curve (AUTOC) was employed as the figure-of-merit (FOM) for evaluating RT effectiveness. Although theoretically more appealing than conventional subjective or physical measures, a comprehensive implementation and evaluation of this novel task-based IQA-in-RT theory is required for its further application in improving clinical RT. In this work, a practical and modular IQA-in-RT framework is presented for implementing this theory for the assessment of imaging components on the basis of RT treatment outcomes. Computer-simulation studies are conducted to demonstrate the feasibility and utility of the proposed IQA-in-RT framework in optimizing x-ray computed tomography (CT) pre-treatment imaging, including the optimization of CT imaging dose and image reconstruction parameters. The potential advantages of optimizing imaging components in the RT workflow by use of the AUTOC as the FOM are also compared against those of other physical measures. The results demonstrate that optimization using the AUTOC leads to selecting different parameters from those indicated by physical measures, potentially improving RT performance. The sources of systemic randomness and bias that affect the determination of the AUTOC are also analyzed. 
The presented work provides a practical solution for the further investigation and analysis of the task-based IQA-in-RT theory and advances its applications in improving RT clinical practice and cancer patient care.}, author = {Dolly, Steven R. and Lou, Yang and Anastasio, Mark A. and Li, Hua}, doi = {10.1088/1361-6560/ab2dc5}, issn = {13616560}, journal = {Physics in Medicine and Biology}, keywords = {geometric attribute distribution model,learning-based stochastic object models,radiation therapy,task-based image quality assessment,therapeutic operating characteristic curve}, number = {14}, pages = {19}, title = {{Task-based image quality assessment in radiation therapy: Initial characterization and demonstration with computer-simulation study}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {64}, year = {2019} } @article{Dolz2016, abstract = {Purpose: Accurate delineation of organs at risk (OARs) on computed tomography (CT) image is required for radiation treatment planning (RTP). Manual delineation of OARs being time consuming and prone to high interobserver variability, many (semi-) automatic methods have been proposed. However, most of them are specific to a particular OAR. Here, an interactive computer-assisted system able to segment various OARs required for thoracic radiation therapy is introduced. Methods: Segmentation information (foreground and background seeds) is interactively added by the user in any of the three main orthogonal views of the CT volume and is subsequently propagated within the whole volume. The proposed method is based on the combination of watershed transformation and graph-cuts algorithm, which is used as a powerful optimization technique to minimize the energy function. The OARs considered for thoracic radiation therapy are the lungs, spinal cord, trachea, proximal bronchus tree, heart, and esophagus. The method was evaluated on multivendor CT datasets of 30 patients. Two radiation oncologists participated in the study and manual delineations from the original RTP were used as ground truth for evaluation. Results: Delineation of the OARs obtained with the minimally interactive approach was approved to be usable for RTP in nearly 90{\%} of the cases, excluding the esophagus, which segmentation was mostly rejected, thus leading to a gain of time ranging from 50{\%} to 80{\%} in RTP. Considering exclusively accepted cases, overall OARs, a Dice similarity coefficient higher than 0.7 and a Hausdorff distance below 10 mm with respect to the ground truth were achieved. In addition, the interobserver analysis did not highlight any statistically significant difference, at the exception of the segmentation of the heart, in terms of Hausdorff distance and volume difference. Conclusions: An interactive, accurate, fast, and easy-to-use computer-assisted system able to segment various OARs required for thoracic radiation therapy has been presented and clinically evaluated. The introduction of the proposed system in clinical routine may offer valuable new option to radiation oncologists in performing RTP.}, author = {Dolz, J. and Kirişli, H. A. and Fechter, T. and Karnitzki, S. and Oehlke, O. and Nestle, U. and Vermandel, M. 
and Massoptier, L.}, doi = {10.1118/1.4947484}, issn = {00942405}, journal = {Medical Physics}, keywords = {autocontouring,lung cancer,organs at risk segmentation,radiotherapy,thoracic oncology,treatment planning}, number = {5}, pages = {2569--2580}, title = {{Interactive contour delineation of organs at risk in radiotherapy: Clinical evaluation on NSCLC patients}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {43}, year = {2016} } @article{Dominguez2017, abstract = {Background and Objective. The effective processing of biomedical images usually requires the interoperability of diverse software tools that have different aims but are complementary. The goal of this work is to develop a bridge to connect two of those tools: ImageJ, a program for image analysis in life sciences, and OpenCV, a computer vision and machine learning library. Methods. Based on a thorough analysis of ImageJ and OpenCV, we detected the features of these systems that could be enhanced, and developed a library to combine both tools, taking advantage of the strengths of each system. The library was implemented on top of the SciJava converter framework. We also provide a methodology to use this library. Results. We have developed the publicly available library IJ-OpenCV that can be employed to create applications combining features from both ImageJ and OpenCV. From the perspective of ImageJ developers, they can use IJ-OpenCV to easily create plugins that use any functionality provided by the OpenCV library and explore different alternatives. From the perspective of OpenCV developers, this library provides a link to the ImageJ graphical user interface and all its features to handle regions of interest. Conclusions. The IJ-OpenCV library bridges the gap between ImageJ and OpenCV, allowing the connection and the cooperation of these two systems.}, author = {Dom{\'{i}}nguez, C{\'{e}}sar and Heras, J{\'{o}}nathan and Pascual, Vico}, doi = {10.1016/j.compbiomed.2017.03.027}, issn = {18790534}, journal = {Computers in Biology and Medicine}, keywords = {Biomedicine,Computer vision,Image processing,ImageJ,Interoperability,Machine learning,OpenCV}, pages = {189--194}, title = {{IJ-OpenCV: Combining ImageJ and OpenCV for processing images in biomedicine}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {84}, year = {2017} } @article{BoisdAische2007, abstract = {We present a new non-rigid registration algorithm estimating the displacement field generated by articulated bodies. Indeed the bony structures between different patient images may rigidly move while other tissues may deform in a more complex way. Our algorithm tracks the displacement induced in the column by a movement of the patient between two acquisitions. The volumetric deformation field in the whole body is then inferred from those displacements using a linear elastic biomechanical finite element model. We demonstrate in this paper that this method provides accurate results on 3D sets of computed tomography (CT), MR and positron emission tomography (PET) images and that the results of the registration algorithm show significant decreases in the mean, min and max errors. {\textcopyright} 2007 Elsevier Ltd. 
All rights reserved.}, author = {du Bois d'Aische, Aloys and {De Craene}, Mathieu and Geets, Xavier and Gr{\'{e}}goire, Vincent and Macq, Benoit and Warfield, Simon K.}, doi = {10.1016/j.bspc.2007.03.002}, issn = {17468094}, journal = {Biomedical Signal Processing and Control}, keywords = {Articulation,Registration,Vertebrae}, number = {1}, pages = {16--24}, title = {{Estimation of the deformations induced by articulated bodies: Registration of the spinal column}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-34248594102{\&}doi=10.1016{\%}2Fj.bspc.2007.03.002{\&}partnerID=40{\&}md5=845c913387e22bc8438d1efee07c729f}, volume = {2}, year = {2007} } @article{Dziubek2016, abstract = {A computational model for retinal hemodynamics accounting for ocular curvature is presented. The model combines (i) a hierarchical Darcy model for the flow through small arterioles, capillaries and small venules in the retinal tissue, where blood vessels of different size are comprised in different hierarchical levels of a porous medium; and (ii) a one-dimensional network model for the blood flow through retinal arterioles and venules of larger size. The non-planar ocular shape is included by (i) defining the hierarchical Darcy flow model on a two-dimensional curved surface embedded in the three-dimensional space; and (ii) mapping the simplified one-dimensional network model onto the curved surface. The model is solved numerically using a finite element method in which spatial domain and hierarchical levels are discretized separately. For the finite element method, we use an exterior calculus-based implementation which permits an easier treatment of non-planar domains. Numerical solutions are verified against suitably constructed analytical solutions. Numerical experiments are performed to investigate how retinal hemodynamics is influenced by the ocular shape (sphere, oblate spheroid, prolate spheroid and barrel are compared) and vascular architecture (four vascular arcs and a branching vascular tree are compared). The model predictions show that changes in ocular shape induce non-uniform alterations of blood pressure and velocity in the retina. In particular, we found that (i) the temporal region is affected the least by changes in ocular shape, and (ii) the barrel shape departs the most from the hemispherical reference geometry in terms of associated pressure and velocity distributions in the retinal microvasculature. These results support the clinical hypothesis that alterations in ocular shape, such as those occurring in myopic eyes, might be associated with pathological alterations in retinal hemodynamics.}, author = {Dziubek, Andrea and Guidoboni, Giovanna and Harris, Alon and Hirani, Anil N. and Rusjan, Edmond and Thistleton, William}, doi = {10.1007/s10237-015-0731-8}, issn = {16177940}, journal = {Biomechanics and Modeling in Mechanobiology}, keywords = {Finite element exterior calculus,Hierarchical porous medium,Mathematical modeling,Ocular curvature,Retinal hemodynamics,Vascular network}, number = {4}, pages = {893--907}, title = {{Effect of ocular shape and vascular geometry on retinal hemodynamics: a computational model}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {15}, year = {2016} } @inbook{ElNaqa2016, abstract = {PET imaging is a main diagnostic modality of different diseases including cancer. 
In the particular case of cancer, PET is widely used for staging of disease progression, identification of the treatment gross tumor volume, monitoring of disease, as well as prediction of outcomes and personalization of treatment regimens. Among the arsenal of different functional imaging modalities, PET has benefited from early adoption of quantitative image analysis starting from simple standard uptake value (SUV) normalization to more advanced extraction of complex imaging uptake patterns, thanks chiefly to the application of sophisticated image processing algorithms. In this chapter, we discuss the application of image processing techniques to PET imaging with special focus on the oncological radiotherapy domain starting from basic feature extraction to application in target definition using image segmentation/registration and more recent image-based outcome modeling in the radiomics field. We further extend the discussion into hybrid anatomical functional combinations of PET/CT and PET/MR multimodalities.}, author = {{El Naqa}, Issam}, booktitle = {Basic Science of PET Imaging}, doi = {10.1007/978-3-319-40070-9_12}, isbn = {9783319400709}, keywords = {Hybrid imaging,Image processing,Quantitative PET,Radiomics}, pages = {285--301}, title = {{Image processing and analysis of PET and hybrid PET imaging}}, type = {Book Section}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85018895187{\&}doi=10.1007{\%}2F978-3-319-40070-9{\_}12{\&}partnerID=40{\&}md5=a2572f421a96f250c396c7c0538f189d}, year = {2016} } @article{Ertz2014, author = {Ertz, Olivier and Rey, Sergio J. and Joost, St{\'{e}}phane}, doi = {10.5311/JOSIS.2014.8.182}, issn = {1948660X}, journal = {Journal of Spatial Information Science}, number = {2014}, pages = {67--71}, title = {{The open source dynamics in geospatial research and education}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84919960299{\&}doi=10.5311{\%}2FJOSIS.2014.8.182{\&}partnerID=40{\&}md5=a7c671f5acc3d9aeaea0fe07a547c7fb}, volume = {8}, year = {2014} } @article{Esposito2011, abstract = {In this paper we present the development of a software for the extraction of the hippocampus and surrounding medial-temporal-lobe (MTL) regions from T1-weighted magnetic resonance (MR) and from Positron Emission Tomography (PET) images with no interactive input from the user. With this software we introduce a novel statistical index computed on the intensities in the automatically extracted MTL regions. This index is a measure of gray-matter (GM) atrophy and allows to: distinguish between (a) patients with Alzheimer's disease (AD), patients with amnestic mild cognitive impairment (aMCI), (b) patients with amnestic mild cognitive impairment who will later develop AD in a time frame of 2 years (aMCIconv), and (c) a set of age-matched elderly controls. Once refined, this method could be used to infer about the clinical outcome of aMCI patients. PACS 87.57.nj-Registration. PACS 87.57.nm-Segmentation. PACS 87.57.R-Computer-aided diagnosis. PACS 87.61.Tg-Clinical applications. {\textcopyright} Societ{\`{a}} Italiana di Fisica.}, author = {Esposito, M. and Bosco, P. and Rei, L. 
and Aiello, M.}, doi = {10.1393/ncc/i2011-10807-0}, issn = {18269885}, journal = {Nuovo Cimento della Societa Italiana di Fisica C}, number = {1}, pages = {175--185}, title = {{Volumetric analysis on MRI and PET images for the early diagnosis of Alzheimer's disease}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84870415031{\&}doi=10.1393{\%}2Fncc{\%}2Fi2011-10807-0{\&}partnerID=40{\&}md5=56add27b1e0232de26da65f2420fcbc5}, volume = {34}, year = {2011} } @article{Fallavollita2010, abstract = {CARTO XP is an electroanatomical cardiac mapping system that provides 3D color-coded maps of the electrical activity of the heart; however it is expensive and it can only use a single costly magnetic catheter for each patient intervention. Our approach consists of integrating fluoroscopic and electrical data from the RF catheters into the same image so as to better guide RF ablation, shorten the duration of this procedure, increase its efficacy, and decrease hospital cost when compared to CARTO XP. We propose a method that relies on multi-view C-arm fluoroscopy image acquisition for (1) the 3D reconstruction of the anatomical structure of interest, (2) the robust temporal tracking of the tip-electrode of a mapping catheter between the diastolic and systolic phases and (3) the 2D/3D registration of color coded isochronal maps directly on the 2D fluoroscopy image that would help the clinician guide the ablation procedure much more effectively. The method has been tested on canine experimental data. {\textcopyright} 2010 Pascal Fallavollita.}, author = {Fallavollita, Pascal}, doi = {10.1155/2010/871409}, issn = {16875176}, journal = {Eurasip Journal on Image and Video Processing}, pages = {10}, title = {{Acquiring multiview C-arm images to assist cardiac ablation procedures}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {2010}, year = {2010} } @article{Fallavollita2010a, abstract = {Purpose: In prostate brachytherapy, transrectal ultrasound (TRUS) is used to visualize the anatomy, while implanted seeds can be visualized by fluoroscopy. Intraoperative dosimetry optimization is possible using a combination of TRUS and fluoroscopy, but requires localization of the fluoroscopy-derived seed cloud, relative to the anatomy as seen on TRUS. The authors propose to develop a method of registration of TRUS images and the implants reconstructed from fluoroscopy. Methods: A phantom was implanted with 48 seeds then imaged with TRUS and CT. Seeds were reconstructed from CT yielding a cloud of seeds. Fiducial-based ground-truth registration was established between the TRUS and CT. TRUS images are filtered, compounded, and registered to the reconstructed implants by using an intensity-based metric. The authors evaluated a volume-to-volume and point-to-volume registration scheme. In total, seven TRUS filtering techniques and three image similarity metrics were analyzed. The method was also tested on human subject data captured from a brachytherapy procedure. Results: For volume-to-volume registration, noise reduction filter and normalized correlation metrics yielded the best result: An average of 0.54±0.11 mm seed localization error relative to ground truth. For point-to-volume registration, noise reduction combined with beam profile filter and mean squares metrics yielded the best result: An average of 0.38±0.19 mm seed localization error relative to the ground truth. 
In human patient data, C-arm fluoroscopy images showed 81 radioactive seeds implanted inside the prostate. A qualitative analysis showed clinically correct agreement between the seeds visible in TRUS and reconstructed from intraoperative fluoroscopy imaging. The measured registration error compared to the manually selected seed locations by the clinician was 2.86±1.26 mm. Conclusions: Fully automated registration between TRUS and the reconstructed seeds performed well in ground-truth phantom experiments and qualitative observation showed adequate performance on early clinical patient data. {\textcopyright} 2010 American Association of Physicists in Medicine.}, author = {Fallavollita, P. and {Karim Aghaloo}, Z. and Burdette, E. C. and Song, D. Y. and Abolmaesumi, P. and Fichtinger, G.}, doi = {10.1118/1.3416937}, issn = {00942405}, journal = {Medical Physics}, keywords = {Fluoroscopy,Prostate brachytherapy,Registration,Ultrasound}, number = {6}, pages = {2749--2760}, title = {{Registration between ultrasound and fluoroscopy or CT in prostate brachytherapy}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {37}, year = {2010} } @article{Fang2009, abstract = {In this paper, we report new progress in developing the instrument and software platform of a combined X-ray mammography/diffuse optical breast imaging system. Particularly, we focus on system validation using a series of balloon phantom experiments and the optical image analysis of 49 healthy patients. Using the finite-element method for forward modeling and a regularized Gauss-Newton method for parameter reconstruction, we recovered the inclusions inside the phantom and the hemoglobin images of the human breasts. An enhanced coupling coefficient estimation scheme was also incorporated to improve the accuracy and robustness of the reconstructions. The recovered average total hemoglobin concentration (HbT) and oxygen saturation (SO2) from 68 breast measurements are 16.2 $\mu$ and 71{\%}, respectively, where the HbT presents a linear trend with breast density. The low HbT value compared to literature is likely due to the associated mammographic compression. From the spatially co-registered optical/X-ray images, we can identify the chest-wall muscle, fatty tissue, and fibroglandular regions with an average HbT of 20.1 ± 6.1 $\mu$ for fibroglandular tissue, 15.4 ± 5.0 $\mu$ for adipose, and 22.2 ± 7.3 $\mu$ for muscle tissue. The differences between fibroglandular tissue and the corresponding adipose tissue are significant (p≤ 0.0001). At the same time, we recognize that the optical images are influenced, to a certain extent, by mammographical compression. The optical images from a subset of patients show composite features from both tissue structure and pressure distribution. We present mechanical simulations which further confirm this hypothesis. {\textcopyright} 2006 IEEE.}, author = {Fang, Qianqian and Carp, Stefan A. and Selb, Juliette and Boverman, Greg and Zhang, Quan and Kopans, Daniel B. and Moore, Richard H. and Miller, Eric L. and Brooks, Dana H. 
and Boas, David A.}, doi = {10.1109/TMI.2008.925082}, issn = {02780062}, journal = {IEEE Transactions on Medical Imaging}, keywords = {Breast imaging,Multimodality imaging,Tomography}, number = {1}, pages = {30--42}, pmid = {19116186}, title = {{Combined optical imaging and mammography of the healthy breast: Optical contrast derived from breast structure and compression}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-58149166606{\&}doi=10.1109{\%}2FTMI.2008.925082{\&}partnerID=40{\&}md5=0adeb1e35532e1ca200be1b4fda2df76}, volume = {28}, year = {2009} } @inproceedings{Fang, abstract = {We present a software platform for image reconstruction and data analysis for diffuse optical tomography. The structure, algorithm and functionalities of the platform are reported together with the sample results produced by the platform. {\textcopyright} 2007 Optical Society of America.}, author = {Fang, Q. and Carp, S. A. and Selb, J. and Moore, R. and Kopans, D. B. and Miller, E. L. and Brooks, D. H. and Boas, D. A.}, booktitle = {Biomedical Optics, BIOMED 2008}, doi = {10.1364/biomed.2008.bmd24}, title = {{A multi-modality image reconstruction platform for diffuse optical tomography}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84884342384{\&}partnerID=40{\&}md5=0a660aea258182c137661a84afe6ec4e}, year = {2008} } @inproceedings{Fanga, abstract = {We imaged 65 patients with a combined optical and tomosynthesis imaging system. The bulk optical properties from 72 healthy breasts and the reconstructed images using a spectrally-constrained algorithm for healthy and tumor breasts are reported. {\textcopyright} 2007 Optical Society of America.}, author = {Fang, Q. and Carp, S. A. and Selb, J. and Moore, R. and Kopans, D. B. and Miller, E. L. and Brooks, D. H. and Boas, D. A.}, booktitle = {Biomedical Optics, BIOMED 2008}, doi = {10.1364/biomed.2008.bsub2}, title = {{Spectrally constrained optical breast imaging with coregistered x-ray tomosynthesis}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84884324617{\&}partnerID=40{\&}md5=4341cad05ad6059b2b7866085d7c9037}, year = {2008} } @article{Fehr2015, abstract = {Noninvasive, radiological image-based detection and stratification of Gleason patterns can impact clinical outcomes, treatment selection, and the determination of disease status at diagnosis without subjecting patients to surgical biopsies. We present machine learning-based automatic classification of prostate cancer aggressiveness by combining apparent diffusion coefficient (ADC) and T2-weighted (T2-w) MRI-based texture features. Our approach achieved reasonably accurate classification of Gleason scores (GS) 6(3 + 3) vs. ≥7 and 7(3 + 4) vs. 7(4 + 3) despite the presence of highly unbalanced samples by using two different sample augmentation techniques followed by feature selection-based classification. Our method distinguished between GS 6(3 + 3) and ≥7 cancers with 93{\%} accuracy for cancers occurring in both peripheral (PZ) and transition (TZ) zones and 92{\%} for cancers occurring in the PZ alone. Our approach distinguished the GS 7(3 + 4) from GS 7(4 + 3) with 92{\%} accuracy for cancers occurring in both the PZ and TZ and with 93{\%} for cancers occurring in the PZ alone. In comparison, a classifier using only the ADC mean achieved a top accuracy of 58{\%} for distinguishing GS 6(3 + 3) vs. 
GS ≥7 for cancers occurring in PZ and TZ and 63{\%} for cancers occurring in PZ alone. The same classifier achieved an accuracy of 59{\%} for distinguishing GS 7(3 + 4) from GS 7(4 + 3) occurring in the PZ and TZ and 60{\%} for cancers occurring in PZ alone. Separate analysis of the cancers occurring in TZ alone was not performed owing to the limited number of samples. Our results suggest that texture features derived from ADC and T2-w MRI together with sample augmentation can help to obtain reasonably accurate classification of Gleason patterns.}, author = {Fehr, Duc and Veeraraghavan, Harini and Wibmer, Andreas and Gondo, Tatsuo and Matsumoto, Kazuhiro and Vargas, Herbert Alberto and Sala, Evis and Hricak, Hedvig and Deasy, Joseph O.}, doi = {10.1073/pnas.1505935112}, issn = {10916490}, journal = {Proceedings of the National Academy of Sciences of the United States of America}, keywords = {Gleason score classification,Learning from unbalanced data,Multiparametric mri,PCa gleason (3+4) vs. (4+3) cancers,PCa gleason 6 vs. ≥7}, number = {46}, pages = {E6265--E6273}, title = {{Automatic classification of prostate cancer Gleason scores from multiparametric magnetic resonance images}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {112}, year = {2015} } @article{FennemaNotestine2016, abstract = {White matter disease in the brain increases with age and cardiovascular disease, emerging in midlife, and these associations may be influenced by both genetic and environmental factors. We examined the frequency, distribution, and heritability of abnormal white matter and its association with hypertension in 395 middle-aged male twins (61.9 ± 2.6 years) from the Vietnam Era Twin Study of Aging, 67{\%} of whom were hypertensive. A multi-channel segmentation approach estimated abnormal regions within the white matter. Using multivariable regression models, we characterized the frequency distribution of abnormal white matter in midlife and investigated associations with hypertension and Apolipoprotein E-$\epsilon$4 status and the impact of duration and control of hypertension. Then, using the classical twin design, we estimated abnormal white matter heritability and the extent of shared genetic overlap with blood pressure. Abnormal white matter was predominantly located in periventricular and deep parietal and frontal regions; associated with age (t = 1.9, p = 0.05) and hypertension (t = 2.9, p = 0.004), but not Apolipoprotein $\epsilon$4 status; and was greater in those with uncontrolled hypertension relative to controlled (t = 3.0, p = 0.003) and normotensive (t = 4.0, p = 0.0001) groups, suggesting that abnormal white matter may reflect currently active cerebrovascular effects. Abnormal white matter was highly heritable (a2 = 0.81) and shared some genetic influences with systolic blood pressure (rA = 0.26), although there was evidence for distinct genetic contributions and unique environmental influences. Future longitudinal research will shed light on factors impacting white matter disease presentation, progression, and potential recovery.}, author = {Fennema-Notestine, Christine and McEvoy, Linda K. and Notestine, Randy and Panizzon, Matthew S. and Yau, Wai Ying Wendy and Franz, Carol E. and Lyons, Michael J. and Eyler, Lisa T. and Neale, Michael C. and Xian, Hong and McKenzie, Ruth E. 
and Kremen, William S.}, doi = {10.1016/j.nicl.2016.10.001}, issn = {22131582}, journal = {NeuroImage: Clinical}, keywords = {Blood pressure,Brain,Heritability,Hypertension,MRI,White matter}, pages = {737--745}, title = {{White matter disease in midlife is heritable, related to hypertension, and shares some genetic influence with systolic blood pressure}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {12}, year = {2016} } @article{Finnegan2019, abstract = {Toxicity to cardiac and coronary structures is an important late morbidity for patients undergoing left-sided breast radiotherapy. Many current studies have relied on estimates of cardiac doses assuming standardised anatomy, with a calculated increase in relative risk of 7.4{\%} per Gy (mean heart dose). To provide individualised estimates for dose, delineation of various cardiac structures on patient images is required. Automatic multi-atlas based segmentation can provide a consistent, robust solution, however there are challenges to this method. We are aiming to develop and validate a cardiac atlas and segmentation framework, with a focus on the limitations and uncertainties in the process. We present a probabilistic approach to segmentation, which provides a simple method to incorporate inter-observer variation, as well as a useful tool for evaluating the accuracy and sources of error in segmentation. A dataset consisting of 20 planning computed tomography (CT) images of Australian breast cancer patients with delineations of 17 structures (including whole heart, four chambers, coronary arteries and valves) was manually contoured by three independent observers, following a protocol based on a published reference atlas, with verification by a cardiologist. To develop and validate the segmentation framework a leave-one-out cross-validation strategy was implemented. Performance of the automatic segmentations was evaluated relative to inter-observer variability in manually derived contours; measures of volume and surface accuracy (Dice similarity coefficient (DSC) and mean absolute surface distance (MASD), respectively) were used to compare automatic segmentation to the consensus segmentation from manual contours. For the whole heart, the resulting segmentation achieved a DSC of 0.944 ± 0.024, with a MASD of 0.726 ± 1.363 mm.
Quantitative results, together with the analysis of probabilistic labelling, indicate the feasibility of accurate and consistent segmentation of larger structures, whereas this is not the case for many smaller structures, where a major limitation in segmentation accuracy is the interobserver variability in manual contouring.}, author = {Finnegan, Robert and Dowling, Jason and Koh, Eng Siew and Tang, Simon and Otton, James and Delaney, Geoff and Batumalai, Vikneswary and Luo, Carol and Atluri, Pramukh and Satchithanandha, Athiththa and Thwaites, David and Holloway, Lois}, doi = {10.1088/1361-6560/ab0ea6}, issn = {13616560}, journal = {Physics in Medicine and Biology}, keywords = {atlas-based segmentation,computed tomography,heart contouring,image registration,medical image processing,whole heart segmentation}, number = {8}, title = {{Feasibility of multi-atlas cardiac segmentation from thoracic planning CT in a probabilistic framework}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85064555945{\&}doi=10.1088{\%}2F1361-6560{\%}2Fab0ea6{\&}partnerID=40{\&}md5=65ca7ad7c68a23ee0cb634955c18a67b}, volume = {64}, year = {2019} } @article{Frohler2016, abstract = {We present GEMSe, an interactive tool for exploring and analyzing the parameter space of multi-channel segmentation algorithms. Our targeted user group are domain experts who are not necessarily segmentation specialists. GEMSe allows the exploration of the space of possible parameter combinations for a segmentation framework and its ensemble of results. Users start with sampling the parameter space and computing the corresponding segmentations. A hierarchically clustered image tree provides an overview of variations in the resulting space of label images. Details are provided through exemplary images from the selected cluster and histograms visualizing the parameters and the derived output in the selected cluster. The correlation between parameters and derived output as well as the effect of parameter changes can be explored through interactive filtering and scatter plots. We evaluate the usefulness of GEMSe through expert reviews and case studies based on three different kinds of datasets: A synthetic dataset emulating the combination of 3D X-ray computed tomography with data from K-Edge spectroscopy, a three-channel scan of a rock crystal acquired by a Talbot-Lau grating interferometer X-ray computed tomography device, as well as a hyperspectral image.}, author = {Fr{\"{o}}hler, B. and M{\"{o}}ller, T. and Heinzl, C.}, doi = {10.1111/cgf.12895}, issn = {14678659}, journal = {Computer Graphics Forum}, keywords = {Categories and Subject Descriptors (according to A,I.3.8 [Computer Graphics]: Applications—,I.4.6 [Image Processing and Computer Vision]: Segm}, number = {3}, pages = {191--200}, title = {{GEMSe: Visualization-Guided Exploration of Multi-channel Segmentation Algorithms}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {35}, year = {2016} } @article{Fuentes2019, abstract = {PURPOSE: This pilot study evaluates the feasibility of automated volumetric quantification of hepatocellular carcinoma (HCC) as an imaging biomarker to assess treatment response for sorafenib. 
METHODS: In this institutional review board-approved, Health Insurance Portability and Accountability Act-compliant retrospective study, a training database of manually labeled background liver, enhancing and nonenhancing tumor tissue was established using pretherapy and first posttherapy multiphasic computed tomography images from a registry of 13 HCC patients. For each patient, Hounsfield density and geometry-based feature images were generated from registered multiphasic computed tomography data sets and used as the input for a random forest-based classifier of enhancing and nonenhancing tumor tissue. Leave-one-out cross-validation of the dice similarity measure was applied to quantify the classifier accuracy. A Cox regression model was used to confirm volume changes as predictors of time to progression (TTP) of target lesions for both manual and automatic methods. RESULTS: When compared with manual labels, an overall classification accuracy of dice similarity coefficient of 0.71 for pretherapy and 0.66 posttherapy enhancing tumor labels and 0.45 for pretherapy and 0.59 for posttherapy nonenhancing tumor labels was observed. Automated methods for quantifying volumetric changes in the enhancing lesion agreed with manual methods and were observed as a significant predictor of TTP. CONCLUSIONS: Automated volumetric analysis was determined to be feasible for monitoring HCC response to treatment. The information extracted using automated volumetrics is likely to reproduce labor-intensive manual data and provide a good predictor for TTP. Further work will extend these studies to additional treatment modalities and larger patient populations.}, author = {Fuentes, David and Ahmed, Kareem and Lin, Jonathan S. and Abdel-Wahab, Reham and Kaseb, Ahmed O. and Hassan, Manal and Szklaruk, Janio and Morshid, Ali and Hazle, John D. and Qayyum, Aliya and Elsayes, Khaled M.}, doi = {10.1097/RCT.0000000000000866}, issn = {15323145}, journal = {Journal of computer assisted tomography}, number = {3}, pages = {499--506}, title = {{Automated Volumetric Assessment of Hepatocellular Carcinoma Response to Sorafenib: A Pilot Study}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {43}, year = {2019} } @article{Fusseis2014, abstract = {This contribution outlines Synchrotron-based X-ray micro-tomography and its potential use in structural geology and rock mechanics. The paper complements several recent reviews of X-ray microtomography. We summarize the general approach to data acquisition, post-processing as well as analysis and thereby aim to provide an entry point for the interested reader. The paper includes tables listing relevant beamlines, a list of all available imaging techniques, and available free and commercial software packages for data visualization and quantification. We highlight potential applications in a review of relevant literature including time-resolved experiments and digital rock physics. The paper concludes with a report on ongoing developments and upgrades at synchrotron facilities to frame the future possibilities for imaging sub-second processes in centimetre-sized samples. {\textcopyright} 2014 Elsevier Ltd.}, author = {Fusseis, F. and Xiao, X. and Schrank, C. 
and {De Carlo}, F.}, doi = {10.1016/j.jsg.2014.02.005}, issn = {01918141}, journal = {Journal of Structural Geology}, keywords = {3D petrography,Digital rock physics,Synchrotron,X-ray microtomography}, pages = {1--16}, title = {{A brief guide to synchrotron radiation-based microtomography in (structural) geology and rock mechanics}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {65}, year = {2014} } @article{Fuster2013, abstract = {Subtraction of Ictal SPECT Co-registered to MRI (SISCOM) is an imaging technique used to localize the epileptogenic focus in patients with intractable partial epilepsy. The aim of this study was to determine the accuracy of registration algorithms involved in SISCOM analysis using FocusDET, a new user-friendly application. To this end, Monte Carlo simulation was employed to generate realistic SPECT studies. Simulated sinograms were reconstructed by using the Filtered BackProjection (FBP) algorithm and an Ordered Subsets Expectation Maximization (OSEM) reconstruction method that included compensation for all degradations. Registration errors in SPECT-SPECT and SPECT-MRI registration were evaluated by comparing the theoretical and actual transforms. Patient studies with well-localized epilepsy were also included in the registration assessment. Global registration errors including SPECT-SPECT and SPECT-MRI registration errors were less than 1.2 mm on average, exceeding the voxel size (3.32 mm) of SPECT studies in no case. Although images reconstructed using OSEM led to lower registration errors than images reconstructed with FBP, differences after using OSEM or FBP in reconstruction were less than 0.2 mm on average. This indicates that correction for degradations does not play a major role in the SISCOM process, thereby facilitating the application of the methodology in centers where OSEM is not implemented with correction of all degradations. These findings together with those obtained by clinicians from patients via MRI, interictal and ictal SPECT and video-EEG, show that FocusDET is a robust application for performing SISCOM analysis in clinical practice. {\textcopyright} 2012 Springer Science+Business Media, LLC.}, author = {{Mart{\'{i}} Fuster}, Berta and Esteban, Oscar and Planes, Xavier and Aguiar, Pablo and Crespo, Cristina and Falcon, Carles and Wollny, Gert and {Rub{\'{i}} Sureda}, Sebasti{\`{a}} and Setoain, Xavier and Frangi, Alejandro F. and Ledesma, Maria J. and Santos, Andr{\'{e}}s and Pav{\'{i}}a, Javier and Ros, Dom{\`{e}}nec}, doi = {10.1007/s12021-012-9158-x}, issn = {15392791}, journal = {Neuroinformatics}, keywords = {Epilepsy,Monte Carlo simulation,Reconstruction algorithms,Registration assessment,SISCOM}, number = {1}, pages = {77--89}, title = {{FocusDET, a new toolbox for SISCOM analysis. Evaluation of the registration accuracy using monte carlo simulation}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {11}, year = {2013} } @inproceedings{Gamilov, abstract = {1D model is used to simulate blood flow in major vessels of the upper body and head. The 1D part is stated in terms of viscous incompressible fluid flow in the network of elastic tubes. Two different types of junctions are considered: junctions between major vessels and junctions between arteries and veins. Vessel network reconstruction algorithm consists of vessel segmentation, thinning-based obtaining of set of centerlines, and graph reconstruction. Input data is 3D DICOM datasets, obtained with contrast enhanced Computed Tomography (CT) Angiography. 
Constructed model is used to study the influence of carotid artery stenosis on the direction of blood flow in the circle of Willis.}, author = {Gamilov, Timur and Pryamonosov, Roman and Simakov, Sergey}, booktitle = {ECCOMAS Congress 2016 - Proceedings of the 7th European Congress on Computational Methods in Applied Sciences and Engineering}, doi = {10.7712/100016.1793.8690}, isbn = {9786188284401}, keywords = {1D haemodynamics,Carotid artery stenosis,Circle of Willis,Microcirculation,Patient-specific,Vessel segmentation}, pages = {81--89}, title = {{Modeling of patient-specific cases of atherosclerosis in carotid arteries}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84995444960{\&}doi=10.7712{\%}2F100016.1793.8690{\&}partnerID=40{\&}md5=47f13a248cbaf7933cea6036588d227b}, volume = {1}, year = {2016} } @article{Garpebring2013, abstract = {Using dynamic contrast-enhanced MRI (DCE-MRI), it is possible to estimate pharmacokinetic (PK) parameters that convey information about physiological properties, e.g., in tumors. In DCE-MRI, errors propagate in a nontrivial way to the PK parameters. We propose a method based on multivariate linear error propagation to calculate uncertainty maps for the PK parameters. Uncertainties in the PK parameters were investigated for the modified Kety model. The method was evaluated with Monte Carlo simulations and exemplified with in vivo brain tumor data. PK parameter uncertainties due to noise in dynamic data were accurately estimated. Noise with standard deviation up to 15{\%} in the baseline signal and the baseline T1 map gave estimated uncertainties in good agreement with the Monte Carlo simulations. Good agreement was also found for up to 15{\%} errors in the arterial input function amplitude. The method was less accurate for errors in the bolus arrival time with disagreements of 23{\%}, 32{\%}, and 29{\%} for Ktrans, ve, and vp, respectively, when the standard deviation of the bolus arrival time error was 5.3 s. In conclusion, the proposed method provides efficient means for calculation of uncertainty maps, and it was applicable to a wide range of sources of uncertainty. {\textcopyright} 2012 Wiley Periodicals, Inc.}, author = {Garpebring, Anders and Brynolfsson, Patrik and Yu, Jun and Wirestam, Ronnie and Johansson, Adam and Asklund, Thomas and Karlsson, Mikael}, doi = {10.1002/mrm.24328}, issn = {07403194}, journal = {Magnetic Resonance in Medicine}, keywords = {accuracy,dynamic contrast-enhanced-MRI,precision analysis,uncertainty estimation}, number = {4}, pages = {992--1002}, title = {{Uncertainty estimation in dynamic contrast-enhanced MRI}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {69}, year = {2013} } @article{Garpebring2011, abstract = {Object: Phase-based arterial input functions (AIFs) provide a promising alternative to standard magnitude-based AIFs, for example, because inflow effects are avoided. The usefulness of phase-based AIFs in clinical dynamic contrast-enhanced MRI (DCE-MRI) was investigated, and relevant pitfalls and sources of uncertainty were identified. Materials and methods: AIFs were registered from eight human subjects on, in total, 21 occasions. AIF quality was evaluated by comparing AIFs from right and left internal carotid arteries and by assessing the reliability of blood plasma volume estimates. Results: Phase-based AIFs yielded an average bolus peak of 3.9 mM and a residual concentration of 0.37 mM after 3 min, (0.033 mmol/kg contrast agent injection). 
The average blood plasma volume was 2.7{\%} when using the AIF peak in the estimation, but was significantly different (p {\textless} 0.0001) and less physiologically reasonable when based on the AIF tail concentration. Motion-induced phase shifts and accumulation of contrast agent in background tissue regions were identified as main sources of uncertainty. Conclusion: Phase-based AIFs are a feasible alternative to magnitude AIFs, but sources of errors exist, making quantification difficult, especially of the AIF tail. Improvement of the technique is feasible and also required for the phase-based AIF approach to reach its full potential. {\textcopyright} 2011 ESMRMB.}, author = {Garpebring, Anders and Wirestam, Ronnie and Yu, Jun and Asklund, Thomas and Karlsson, Mikael}, doi = {10.1007/s10334-011-0257-8}, issn = {09685243}, journal = {Magnetic Resonance Materials in Physics, Biology and Medicine}, keywords = {Arterial input function,Dynamic contrast-enhanced MRI,Phase quantification}, number = {4}, pages = {233--245}, title = {{Phase-based arterial input functions in humans applied to dynamic contrast-enhanced MRI: Potential usefulness and limitations}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {24}, year = {2011} } @article{Ge2014, abstract = {Purpose: Intrafraction deformation limits targeting accuracy in radiotherapy. Studies show tumor deformation of over 10 mm for both single tumor deformation and system deformation (due to differential motion between primary tumors and involved lymph nodes). Such deformation cannot be adapted to with current radiotherapy methods. The objective of this study was to develop and experimentally investigate the ability of a dynamic multi-leaf collimator (DMLC) tracking system to account for tumor deformation. Methods: To compensate for tumor deformation, the DMLC tracking strategy is to warp the planned beam aperture directly to conform to the new tumor shape based on real time tumor deformation input. Two deformable phantoms that correspond to a single tumor and a tumor system were developed. The planar deformations derived from the phantom images in beam's eye view were used to guide the aperture warping. An in-house deformable image registration software was developed to automatically trigger the registration once new target image was acquired and send the computed deformation to the DMLC tracking software. Because the registration speed is not fast enough to implement the experiment in real-time manner, the phantom deformation only proceeded to the next position until registration of the current deformation position was completed. The deformation tracking accuracy was evaluated by a geometric target coverage metric defined as the sum of the area incorrectly outside and inside the ideal aperture. The individual contributions from the deformable registration algorithm and the finite leaf width to the tracking uncertainty were analyzed. Clinical proof-of-principle experiment of deformation tracking using previously acquired MR images of a lung cancer patient was implemented to represent the MRI-Linac environment. Intensity-modulated radiation therapy (IMRT) treatment delivered with enabled deformation tracking was simulated and demonstrated. Results: The first experimental investigation of adapting to tumor deformation has been performed using simple deformable phantoms. For the single tumor deformation, the Au+Ao was reduced over 56{\%} when deformation was larger than 2 mm. Overall, the total improvement was 82{\%}.
For the tumor system deformation, the Au+Ao reductions were all above 75{\%} and the total Au+Ao improvement was 86{\%}. Similar coverage improvement was also found in simulating deformation tracking during IMRT delivery. The deformable image registration algorithm was identified as the dominant contributor to the tracking error rather than the finite leaf width. The discrepancy between the warped beam shape and the ideal beam shape due to the deformable registration was observed to be partially compensated during leaf fitting due to the finite leaf width. The clinical proof-of-principle experiment demonstrated the feasibility of intrafraction deformable tracking for clinical scenarios. Conclusions: For the first time, we developed and demonstrated an experimental system that is capable of adapting the MLC aperture to account for tumor deformation. This work provides a potentially widely available management method to effectively account for intrafractional tumor deformation. This proof-of-principle study is the first experimental step toward the development of an image-guided radiotherapy system to treat deforming tumors in real-time. {\textcopyright} 2014 American Association of Physicists in Medicine.}, author = {Ge, Yuanyuan and O'Brien, Ricky T. and Shieh, Chun Chien and Booth, Jeremy T. and Keall, Paul J.}, doi = {10.1118/1.4873682}, issn = {00942405}, journal = {Medical Physics}, keywords = {DMLC tracking,adaptation,tumor deformation}, number = {6}, pages = {10}, title = {{Toward the development of intrafraction tumor deformation tracking using a dynamic multi-leaf collimator}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {41}, year = {2014} } @article{Gerst2020, abstract = {Modern imaging techniques, such as lightsheet fluorescence microscopy (LSFM), allow the capture of whole organs in three spatial dimensions. The analysis of these big volume image data requires a combination of user-friendly and highly efficient tools. We here present MISA++, an image analysis framework that allows easy integration of custom high-performance C++ tools into third-party applications via standardized components for parallelization, data and parameter handling, command line interface, and communication with third-party applications. We demonstrate its capabilities by implementing a plugin for ImageJ that provides a graphical user interface for any application built with our framework, and a high-performance re-implementation of our Python-based algorithm to segment glomeruli in LSFM images of whole murine kidneys.}, author = {Gerst, Ruman and Medyukhina, Anna and Figge, Marc Thilo}, doi = {10.1016/j.softx.2020.100405}, issn = {23527110}, journal = {SoftwareX}, keywords = {Application integration,Big volume image data,Image processing,Light-sheet fluorescence microscopy,Parallelization}, title = {{MISA++: A standardized interface for automated bioimage analysis}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85078663806{\&}doi=10.1016{\%}2Fj.softx.2020.100405{\&}partnerID=40{\&}md5=4063263c3ae932dded430bc8192b1704}, volume = {11}, year = {2020} } @article{Gillies2017, abstract = {Purpose: During image-guided prostate biopsy, needles are targeted at tissues that are suspicious of cancer to obtain specimen for histological examination. Unfortunately, patient motion causes targeting errors when using an MR-transrectal ultrasound (TRUS) fusion approach to augment the conventional biopsy procedure. 
This study aims to develop an automatic motion correction algorithm approaching the frame rate of an ultrasound system to be used in fusion-based prostate biopsy systems. Two modes of operation have been investigated for the clinical implementation of the algorithm: motion compensation using a single user initiated correction performed prior to biopsy, and real-time continuous motion compensation performed automatically as a background process. Methods: Retrospective 2D and 3D TRUS patient images acquired prior to biopsy gun firing were registered using an intensity-based algorithm utilizing normalized cross-correlation and Powell's method for optimization. 2D and 3D images were downsampled and cropped to estimate the optimal amount of image information that would perform registrations quickly and accurately. The optimal search order during optimization was also analyzed to avoid local optima in the search space. Error in the algorithm was computed using target registration errors (TREs) from manually identified homologous fiducials in a clinical patient dataset. The algorithm was evaluated for real-time performance using the two different modes of clinical implementations by way of user initiated and continuous motion compensation methods on a tissue mimicking prostate phantom. Results: After implementation in a TRUS-guided system with an image downsampling factor of 4, the proposed approach resulted in a mean ± std TRE and computation time of 1.6 ± 0.6 mm and 57 ± 20 ms respectively. The user initiated mode performed registrations with in-plane, out-of-plane, and roll motions computation times of 108 ± 38 ms, 60 ± 23 ms, and 89 ± 27 ms, respectively, and corresponding registration errors of 0.4 ± 0.3 mm, 0.2 ± 0.4 mm, and 0.8 ± 0.5. The continuous method performed registration significantly faster (P {\textless} 0.05) than the user initiated method, with observed computation times of 35 ± 8 ms, 43 ± 16 ms, and 27 ± 5 ms for in-plane, out-of-plane, and roll motions, respectively, and corresponding registration errors of 0.2 ± 0.3 mm, 0.7 ± 0.4 mm, and 0.8 ± 1.0. Conclusions: The presented method encourages real-time implementation of motion compensation algorithms in prostate biopsy with clinically acceptable registration errors. Continuous motion compensation demonstrated registration accuracy with submillimeter and subdegree error, while performing {\textless} 50 ms computation times. Image registration technique approaching the frame rate of an ultrasound system offers a key advantage to be smoothly integrated to the clinical workflow. In addition, this technique could be used further for a variety of image-guided interventional procedures to treat and diagnose patients by improving targeting accuracy.}, author = {Gillies, Derek J. and Gardi, Lori and {De Silva}, Tharindu and Zhao, Shuang Ren and Fenster, Aaron}, doi = {10.1002/mp.12441}, issn = {00942405}, journal = {Medical Physics}, keywords = {2D-3D transrectal ultrasound-guided prostate biops,prostate cancer,prostate motion compensation,real-time image registration}, number = {9}, pages = {4708--4723}, title = {{Real-time registration of 3D to 2D ultrasound images for image-guided prostate biopsy:}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {44}, year = {2017} } @article{Ginty2019, abstract = {Purpose:: Transcatheter, beating heart repair techniques for mitral valve regurgitation is a very active area of development. 
However, it is difficult to both simulate and predict the clinical outcomes of mitral repairs, owing to the complexity of mitral valve geometry and the influence of hemodynamics. We aim to produce a workflow for manufacturing dynamic patient-specific models to simulate the mitral valve for transcatheter repair applications. Methods:: In this paper, we present technology and associated workflow, for using transesophageal echocardiography to generate dynamic physical replicas of patient valves. We validate our workflow using six patient datasets representing patients with unique or particularly challenging pathologies as selected by a cardiologist. The dynamic component of the models and their resultant potential as procedure planning tools is due to a dynamic pulse duplicator that permits the evaluation of the valve models experiencing realistic hemodynamics. Results:: Early results indicate the workflow has excellent anatomical accuracy and the ability to replicate regurgitation pathologies, as shown by colour Doppler ultrasound and anatomical measurements comparing patients and models. Analysis of all measurements successfully resulted in t critical two-tail {\textgreater} t stat and p values {\textgreater} 0.05, thus demonstrating no statistical difference between the patients and models, owing to high fidelity morphological replication. Conclusions:: Due to the combination of a dynamic environment and patient-specific modelling, this workflow demonstrates a promising technology for simulating the complete morphology of mitral valves undergoing transcatheter repairs.}, author = {Ginty, Olivia K. and Moore, John T. and Eskandari, Mehdi and Carnahan, Patrick and Lasso, Andras and Jolley, Matthew A. and Monaghan, Mark and Peters, Terry M.}, doi = {10.1007/s11548-019-01998-y}, issn = {18616429}, journal = {International Journal of Computer Assisted Radiology and Surgery}, keywords = {3D printing,Mitral valve,Mitral valve models,Modelling,Surgical simulation,Transcatheter devices}, number = {7}, pages = {1227--1235}, title = {{Dynamic, patient-specific mitral valve modelling for planning transcatheter repairs}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {14}, year = {2019} } @article{Giuly2013, abstract = {This application note describes a new scalable semi-automatic approach, the Dual Point Decision Process, for segmentation of 3D structures contained in 3D microscopy. The segmentation problem is distributed to many individual workers such that each receives only simple questions regarding whether two points in an image are placed on the same object. A large pool of micro-labor workers available through Amazon's Mechanical Turk system provides the labor in a scalable manner. {\textcopyright} 2013 The Author. Published by Oxford University Press. All rights reserved.}, author = {Giuly, Richard J. and Kim, Keun Young and Ellisman, Mark H.}, doi = {10.1093/bioinformatics/btt154}, issn = {13674803}, journal = {Bioinformatics}, number = {10}, pages = {1359--1360}, title = {{DP2: Distributed 3D image segmentation using micro-labor workforce}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {29}, year = {2013} } @article{Godley2009, abstract = {Available deformable registration methods are often inaccurate over large organ variation encountered, for example, in the rectum and bladder. The authors developed a novel approach to accurately and effectively register large deformations in the prostate region for adaptive radiation therapy. 
A software tool combining a fast symmetric demons algorithm and the use of masks was developed in C++ based on ITK libraries to register CT images acquired at planning and before treatment fractions. The deformation field determined was subsequently used to deform the delivered dose to match the anatomy of the planning CT. The large deformations involved required that the bladder and rectum volume be masked with uniform intensities of -1000 and 1000 HU, respectively, in both the planning and treatment CTs. The tool was tested for five prostate IGRT patients. The average rectum planning to treatment contour overlap improved from 67{\%} to 93{\%}, the lowest initial overlap is 43{\%}. The average bladder overlap improved from 83{\%} to 98{\%}, with a lowest initial overlap of 60{\%}. Registration regions were set to include a volume receiving 4{\%} of the maximum dose. The average region was 320×210×63, taking approximately 9 min to register on a dual 2.8 GHz Linux system. The prostate and seminal vesicles were correctly placed even though they are not masked. The accumulated doses for multiple fractions with large deformation were computed and verified. The tool developed can effectively supply the previously delivered dose for adaptive planning to correct for interfractional changes. {\textcopyright} 2009 American Association of Physicists in Medicine.}, author = {Godley, Andrew and Ahunbay, Ergun and Peng, Cheng and Li, X. Allen}, doi = {10.1118/1.3095777}, issn = {00942405}, journal = {Medical Physics}, keywords = {Deformable registration,cumulative dose,dose deformation,prostate}, number = {4}, pages = {1433--1441}, title = {{Automated registration of large deformations for adaptive radiation therapy of prostate cancer}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-63849158774{\&}doi=10.1118{\%}2F1.3095777{\&}partnerID=40{\&}md5=54c366a7760aa8527e47d27e9d97aaa7}, volume = {36}, year = {2009} } @article{Godley2012, abstract = {Even with daily image guidance based on soft tissue registration, deviations of fractional doses can be quite large due to changes in patient anatomy. It is of interest to ascertain the cumulative effect of these deviations on the total delivered dose. Daily kV CT data acquired using an in-room CT for five prostate cancer patients were analyzed. Each daily CT was deformably registered to the planning CT using an in-house tool. The resulting deformation field was used to map the delivered daily dose onto the planning CT, then summed to obtain the cumulative (total delivered) dose to the patient. The delivered cumulative values of prostate D100 on average were only 2.9{\%} less than their planned values, while the PTV D95 were 3.6{\%} less. The delivered rectum and bladder V70s can be twice what was planned. 
The less than 3{\%} difference between delivered and planned prostate coverage indicates that the PTV margin of 5 mm was sufficient with the soft-tissue-based kV CT guidance for the cases studied.}, author = {Godley, Andrew and Ahunbay, Ergun and Peng, Cheng and {Allen Li}, X.}, doi = {10.1120/jacmp.v13i3.3859}, issn = {15269914}, journal = {Journal of Applied Clinical Medical Physics}, keywords = {Cumulative dose,Deformable image registration,Prostate radiation therapy,Soft-tissue-based registration}, number = {3}, pages = {98--107}, title = {{Accumulating daily-varied dose distributions of prostate radiation therapy with soft-tissue-based KV CT guidance}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {13}, year = {2012} } @article{GomezConde2015, abstract = {We describe a set of new algorithms and a software tool, StabiTissue, for stabilizing in vivo intravital microscopy images that suffer from soft-tissue background movement. Because these images lack predetermined anchors and are dominated by noise, we use a pixel weighted image alignment together with a correction for nonlinear tissue deformations. We call this correction a poor man's diffeomorphic map since it ascertains the nonlinear regions of the image without resorting to a full integral equation method. To determine the quality of the image stabilization, we developed an ensemble sampling method that quantifies the coincidence between image pairs from randomly distributed image regions. We obtain global stabilization alignment through an iterative constrained simulated annealing optimization procedure. To show the accuracy of our algorithm with existing software, we measured the misalignment error rate in datasets taken from two different organs and compared the results to a similar and popular open-source solution. Present open-source stabilization software tools perform poorly because they do not treat the specific needs of the IV-2pM datasets with soft-tissue deformation, speckle noise, full 5D inter- and intra-stack motion error correction, and undefined anchors. In contrast, the results of our tests demonstrate that our method is more immune to noise and provides better performance for datasets possessing nonlinear tissue deformations. As a practical application of our software, we show how our stabilization improves cell tracking, where the presence of background movement would degrade track information. We also provide a qualitative comparison of our software with other open-source libraries/applications. Our software is freely available at the open source repository http://sourceforge.net/projects/stabitissue/.}, author = {G{\'{o}}mez-Conde, Iv{\'{a}}n and Caetano, Susana S. and Tadokoro, Carlos E. and Olivieri, David N.}, doi = {10.1016/j.compbiomed.2015.07.001}, issn = {18790534}, journal = {Computers in Biology and Medicine}, keywords = {Bioimaging,Biomedical image stabilization,Image registration,In vivo two photon microscopy,Lymphocyte tracking,Soft-tissue deformations}, pages = {246--260}, title = {{Stabilizing 3D in vivo intravital microscopy images with an iteratively refined soft-tissue model for immunology experiments}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {64}, year = {2015} } @article{Goncalves2019, abstract = {Molecular dynamics simulations are performed to investigate the nucleation and growth of cavities in a hydrated Nafion membrane under mechanical deformation.
The simulation model used in this study accurately reproduces the experimental values of the elastic modulus of the membrane as a function of water content. The results obtained from triaxial tensile tests reveal a ductile to brittle transition as the water content increases. The nucleation and growth of the cavities have been quantitatively analyzed in terms of the number and size of cavities, illustrating the ductile to brittle transition uncovered by the stress/strain curves. Further local analyses have been carried out to identify the nucleation sites. The analysis of local plasticity indicates that as the water content increases, the membrane accumulates more plastic deformation in the hydrophilic domain than in the hydrophobic domain during the rupture stage of the tensile tests. These results suggest that the water network significantly impacts the nucleation and expansion of cavities induced by mechanical deformation. Furthermore, the local mechanical properties of the Nafion membrane are evaluated. The results show that the mechanical properties are heterogeneous at the nanoscale and that the cavities nucleate in soft regions of the membrane. A statistical analysis of the local water density of nucleation sites indicates that the polymer-water interfaces are more likely to nucleate cavities. The expansion and coalescence of cavities is facilitated by the high molecular reorganization of the water network, which explains the brittle behavior of membranes with high water content.}, author = {Gon{\c{c}}alves, William and Mabuchi, Takuya and Tokumasu, Takashi}, doi = {10.1021/acs.jpcc.9b07101}, issn = {19327455}, journal = {Journal of Physical Chemistry C}, number = {47}, pages = {28958--28968}, title = {{Nucleation and Growth of Cavities in Hydrated Nafion Membranes under Tensile Strain: A Molecular Dynamics Study}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {123}, year = {2019} } @article{Grauer2019, abstract = {Background: There is an increasing interest in local tumor ablative treatment modalities that induce immunogenic cell death and the generation of antitumor immune responses. Methods: We report six recurrent glioblastoma patients who were treated with intracavitary thermotherapy after coating the resection cavity wall with superparamagnetic iron oxide nanoparticles (“NanoPaste” technique). Patients underwent six 1-h hyperthermia sessions in an alternating magnetic field and, if possible, received concurrent fractionated radiotherapy at a dose of 39.6 Gy. Results: There were no major side effects during active treatment. However, after 2–5 months, patients developed increasing clinical symptoms. CT scans showed tumor flare reactions with prominent edema around nanoparticle deposits. Patients were treated with dexamethasone and, if necessary, underwent re-surgery to remove nanoparticles. Histopathology revealed sustained necrosis directly adjacent to aggregated nanoparticles without evidence for tumor activity. Immunohistochemistry showed upregulation of Caspase-3 and heat shock protein 70, prominent infiltration of macrophages with ingested nanoparticles and CD3 + T-cells. Flow cytometric analysis of freshly prepared tumor cell suspensions revealed increased intracellular ratios of IFN-$\gamma$ to IL-4 in CD4 + and CD8 + memory T cells, and activation of tumor-associated myeloid cells and microglia with upregulation of HLA-DR and PD-L1. Two patients had long-lasting treatment responses {\textgreater} 23 months without receiving any further therapy. 
Conclusion: Intracavitary thermotherapy combined with radiotherapy can induce a prominent inflammatory reaction around the resection cavity which might trigger potent antitumor immune responses possibly leading to long-term stabilization of recurrent GBM patients. These results warrant further investigations in a prospective phase-I trial.}, author = {Grauer, Oliver and Jaber, Mohammed and Hess, Katharina and Weckesser, Matthias and Schwindt, Wolfram and Maring, Stephan and W{\"{o}}lfer, Johannes and Stummer, Walter}, doi = {10.1007/s11060-018-03005-x}, issn = {15737373}, journal = {Journal of Neuro-Oncology}, keywords = {Caspase-3,HSP70,PD-L1,Superparamagnetic iron oxide nanoparticles,Thermotherapy}, number = {1}, pages = {83--94}, pmid = {30506500}, title = {{Combined intracavitary thermotherapy with iron oxide nanoparticles and radiotherapy as local treatment modality in recurrent glioblastoma patients}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {141}, year = {2019} } @article{Gribble2019, abstract = {Mueller polarimetry is a quantitative polarized light imaging modality that is capable of label-free visualization of tissue pathology, does not require extensive sample preparation, and is suitable for wide-field tissue analysis. It holds promise for selected applications in biomedicine, but polarimetry systems are often constrained by limited end-user accessibility and/or long-imaging times. In order to address these needs, we designed a multiscale-polarimetry module that easily couples to a commercially available stereo zoom microscope. This paper describes the module design and provides initial polarimetry imaging results from a murine preclinical breast cancer model and human breast cancer samples. The resultant polarimetry module has variable resolution and field of view, is low-cost, and is simple to switch in or out of a commercial microscope. The module can reduce long imaging times by adopting the main imaging approach used in pathology: scanning at low resolution to identify regions of interest, then at high resolution to inspect the regions in detail. Preliminary results show how the system can aid in region of interest identification for pathology, but also highlight that more work is needed to understand how tissue structures of pathological interest appear in Mueller polarimetry images across varying spatial zoom scales.}, author = {Gribble, Adam and Pinkert, Michael A. and Westreich, Jared and Liu, Yuming and Keikhosravi, Adib and Khorasani, Mohammadali and Nofech-Mozes, Sharon and Eliceiri, Kevin W. and Vitkin, Alex}, doi = {10.1007/s13534-019-00116-w}, issn = {2093985X}, journal = {Biomedical Engineering Letters}, keywords = {Label-free imaging,Module,Mueller matrix polarimetry,Multiscale,Pathology,Stereo zoom microscope}, number = {3}, pages = {339--349}, title = {{A multiscale Mueller polarimetry module for a stereo zoom microscope}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {9}, year = {2019} } @article{Griesenauer2017, abstract = {Tissue stiffness interrogation is fundamental in breast cancer diagnosis and treatment. Furthermore, biomechanical models for predicting breast deformations have been created for several breast cancer applications. Within these applications, constitutive mechanical properties must be defined and the accuracy of this estimation directly impacts the overall performance of the model. 
In this study, we present an image-derived computational framework to obtain quantitative, patient specific stiffness properties for application in image-guided breast cancer surgery and interventions. The method uses two MR acquisitions of the breast in different supine gravity-loaded configurations to fit mechanical properties to a biomechanical breast model. A reproducibility assessment of the method was performed in a test-retest study using healthy volunteers and was further characterized in simulation. In five human data sets, the within subject coefficient of variation ranged from 10.7{\%} to 27{\%} and the intraclass correlation coefficient ranged from 0.91-0.944 for assessment of fibroglandular and adipose tissue stiffness. In simulation, fibroglandular content and deformation magnitude were shown to have significant effects on the shape and convexity of the objective function defined by image similarity. These observations provide an important step forward in characterizing the use of nonrigid image registration methodologies in conjunction with biomechanical models to estimate tissue stiffness. In addition, the results suggest that stiffness estimation methods using gravity-induced excitation can reliably and feasibly be implemented in breast cancer surgery/intervention workflows.}, author = {Griesenauer, Rebekah H. and Weis, Jared A. and Arlinghaus, Lori R. and Meszoely, Ingrid M. and Miga, Michael I.}, doi = {10.1088/1361-6560/aa700a}, issn = {13616560}, journal = {Physics in Medicine and Biology}, keywords = {Mechanical properties,biomechanical model,breast cancer,elastography,lumpectomy,magnetic resonance imaging,registration}, number = {12}, pages = {4756--4776}, title = {{Breast tissue stiffness estimation for surgical guidance using gravity-induced excitation}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {62}, year = {2017} } @article{Gurbani2019, abstract = {Glioblastoma has poor prognosis with inevitable local recurrence despite aggressive treatment with surgery and chemoradiation. Radiation therapy (RT) is typically guided by contrast-enhanced T1-weighted magnetic resonance imaging (MRI) for defining the high-dose target and T2-weighted fluid-attenuation inversion recovery MRI for defining the moderate-dose target. There is an urgent need for improved imaging methods to better delineate tumors for focal RT. Spectroscopic MRI (sMRI) is a quantitative imaging technique that enables whole-brain analysis of endogenous metabolite levels, such as the ratio of choline-to-N-acetylaspartate. Previous work has shown that choline-to-N-acetylaspartate ratio accurately identifies tissue with high tumor burden beyond what is seen on standard imaging and can predict regions of metabolic abnormality that are at high risk for recurrence. To facilitate efficient clinical implementation of sMRI for RT planning, we developed the Brain Imaging Collaboration Suite (BrICS; https://brainimaging.emory.edu/brics-demo), a cloud platform that integrates sMRI with standard imaging and enables team members from multiple departments and institutions to work together in delineating RT targets. BrICS is being used in a multisite pilot study to assess feasibility and safety of dose-escalated RT based on metabolic abnormalities in patients with glioblastoma (Clinicaltrials.gov NCT03137888). The workflow of analyzing sMRI volumes and preparing RT plans is described. 
The pipeline achieved rapid turnaround time by enabling team members to perform their delegated tasks independently in BrICS when their clinical schedules allowed. To date, 18 patients have been treated using targets created in BrICS and no severe toxicities have been observed.}, author = {Gurbani, Saumya and Weinberg, Brent and Cooper, Lee and Mellon, Eric and Schreibmann, Eduard and Sheriff, Sulaiman and Maudsley, Andrew and Goryawala, Mohammed and Shu, Hui Kuo and Shim, Hyunsuk}, doi = {10.18383/j.tom.2018.00028}, issn = {2379139X}, journal = {Tomography (Ann Arbor, Mich.)}, keywords = {cloud platform,radiation therapy,spectroscopic MRI}, number = {1}, pages = {184--191}, title = {{The Brain Imaging Collaboration Suite (BrICS): A Cloud Platform for Integrating Whole-Brain Spectroscopic MRI into the Radiation Therapy Planning Workflow}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {5}, year = {2019} } @article{Gustafsson2017, abstract = {Dosimetric errors in a magnetic resonance imaging (MRI) only radiotherapy workflow may be caused by system specific geometric distortion from MRI. The aim of this study was to evaluate the impact on planned dose distribution and delineated structures for prostate patients, originating from this distortion. A method was developed, in which computer tomography (CT) images were distorted using the MRI distortion field. The displacement map for an optimized MRI treatment planning sequence was measured using a dedicated phantom in a 3 T MRI system. To simulate the distortion aspects of a synthetic CT (electron density derived from MR images), the displacement map was applied to CT images, referred to as distorted CT images. A volumetric modulated arc prostate treatment plan was applied to the original CT and the distorted CT, creating a reference and a distorted CT dose distribution. By applying the inverse of the displacement map to the distorted CT dose distribution, a dose distribution in the same geometry as the original CT images was created. For 10 prostate cancer patients, the dose difference between the reference dose distribution and inverse distorted CT dose distribution was analyzed in isodose level bins. The mean magnitude of the geometric distortion was 1.97 mm for the radial distance of 200-250 mm from isocenter. The mean percentage dose differences for all isodose level bins, were 0.02{\%} and the radiotherapy structure mean volume deviations were {\textless}0.2{\%}. The method developed can quantify the dosimetric effects of MRI system specific distortion in a prostate MRI only radiotherapy workflow, separated from dosimetric effects originating from synthetic CT generation. No clinically relevant dose difference or structure deformation was found when 3D distortion correction and high acquisition bandwidth was used. The method could be used for any MRI sequence together with any anatomy of interest.}, author = {Gustafsson, C. and Nordstr{\"{o}}m, F. and Persson, E. and Brynolfsson, J. and Olsson, L. 
E.}, doi = {10.1088/1361-6560/aa5fa2}, issn = {13616560}, journal = {Physics in Medicine and Biology}, keywords = {MRI only,MRI radiotherapy,MRI treatment planning,QA,distortion,synthetic CT}, number = {8}, pages = {2976--2989}, title = {{Assessment of dosimetric impact of system specific geometric distortion in an MRI only based radiotherapy workflow for prostate}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {62}, year = {2017} } @article{Hajdu2006, abstract = {In this paper we present the structure and the achieved results of the R{\&}D project IKTA-4, 6/2001 "MEDIP - Platform independent software system for medical image processing" supported by the Hungarian Ministry of Education. The aim of the project was to develop a software background for our basic and applied research in the field of medical imaging that can be used in clinical routine, as well. Realization was based on the experience of information technology and medical imaging research university teams and a company specialized on software and hardware developing for nuclear medicine. The aims also reflect some former research and development activities of the participants. Thus some of them are well experienced in registration, segmentation and image fusion techniques. These experiences were also considered in the determination of the main purposes. The capabilities of the provided software library were demonstrated through test applications from the fields of orthopedics, oncology and nuclear medicine. {\textcopyright} J.UCS.}, author = {Hajdu, Andr{\'{a}}s and Kormos, J{\'{a}}nos and Lencse, Zsolt and Tr{\'{o}}n, Lajos and Emri, Mikl{\'{o}}s}, doi = {10.3217/jucs-012-09-1229}, issn = {0958695X}, journal = {Journal of Universal Computer Science}, keywords = {Medical image processing,Multimodal image analysis,Surface rendering,Virtual surgery,Visualization,Volume rendering}, number = {9}, pages = {1229--1239}, title = {{The "MEDIP - Platform independent software system for medical image processing" project}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {12}, year = {2006} } @inproceedings{Hamarneh, abstract = {Recent advances in medicine conjecture that certain body fat may have mechanical function in addition to its classical role of energy storage. In particular we aim to analyze if the intra-articular fat pad of Hoffa is merely a space holder or if it changes shape to provide cushioning for the knee bones. Towards this goal, 3D CT images of real knees, as well as a skeletal knee model with fat simulating Hoffa's pad, were acquired in both extension and flexion. Image segmentation was performed to automatically extract the real and simulated fat regions from the extension and flexion images. Utilizing the segmentation results as binary masks, we performed automatic multi-resolution image registration of the fat pad between flexed and extended knee positions. 
The resulting displacement fields from flexion-extension registration are examined and used to calculate local fat volume changes thus providing insight into shape changes that may have a mechanical component.}, author = {Hamarneh, Ghassan and Chu, Vincent and Bordalo-Rodrigues, Marcelo and Schweitzer, Mark}, booktitle = {Medical Imaging 2005: Physiology, Function, and Structure from Medical Images}, doi = {10.1117/12.594803}, issn = {16057422}, pages = {527}, title = {{Deformation analysis of Hoffa's fat pad from CT images of knee flexion and extension}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-23844492895{\&}doi=10.1117{\%}2F12.594803{\&}partnerID=40{\&}md5=5a48120f6f2668def17638f909192147}, volume = {5746}, year = {2005} } @article{Han2018, abstract = {Neuro-navigated procedures require a high degree of geometric accuracy but are subject to geometric error from complex deformation in the deep brain - e.g. regions about the ventricles due to egress of cerebrospinal fluid (CSF) upon neuroendoscopic approach or placement of a ventricular shunt. We report a multi-modality, diffeomorphic, deformable registration method using momentum-based acceleration of the Demons algorithm to solve the transformation relating preoperative MRI and intraoperative CT as a basis for high-precision guidance. The registration method (pMI-Demons) extends the mono-modality, diffeomorphic form of the Demons algorithm to multi-modality registration using pointwise mutual information (pMI) as a similarity metric. The method incorporates a preprocessing step to nonlinearly stretch CT image values and incorporates a momentum-based approach to accelerate convergence. Registration performance was evaluated in phantom and patient images: first, the sensitivity of performance to algorithm parameter selection (including update and displacement field smoothing, histogram stretch, and the momentum term) was analyzed in a phantom study over a range of simulated deformations; and second, the algorithm was applied to registration of MR and CT images for four patients undergoing minimally invasive neurosurgery. Performance was compared to two previously reported methods (free-form deformation using mutual information (MI-FFD) and symmetric normalization using mutual information (MI-SyN)) in terms of target registration error (TRE), Jacobian determinant (J), and runtime. The phantom study identified optimal or nominal settings of algorithm parameters for translation to clinical studies. In the phantom study, the pMI-Demons method achieved comparable registration accuracy to the reference methods and strongly reduced outliers in TRE (p {\textless} 0.001 in Kolmogorov-Smirnov test). Similarly, in the clinical study: median TRE = 1.54 mm (0.83-1.66 mm interquartile range, IQR) for pMI-Demons compared to 1.40 mm (1.02-1.67 mm IQR) for MI-FFD and 1.64 mm (0.90-1.92 mm IQR) for MI-SyN. The pMI-Demons and MI-SyN methods yielded diffeomorphic transformations (J {\textgreater} 0) that preserved topology, whereas MI-FFD yielded unrealistic (J {\textless} 0) deformations subject to tissue folding and tearing. Momentum-based acceleration gave a ∼35{\%} speedup of the pMI-Demons method, providing registration runtime of 10.5 min (reduced to 2.2 min on GPU), compared to 15.5 min for MI-FFD and 34.7 min for MI-SyN.
The pMI-Demons method achieved registration accuracy comparable to MI-FFD and MI-SyN, maintained diffeomorphic transformation similar to MI-SyN, and accelerated runtime in a manner that facilitates translation to image-guided neurosurgery.}, author = {Han, R. and {De Silva}, T. and Ketcha, M. and Uneri, A. and Siewerdsen, J. H.}, doi = {10.1088/1361-6560/aae66c}, issn = {13616560}, journal = {Physics in Medicine and Biology}, keywords = {deformable registration,image registration,neurosurgery navigation}, number = {21}, pages = {18}, pmid = {30353886}, title = {{A momentum-based diffeomorphic demons framework for deformable MR-CT image registration}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {63}, year = {2018} } @article{He2018, abstract = {Exercise preconditioning induces neuroprotective effects during cerebral ischemia and reperfusion, which involves the recovery of cerebral blood flow (CBF). Mechanisms underlying the neuroprotective effects of reestablished CBF following ischemia and reperfusion are unclear. The present study investigated CBF in hyper-early stage of reperfusion by laser speckle contrast imaging, a full-field high-resolution optical imaging technique. Rats with or without treadmill training were subjected to middle cerebral artery occlusion followed by reperfusion. CBF in arteries, veins, and capillaries in hyper-early stage of reperfusion (1, 2, and 3 h after reperfusion) and in subacute stage (24 h after reperfusion) were measured. Neurological scoring and 2,3,5-triphenyltetrazolium chloride staining were further applied to determine the neuroprotective effects of exercise preconditioning. In hyper-early stage of reperfusion, CBF in the rats with exercise preconditioning was reduced significantly in arteries and veins, respectively, compared to rats with no exercise preconditioning. Capillary CBF remained stable in the hyper-early stage of reperfusion, though it increased significantly 24 h after reperfusion in the rats with exercise preconditioning. As a neuroprotective strategy, exercise preconditioning reduced the blood perfusion of arteries and veins in the hyper-early stage of reperfusion, which indicated intervention-induced neuroprotective hypoperfusion after reperfusion onset.}, author = {He, Zhijie and Lu, Hongyang and Yang, Xiaojiao and Zhang, Li and Wu, Yi and Niu, Wenxiu and Ding, Li and Wang, Guili and Tong, Shanbao and Jia, Jie}, doi = {10.1109/TBME.2017.2695229}, issn = {15582531}, journal = {IEEE Transactions on Biomedical Engineering}, keywords = {Cerebral blood flow,Cerebral ischemia,Exercise preconditioning,Laser speckle contrast imaging,Reperfusion injury}, number = {1}, pages = {219--223}, title = {{Hypoperfusion induced by preconditioning treadmill training in hyper-early reperfusion after cerebral ischemia: A laser speckle imaging study}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {65}, year = {2018} } @article{Herrmann2016, abstract = {One of the most important factors to determine the mechanical properties of a fibre composite material is the orientation of the fibres in the matrix. This paper presents Hessian matrix-based algorithms to retrieve the orientation of individual fibres out of steel fibre reinforced cementitious composites samples scanned with an X-ray computed tomography scanner. The software implemented with the algorithms includes a massive data filtering component to remove noise from the data-sets and prepare them correctly for the analysis. 
Due to its short computational times and limited need for user intervention, the software is able to process and analyse large batches of data in short periods and provide results in a variety of visual and numerical formats. The application and comparison of these algorithms lead to further insight into the material behaviour. In contrast to the usual assumption that the fibres act only along their main axis, it is shown that the contribution of hooked-end fibres in other directions may be noticeable. This means that fibres, depending on their shape, should act as orthotropic inclusions. The methods can be used by research laboratories and companies on an everyday basis to obtain fibre orientations from samples, which in turn can be used in research, to study stress–strain behaviour, as input to constitutive models or for quality assurance.}, author = {Herrmann, Heiko and Pastorelli, Emiliano and Kallonen, Aki and Suuronen, Jussi Petteri}, doi = {10.1007/s10853-015-9695-4}, issn = {15734803}, journal = {Journal of Materials Science}, number = {8}, pages = {3772--3783}, title = {{Methods for fibre orientation analysis of X-ray tomography images of steel fibre reinforced concrete (SFRC)}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {51}, year = {2016} } @article{Heyde2013, abstract = {Although real-time 3-D echocardiography has the potential to allow more accurate assessment of global and regional ventricular dynamics compared with more traditional 2-D ultrasound examinations, it still requires rigorous testing and validation should it break through as a standard examination in routine clinical practice. However, only a limited number of studies have validated 3-D strain algorithms in an in vivo experimental setting. The aim of the present study, therefore, was to validate a registration-based strain estimation methodology in an animal model. Volumetric images were acquired in 14 open-chest sheep instrumented with ultrasonic microcrystals. Radial strain ($\epsilon_{RR}$), longitudinal strain ($\epsilon_{LL}$) and circumferential strain ($\epsilon_{CC}$) were estimated during different stages: at rest, during reduced and increased cardiac inotropy induced by esmolol and dobutamine infusion, respectively, and during acute ischemia. Agreement between image-based and microcrystal-based strain estimates was evaluated by their linear correlation, indicating that all strain components could be estimated with acceptable accuracy (r= 0.69 for $\epsilon_{RR}$, r= 0.64 for $\epsilon_{LL}$ and r= 0.62 for $\epsilon_{CC}$). These findings are comparable to the performance of the current state-of-the-art commercial 3-D speckle tracking methods. Furthermore, shape of the strain curves, timing of peak values and location of dysfunctional regions were identified well. Whether 3-D elastic registration performs better than 3-D block matching-based methodologies still remains to be proven. 
{\textcopyright} 2013 World Federation for Ultrasound in Medicine {\&} Biology.}, author = {Heyde, Brecht and Bouchez, Stefaan and Thieren, Sabine and Vandenheuvel, Michael and Jasaityte, Ruta and Barbosa, Daniel and Claus, Piet and Maes, Frederik and Wouters, Patrick and D'hooge, Jan}, doi = {10.1016/j.ultrasmedbio.2013.02.463}, issn = {1879291X}, journal = {Ultrasound in Medicine and Biology}, keywords = {Echocardiography,Elastic registration,Invivo,Sonomicrometry,Strain,Validation}, number = {9}, pages = {1688--1697}, title = {{Elastic image registration to quantify 3-d regional myocardial deformation from volumetric ultrasound: Experimental validation in an animal model}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {39}, year = {2013} } @article{Hilty2019, abstract = {Direct assessment of capillary perfusion has been prioritized in hemodynamic management of critically ill patients in addition to optimizing blood flow on the global scale. Sublingual handheld vital microscopy has enabled online acquisition of moving image sequences of the microcirculation, including the flow of individual red blood cells in the capillary network. However, due to inherent content complexity, manual image sequence analysis remained gold standard, introducing inter-observer variability and precluding real-time image analysis for clinical therapy guidance. Here we introduce an advanced computer vision algorithm for instantaneous analysis and quantification of morphometric and kinetic information related to capillary blood flow in the sublingual microcirculation. We evaluated this technique in a porcine model of septic shock and resuscitation and cardiac surgery patients. This development is of high clinical relevance because it enables implementation of point-of-care goal-directed resuscitation procedures based on correction of microcirculatory perfusion in critically ill and perioperative patients.}, author = {Hilty, Matthias Peter and Guerci, Philippe and Ince, Yasin and Toraman, Fevzi and Ince, Can}, doi = {10.1038/s42003-019-0473-8}, issn = {23993642}, journal = {Communications Biology}, number = {1}, pages = {15}, title = {{MicroTools enables automated quantification of capillary density and red blood cell velocity in handheld vital microscopy}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {2}, year = {2019} } @article{Hilty2017, abstract = {Assessment of the microcirculation is a promising target for the hemodynamic management of critically ill patients. However, just as the sole reliance on macrocirculatory parameters, single static parameters of the microcirculation may not represent a sufficient guide. Our hypothesis was that by serial topical application of acetylcholine (ACH) and nitroglycerin (NG), the sublingual microcirculation can be challenged to determine its endothelial cell-dependent and smooth muscle-dependent physiological reserve capacity. In 41 healthy subjects, sublingual capillary microscopy was performed before and after topical application of ACH and NG. Total vessel density (TVD) was assessed in parallel using manual computer-assisted image analysis as well as a fully automated analysis pathway utilizing a newly developed computer algorithm. Flow velocity was assessed using space-time diagrams of the venules as well as the algorithm-based calculation of an average perfused speed indicator (APSI). No change in all measured parameters was detected after sublingual topical application of ACH. 
Sublingual topical application of NG however led to an increase in TVD, space-time diagram-derived venular flow velocity and APSI. No difference was detected in heart rate, blood pressure, and cardiac output as measured by echocardiography, as well as in plasma nitric oxide metabolite content before and after the topical application of ACH and NG. In healthy subjects, the sublingual microcirculatory physiological reserve can be assessed non-invasively by topical application of nitroglycerin without affecting systemic circulation.}, author = {Hilty, Matthias Peter and Pichler, Jacqueline and Ergin, Bulent and Hefti, Urs and Merz, Tobias Michael and Ince, Can and Maggiorini, Marco}, doi = {10.1186/s40635-017-0139-0}, issn = {2197-425X}, journal = {Intensive Care Medicine Experimental}, number = {1}, pages = {13}, title = {{Assessment of endothelial cell function and physiological microcirculatory reserve by video microscopy using a topical acetylcholine and nitroglycerin challenge}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {5}, year = {2017} } @article{Ho2016, abstract = {Objective Previously we proposed using an interpolated average computed tomography (IACT) method as a low-dose alternate of cine average computed tomography (CACT) for PET attenuation correction (AC). This study aims to evaluate its performance for thoracic lesions with different characteristics in simulations and clinical patients. Materials and methods We used the XCAT phantom to simulate noisy fluorine-18Fluorodeoxyglucose (18F-FDG) distribution with respiratory motion amplitudes of 2 and 3 cm. Average activity and attenuation maps represented static PET and CACT, respectively. IACT was generated by the end-inspiration and end-expiration phases of the attenuation maps (HCT-in and HCT-ex) using a deformable registration method. Spherical lesions with diameters of 10 and 20 mm with four target-to-background ratios (TBRs) were simulated at four different locations individually, including the lower left lung, lower right lung, middle right lung, and upper right lung. Five patients with a total of six thoracic lesions were recruited. They were scanned 1 h after 315-480 MBq 18F-FDG injection. Simulated and clinical PET sinograms were reconstructed with AC using (i) CACT, (ii) IACT, and (iii) helical computed tomography (HCTs). The TBRs and mean standardized uptake value were analyzed. Results Significant artifacts were observed in PET HCTs from visual assessment. For both simulation and clinical study, PET IACT was more similar to PET CACT in terms of TBRs and mean standardized uptake value. The differences between CACT/IACT and HCTs were more significant for lesions located at the lower lungs. Conclusion The IACT is a robust and low-dose AC method for improved thoracic lesion localization and quantitation for a wide range of lesion characteristics.}, author = {Ho, Cobie Y.T. and Wu, Tung Hsin and Mok, Greta S.P.}, doi = {10.1097/MNM.0000000000000435}, issn = {14735628}, journal = {Nuclear Medicine Communications}, keywords = {PET/CT,attenuation correction,respiratory artifacts,simulations,thoracic lesions}, number = {3}, pages = {297--306}, title = {{Interpolated average CT for PET attenuation correction in different lesion characteristics}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {37}, year = {2016} } @article{Ho2018, abstract = {Decompressive craniectomy (DC) is a neurosurgical procedure performed to relieve the intracranial pressure engendered by brain swelling. 
However, no easy and accurate method exists for determining the craniectomy surface area. In this study, we implemented and compared three methods of estimating the craniectomy surface area for evaluating the decompressive effort. We collected 118 sets of preoperative and postoperative brain computed tomography images from patients who underwent craniectomy procedures between April 2009 and April 2011. The surface area associated with each craniectomy was estimated using the marching cube and quasi-Monte Carlo methods. The surface area was also estimated using a simple AC method, in which the area is calculated by multiplying the craniectomy length (A) by its height (C). The estimated surface area ranged from 9.46 to 205.32 cm2, with a median of 134.80 cm2. The root-mean-square deviation (RMSD) between the marching cube and quasi-Monte Carlo methods was 7.53 cm2. Furthermore, the RMSD was 14.45 cm2 between the marching cube and AC methods and 12.70 cm2 between the quasi-Monte Carlo and AC methods. Paired t-tests indicated no statistically significant difference between these methods. The marching cube and quasi-Monte Carlo methods yield similar results. The results calculated using the AC method are also clinically acceptable for estimating the DC surface area. Our results can facilitate additional studies on the association of decompressive effort with the effect of craniectomy.}, author = {Ho, Meng Yin and Tseng, Wei Lung and Xiao, Furen}, doi = {10.1155/2018/5237693}, issn = {16874196}, journal = {International Journal of Biomedical Imaging}, pages = {8}, title = {{Estimation of the Craniectomy Surface Area by Using Postoperative Images}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {2018}, year = {2018} } @article{Hoinkiss2019, abstract = {The sensitivity to subject motion is one of the major challenges in functional MRI (fMRI) studies in which a precise alignment of images from different time points is required to allow reliable quantification of brain activation throughout the scan. Especially the long measurement times and laborious fMRI tasks add to the amount of subject motion found in typical fMRI measurements, even when head restraints are used. In case of moving subjects, prospective motion correction can maintain the relationship between spatial image information and subject anatomy by constantly adapting the image slice positioning to follow the subject in real time. Image-based prospective motion correction is well-established in fMRI studies and typically computes the motion estimates based on a volume-to-volume image registration, resulting in low temporal resolution. This study combines fMRI using simultaneous multislice imaging with multislice-to-volume-based image registration to allow sub-TR motion detection with subsequent real-time adaption of the imaging system. Simultaneous multislice imaging is widely used in fMRI studies and, together with multislice-to-volume-based image registration algorithms, enables computing suitable motion states after only a single readout by registering the simultaneously excited slices to a reference volume acquired at the start of the measurement. The technique is evaluated in three human BOLD fMRI studies (n = 1, 5, and 1) to explore different aspects of the method. It is compared to conventional, volume-to-volume-based prospective motion correction as well as retrospective motion correction methods. 
Results show a strong reduction in retrospectively computed residual motion parameters of up to 50{\%} when comparing the two prospective motion correction techniques. An analysis of temporal signal-to-noise ratio as well as brain activation results shows high consistency between the results before and after additional retrospective motion correction when using the proposed technique, indicating successful prospective motion correction. The comparison of absolute tSNR values does not show an improvement compared to using retrospective motion correction alone. However, the improved temporal resolution may provide improved tSNR in the presence of more exaggerated intra-volume motion.}, author = {Hoinkiss, Daniel Christopher and Erhard, Peter and Breutigam, Nora Josefin and von Samson-Himmelstjerna, Federico and G{\"{u}}nther, Matthias and Porter, David Andrew}, doi = {10.1016/j.neuroimage.2019.06.042}, issn = {10959572}, journal = {NeuroImage}, keywords = {BOLD,EPI,Functional MRI,Image registration,Kalman filter,Multislice-to-volume,Prospective motion correction,Real-time MRI sequences,Simultaneous multislice (SMS)}, pages = {159--173}, title = {{Prospective motion correction in functional MRI using simultaneous multislice imaging and multislice-to-volume image registration}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {200}, year = {2019} } @article{Hoinkiss2017, abstract = {Purpose: Prospective motion correction reduces artifacts in MRI by correcting for subject motion in real time, but techniques are limited for multishot 2-dimensional (2D) sequences. This study addresses this limitation by using 2D echo-planar imaging (EPI) slice navigator acquisitions together with a multislice-to-volume image registration. Methods: The 2D-EPI navigators were integrated into 2D imaging sequences to allow a rapid, real-time motion correction based on the registration of three navigator slices to a reference volume. A dedicated slice-iteration scheme was used to limit mutual spin-saturation effects between navigator and image data. The method was evaluated using T2-weighted spin echo and multishot rapid acquisition with relaxation enhancement (RARE) sequences, and its motion-correction capabilities were compared with those of periodically rotated overlapping parallel lines with enhanced reconstruction (PROPELLER). Validation was performed in vivo using a well-defined motion protocol. Results: Data acquired during subject motion showed residual motion parameters within ±0.5 mm and ±0.5°, and demonstrated a substantial improvement in image quality compared with uncorrected scans. In a comparison to PROPELLER, the proposed technique preserved a higher level of anatomical detail in the presence of subject motion. Conclusions: EPI-navigator-based prospective motion correction using multislice-to-volume image registration can substantially reduce image artifacts, while minimizing spin-saturation effects. The method can be adapted for use in other 2D MRI sequences and promises to improve image quality in routine clinical examinations. Magn Reson Med 78:2127–2135, 2017. 
{\textcopyright} 2017 International Society for Magnetic Resonance in Medicine.}, author = {Hoinkiss, Daniel Christopher and Porter, David Andrew}, doi = {10.1002/mrm.26951}, issn = {15222594}, journal = {Magnetic Resonance in Medicine}, keywords = {EPI navigator,RARE,motion artifacts,mutual information,prospective acquisition correction,real-time feedback}, number = {6}, pages = {2127--2135}, title = {{Prospective motion correction in 2D multishot MRI using EPI navigators and multislice-to-volume image registration}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {78}, year = {2017} } @article{Hsieh2011, abstract = {Background: In recent years, magnetic resonance imaging (MRI) has become important in brain tumor diagnosis. Using this modality, physicians can locate specific pathologies by analyzing differences in tissue character presented in different types of MR images. This paper uses an algorithm integrating fuzzy-c-mean (FCM) and region growing techniques for automated tumor image segmentation from patients with meningioma. Only non-contrasted T1 and T2-weighted MR images are included in the analysis. The study's aims are to correctly locate tumors in the images, and to detect those situated in the midline position of the brain. Methods. The study used non-contrasted T1- and T2-weighted MR images from 29 patients with meningioma. After FCM clustering, 32 groups of images from each patient group were put through the region-growing procedure for pixels aggregation. Later, using knowledge-based information, the system selected tumor-containing images from these groups and merged them into one tumor image. An alternative semi-supervised method was added at this stage for comparison with the automatic method. Finally, the tumor image was optimized by a morphology operator. Results from automatic segmentation were compared to the "ground truth" (GT) on a pixel level. Overall data were then evaluated using a quantified system. Results: The quantified parameters, including the "percent match" (PM) and "correlation ratio" (CR), suggested a high match between GT and the present study's system, as well as a fair level of correspondence. The results were compatible with those from other related studies. The system successfully detected all of the tumors situated at the midline of brain. Six cases failed in the automatic group. One also failed in the semi-supervised alternative. The remaining five cases presented noticeable edema inside the brain. In the 23 successful cases, the PM and CR values in the two groups were highly related. Conclusions: Results indicated that, even when using only two sets of non-contrasted MR images, the system is a reliable and efficient method of brain-tumor detection. With further development the system demonstrates high potential for practical clinical use. {\textcopyright} 2011 Hsieh et al; licensee BioMed Central Ltd.}, author = {Hsieh, Thomas M. and Liu, Yi Min and Liao, Chun Chih and Xiao, Furen and Chiang, I. 
Jen and Wong, Jau Min}, doi = {10.1186/1472-6947-11-54}, issn = {14726947}, journal = {BMC Medical Informatics and Decision Making}, number = {1}, pages = {12}, title = {{Automatic segmentation of meningioma from non-contrasted brain MRI integrating fuzzy clustering and region growing}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {11}, year = {2011} } @article{Huber2017, abstract = {Adult-onset vanishing white-matter disease (VWM) is a rare autosomal recessive disease with neurological symptoms such as ataxia and paraparesis, showing extensive white-matter hyperintensities (WMH) on magnetic resonance (MR) imaging. Besides symptom-specific scores like the International Cooperative Ataxia Rating Scale (ICARS), there is no established tool to monitor disease progression. Because of extensive WMH, visual comparison of MR images is challenging. Here, we report the results of an automated method of segmentation to detect alterations in T2-weighted fluid-attenuated-inversion-recovery (FLAIR) sequences in a one-year follow-up study of a clinically stable patient with genetically diagnosed VWM. Signal alterations in MR imaging were quantified with a recently published WMH segmentation method by means of extreme value distribution (EVD). Our analysis revealed progressive FLAIR alterations of 5.84{\%} in the course of one year, whereas no significant WMH change could be detected in a stable multiple sclerosis (MS) control group. This result demonstrates that automated EVD-based segmentation allows a precise and rapid quantification of extensive FLAIR alterations like in VWM and might be a powerful tool for the clinical and scientific monitoring of degenerative white-matter diseases and potential therapeutic interventions.}, author = {Huber, Thomas and Herwerth, Marina and Alberts, Esther and Kirschke, Jan S. and Zimmer, Claus and Ilg, Ruediger}, doi = {10.1177/1971400916678222}, issn = {19714009}, journal = {Neuroradiology Journal}, keywords = {Extreme value distribution,automated segmentation,vanishing white-matter disease,white-matter hyperintensities}, number = {1}, pages = {5--9}, title = {{Automated segmentation reveals silent radiographic progression in adult-onset vanishing white-matter disease}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {30}, year = {2017} } @article{Huellebrand2020, abstract = {Background and objective: Cardiovascular imaging is an exponentially growing field with aspects ranging from image acquisition and analysis to disease characterization, and evaluation of therapy approaches.The transfer of innovative new technological and algorithmic solutions into clinical practice is still slow. In addition to the verification of solutions, their integration in the clinical processing workflow must be enabled for the assessment of clinical impact and risks. The goal of our software platform for cardiac image processing – CAIPI – is to support researchers from different specialties such as imaging physics, computer science, and medicine by a common extensible platform to address typical challenges and hurdles in interdisciplinary cardiovascular imaging research. It provides an integrated solution for method comparison, integrated analysis, and validation in the clinical context. The interface concept enables a combination with existing frameworks that address specific aspects of the pipeline, such as modeling (e.g., OpenCMISS, CARP) or image reconstruction (Gadgetron). 
Methods: In our platform, we developed a concept for import, integration, and management of cardiac image data. The integration approach considers the spatiotemporal properties of the beating heart through a specific data model. The solution is based on MeVisLab and provides functionalities for data retrieval and storage. Two types of plugins can be added. While ToolPlugins usually provide processing algorithms such as image correction and segmentation, AnalysisPlugins enable interactive data exploration and reporting. GUI integration concepts are presented for both plugin types. We developed domain-specific reporting and visualization tools (e.g., AHA segment model) to enable validation studies by clinical experts. The platform offers plugins for calculating and reporting quantitative parameters such as cardiac function, which can be used to, e.g., evaluate the effect of processing algorithms on clinical parameters. Export functionalities include quantitative measurements to Excel, image data to PACS, and STL models to modeling and simulation tools. Results: To demonstrate the applicability of this concept both for method development and clinical application, we present use cases representing different problems along the innovation chain in cardiac MR imaging. Validation of an image reconstruction method (MRI T1 mapping) Validation of an image correction method for real-time 2D-PC MRI Comparison of quantification methods for blood flow analysis Training and integration of machine learning solutions with expert annotations Clinical studies with new imaging techniques (flow measurements in the carotid arteries and peripheral veins as well as cerebral spinal fluid). Conclusion: The presented platform can be used in interdisciplinary teams, in which engineers or data scientists perform the method validation, followed by clinical research studies in patient collectives. The demonstrated use cases show how it enables the transfer of innovations through validation in the cardiovascular application context.}, author = {Huellebrand, Markus and Messroghli, Daniel and Tautz, Lennart and Kuehne, Titus and Hennemuth, Anja}, doi = {10.1016/j.cmpb.2019.105277}, issn = {18727565}, journal = {Computer Methods and Programs in Biomedicine}, keywords = {Cardiology,Image processing,MRI,Medical image analysis,Segmentation}, pages = {13}, title = {{An extensible software platform for interdisciplinary cardiovascular imaging research}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {184}, year = {2020} } @article{Humbert2016, abstract = {Purpose: Cortical thickness and density are critical components in determining the strength of bony structures. Computed tomography (CT) is one possible modality for analyzing the cortex in 3D. In this paper, a model-based approach for measuring the cortical bone thickness and density from clinical CT images is proposed. Methods: Density variations across the cortex were modeled as a function of the cortical thickness and density, location of the cortex, density of surrounding tissues, and imaging blur. High resolution micro-CT data of cadaver proximal femurs were analyzed to determine a relationship between cortical thickness and density. This thickness-density relationship was used as prior information to be incorporated in the model to obtain accurate measurements of cortical thickness and density from clinical CT volumes. The method was validated using micro-CT scans of 23 cadaver proximal femurs. 
Simulated clinical CT images with different voxel sizes were generated from the micro-CT data. Cortical thickness and density were estimated from the simulated images using the proposed method and compared with measurements obtained using the micro-CT images to evaluate the effect of voxel size on the accuracy of the method. Then, 19 of the 23 specimens were imaged using a clinical CT scanner. Cortical thickness and density were estimated from the clinical CT images using the proposed method and compared with the micro-CT measurements. Finally, a case-control study including 20 patients with osteoporosis and 20 age-matched controls with normal bone density was performed to evaluate the proposed method in a clinical context. Results: Cortical thickness (density) estimation errors were 0.07 ± 0.19 mm (-18 ± 92 mg/cm3) using the simulated clinical CT volumes with the smallest voxel size (0.33 × 0.33 × 0.5 mm3), and 0.10 ± 0.24 mm (-10 ± 115 mg/cm3) using the volumes with the largest voxel size (1.0 × 1.0 × 3.0 mm3). A trend for the cortical thickness and density estimation errors to increase with voxel size was observed and was more pronounced for thin cortices. Using clinical CT data for 19 of the 23 samples, mean errors of 0.18 ± 0.24 mm for the cortical thickness and 15 ± 106 mg/cm3 for the density were found. The case-control study showed that osteoporotic patients had a thinner cortex and a lower cortical density, with average differences of -0.8 mm and -58.6 mg/cm3 at the proximal femur in comparison with age-matched controls (p-value {\textless} 0.001). Conclusions: This method might be a promising approach for the quantification of cortical bone thickness and density using clinical routine imaging techniques. Future work will concentrate on investigating how this approach can improve the estimation of mechanical strength of bony structures, the prevention of fracture, and the management of osteoporosis.}, author = {Humbert, Ludovic and {Hazrati Marangalou}, Javad and {Del R{\'{i}}o Barquero}, Luis Miguel and {Van Lenthe}, G. Harry and {Van Rietbergen}, Bert}, doi = {10.1118/1.4944501}, issn = {00942405}, journal = {Medical Physics}, keywords = {bone mineral density,computed tomography,cortical thickness,hip fracture,osteoporosis}, number = {4}, pages = {1945--1954}, title = {{Technical Note: Cortical thickness and density estimation from clinical CT using a prior thickness-density relationship}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {43}, year = {2016} } @article{Humbert2017, abstract = {The 3D distribution of the cortical and trabecular bone mass in the proximal femur is a critical component in determining fracture resistance that is not taken into account in clinical routine Dual-energy X-ray Absorptiometry (DXA) examination. In this paper, a statistical shape and appearance model together with a 3D-2D registration approach are used to model the femoral shape and bone density distribution in 3D from an anteroposterior DXA projection. A model-based algorithm is subsequently used to segment the cortex and build a 3D map of the cortical thickness and density. Measurements characterising the geometry and density distribution were computed for various regions of interest in both cortical and trabecular compartments. 
Models and measurements provided by the '3D-DXA' software algorithm were evaluated using a database of 157 study subjects, by comparing 3D-DXA analyses (using DXA scanners from three manufacturers) with measurements performed by Quantitative Computed Tomography (QCT). The mean point-to-surface distance between 3D-DXA and QCT femoral shapes was 0.93 mm. The mean absolute error between cortical thickness and density estimates measured by 3D-DXA and QCT was 0.33 mm and 72 mg/cm3. Correlation coefficients (R) between the 3D-DXA and QCT measurements were 0.86, 0.93, and 0.95 for the volumetric bone mineral density at the trabecular, cortical, and integral compartments respectively, and 0.91 for the mean cortical thickness. 3D-DXA provides a detailed analysis of the proximal femur, including a separate assessment of the cortical layer and trabecular macrostructure, which could potentially improve osteoporosis management while maintaining DXA as the standard routine modality.}, author = {Humbert, Ludovic and Martelli, Yves and Fonolla, Roger and Steghofer, Martin and {DI Gregorio}, Silvana and Malouf, Jorge and Romera, Jordi and Barquero, Luis Miguel Del Rio}, doi = {10.1109/TMI.2016.2593346}, issn = {1558254X}, journal = {IEEE Transactions on Medical Imaging}, keywords = {Bone mineral density,DXA,cortical thickness,image registration,osteoporosis,proximal femur}, number = {1}, pages = {27--39}, title = {{3D-DXA: Assessing the Femoral Shape, the Trabecular Macrostructure and the Cortex in 3D from DXA images}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {36}, year = {2017} } @article{Ige2017, abstract = {This work investigated the effects of fibre type, dosage and maximum aggregate size on the mechanical behaviour of concrete reinforced with steel fibres. Hooked-end steel fibres with 50 and 60 mm length and aspect ratios (length/diameter) of 45, 65 and 80 were used with maximum sizes of coarse aggregate of 10 and 20 mm. The same mix proportions of concrete were used throughout the investigation. Flexural testing of 600 mm square panels was performed. Subsequently, cores were taken from these panels and X-ray computed tomography was used to analyse the positioning of fibres in hardened concrete. The experimental results show that the performance of steel fibre-reinforced concrete improved drastically when compared to plain concrete without fibres. Longer, thinner fibres and smaller aggregates were noted to give the best results.}, author = {Ige, Olubisi and Barnett, Stephanie and Chiverton, John and Nassif, Ayman and Williams, John}, doi = {10.1080/17436753.2017.1284389}, issn = {17436761}, journal = {Advances in Applied Ceramics}, keywords = {Concrete,X-ray CT,flexural properties,steel fibres}, number = {4}, pages = {193--198}, title = {{Effects of steel fibre-aggregate interaction on mechanical behaviour of steel fibre reinforced concrete}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {116}, year = {2017} } @article{Iversen2018, abstract = {Purpose: In neurosurgery, reliable information about blood vessel anatomy and flow direction is important to identify, characterize, and avoid damage to the vasculature. Due to ultrasound Doppler angle dependencies and the complexity of the vascular architecture, clinically valuable 3-D flow direction information is currently not available. In this paper, we aim to clinically validate and demonstrate the intraoperative use of a fully automatic method for estimation of 3-D blood flow direction from freehand 2-D Doppler ultrasound. 
Methods: A 3-D vessel model is reconstructed from 2-D Doppler ultrasound and used to determine the vessel architecture. The blood flow direction is then estimated automatically using the model in combination with Doppler velocity data. To enable testing and validation during surgery, the method was implemented as part of the open-source navigation system CustusX (www.custusx.org). Results: Ten patients were included prospectively. Data from four patients were processed postoperatively, and data from six patients were processed intraoperatively. In total, the blood flow direction was estimated for 48 different blood vessels with a success rate of 98{\%}. Conclusions: In this work, we have shown that the proposed method is suitable for fully automatic estimation of the blood flow direction in intracranial vessels during neurosurgical interventions. The method has the potential to make the understanding of the complex vascular anatomy and flow pattern more intuitive for the surgeon. The method is compatible with intraoperative use, and results can be presented within the limited time frame where they still are of clinical interest.}, author = {Iversen, Daniel H{\o}yer and L{\o}vstakken, Lasse and Unsg{\aa}rd, Geirmund and Reinertsen, Ingerid}, doi = {10.1007/s11548-018-1711-0}, issn = {18616429}, journal = {International Journal of Computer Assisted Radiology and Surgery}, keywords = {Blood flow,Intraoperative,Neurosurgery,Ultrasound}, number = {5}, pages = {693--701}, title = {{Automatic intraoperative estimation of blood flow direction during neurosurgical interventions}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {13}, year = {2018} } @inproceedings{Izard, abstract = {The visualization of the radiological results with more advanced techniques than the current ones, such as Augmented Reality and Virtual Reality technologies, represent a great advance for medical professionals, by eliminating their imagination capacity as an indispensable requirement for the understanding of medical images. The problem is that for its application it is necessary to segment the anatomical areas of interest, and this currently involves the intervention of the human being. The Nextmed project is presented as a complete solution that includes DICOM images import, automatic segmentation of certain anatomical structures, 3D mesh generation of the segmented area, visualization engine with Augmented Reality and Virtual Reality, all thanks to different software platforms that have been implemented and detailed, including results obtained from real patients. 
We will focus on the visualization platform using both Augmented and Virtual Reality technologies to allow medical professionals to work with 3d model representation of medical images in a different way taking advantage of new technologies.}, author = {Izard, Santiago Gonz{\'{a}}lez and Plaza, {\'{O}}scar Alonso and Torres, Ramiro S{\'{a}}nchez and M{\'{e}}ndez, Juan Antonio Juanes and Garc{\'{i}}a-Pe{\~{n}}alvo, Francisco Jos{\'{e}}}, booktitle = {ACM International Conference Proceeding Series}, doi = {10.1145/3362789.3362936}, isbn = {9781450371919}, keywords = {Augmented Reality,Automatic Segmentation,Medical Imaging,Virtual Reality}, pages = {459--468}, title = {{NextMed, Augmented and Virtual Reality platform for 3D medical imaging visualization: Explanation of the software platform developed for 3D models visualization related with medical images using Augmented and Virtual Reality technology}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85075443653{\&}doi=10.1145{\%}2F3362789.3362936{\&}partnerID=40{\&}md5=4c4690e277fd6b868cb7be97c738a3ed}, year = {2019} } @incollection{Jackowski2005, abstract = {Automatic image segmentation methods often involve errors, requiring the assistance of the user to correct them. In this paper, a computer-aided design system is introduced for correcting such errors. The proposed system approximates each 3-D region by a parametric surface. Region voxels are first parametrized spherically using a coarse-to-fine subdivision method. By using the voxel positions and their parameter coordinates, control points of a rational Gaussian surface are determined through a least-squares method to approximate the region. Finally, this surface is overlaid with the volumetric image and by locally pulling or pushing it with the mouse while viewing image information, the surface is revised as needed. Typically, a few minutes are sufficient to correct errors in a region. {\textcopyright} Springer-Verlag Berlin Heidelberg 2005.}, author = {Jackowski, Marcel and Goshtasby, Ardeshir}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, doi = {10.1007/11566489_88}, isbn = {3540293264}, issn = {03029743}, pages = {717--724}, title = {{A computer-aided design system for revision of segmentation errors}}, type = {Serial}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-33744812176{\&}doi=10.1007{\%}2F11566489{\_}88{\&}partnerID=40{\&}md5=1a20beb704d367c3bb74ae1934b0a84d}, volume = {3750 LNCS}, year = {2005} } @article{Jakubicek2018, abstract = {{\textcopyright} 2018 Bentham Science Publishers. Background: Robust and accurate segmentation of the spine subdivided into individual vertebrae is necessary for subsequent diagnosis of illnesses related to the spine, particularly those requiring detection and classification of bone lesions. Based on correct vertebra segmentation, the current status of a disease under treatment-as well as its progress-can be determined and followed. Discussion: The problem is complicated by frequent heavy deformations of both the spine axis and individual vertebrae due to illness, so that some vertebrae may differ substantially from expected shapes or even be missing. This overview summarises and discusses so far published methods for spine and vertebrae segmentation in 3D CT thoracic data. 
Conclusion: It suggests a classification of these algorithms based on the used approaches, complexity of algorithms, as well as on achieved efficiencies.}, author = {Jakubicek, Roman and Chmelik, Jiri and Jan, Jiri}, doi = {10.2174/1573405613666170622120228}, issn = {15734056}, journal = {Current Medical Imaging Reviews}, number = {6}, pages = {853--866}, title = {{Vertebrae Segmentation in 3D CT Data: A Review of Methods and Evaluation Approaches}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {14}, year = {2017} } @inproceedings{Johnson, abstract = {Computers have changed the way we live, work, and even recreate. Now, they are transforming how we think about and treat human disease. In particular, advanced techniques in biomedical computing, imaging, and visualization are changing the face of biology and medicine in both research and clinical practice. The goals of biomedical computing, imaging and visualization are multifaceted. While some images and visualizations facilitate diagnosis, others help physicians plan surgery. Biomedical simulations can help to acquire a better understanding of human physiology. Still other biomedical computing and visualization techniques are used for medical training. Within biomedical research, new computational technologies allow us to "see" into and understand our bodies with unprecedented depth and detail. As a result of these advances, biomedical computing and visualization will help produce exciting new biomedical scientific discoveries and clinical treatments. In this paper, we give an overview of the computational science pipeline for an application in neuroscience and present associated research results in medical imaging, modeling, simulation, and visualization. Copyright {\textcopyright} 2006, Australian Computer Society, Inc.}, author = {Johnson, Chris R. and Weinstein, David M.}, booktitle = {Conferences in Research and Practice in Information Technology Series}, doi = {10.1109/pact.2003.1238017}, isbn = {1920682309}, issn = {14451336}, keywords = {Biomedical computing,Imaging,Problem solving environment,Visualization}, pages = {3--10}, title = {{Biomedical computing and visualization}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84868696757{\&}partnerID=40{\&}md5=4571d3f07f7e58d431d9e91206610690}, volume = {48}, year = {2006} } @article{Jones2015, abstract = {Background: In the area of connectomics, there is a significant gap between the time required for data acquisition and dense reconstruction of the neural processes contained in the same dataset. Automatic methods are able to eliminate this timing gap, but the state-of-the-art accuracy so far is insufficient for use without user corrections. If completed naively, this process of correction can be tedious and time consuming. New method: We present a new semi-automatic method that can be used to perform 3D segmentation of neurites in EM image stacks. It utilizes an automatic method that creates a hierarchical structure for recommended merges of superpixels. The user is then guided through each predicted region to quickly identify errors and establish correct links. Results: We tested our method on three datasets with both novice and expert users. Accuracy and timing were compared with published automatic, semi-automatic, and manual results. Comparison with existing methods: Post-automatic correction methods have also been used in Mishchenko et al. (2010) and Haehn et al. (2014). 
These methods do not provide navigation or suggestions in the manner we present. Other semi-automatic methods require user input prior to the automatic segmentation such as Jeong et al. (2009) and Cardona et al. (2010) and are inherently different than our method. Conclusion: Using this method on the three datasets, novice users achieved accuracy exceeding state-of-the-art automatic results, and expert users achieved accuracy on par with full manual labeling but with a 70{\%} time improvement when compared with other examples in publication.}, author = {Jones, Cory and Liu, Ting and Cohan, Nathaniel Wood and Ellisman, Mark and Tasdizen, Tolga}, doi = {10.1016/j.jneumeth.2015.03.005}, issn = {1872678X}, journal = {Journal of Neuroscience Methods}, keywords = {3D segmentation,Connectomics,Electron microscopy,Image segmentation,Neuron reconstruction,Semi-automatic segmentation}, pages = {13--21}, title = {{Efficient semi-automatic 3D segmentation for neuron tracing in electron microscopy images}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {246}, year = {2015} } @article{Joshi2018, abstract = {Histogram-based methods can be used to analyse and transform medical images. Histogram specification is one such method which has been widely used to transform the histograms of cone beam CT (CBCT) images to match those of corresponding CT images. However, when the derived transformation is applied to the CBCT image pixels, significant artefacts can be produced. We propose the iterative peak combination algorithm, a novel and robust method for automatically identifying relevant features in medical image histograms. The procedure is conceptually simple and can be applied equally well to both CT and CBCT image histograms. We also demonstrate how iterative peak combination can be used to transform CBCT images in such as way as to improve the Hounsfield Unit (HU) calibration of CBCT image pixel values, without introducing additional artefacts. We analyse 36 pelvis CBCT images and show that the average difference in fat tissue pixel values between CT images and CBCT images processed using the iterative peak combination algorithm is 23.7 HU. Compared to 136.7 HU in unprocessed CBCT images and 50.9 in CBCT images processed using histogram specification.}, author = {Joshi, K. D. and Marchant, T. E.}, doi = {10.1088/2057-1976/aa929d}, issn = {20571976}, journal = {Biomedical Physics and Engineering Express}, keywords = {cone beam CT,histogram matching,image histogram,image processing}, number = {1}, pages = {10}, title = {{Iterative peak combination: A robust technique for identifying relevant features in medical image histograms}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {4}, year = {2018} } @article{Ju2006, abstract = {Sectioning tissues for optical microscopy often introduces upon the resulting sections distortions that make 3D reconstruction difficult. Here we present an automatic method for producing a smooth 3D volume from distorted 2D sections in the absence of any undistorted references. The method is based on pairwise elastic image warps between successive tissue sections, which can be computed by 2D image registration. Using a Gaussian filter, an average warp is computed for each section from the pairwise warps in a group of its neighboring sections. The average warps deform each section to match its neighboring sections, thus creating a smooth volume where corresponding features on successive sections lie close to each other. 
The proposed method can be used with any existing 2D image registration method for 3D reconstruction. In particular, we present a novel image warping algorithm based on dynamic programming that extends Dynamic Time Warping in 1D speech recognition to compute pairwise warps between high-resolution 2D images. The warping algorithm efficiently computes a restricted class of 2D local deformations that are characteristic between successive tissue sections. Finally, a validation framework is proposed and applied to evaluate the quality of reconstruction using both real sections and a synthetic volume. {\textcopyright} 2006 Elsevier B.V. All rights reserved.}, author = {Ju, Tao and Warren, Joe and Carson, James and Bello, Musodiq and Kakadiaris, Ioannis and Chiu, Wah and Thaller, Christina and Eichele, Gregor}, doi = {10.1016/j.jneumeth.2006.02.020}, issn = {01650270}, journal = {Journal of Neuroscience Methods}, keywords = {3D reconstruction,Dynamic programming,Filtering,Histology,Image warping}, number = {1-2}, pages = {84--100}, title = {{3D volume reconstruction of a mouse brain from histological sections using warp filtering}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-33747181686{\&}doi=10.1016{\%}2Fj.jneumeth.2006.02.020{\&}partnerID=40{\&}md5=a4e90f7b3d3b71adcc36d27a722e9843}, volume = {156}, year = {2006} } @article{Jug2014, abstract = {Modern biological research relies heavily on microscopic imaging. The advanced genetic toolkit of Drosophila makes it possible to label molecular and cellular components with unprecedented level of specificity necessitating the application of the most sophisticated imaging technologies. Imaging in Drosophila spans all scales from single molecules to the entire populations of adult organisms, from electron microscopy to live imaging of developmental processes. As the imaging approaches become more complex and ambitious, there is an increasing need for quantitative, computer-mediated image processing and analysis to make sense of the imagery. Bioimage Informatics is an emerging research field that covers all aspects of biological image analysis from data handling, through processing, to quantitative measurements, analysis and data presentation. Some of the most advanced, large scale projects, combining cutting edge imaging with complex bioimage informatics pipelines, are realized in the Drosophila research community. In this review, we discuss the current research in biological image analysis specifically relevant to the type of systems level image datasets that are uniquely available for the Drosophila model system. We focus on how state-of-the-art computer vision algorithms are impacting the ability of Drosophila researchers to analyze biological systems in space and time. We pay particular attention to how these algorithmic advances from computer science are made usable to practicing biologists through open source platforms and how biologists can themselves participate in their further development. 
{\textcopyright} 2014 The Authors.}, author = {Jug, Florian and Pietzsch, Tobias and Preibisch, Stephan and Tomancak, Pavel}, doi = {10.1016/j.ymeth.2014.04.004}, issn = {10959130}, journal = {Methods}, keywords = {Drosophila,Image analysis,Processing,Registration,Segmentation,Tracking}, number = {1}, pages = {60--73}, title = {{Bioimage Informatics in the context of Drosophila research}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {68}, year = {2014} } @article{Jun2017, abstract = {We propose the use of ensemble classifiers to overcome inter-scanner variations in the differentiation of regional disease patterns in high-resolution computed tomography (HRCT) images of diffuse interstitial lung disease patients obtained from different scanners. A total of 600 rectangular 20 × 20-pixel regions of interest (ROIs) on HRCT images obtained from two different scanners (GE and Siemens) and the whole lung area of 92 HRCT images were classified as one of six regional pulmonary disease patterns by two expert radiologists. Textual and shape features were extracted from each ROI and the whole lung parenchyma. For automatic classification, individual and ensemble classifiers were trained and tested with the ROI dataset. We designed the following three experimental sets: an intra-scanner study in which the training and test sets were from the same scanner, an integrated scanner study in which the data from the two scanners were merged, and an inter-scanner study in which the training and test sets were acquired from different scanners. In the ROI-based classification, the ensemble classifiers showed better (p {\textless} 0.001) accuracy (89.73{\%}, SD = 0.43) than the individual classifiers (88.38{\%}, SD = 0.31) in the integrated scanner test. The ensemble classifiers also showed partial improvements in the intra- and inter-scanner tests. In the whole lung classification experiment, the quantification accuracies of the ensemble classifiers with integrated training (49.57{\%}) were higher (p {\textless} 0.001) than the individual classifiers (48.19{\%}). Furthermore, the ensemble classifiers also showed better performance in both the intra- and inter-scanner experiments. We concluded that the ensemble classifiers provide better performance when using integrated scanner images.}, author = {Jun, Sanghoon and Kim, Namkug and Seo, Joon Beom and Lee, Young Kyung and Lynch, David A.}, doi = {10.1007/s10278-017-9957-6}, issn = {1618727X}, journal = {Journal of Digital Imaging}, keywords = {Ensemble learning,Inter-scanner variation,Interstitial lung disease (ILD),Multi-center trial,Support vector machine (SVM)}, number = {6}, pages = {761--771}, title = {{An Ensemble Method for Classifying Regional Disease Patterns of Diffuse Interstitial Lung Disease Using HRCT Images from Different Vendors}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {30}, year = {2017} } @article{Jung2019, abstract = {BACKGROUND AND PURPOSE: The anterior cingulate cortex (ACC) is involved in several cognitive processes including executive function. Degenerative changes of ACC are consistently seen in Alzheimer's disease (AD). However, volumetric changes specific to the ACC in AD are not clear because of the difficulty in segmenting this region. The objectives of the current study were to develop a precise and high-throughput approach for measuring ACC volumes and to correlate the relationship between ACC volume and cognitive function in AD. 
METHODS: Structural T 1 -weighted magnetic resonance images of AD patients (n = 47) and age-matched controls (n = 47) at baseline and at 24 months were obtained from the Alzheimer's disease neuroimaging initiative (ADNI) database and studied using a custom-designed semiautomated segmentation protocol. RESULTS: ACC volumes obtained using the semiautomated protocol were highly correlated to values obtained from manual segmentation (r =.98) and the semiautomated protocol was considerably faster. When comparing AD and control subjects, no significant differences were observed in baseline ACC volumes or in change in ACC volumes over 24 months using the two segmentation methods. However, a change in ACC volume over 24 months did not correlate with a change in mini-mental state examination scores. CONCLUSIONS: Our results indicate that the proposed semiautomated segmentation protocol is reliable for determining ACC volume in neurodegenerative conditions including AD.}, author = {Jung, Flora and Kazemifar, Samaneh and Bartha, Robert and Rajakumar, Nagalingam}, doi = {10.1111/jon.12598}, issn = {15526569}, journal = {Journal of Neuroimaging}, keywords = {MRI,automated segmentation,brain atrophy,structural biomarker}, number = {3}, pages = {376--382}, title = {{Semiautomated Assessment of the Anterior Cingulate Cortex in Alzheimer's Disease}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {29}, year = {2019} } @article{Kainz2015, abstract = {Image and signal analysis applications are substantial in scientific research. Both open source and commercial packages provide a wide range of functions for image and signal analysis, which are sometimes supported very well by the communities in the corresponding fields. Commercial software packages have the major drawback of being expensive and having undisclosed source code, which hampers extending the functionality if there is no plugin interface or similar option available. However, both variants cannot cover all possible use cases and sometimes custom developments are unavoidable, requiring open source applications. In this paper we describe IQM, a completely free, portable and open source (GNU GPLv3) image and signal analysis application written in pure Java. IQM does not depend on any natively installed libraries and is therefore runnable out-of-the-box. Currently, a continuously growing repertoire of 50 image and 16 signal analysis algorithms is provided. The modular functional architecture based on the three-tier model is described along the most important functionality. Extensibility is achieved using operator plugins, and the development of more complex workflows is provided by a Groovy script interface to the JVM. We demonstrate IQM's image and signal processing capabilities in a proof-of-principle analysis and provide example implementations to illustrate the plugin framework and the scripting interface. IQM integrates with the popular ImageJ image processing software and is aiming at complementing functionality rather than competing with existing open source software. 
Machine learning can be integrated into more complex algorithms via the WEKA software package as well, enabling the development of transparent and robust methods for image and signal analysis.}, author = {Kainz, Philipp and Mayrhofer-Reinhartshuber, Michael and Ahammer, Helmut}, doi = {10.1371/journal.pone.0116329}, issn = {19326203}, journal = {PLoS ONE}, number = {1}, pages = {28}, title = {{IQM: An extensible and portable open source application for image and signal analysis in java}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {10}, year = {2015} } @article{Kankaanpaa2012, abstract = {BioImageXD puts open-source computer science tools for three-dimensional visualization and analysis into the hands of all researchers, through a user-friendly graphical interface tuned to the needs of biologists. BioImageXD has no restrictive licenses or undisclosed algorithms and enables publication of precise, reproducible and modifiable workflows. It allows simple construction of processing pipelines and should enable biologists to perform challenging analyses of complex processes. We demonstrate its performance in a study of integrin clustering in response to selected inhibitors. {\textcopyright} 2012 Nature America, Inc. All rights reserved.}, author = {Kankaanp{\"{a}}{\"{a}}, Pasi and Paavolainen, Lassi and Tiitta, Silja and Karjalainen, Mikko and P{\"{a}}iv{\"{a}}rinne, Joacim and Nieminen, Jonna and Marjom{\"{a}}ki, Varpu and Heino, Jyrki and White, Daniel J.}, doi = {10.1038/nmeth.2047}, issn = {15487091}, journal = {Nature Methods}, number = {7}, pages = {683--689}, title = {{BioImageXD: An open, general-purpose and high-throughput image-processing platform}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {9}, year = {2012} } @article{Kazemifar2014, abstract = {Background: The change in volume of anatomic structures is as a sensitive indicator of Alzheimer disease (AD) progression. Although several methods are available to measure brain volumes, improvements in speed and automation are required. Our objective was to develop a fully automated, fast, and reliable approach to measure change in medial temporal lobe (MTL) volume, including primarily hippocampus. Methods: The MTL volume defined in an atlas image was propagated onto each baseline image and a level set algorithm was applied to refine the shape and smooth the boundary. The MTL of the baseline image was then mapped onto the corresponding follow-up image to measure volume change ($\delta$MTL). Baseline and 24 months 3D T1-weighted images from the Alzheimer Disease Neuroimaging Initiative (ADNI) were randomly selected for 50 normal elderly controls (NECs), 50 subjects with mild cognitive impairment (MCI) and 50 subjects with AD to test the algorithm. The method was compared to the FreeSurfer segmentation tools. Results: The average $\delta$MTL (mean±SEM) was 68±35mm3 in NEC, 187±38mm3 in MCI and 300±34mm3 in the AD group and was significantly different (p{\textless}0.0001) between all three groups. The $\delta$MTL was correlated with cognitive decline. Comparison with existing method(s): Results for the FreeSurfer software were similar but did not detect significant differences between the MCI and AD groups. Conclusion: This novel segmentation approach is fully automated and provides a robust marker of brain atrophy that shows different rates of atrophy over 2 years between NEC, MCI, and AD groups. {\textcopyright} 2014 Elsevier B.V.}, author = {Kazemifar, Samaneh and Drozd, John J. 
and Rajakumar, Nagalingam and Borrie, Michael J. and Bartha, Robert}, doi = {10.1016/j.jneumeth.2014.01.033}, issn = {1872678X}, journal = {Journal of Neuroscience Methods}, keywords = {Alzheimer disease,Hippocampus,MRI,Medial temporal lobe segmentation,Multi-atlas,Shape}, pages = {35--46}, title = {{Automated algorithm to measure changes in medial temporal lobe volume in Alzheimer disease}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {227}, year = {2014} } @article{Kennedy2019, abstract = {Purpose: To demonstrate selection of a small representative subset of images from a pool of images comprising a potential atlas (PA) pelvic CT set to be used for autosegmentation of a separate target image set. The aim is to balance the need for the atlas set to represent anatomical diversity with the need to minimize resources required to create a high quality atlas set (such as multiobserver delineation), while retaining access to additional information available for the PA image set. Methods: Preprocessing was performed for image standardization, followed by image registration. Clustering was used to select the subset that provided the best coverage of a target dataset as measured by postregistration image intensity similarities. Tests for clustering robustness were performed including repeated clustering runs using different starting seeds and clustering repeatedly using 90{\%} of the target dataset chosen randomly. Comparisons of coverage of a target set (comprising 711 pelvic CT images) were made for atlas sets of five images (chosen from a PA set of 39 pelvic CT and MR images) (a) at random (averaged over 50 random atlas selections), (b) based solely on image similarities within the PA set (representing prospective atlas development), (c) based on similarities within the PA set and between the PA and target dataset (representing retrospective atlas development). Comparisons were also made to coverage provided by the entire PA set of 39 images. Results: Exemplar selection was highly robust with exemplar selection results being unaffected by choice of starting seed with very occasional change to one of the exemplar choices when the target set was reduced. Coverage of the target set, as measured by best normalized cross-correlation similarity of target images to any exemplar image, provided by five well-selected atlas images (mean = 0.6497) was more similar to coverage provided by the entire PA set (mean = 0.6658) than randomly chosen atlas subsets (mean = 0.5977). This was true both of the mean values and the shape of the distributions. Retrospective selection of atlases (mean = 0.6497) provided a very small improvement over prospective atlas selection (mean = 0.6431). All differences were significant (P {\textless} 1.0E-10). Conclusions: Selection of a small representative image set from one dataset can be utilized to develop an atlas set for either retrospective or prospective autosegmentation of a different target dataset. The coverage provided by such a judiciously selected subset has the potential to facilitate propagation of numerous retrospectively defined structures, utilizing additional information available with multimodal imaging in the atlas set, without the need to create large atlas image sets.}, author = {Kennedy, Angel and Dowling, Jason and Greer, Peter B. and Holloway, Lois and Jameson, Michael G. 
and Roach, Dale and Ghose, Soumya and Rivest-H{\'{e}}nault, David and Marcello, Marco and Ebert, Martin A.}, doi = {10.1002/mp.13494}, issn = {00942405}, journal = {Medical Physics}, keywords = {autosegmentation,clustering,image registration,image-atlas}, number = {5}, pages = {2243--2250}, title = {{Similarity clustering-based atlas selection for pelvic CT image segmentation}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {46}, year = {2019} } @inproceedings{Khajwaniya, abstract = {The paper proposes a new methodology in order to improve the quality of satellite images. It is demonstrated that it is possible to achieve a better performance than that of Bilateral filter in a variety of noise levels. We have proposed Weiner filter in accordance with SPEA2 algorithm which removes pre-filtering and high noise level: therefore it improves the Peak Signal-to-Noise Ratio (PSNR) and visual quality gets improved and complexities and processing time are reduced. This improved algorithm is extended and used to denoise satellite images. Output results show that the performance has upgraded in comparison with current methods of denoising satellite.}, author = {Khajwaniya, Kuldeep Kumar and Tiwari, Vibha}, booktitle = {Proceedings of 2015 IEEE 9th International Conference on Intelligent Systems and Control, ISCO 2015}, doi = {10.1109/ISCO.2015.7282324}, isbn = {9781479964802}, keywords = {Bilateral Filter,Image denoising,SPEA2 Algorithm,Weiner Filter}, title = {{Satellite image denoising using Weiner filter with SPEA2 algorithm}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84959108119{\&}doi=10.1109{\%}2FISCO.2015.7282324{\&}partnerID=40{\&}md5=3cac197663c657dd57bf87eb47941101}, year = {2015} } @article{Kim2018, abstract = {Purpose: This study aimed to develop a linac-mounted kilovoltage (kV) projection streaming-based tracking method for vertebral targets during spine stereotactic radiation surgery and evaluate the clinical feasibility of the proposed spine tracking method. Methods and materials: Using real-time kV projection streaming within XVI (Elekta XVI), kV–projection-based tracking was applied to the target vertebral bodies. Two-dimensional in-plane patient translation was calculated via an image registration between digitally reconstructed radiographs (DRRs) and kV projections. DRR was generated from the cone beam computed tomography (CBCT) scan, which was obtained immediately before the tracking session. During a tracking session, each kV projection was streamed for an intensity gradient-based image with similar metric-based registration to the offset DRR. The ground truth displacement for each kV beam angle was calculated at the beam isocenter using the 6 degrees-of-freedom transformation that was obtained by a CBCT-CBCT rigid registration. The resulting translation by the DRR-projection registration was compared with the ground truth displacement. The proposed tracking method was evaluated retrospectively and online, using 7 and 5 spine patients, respectively. Results: The accuracy and precision of spine tracking for in-plane patient motion were 0.5 ± 0.2 and 0.2 ± 0.1 mm. The magnitude of patient motion that was estimated using the CBCT-CBCT rigid registration was (0.5 ± 0.4, 0.4 ± 0.3, 0.3 ± 0.3) mm and (0.3 ± 0.4, 0.2 ± 0.2, 0.5 ± 0.6) mm for all tracking sessions. The intrafraction motion was within 2 mm for all CBCT scans considered. 
Conclusions: This study demonstrated that the proposed spine tracking method can track intrafraction motion with sub-millimeter accuracy and precision, and sub-second latency.}, author = {Kim, Jihun and Park, Yang Kyun and Edmunds, David and Oh, Kevin and Sharp, Gregory C. and Winey, Brian}, doi = {10.1016/j.adro.2018.06.002}, issn = {24521094}, journal = {Advances in Radiation Oncology}, number = {4}, pages = {682--692}, title = {{Kilovoltage projection streaming-based tracking application (KiPSTA): First clinical implementation during spine stereotactic radiation surgery}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85050957688{\&}doi=10.1016{\%}2Fj.adro.2018.06.002{\&}partnerID=40{\&}md5=80de6701682b693312a12d441a486b1c}, volume = {3}, year = {2018} } @article{Kiss2017, abstract = {Background: Developmental biology has made great strides in recent years towards the quantification of cellular properties during development. This requires tissues to be imaged and segmented to generate computerised versions that can be easily analysed. In this context, one of the principal technical challenges remains the faithful detection of cellular contours, principally due to variations in image intensity throughout the tissue. Watershed segmentation methods are especially vulnerable to these variations, generating multiple errors due notably to the incorrect detection of the outer surface of the tissue. Results: We use the level set method (LSM) to improve the accuracy of the watershed segmentation in different ways. First, we detect the outer surface of the tissue, reducing the impact of low and variable contrast at the surface during imaging. Second, we demonstrate a new edge function for a level set, based on second order derivatives of the image, to segment individual cells. Finally, we also show that the LSM can be used to segment nuclei within the tissue. Conclusion: The watershed segmentation of the outer cell layer is demonstrably improved when coupled with the LSM-based surface detection step. The tool can also be used to improve watershed segmentation at cell-scale, as well as to segment nuclei within a tissue. The improved segmentation increases the quality of analysis, and the surface detected by our algorithm may be used to calculate local curvature or adapted for other uses, such as mathematical simulations.}, author = {Kiss, Annam{\'{a}}ria and Moreau, Typhaine and Mirabet, Vincent and Calugaru, Cerasela Iliana and Boudaoud, Arezki and Das, Pradeep}, doi = {10.1186/s13007-017-0264-5}, issn = {17464811}, journal = {Plant Methods}, keywords = {3D,Cell,Cellwall,Confocal image,L1,Level set method,Nucleus,Segmentation,Watershed}, number = {1}, pages = {11}, title = {{Segmentation of 3D images of plant tissues at multiple scales using the level set method}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {13}, year = {2017} } @article{Knopf2013, abstract = {Advanced 4D dose calculations (4DDCs) for scanned particle therapy show that in the incidence of motion, it is insufficient to use target contours defined on one reference CT phase. ICRU Report 62 (ICRU 1999 ICRU Report 62 (Bethesda, MD: ICRU)) advises that variations in size, shape and position of CTVs relative to anatomic reference points have to be considered for internal target volumes (ITVs). In addition to geometrical margin adaption, changes of water equivalent path length have to be considered for particle therapy. 
Different ITV concepts have been applied to six representative patients (liver and lung indications) based on 4DCT. Geometrical ITVs (gITV) were calculated by combining deformed CTVs over all motion phases. To take into account path length changes, range adapted ITVs (raITV) were established as the union of range adapted CTVs in all phases. For gated delivery, gat-gITVs and gat-raITVs were calculated. Extensive 4DDCs have been performed for two exemplary patients to illustrate that neither re-scanning nor gating can sufficiently compensate for motion effects if no appropriate margins are employed and to evaluate the effectiveness of gITVs and raITVs. CTVs significantly differ from gITVs and raITVs in size (up to a factor 2 in volume). But also raITVs and gITVs differ significantly in size and are spatially displaced, particularly for lung patients. raITVs show a strong field dependence in shape. All volumes are reduced in size when gating is applied and considered during margin adaption. 4D dose distributions show big improvements when gITV or raITV are used compared to CTVs. However, the use of either gITVs or raITVs do not result in significant differences. If raITVs are used, slightly better target coverage is gained at the cost of more healthy tissue exposure. Our results emphasize that adapted target volumes have to be used for scanned particle therapy in the presence of motion. However, even though gITVs and raITVs differ significantly in shape and size, this difference does not necessarily translate into significant differences in the resultant 4D dose distributions. {\textcopyright} 2013 Institute of Physics and Engineering in Medicine.}, author = {Knopf, Antje Christin and Boye, Dirk and Lomax, Antony and Mori, Shininchiro}, doi = {10.1088/0031-9155/58/17/6079}, issn = {00319155}, journal = {Physics in Medicine and Biology}, number = {17}, pages = {6079--6094}, title = {{Adequate margin definition for scanned particle therapy in the incidence of intrafractional motion}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {58}, year = {2013} } @article{Kopecky2012, abstract = {Three-dimensional (3D) reconstructions of the vertebrate inner ear have provided novel insights into the development of this complex organ. 3D reconstructions enable superior analysis of phenotypic differences between wild type and mutant ears but can result in laborious work when reconstructed from physically sectioned material. Although nondestructive optical sectioning light sheet microscopy may ultimately prove the ideal solution, these technologies are not yet commercially available, or in many instances are not monetarily feasible. Here we introduce a simple technique to image a fluorescently labelled ear at different stages throughout development at high resolution enabling 3D reconstruction of any component of the inner ear using confocal microscopy. We provide a step-by-step manual from tissue preparation to imaging to 3D reconstruction and analysis including a rationale and troubleshooting guide at each step for researchers with different equipment, protocols, and access to resources to successfully incorporate the principles of this method and customize them to their laboratory settings. {\textcopyright} 2012 Royal Microscopical Society.}, author = {Kopecky, B. J. and Duncan, J. S. and Elliott, K. L. 
and Fritzsch, B.}, doi = {10.1111/j.1365-2818.2012.03673.x}, issn = {00222720}, journal = {Journal of Microscopy}, keywords = {Confocal microscopy,Inner ear,Mouse,STSLIM,Segmentation,Three-dimensional reconstruction}, number = {3}, pages = {292--298}, title = {{Three-dimensional reconstructions from optical sections of thick mouse inner ears using confocal microscopy}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {248}, year = {2012} } @article{Korez2015, abstract = {Automated and semi-automated detection and segmentation of spinal and vertebral structures from computed tomography (CT) images is a challenging task due to a relatively high degree of anatomical complexity, presence of unclear boundaries and articulation of vertebrae with each other, as well as due to insufficient image spatial resolution, partial volume effects, presence of image artifacts, intensity variations and low signal-to-noise ratio. In this paper, we describe a novel framework for automated spine and vertebrae detection and segmentation from 3-D CT images. A novel optimization technique based on interpolation theory is applied to detect the location of the whole spine in the 3-D image and, using the obtained location of the whole spine, to further detect the location of individual vertebrae within the spinal column. The obtained vertebra detection results represent a robust and accurate initialization for the subsequent segmentation of individual vertebrae, which is performed by an improved shape-constrained deformable model approach. The framework was evaluated on two publicly available CT spine image databases of 50 lumbar and 170 thoracolumbar vertebrae. Quantitative comparison against corresponding reference vertebra segmentations yielded an overall mean centroid-to-centroid distance of 1.1 mm and Dice coefficient of 83.6{\%} for vertebra detection, and an overall mean symmetric surface distance of 0.3 mm and Dice coefficient of 94.6{\%} for vertebra segmentation. The results indicate that by applying the proposed automated detection and segmentation framework, vertebrae can be successfully detected and accurately segmented in 3-D from CT spine images.}, author = {Korez, Robert and Ibragimov, Bulat and Likar, Bostjan and Pernus, Franjo and Vrtovec, Tomaz}, doi = {10.1109/TMI.2015.2389334}, issn = {1558254X}, journal = {IEEE Transactions on Medical Imaging}, keywords = {Computed tomography,deformable models,image segmentation,interpolation theory,object detection,spine,vertebra}, number = {8}, pages = {1649--1662}, title = {{A Framework for Automated Spine and Vertebrae Interpolation-Based Detection and Model-Based Segmentation}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {34}, year = {2015} } @article{Korfiatis2016, abstract = {Purpose: Imaging biomarker research focuses on discovering relationships between radiological features and histological findings. In glioblastoma patients, methylation of the O6-methylguanine methyltransferase (MGMT) gene promoter is positively correlated with an increased effectiveness of current standard of care. In this paper, the authors investigate texture features as potential imaging biomarkers for capturing the MGMT methylation status of glioblastoma multiforme (GBM) tumors when combined with supervised classification schemes. Methods: A retrospective study of 155 GBM patients with known MGMT methylation status was conducted. 
Co-occurrence and run length texture features were calculated, and both support vector machines (SVMs) and random forest classifiers were used to predict MGMT methylation status. Results: The best classification system (an SVM-based classifier) had a maximum area under the receiver-operating characteristic (ROC) curve of 0.85 (95{\%} CI: 0.780.91) using four texture features (correlation, energy, entropy, and local intensity) originating from the T2-weighted images, yielding at the optimal threshold of the ROC curve, a sensitivity of 0.803 and a specificity of 0.813. Conclusions: Results show that supervised machine learning of MRI texture features can predict MGMT methylation status in preoperative GBM tumors, thus providing a new noninvasive imaging biomarker.}, author = {Korfiatis, Panagiotis and Kline, Timothy L. and Coufalova, Lucie and Lachance, Daniel H. and Parney, Ian F. and Carter, Rickey E. and Buckner, Jan C. and Erickson, Bradley J.}, doi = {10.1118/1.4948668}, issn = {00942405}, journal = {Medical Physics}, keywords = {MGMT,MRI,glioblastoma multiforme,imaging biomarkers,random forest,support vector machines}, number = {6}, pages = {2835--2844}, title = {{MRI texture features as biomarkers to predict MGMT methylation status in glioblastomas}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {43}, year = {2016} } @article{Kortesniemi2012, abstract = {Background: Computed tomography (CT) has become the main contributor of the cumulative radiation exposure in radiology. Information on cumulative exposure history of the patient should be available for efficient management of radiation exposures and for radiological justification. Purpose: To develop and evaluate automatic image registration for organ dose calculation in CT. Material and Methods: Planning radiograph (scout) image data describing CT scan ranges from 15 thoracic CT examinations (9 men and 6 women) and 10 abdominal CT examinations (6 men and 4 women) were co-registered with the reference trunk CT scout image. 2-D affine transformation and normalized correlation metric was used for image registration. Longitudinal (z-axis) scan range coordinates on the reference scout image were converted into slice locations on the CT-Expo anthropomorphic male and female models, following organ and effective dose calculations. Results: The average deviation of z-location of studied patient images from the corresponding location in the reference scout image was 6.2 mm. The ranges of organ and effective doses with constant exposure parameters were from 0 to 28.0 mGy and from 7.3 to 14.5 mSv, respectively. The mean deviation of the doses for fully irradiated organs (inside the scan range), partially irradiated organs and non-irradiated organs (outside the scan range) was 1{\%}, 5{\%}, and 22{\%}, respectively, due to image registration. Conclusion: The automated image processing method to registrate individual chest and abdominal CT scout radiograph with the reference scout radiograph is feasible. It can be used to determine the individual scan range coordinates in z-direction to calculate the organ dose values. 
The presented method could be utilized in automatic organ dose calculation in CT for radiation exposure tracking of the patients.}, author = {Kortesniemi, Mika and Salli, Eero and Seuri, Raija}, doi = {10.1258/ar.2012.110611}, issn = {02841851}, journal = {Acta Radiologica}, keywords = {Computed tomography,Dosimetry,Technical aspects}, number = {8}, pages = {908--913}, title = {{Organ dose calculation in CT based on scout image data and automatic image registration}}, type = {Journal Article}, volume = {53}, year = {2012} } @article{Kostenko2018, abstract = {We propose a combination of an experimental approach and a reconstruction technique that leads to reduction of artefacts in X-ray computer tomography of strongly attenuating objects. Through fully automatic data alignment, data generated in multiple experiments with varying object orientations are combined. Simulations and experiments show that the solutions computed using algebraic methods based on multiple acquisitions can achieve a dramatic improvement in the reconstruction quality, even when each acquisition generates a reduced number of projections. The approach does not require any advanced setup components making it ideal for laboratory-based X-ray tomography.}, author = {Kostenko, Alexander and Andriiashen, Vladyslav and Batenburg, Kees Joost}, doi = {10.1364/oe.26.028982}, issn = {1094-4087}, journal = {Optics Express}, number = {22}, pages = {28982}, title = {{Registration-based multi-orientation tomography}}, type = {Journal Article}, volume = {26}, year = {2018} } @article{Kremer2013, abstract = {Ultrasound assessment of myocardial strain can provide valuable information on regional cardiac function. However, Doppler-based methods often used in practice for strain estimation suffer from angle dependency. In this study, a partial solution to that fundamental limitation is presented. We have previously reported using simulated data sets that spatial compounding of axial velocities obtained at three steering angles can theoretically outperform 2-D speckle tracking for 2-D strain estimation in the mouse heart. In this study, the feasibility of the method was analyzed in vivo using spatial compounding of Doppler velocities on six mice with myocardial infarction and five controls, and results were compared with those of tagged microscopic magnetic resonance imaging ($\mu$MRI). Circumferential estimates quantified by means of both ultrasound and $\mu$MRI could detect regional dysfunction. Between echocardiography and $\mu$MRI, a good regression coefficient was obtained for circumferential strain estimates (r = 0.69), whereas radial strain estimates correlated only moderately (r = 0.37). A second echocardiography was performed after $\mu$MRI to test the reproducibility of the compounding method. This yielded a higher correlation coefficient for the circumferential component than for the radial component (r = 0.74 circumferentially, r = 0.49 radially).
{\textcopyright} 2013 World Federation for Ultrasound in Medicine {\&} Biology.}, author = {Kremer, Florence and Dresselaers, Tom and Heyde, Brecht and Ferferieva, Vesselina and Caluw{\'{e}}, Ellen and Choi, Hon Fai and Claus, Piet and Oosterlinck, Wouter and Janssens, Stefan and Himmelreich, Uwe and D'hooge, Jan}, doi = {10.1016/j.ultrasmedbio.2013.04.004}, issn = {1879291X}, journal = {Ultrasound in Medicine and Biology}, keywords = {2-D myocardial strain,Doppler,Mouse,Spatial compounding}, number = {10}, pages = {1848--1860}, title = {{2-D Strain Assessment in the Mouse Through Spatial Compounding of Myocardial Velocity Data: InVivo Feasibility}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84883465702{\&}doi=10.1016{\%}2Fj.ultrasmedbio.2013.04.004{\&}partnerID=40{\&}md5=7e4e566e8788b13da38aa8c29cc1271b}, volume = {39}, year = {2013} } @inproceedings{Kremer, abstract = {Ultrasound assessment of myocardial strain can give valuable information on regional cardiac function. Speckle tracking is often used for this purpose as it can estimate the 2D myocardial strain tensor. However, in the mouse setting, speckle tracking remains challenging due to the high heart rate and the relatively thin wall compared to the typical size of the speckles. We have previously shown using simulated data sets that spatial compounding of axial velocities obtained at 3 steering angles can outperform 2D speckle tracking for 2D strain estimation in the mouse heart. In this study, beam steering was applied at -20°, 0° and 20° on short axis views of 5 control and 6 infarct mice. The lateral motion component was reconstructed through spatial compounding and results were compared to tagged $\mu$MRI. Circumferential estimates quantified by means of ultrasound and MRI could both detect regional dysfunction. Between echo and MRI, a good regression coefficient was obtained for circumferential strain estimates (r = 0.69), while radial strain estimates correlated only moderately (r = 0.37). {\textcopyright} 2011 IEEE.}, author = {Kremer, Florence and Dresselaers, Tom and Heyde, Brecht and Ferferieva, Vesselina and Caluw{\'{e}}, Ellen and Choi, Hon Fai and Oosterlinck, Wouter and Janssens, Stefan and Himmelreich, Uwe and D'hooge, Jan}, booktitle = {IEEE International Ultrasonics Symposium, IUS}, doi = {10.1109/ULTSYM.2011.0230}, isbn = {9781457712531}, issn = {19485719}, pages = {939--942}, title = {{2D myocardial strain in the mouse through spatial compounding: In-vivo feasibility study}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84869030007{\&}doi=10.1109{\%}2FULTSYM.2011.0230{\&}partnerID=40{\&}md5=785bfbda4e1f62661f6a74a21c318fe6}, year = {2011} } @inproceedings{Kremera, abstract = {In radiotherapy, fixed 2-D X-ray imaging devices have several advantages compared to gantry-mounted systems, such as less geometrical deformations and the possibility to monitor 3-D markers motion in real-time. However, there is a lack of studies concerning the geometry of these systems. For example, in the case of a non-orthogonal geometry, the effect of the angle between the X-ray axes has not been investigated yet. In the first part of this study, the optimal angle was analyzed theoretically. Results showed that 60° between the axes still enables displacements of the order of 0.35 mm to be detected. In a second step, the performance of the registration method for such oblique configuration was evaluated on phantom data sets. 
It was found that using images separated by 60° rather than 90° required more than twice as much the number of iterations to obtain sufficient accuracy (i.e. 0.7 mm and 0.5°). {\textcopyright} 2014 IEEE.}, author = {Kremer, Florence and Giard, Joachim and Sibomana, Merence and Seabra, Jose and {De Xivry}, Jonathan Orban and Labarbe, Rudi and MacQ, Benoit}, booktitle = {2014 1st International Conference on Advanced Technologies for Signal and Image Processing, ATSIP 2014}, doi = {10.1109/ATSIP.2014.6834600}, isbn = {9781479948888}, keywords = {patient positioning,radiotherapy,stereoscopic geometry}, pages = {172--176}, title = {{Feasibility and preliminary validation of 2D/3D image registration using fixed 2-D X-ray devices in image-guided radiotherapy}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84903775128{\&}doi=10.1109{\%}2FATSIP.2014.6834600{\&}partnerID=40{\&}md5=a48a78e38dae1ddafa3b391003173fad}, year = {2014} } @incollection{Kuegler2018, abstract = {Clinical outcome of several Minimally Invasive Surgeries (MIS) heavily depend on the accuracy of intraoperative pose estimation of the surgical instrument from intraoperative x-rays. The estimation consists of finding the tool in a given set of x-rays and extracting the necessary data to recreate the tool's pose for further navigation - resulting in severe consequences of incorrect estimation. Though state-of-the-art MIS literature has exploited image registration as a tool for instrument pose estimation, lack of practical considerations in previous study design render their conclusion ineffective from a clinical standpoint. One major issue of such a study is the lack of Ground Truth in clinical data -as there are no direct ways of measuring the ground truth pose and indirect estimation accumulates error. A systematic way to overcome this problem is to generate Digitally Reconstructed Radiographs (DRR), however, such procedure generates data which are free from measuring errors (e.g. noise, number of projections), resulting claims of registration performance inconclusive. Generalization of registration performance across different instruments with different Degrees of Freedom (DoF) has not been studied as well. By marrying a rigorous study design involving several clinical scenarios with, for example, several optimizers, metrics and others parameters for image registration, this paper bridges this gap effectively. Although the pose estimation error scales inversely with instrument size, we show image registration generalizes well for different instruments and DoF. 
In particular, it is shown that increasing the number of x-ray projections can reduce the pose estimation error significantly across instruments - which might lead to the acquisition of several x-rays for pose estimation in a clinical workflow.}, author = {K{\"{u}}gler, David and Jastrzebski, Martin Andrade and Mukhopadhyay, Anirban}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, doi = {10.1007/978-3-319-92258-4_10}, isbn = {9783319922577}, issn = {16113349}, pages = {105--114}, title = {{Instrument pose estimation using registration for otobasis surgery}}, type = {Serial}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85048674870{\&}doi=10.1007{\%}2F978-3-319-92258-4{\_}10{\&}partnerID=40{\&}md5=e5ef96e3629df844b717127296fb9d5c}, volume = {10883 LNCS}, year = {2018} } @incollection{Kutarnia2012, abstract = {We are developing a low cost ultrasound training system running on a laptop in which the user scans a generic 3D curved surface representing the patient using a 5 DoF sensor. A critical component of this system is the generation of ultrasound training image volumes, which need to cover a complete body region in order to provide a realistic scanning experience. This research attempts to develop stitching techniques to generate large global volumes from smaller overlapping volumes acquired using freehand techniques. {\textcopyright} 2012 The authors and IOS Press. All rights reserved.}, author = {Kutarnia, Jason and Pedersen, Peder C.}, booktitle = {Studies in Health Technology and Informatics}, doi = {10.3233/978-1-61499-022-2-238}, isbn = {9781614990215}, issn = {18798365}, keywords = {Freehand ultrasound,Non-rigid registration}, pages = {238--244}, title = {{Generation of 3D ultrasound training volumes from freehand acquired data}}, type = {Serial}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84860628403{\&}doi=10.3233{\%}2F978-1-61499-022-2-238{\&}partnerID=40{\&}md5=1799c545e84fbed039abc026f951ab6d}, volume = {173}, year = {2012} } @article{Lakhman2017, abstract = {Purpose: To investigate whether qualitative magnetic resonance (MR) features can distinguish leiomyosarcoma (LMS) from atypical leiomyoma (ALM) and assess the feasibility of texture analysis (TA). Methods: This retrospective study included 41 women (ALM = 22, LMS = 19) imaged with MRI prior to surgery. Two readers (R1, R2) evaluated each lesion for qualitative MR features. Associations between MR features and LMS were evaluated with Fisher's exact test. Accuracy measures were calculated for the four most significant features. TA was performed for 24 patients (ALM = 14, LMS = 10) with uniform imaging following lesion segmentation on axial T2-weighted images. Texture features were pre-selected using Wilcoxon signed-rank test with Bonferroni correction and analyzed with unsupervised clustering to separate LMS from ALM. Results: Four qualitative MR features most strongly associated with LMS were nodular borders, haemorrhage, “T2 dark” area(s), and central unenhanced area(s) (p ≤ 0.0001 each feature/reader). The highest sensitivity [1.00 (95{\%}CI:0.82-1.00)/0.95 (95{\%}CI: 0.74-1.00)] and specificity [0.95 (95{\%}CI:0.77-1.00)/1.00 (95{\%}CI:0.85-1.00)] were achieved for R1/R2, respectively, when a lesion had ≥3 of these four features. Sixteen texture features differed significantly between LMS and ALM (p-values: {\textless}0.001-0.036). 
Unsupervised clustering achieved accuracy of 0.75 (sensitivity: 0.70; specificity: 0.79). Conclusions: Combination of ≥3 qualitative MR features accurately distinguished LMS from ALM. TA was feasible. Key Points: • Four qualitative MR features demonstrated the strongest statistical association with LMS. • Combination of ≥3 of these features could accurately differentiate LMS from ALM. • Texture analysis was a feasible semi-automated approach for lesion categorization.}, author = {Lakhman, Yulia and Veeraraghavan, Harini and Chaim, Joshua and Feier, Diana and Goldman, Debra A. and Moskowitz, Chaya S. and Nougaret, Stephanie and Sosa, Ramon E. and Vargas, Hebert Alberto and Soslow, Robert A. and Abu-Rustum, Nadeem R. and Hricak, Hedvig and Sala, Evis}, doi = {10.1007/s00330-016-4623-9}, issn = {14321084}, journal = {European Radiology}, keywords = {Atypical Uterine Leiomyoma,Magnetic Resonance Imaging,Texture Analysis,Uterine Leiomyoma,Uterine Leiomyosarcoma}, number = {7}, pages = {2903--2915}, title = {{Differentiation of Uterine Leiomyosarcoma from Atypical Leiomyoma: Diagnostic Accuracy of Qualitative MR Imaging Features and Feasibility of Texture Analysis}}, type = {Journal Article}, volume = {27}, year = {2017} } @article{Lan2016, abstract = {Purpose: To investigate the incorporation of pretherapy regional ventilation function in predicting radiation fibrosis (RF) in stage III non-small cell lung cancer (NSCLC) patients treated with concurrent thoracic chemoradiotherapy. Methods: Thirty-seven patients with stage III NSCLC were retrospectively studied. Patients received one cycle of cisplatin-gemcitabine, followed by two to three cycles of cisplatin-etoposide concurrently with involved-field thoracic radiotherapy (46--66 Gy; 2 Gy/fraction). Pretherapy regional ventilation images of the lung were derived from 4D computed tomography via a density change-based algorithm with mass correction. In addition to the conventional dose-volume metrics (V20, V30, V40, and mean lung dose), dose-function metrics (fV20, fV30, fV40, and functional mean lung dose) were generated by combining regional ventilation and radiation dose. A new class of metrics was derived and referred to as dose-subvolume metrics (sV20, sV30, sV40, and subvolume mean lung dose); these were defined as the conventional dose-volume metrics computed on the functional lung. Area under the receiver operating characteristic curve (AUC) values and logistic regression analyses were used to evaluate these metrics in predicting hallmark characteristics of RF (lung consolidation, volume loss, and airway dilation). Results: AUC values for the dose-volume metrics in predicting lung consolidation, volume loss, and airway dilation were 0.65--0.69, 0.57--0.70, and 0.69--0.76, respectively. The respective ranges for dose-function metrics were 0.63--0.66, 0.61--0.71, and 0.72--0.80 and for dose-subvolume metrics were 0.50--0.65, 0.65--0.75, and 0.73--0.85. Using an AUC value = 0.70 as a cutoff value suggested that at least one of each type of metrics (dose-volume, dose-function, dose-subvolume) was predictive for volume loss and airway dilation, whereas lung consolidation could not be accurately predicted by any of the metrics. Logistic regression analyses showed that dose-function and dose-subvolume metrics were significant (P values ≤ 0.02) in predicting volume loss and airway dilation.
The likelihood ratio test showed that when combining dose-function and/or dose-subvolume metrics with dose-volume metrics, the achieved improvements of prediction accuracy on volume loss and airway dilation were significant (P values ≤ 0.04). Conclusions: The authors' results demonstrated that the inclusion of regional ventilation function improved accuracy in predicting RF. In particular, dose-subvolume metrics provided a promising method for preventing radiation-induced pulmonary complications.}, author = {Lan, Fujun and Jeudy, Jean and Senan, Suresh and {Van Sornsen De Koste}, J. R. and D'Souza, Warren and Tseng, Huan Hsin and Zhou, Jinghao and Zhang, Hao}, doi = {10.1118/1.4960367}, issn = {00942405}, journal = {Medical Physics}, keywords = {non-small cell lung cancer,radiation fibrosis,regional ventilation function}, number = {9}, pages = {5072--5079}, title = {{Should regional ventilation function be considered during radiation treatment planning to prevent radiation-induced complications?}}, type = {Journal Article}, volume = {43}, year = {2016} } @article{Lang2013, abstract = {Transcatheter aortic valve implantation (TAVI) is a minimally invasive alternative to conventional aortic valve replacement for severe aortic stenosis in high-risk patients in which a stent-based bioprosthetic valve is delivered into the heart via a catheter. TAVI relies largely on single-plane fluoroscopy for intraoperative navigation and guidance, which provides only gross imaging of anatomical structures. Inadequate imaging leading to suboptimal valve positioning contributes to many of the early complications experienced by TAVI patients, including valve embolism, coronary ostia obstruction, paravalvular leak, heart block, and secondary nephrotoxicity from excessive contrast use. Improved visualization can be provided using intraoperative registration of a CT-derived surface to transesophageal echo (TEE) images. In this study, the accuracy and robustness of a surface-based registration method suitable for intraoperative use are evaluated, and the performances of different TEE surface extraction methods are compared. The use of cross-plane TEE contours demonstrated the best accuracy, with registration errors of less than 5 mm. This guidance system uses minimal intraoperative interaction and workflow modification, does not require tool calibration or additional intraoperative hardware, and can be implemented at all cardiac centers at extremely low cost. {\textcopyright} 2013 IEEE.}, author = {Lang, Pencilla and Chu, Michael W.A. and Bainbridge, Dan and Guiraudon, Gerard M. and Jones, Douglas L. and Peters, Terry M.}, doi = {10.1109/TBME.2013.2249582}, issn = {00189294}, journal = {IEEE Transactions on Biomedical Engineering}, keywords = {CT,image registration,image-guided procedure,transcatheter aortic valve replacement,transesophageal echo}, number = {12}, pages = {3382--3390}, title = {{Surface-based CT-TEE registration of the aortic root}}, type = {Journal Article}, volume = {60}, year = {2013} } @article{Langer2011, abstract = {The attractions of virtual computing are many: reduced costs, reduced resources and simplified maintenance. Any one of these would be compelling for a medical imaging professional attempting to support a complex practice on limited resources in an era of ever tightened reimbursement.
In particular, the ability to run multiple operating systems optimized for different tasks (computational image processing on Linux versus office tasks on Microsoft operating systems) on a single physical machine is compelling. However, there are also potential drawbacks. High performance requirements need to be carefully considered if they are to be executed in an environment where the running software has to execute through multiple layers of device drivers before reaching the real disk or network interface. Our lab has attempted to gain insight into the impact of virtualization on performance by benchmarking the following metrics on both physical and virtual platforms: local memory and disk bandwidth, network bandwidth, and integer and floating point performance. The virtual performance metrics are compared to baseline performance on "bare metal." The results are complex, and indeed somewhat surprising. {\textcopyright} Society for Imaging Informatics in Medicine 2010.}, author = {Langer, Steve G. and French, Todd}, doi = {10.1007/s10278-010-9358-6}, issn = {08971889}, journal = {Journal of Digital Imaging}, keywords = {Computer hardware,Computer systems,Computers in medicine}, number = {5}, pages = {883--889}, title = {{Virtual machine performance benchmarking}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {24}, year = {2011} } @article{LaTorre2013, abstract = {In this paper, we present an algorithm to create 3D segmentations of neuronal cells from stacks of previously segmented 2D images. The idea behind this proposal is to provide a general method to reconstruct 3D structures from 2D stacks, regardless of how these 2D stacks have been obtained. The algorithm not only reuses the information obtained in the 2D segmentation, but also attempts to correct some typical mistakes made by the 2D segmentation algorithms (for example, under segmentation of tightly-coupled clusters of cells). We have tested our algorithm in a real scenario-the segmentation of the neuronal nuclei in different layers of the rat cerebral cortex. Several representative images from different layers of the cerebral cortex have been considered and several 2D segmentation algorithms have been compared. Furthermore, the algorithm has also been compared with the traditional 3D Watershed algorithm and the results obtained here show better performance in terms of correctly identified neuronal nuclei. {\textcopyright} 2013 LaTorre, Alonso-Nanclares, Muelas, Pe{\~{n}}a and DeFelipe.}, author = {Latorre, Antonio and Alonso-Nanclares, Lidia and Muelas, Santiago and Pe{\~{n}}a, Jos{\'{e}} Mar{\'{i}}a and Defelipe, Javier}, doi = {10.3389/fnana.2013.00049}, issn = {16625129}, journal = {Frontiers in Neuroanatomy}, keywords = {3D reconstruction,Automatic segmentation,Cerebral cortex,Image processing,Neuron}, number = {DEC}, pages = {10}, title = {{3D segmentations of neuronal nuclei from confocal microscope image stacks}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {7}, year = {2013} } @article{LePoul2014, abstract = {Genetic dominance in polymorphic loci may respond to selection; however, the evolution of dominance in complex traits remains a puzzle. We analyse dominance at a wing-patterning supergene controlling local mimicry polymorphism in the butterfly Heliconius numata. Supergene alleles are associated with chromosomal inversion polymorphism, defining ancestral versus derived alleles. 
Using controlled crosses and the new procedure, Colour Pattern Modelling, allowing whole-wing pattern comparisons, we estimate dominance coefficients between alleles. Here we show strict dominance in sympatry favouring mimicry and inconsistent dominance throughout the wing between alleles from distant populations. Furthermore, dominance among derived alleles is uncoordinated across wing-pattern elements, producing mosaic heterozygous patterns determined by a hierarchy in colour expression. By contrast, heterozygotes with an ancestral allele show complete, coordinated dominance of the derived allele, independently of colours. Therefore, distinct dominance mechanisms have evolved in association with supergene inversions, in response to strong selection on mimicry polymorphism.}, author = {{Le Poul}, Yann and Whibley, Annabel and Chouteau, Mathieu and Prunier, Florence and Llaurens, Violaine and Joron, Mathieu}, doi = {10.1038/ncomms6644}, issn = {20411723}, journal = {Nature Communications}, pages = {8}, title = {{Evolution of dominance mechanisms at a butterfly mimicry supergene}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {5}, year = {2014} } @article{Lebre2019, abstract = {Background: Proper segmentation of the liver from medical images is critical for computer-assisted diagnosis, therapy and surgical planning. Knowledge of its vascular structure allows division of the liver into eight functionally independent segments, each with its own vascular inflow, known as the Couinaud scheme. Couinaud's description is the most widely used classification, since it is well-suited for surgery and accurate for the localization of lesions. However, automatic segmentation of the liver and its vascular structure to construct the Couinaud scheme remains a challenging task. Methods: We present a complete framework to obtain Couinaud's classification in three main steps; first, we propose a model-based liver segmentation, then a vascular segmentation based on a skeleton process, and finally, the construction of the eight independent liver segments. Our algorithms are automatic and allow 3D visualizations. Results: We validate these algorithms on various databases with different imaging modalities (Magnetic Resonance Imaging (MRI) and Computed Tomography (CT)). Experimental results are presented on diseased livers, which pose complex challenges because both the overall organ shape and the vessels can be severely deformed. A mean DICE score of 0.915 is obtained for the liver segmentation, and an average accuracy of 0.98 for the vascular network. Finally, we present an evaluation of our method for performing the Couinaud segmentation thanks to medical reports with promising results. Conclusions: We were able to automatically reconstruct 3-D volumes of the liver and its vessels on MRI and CT scans. 
Our goal is to develop an improved method to help radiologists with tumor localization.}, author = {Lebre, Marie Ange and Vacavant, Antoine and Grand-Brochier, Manuel and Rositi, Hugo and Abergel, Armand and Chabrot, Pascal and Magnin, Beno{\^{i}}t}, doi = {10.1016/j.compbiomed.2019.04.014}, issn = {18790534}, journal = {Computers in Biology and Medicine}, keywords = {CT and MRI volumes,Couinaud,Liver segmentation,Medical imaging,Vessel segmentation}, pages = {42--51}, title = {{Automatic segmentation methods for liver and hepatic vessels from CT and MRI volumes, applied to the Couinaud scheme}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {110}, year = {2019} } @article{Lebre2019a, abstract = {Developing methods to segment the liver in medical images, study and analyze it remains a significant challenge. The shape of the liver can vary considerably from one patient to another, and adjacent organs are visualized in medical images with similar intensities, making the boundaries of the liver ambiguous. Consequently, automatic or semi-automatic segmentation of liver is a difficult task. Moreover, scanning systems and magnetic resonance imaging have different settings and parameters. Thus the images obtained differ from one machine to another. In this article, we propose an automatic model-based segmentation that allows building a faithful 3-D representation of the liver, with a mean Dice value equal to 90.3{\%} on CT and MRI datasets. We compare our algorithm with a semi-automatic method and with other approaches according to the state of the art. Our method works with different data sources, we use a large quantity of CT and MRI images from machines in various hospitals and multiple DICOM images available from public challenges. Finally, for evaluation of liver segmentation approaches in state of the art, robustness is not adequacy addressed with a precise definition. Another originality of this article is the introduction of a novel measure of robustness, which takes into account the liver variability at different scales.}, author = {Lebre, Marie Ange and Vacavant, Antoine and Grand-Brochier, Manuel and Rositi, Hugo and Strand, Robin and Rosier, Hubert and Abergel, Armand and Chabrot, Pascal and Magnin, Beno{\^{i}}t}, doi = {10.1016/j.compmedimag.2019.05.003}, issn = {18790771}, journal = {Computerized Medical Imaging and Graphics}, keywords = {3-D,Automatic segmentation,CT,Liver,MRI,Robustness,Shape model,Variability}, pages = {11}, title = {{A robust multi-variability model based liver segmentation algorithm for CT-scan and MRI modalities}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {76}, year = {2019} } @article{Lee2017, abstract = {Background Since there is no way to measure electric current generated by transcranial direct current stimulation (tDCS) inside the human head through in vivo experiments, numerical analysis based on the finite element method has been widely used to estimate the electric field inside the head. In 2013, we released a MATLAB toolbox named COMETS, which has been used by a number of groups and has helped researchers to gain insight into the electric field distribution during stimulation. The aim of this study was to develop an advanced MATLAB toolbox, named COMETS2, for the numerical analysis of the electric field generated by tDCS. New method COMETS2 can generate any sizes of rectangular pad electrodes on any positions on the scalp surface. 
To reduce the large computational burden when repeatedly testing multiple electrode locations and sizes, a new technique to decompose the global stiffness matrix was proposed. Results As examples of potential applications, we observed the effects of sizes and displacements of electrodes on the results of electric field analysis. The proposed mesh decomposition method significantly enhanced the overall computational efficiency. Comparison with existing methods We implemented an automatic electrode modeler for the first time, and proposed a new technique to enhance the computational efficiency. Conclusions In this paper, an efficient toolbox for tDCS analysis is introduced (freely available at http://www.cometstool.com). It is expected that COMETS2 will be a useful toolbox for researchers who want to benefit from the numerical analysis of electric fields generated by tDCS.}, author = {Lee, Chany and Jung, Young Jin and Lee, Sang Jun and Im, Chang Hwan}, doi = {10.1016/j.jneumeth.2016.12.008}, issn = {1872678X}, journal = {Journal of Neuroscience Methods}, keywords = {Electrostatic field,Finite element method (FEM),MATLAB toolbox,Neuromodulation,Transcranial direct current stimulation (tDCS)}, pages = {56--62}, title = {{COMETS2: An advanced MATLAB toolbox for the numerical analysis of electric fields generated by transcranial direct current stimulation}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {277}, year = {2017} } @article{Lin2017, abstract = {BACKGROUND AND PURPOSE: Clinical brain MR imaging registration algorithms are often made available by commercial vendors without figures of merit. The purpose of this study was to suggest a rational performance comparison methodology for these products. MATERIALS AND METHODS: Twenty patients were imaged on clinical 3T scanners by using 4 sequences: T2-weighted, FLAIR, susceptibility-weighted angiography, and T1 postcontrast. Fiducial landmark sites (n = 1175) were specified throughout these image volumes to define identical anatomic locations across sequences. Multiple registration algorithms were applied by using the T2 sequence as a fixed reference. Euclidean error was calculated before and after each registration and compared with a criterion standard landmark registration. The Euclidean effectiveness ratio is the fraction of Euclidean error remaining after registration, and the statistical effectiveness ratio is similar, but accounts for dispersion and noise. RESULTS: Before registration, error values for FLAIR susceptibility-weighted angiography, and T1 postcontrast were 2.07±0.55 mm, 2.63±0.62 mm, and 3.65±2.00 mm, respectively. Postregistration, the best error values for FLAIR, susceptibility-weighted angiography, and T1 postcontrast were 1.55±0.46 mm, 1.34±0.23 mm, and 1.06±0.16 mm, with Euclidean effectiveness ratio values of 0.493,0.181, and 0.096 and statistical effectiveness ratio values of 0.573, 0.352, and 0.929 for rigid mutual information, affine mutual information, and a commercial GE registration, respectively. CONCLUSIONS: We demonstrate a method for comparing the performance of registration algorithms and suggest the Euclidean error, Euclidean effectiveness ratio, and statistical effectiveness ratio as performance metrics for clinical registration algorithms. These figures of merit allow registration algorithms to be rationally compared.}, author = {Lin, J. S. and Fuentes, D. T. and Chandler, A. and Prabhu, S. S. and Weinberg, J. S. and Baladandayuthapani, V. and Hazle, J. D. 
and Schellingerhout, D.}, doi = {10.3174/ajnr.A5122}, issn = {1936959X}, journal = {American Journal of Neuroradiology}, number = {5}, pages = {973--980}, title = {{Performance assessment for brain MR imaging registration methods}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {38}, year = {2017} } @article{Liu2017, abstract = {{\textcopyright} 2017 Optical Society of America. Multimodality molecular imaging emerges as a powerful strategy for correlating multimodal information. We developed a pentamodal imaging system which can perform positron emission tomography, bioluminescence tomography, fluorescence molecular tomography, Cerenkov luminescence tomography and X-ray computed tomography successively. Performance of sub-systems corresponding to different modalities were characterized. In vivo multimodal imaging of an orthotopic hepatocellular carcinoma xenograft mouse model was performed, and acquired multimodal images were fused. The feasibility of pentamodal tomographic imaging system was successfully validated with the imaging application on the mouse model. The ability of integrating anatomical, metabolic, and pharmacokinetic information promises applications of multimodality molecular imaging in precise medicine.}, author = {Liu, Muhan and Guo, Hongbo and Liu, Hongbo and Zhang, Zeyu and Chi, Chongwei and Hui, Hui and Dong, Di and Hu, Zhenhua and Tian, Jie}, doi = {10.1364/boe.8.001356}, issn = {2156-7085}, journal = {Biomedical Optics Express}, number = {3}, pages = {1356}, title = {{In vivo pentamodal tomographic imaging for small animals}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {8}, year = {2017} } @article{Liu2015, abstract = {The last two decades have witnessed the explosive growth in the development and use of noninvasive neuroimaging technologies that advance the research on human brain under normal and pathological conditions. Multimodal neuroimaging has become a major driver of current neuroimaging research due to the recognition of the clinical benefits of multimodal data, and the better access to hybrid devices. Multimodal neuroimaging computing is very challenging, and requires sophisticated computing to address the variations in spatiotemporal resolution and merge the biophysical/biochemical information. We review the current workflows and methods for multimodal neuroimaging computing, and also demonstrate how to conduct research using the established neuroimaging computing packages and platforms.}, author = {Liu, Sidong and Cai, Weidong and Liu, Siqi and Zhang, Fan and Fulham, Michael and Feng, Dagan and Pujol, Sonia and Kikinis, Ron}, doi = {10.1007/s40708-015-0020-4}, issn = {21984026}, journal = {Brain Informatics}, keywords = {Medical image computing,Multimodal,Neuroimaging}, number = {3}, pages = {181--195}, title = {{Multimodal neuroimaging computing: the workflows, methods, and platforms}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84977927257{\&}doi=10.1007{\%}2Fs40708-015-0020-4{\&}partnerID=40{\&}md5=5b2c05f1154939f3d1a0412038a423e1}, volume = {2}, year = {2015} } @article{Liu2014, abstract = {The study of neural circuit reconstruction, i.e., connectomics, is a challenging problem in neuroscience. Automated and semi-automated electron microscopy (EM) image analysis can be tremendously helpful for connectomics research. In this paper, we propose a fully automatic approach for intra-section segmentation and inter-section reconstruction of neurons using EM images. 
A hierarchical merge tree structure is built to represent multiple region hypotheses and supervised classification techniques are used to evaluate their potentials, based on which we resolve the merge tree with consistency constraints to acquire final intra-section segmentation. Then, we use a supervised learning based linking procedure for the inter-section neuron reconstruction. Also, we develop a semi-automatic method that utilizes the intermediate outputs of our automatic algorithm and achieves intra-segmentation with minimal user intervention. The experimental results show that our automatic method can achieve close-to-human intra-segmentation accuracy and state-of-the-art inter-section reconstruction accuracy. We also show that our semi-automatic method can further improve the intra-segmentation accuracy. {\textcopyright} 2014 Elsevier B.V.}, author = {Liu, Ting and Jones, Cory and Seyedhosseini, Mojtaba and Tasdizen, Tolga}, doi = {10.1016/j.jneumeth.2014.01.022}, issn = {01650270}, journal = {Journal of Neuroscience Methods}, keywords = {Electron microscopy,Hierarchical segmentation,Image segmentation,Neuron reconstruction,Semi-automatic segmentation}, pages = {88--102}, title = {{A modular hierarchical approach to 3D electron microscopy image segmentation}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {226}, year = {2014} } @article{Liu2014a, abstract = {Personalized tumor growth model is valuable in tumor staging and therapy planning. In this paper, we present a patient specific tumor growth model based on longitudinal multimodal imaging data including dual-phase CT and FDG-PET. The proposed Reaction-Advection-Diffusion model is capable of integrating cancerous cell proliferation, infiltration, metabolic rate and extracellular matrix biomechanical response. To bridge the model with multimodal imaging data, we introduce Intracellular Volume Fraction (ICVF) measured from dual-phase CT and Standardized Uptake Value (SUV) measured from FDG-PET into the model. The patient specific model parameters are estimated by fitting the model to the observation, which leads to an inverse problem formalized as a coupled Partial Differential Equations (PDE)-constrained optimization problem. The optimality system is derived and solved by the Finite Difference Method. The model was evaluated by comparing the predicted tumors with the observed tumors in terms of average surface distance (ASD), root mean square difference (RMSD) of the ICVF map, average ICVF difference (AICVFD) of tumor surface and tumor relative volume difference (RVD) on six patients with pathologically confirmed pancreatic neuroendocrine tumors. The ASD between the predicted tumor and the reference tumor was 2.4 ± 0.5. mm, the RMSD was 4.3 ± 0.4{\%}, the AICVFD was 2.6 ± 0.6{\%}, and the RVD was 7.7 ± 1.3{\%}. {\textcopyright} 2014.}, author = {Liu, Yixun and Sadowski, Samira M. and Weisbrod, Allison B. and Kebebew, Electron and Summers, Ronald M. and Yao, Jianhua}, doi = {10.1016/j.media.2014.02.005}, issn = {13618423}, journal = {Medical Image Analysis}, keywords = {Intracellular volume fraction,Metabolic rate,Multimodal images,Tumor growth modeling}, number = {3}, pages = {555--566}, title = {{Patient specific tumor growth prediction using multimodal images}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {18}, year = {2014} } @article{Lok2017, abstract = {BACKGROUND: Brain tumor segmentation is a challenging task for its variation in intensity. 
The phenomenon is caused by the inhomogeneous content of tumor tissue and the choice of imaging modality. In 2010 Zhang developed the Selective Binary Gaussian Filtering Regularizing Level Set (SBGFRLS) model that combined the merits of edge-based and region-based segmentation. OBJECTIVE: To improve the SBGFRLS method by modifying the signed pressure force (SPF) term with multiple image information and to demonstrate the effectiveness of the proposed method on clinical images. METHODS: In the original SBGFRLS model, the contour evolution direction mainly depends on the SPF. By introducing a directional term in the SPF, the metric could control the evolution direction. The SPF is altered by statistic values enclosed by the contour. This concept can be extended to jointly incorporate multiple image information. The new SPF term is expected to bring a solution for the blurred-edge problem in brain tumor segmentation. The proposed method is validated with clinical images including pre- and post-contrast magnetic resonance images. The accuracy and robustness are compared with sensitivity, specificity, DICE similarity coefficient and Jaccard similarity index. RESULTS: Experimental results show improvement, in particular the increase of sensitivity at the same specificity, in segmenting all types of tumors except for the diffused tumor. CONCLUSION: The novel brain tumor segmentation method is clinically oriented with a fast, robust and accurate implementation and minimal user interaction. The method effectively segmented homogeneously enhanced, non-enhanced, heterogeneously-enhanced, and ring-enhanced tumors under MR imaging. Though the method is limited in identifying edema and diffuse tumor, several possible solutions are suggested to turn the curve evolution into a fully functional clinical diagnosis tool.}, author = {Lok, Ka Hei and Shi, Lin and Zhu, Xianlun and Wang, Defeng}, doi = {10.3233/XST-17261}, issn = {10959114}, journal = {Journal of X-ray science and technology}, keywords = {Brain tumor segmentation,Selective Binary Gaussian Filtering Regularizing Level Set,Signed Pressure Force (SPF),evaluation of tumor segmentation accuracy}, number = {2}, pages = {301--312}, title = {{Fast and robust brain tumor segmentation using level set method with multiple image information}}, type = {Journal Article}, volume = {25}, year = {2017} } @incollection{Loyek2011, abstract = {Life science research aims at understanding the relationships in genomics, proteomics and metabolomics on all levels of biological self organization, dealing with data of increasing dimension and complexity. Bioimages represent a new data domain in this context, gaining growing attention since it closes important gaps left by the established molecular techniques. We present a new, web-based strategy that allows a new way of collaborative bioimage interpretation through knowledge integration. We show how this can be supported by combining data mining algorithms running on powerful compute servers and a next generation rich internet application (RIA) front-end offering database/project management and high-level tools for exploratory data analysis and annotation. We demonstrate our system BioIMAX using a bioimage dataset from High-Content Screening experiments to study bacterial infection in cell cultures.
{\textcopyright} 2011 Springer-Verlag.}, author = {Loyek, Christian and K{\"{o}}lling, Jan and Langenk{\"{a}}mper, Daniel and Niehaus, Karsten and Nattkemper, Tim W.}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, doi = {10.1007/978-3-642-24800-9_25}, isbn = {9783642247996}, issn = {03029743}, keywords = {Bioimage Informatics,Data Mining,Exploratory Data Analysis,High-content screening,Information Visualization,Life Science,Rich Internet Application,Semantic Annotation,Web2.0}, pages = {258--269}, title = {{A Web2.0 strategy for the collaborative analysis of complex bioimages}}, type = {Serial}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-80455129929{\&}doi=10.1007{\%}2F978-3-642-24800-9{\_}25{\&}partnerID=40{\&}md5=14d944bb2916a977fa416b20b12387e3}, volume = {7014 LNCS}, year = {2011} } @article{Loyek2011a, abstract = {Background: Innovations in biological and biomedical imaging produce complex high-content and multivariate image data. For decision-making and generation of hypotheses, scientists need novel information technology tools that enable them to visually explore and analyze the data and to discuss and communicate results or findings with collaborating experts from various places.Results: In this paper, we present a novel Web2.0 approach, BioIMAX, for the collaborative exploration and analysis of multivariate image data by combining the webs collaboration and distribution architecture with the interface interactivity and computation power of desktop applications, recently called rich internet application.Conclusions: BioIMAX allows scientists to discuss and share data or results with collaborating experts and to visualize, annotate, and explore multivariate image data within one web-based platform from any location via a standard web browser requiring only a username and a password. BioIMAX can be accessed at http://ani.cebitec.uni-bielefeld.de/BioIMAX with the username "test" and the password "test1" for testing purposes. {\textcopyright} 2011 Loyek et al; licensee BioMed Central Ltd.}, author = {Loyek, Christian and Rajpoot, Nasir M. and Khan, Michael and Nattkemper, Tim W.}, doi = {10.1186/1471-2105-12-297}, issn = {14712105}, journal = {BMC Bioinformatics}, pages = {11}, title = {{BioIMAX: A Web 2.0 approach for easy exploratory and collaborative access to multivariate bioimage data}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {12}, year = {2011} } @article{Lucas2010, abstract = {Non-invasive neuroimaging techniques enable extraordinarily sensitive and specific in vivo study of the structure, functional response and connectivity of biological mechanisms. With these advanced methods comes a heavy reliance on computer-based processing, analysis and interpretation. While the neuroimaging community has produced many excellent academic and commercial tool packages, new tools are often required to interpret new modalities and paradigms. Developing custom tools and ensuring interoperability with existing tools is a significant hurdle. To address these limitations, we present a new framework for algorithm development, that implicitly ensures tool interoperability, generates graphical user interfaces, provides advanced batch processing tools, and, most importantly, requires minimal additional programming or computational overhead. 
Java-based rapid prototyping with this system is an efficient and practical approach to evaluate new algorithms since the proposed system ensures that rapidly constructed prototypes are actually fully-functional processing modules with support for multiple GUIs, a broad range of file formats, and distributed computation. Herein, we demonstrate MRI image processing with the proposed system for cortical surface extraction in large cross-sectional cohorts, provide a system for fully automated diffusion tensor image analysis, and illustrate how the system can be used as a simulation framework for the development of a new image analysis method. The system is released as open source under the Lesser GNU Public License (LGPL) through the Neuroimaging Informatics Tools and Resources Clearinghouse (NITRC). {\textcopyright} Springer Science+Business Media, LLC 2010.}, author = {Lucas, Blake C. and Bogovic, John A. and Carass, Aaron and Bazin, Pierre Louis and Prince, Jerry L. and Pham, Dzung L. and Landman, Bennett A.}, doi = {10.1007/s12021-009-9061-2}, issn = {15392791}, journal = {Neuroinformatics}, keywords = {Image processing,MRI,Parallel processing,Pipeline,Rapid prototyping}, number = {1}, pages = {5--17}, title = {{The Java Image Science Toolkit (JIST) for rapid prototyping and publishing of neuroimaging software}}, type = {Journal Article}, volume = {8}, year = {2010} } @article{Ma2019, abstract = {Iodine-125 seed brachytherapy has great potential in the treatment of malignant tumors. However, the success of this treatment is highly dependent on the ability to accurately position the coplanar template. The aim of this study was to develop an auto-positioning system for the template with a design focus on efficiency and accuracy. In this study, an auto-positioning system was presented, which was composed of a treatment planning system (TPS) and a robot-assisted system. The TPS was developed as a control system for the robot-assisted system. Then, the robot-assisted system was driven by the output of the TPS to position the template. Contrast experiments for error validation were carried out in a computed tomography environment to compare with the traditional positioning method (TPM). Animal experiments on Sprague–Dawley rats were also carried out to evaluate the auto-positioning system. The error validation experiments and animal experiments with this auto-positioning system were successfully carried out with improved efficiency and accuracy. The error validation experiments achieved a positioning error of 1.04 ± 0.19 mm and a positioning time of 23.15 ± 2.52 min, demonstrating a great improvement compared with the TPM (2.55 ± 0.21 mm and 40.35 ± 2.99 min, respectively). The animal experiments demonstrated that the mean deviation of the seed position was 0.75 mm. The dose-volume histogram of the preoperative planning was the same as that of the postoperative dosimetry validation.
A novel auto-positioning system driven by preoperative planning was established, which exhibited higher efficiency and accuracy compared with the TPM.}, author = {Ma, Xiaodong and Yang, Zhiyong and Jiang, Shan and Zhang, Guobin and Chai, Shude}, doi = {10.1002/acm2.12591}, issn = {15269914}, journal = {Journal of Applied Clinical Medical Physics}, keywords = {auto-positioning,iodine-125 seed brachytherapy,preoperative planning,treatment planning system}, number = {6}, pages = {23--30}, title = {{A novel auto-positioning method in Iodine-125 seed brachytherapy driven by preoperative planning}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {20}, year = {2019} } @article{Ma2019a, abstract = {Purpose: An ideal dose distribution in a target is the ultimate goal of preoperative dose planning. Furthermore, avoiding vital organs or tissues such as blood vessels or bones during the puncture procedure is significant in lowdose-rate brachytherapy. The aim of this work is to develop a hybrid inverse optimization method based on non-coplanar needles to assist the physician during conformal dose planning, which cannot be properly achieved with a traditional coplanar template. Material and methods: The hybrid inverse optimization technique include two novel technologies: an inverse optimization algorithm and a dose volume histogram evaluation method. Brachytherapy treatment planning system was designed as an experimental platform. Left lung adenocarcinoma case was used to test the performance of the method in non-coplanar and coplanar needles, and malignant tumor of spine case was involved to test the practical application of this technique. In addition, the optimization time of every test was also recorded. Results: The proposed method can achieve an ideal dose distribution, avoiding vital organs (bones). In the first experiment, 13 non-coplanar needles and 24 seeds were used to get an ideal dose distribution to cover the target, whereas 11 coplanar needles and 23 seeds were used to cover the same target. In the second experiment, the new method used 22 non-coplanar needles and 65 seeds to cover the target, while 63 seeds and 22 needles were used in the actual operation. In addition, the computation time of the hybrid inverse optimization method was 20.5 seconds in the tumor of 94.67 cm3 by using 22 needles, which was fast enough for clinical application. Conclusions: The hybrid inverse optimization method achieved high conformity in the clinical practice. The non-coplanar needle can help to achieve a better dose distribution than the coplanar needle.}, author = {Ma, Xiaodong and Yang, Zhiyong and Jiang, Shan and Zhang, Guobin and Huo, Bin and Chai, Shude}, doi = {10.5114/jcb.2019.86167}, issn = {20812841}, journal = {Journal of Contemporary Brachytherapy}, keywords = {Brachytherapy,Dose volume histogram,Inverse optimization,Non-coplanar needle}, number = {3}, pages = {267--279}, title = {{Hybrid optimization based on non-coplanar needles for brachytherapy dose planning}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {11}, year = {2019} } @inbook{Macia2012, abstract = {Accurate detection and extraction of 3D vascular structures is a crucial step for many medical image applications that require vascular analysis. Vessel tracking algorithms iteratively follow vascular branches point by point, obtaining geometric descriptors, such as centerlines and sections of branches, that describe patient-specific vasculature. 
In order to obtain these descriptors, most approaches use specialized scaled vascular feature detectors. However, these detectors may fail due to the presence of nearby spurious structures, incorrect scale or parameter choice or other undesired effects, obtaining incorrect local sections which may lead to unrecoverable errors during the tracking procedure. We propose to combine this approach with an evolutionary optimization framework that use specific modified vascular detectors as cost functions in order to obtain accurate vascular sections when the direct detection approach fails. We demonstrate the validity of this new approach with experiments using real datasets. We also show that, for a family of medialness functions, the procedure can be performed at fixed small scales which is computationally efficient for local kernel-based estimators. {\textcopyright} 2012 Springer-Verlag.}, address = {Berlin}, author = {Mac{\'{i}}a, Iv{\'{a}}n and Gra{\~{n}}a, Manuel}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, doi = {10.1007/978-3-642-28931-6_48}, editor = {Corchado, E and Snasel, V and Abraham, A and Wozniak, M and Grana, M and Cho, S B}, isbn = {9783642289309}, issn = {03029743}, keywords = {Evolutionary Optimization,Feature Detectors,Medialness,Medical Image Analysis,Section Estimator,Vascular Analysis,Vascular Tracking,Vesselness,Vessels}, number = {PART 2}, pages = {503--513}, publisher = {Springer-Verlag Berlin}, series = {Lecture Notes in Computer Science}, title = {{Vascular section estimation in medical images using combined feature detection and evolutionary optimization}}, type = {Book Section}, url = {{\%}3CGo to}, volume = {7209 LNAI}, year = {2012} } @incollection{Macia2012a, abstract = {Accurate detection and extraction of 3D vascular structures is a crucial step for many medical image applications that require vascular analysis. Vessel tracking algorithms iteratively follow vascular branches point by point, obtaining geometric descriptors, such as centerlines and sections of branches, that describe patient-specific vasculature. In order to obtain these descriptors, most approaches use specialized scaled vascular feature detectors. However, these detectors may fail due to the presence of nearby spurious structures, incorrect scale or parameter choice or other undesired effects, obtaining incorrect local sections which may lead to unrecoverable errors during the tracking procedure. We propose to combine this approach with an evolutionary optimization framework that use specific modified vascular detectors as cost functions in order to obtain accurate vascular sections when the direct detection approach fails. We demonstrate the validity of this new approach with experiments using real datasets. We also show that, for a family of medialness functions, the procedure can be performed at fixed small scales which is computationally efficient for local kernel-based estimators. 
{\textcopyright} 2012 Springer-Verlag.}, author = {Mac{\'{i}}a, Iv{\'{a}}n and Gra{\~{n}}a, Manuel}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, doi = {10.1007/978-3-642-28931-6_48}, isbn = {9783642289309}, issn = {03029743}, keywords = {Evolutionary Optimization,Feature Detectors,Medialness,Medical Image Analysis,Section Estimator,Vascular Analysis,Vascular Tracking,Vesselness,Vessels}, number = {PART 2}, pages = {503--513}, title = {{Vascular section estimation in medical images using combined feature detection and evolutionary optimization}}, type = {Serial}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84858807369{\&}doi=10.1007{\%}2F978-3-642-28931-6{\_}48{\&}partnerID=40{\&}md5=1574b66250aeb6a43bb8a54db03d91e3}, volume = {7209 LNAI}, year = {2012} } @inbook{Macia2010, abstract = {We propose the development of a knowledge representation model in the area of Blood Vessel analysis, whose need we feel for the future development of the field and for our own research efforts. It will allow easy reuse of software pieces through appropriate abstractions, facilitating the development of innovative methods, procedures and applications. In this paper we present some key ideas that will be fully developed elsewhere. {\textcopyright} Springer-Verlag 2010.}, address = {Berlin}, author = {Mac{\'{i}}a, I. and Gra{\~{n}}a, M. and Paloc, C.}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, doi = {10.1007/978-3-642-15384-6_9}, editor = {Setchi, R and Jordanov, I and Howlett, R J and Jain, L C}, isbn = {3642153836}, issn = {03029743}, number = {PART 4}, pages = {80--87}, publisher = {Springer-Verlag Berlin}, series = {Lecture Notes in Artificial Intelligence}, title = {{Towards a proposal for a vessel knowledge representation model}}, type = {Book Section}, url = {{\%}3CGo to}, volume = {6279 LNAI}, year = {2010} } @article{Macia2012b, abstract = {We have detected the lack of a widely accepted knowledge representation model in the area of Blood Vessel analysis. We find that such a tool is needed for the future development of the field and our own research efforts. It will allow easy reuse of software pieces through appropriate abstractions, facilitating the development of innovative methods, procedures and applications. We include a thorough review of vascular morphology image analysis. After the identification of the key representation elements and operations, we propose a Vessel Knowledge Representation (VKR) model that would fill this gap. We give insights into its implementation based on standard Object-Oriented Programming tools and paradigms. The VKR would easily integrate with existing medical imaging and visualization software platforms, such as the Insight ToolKit (ITK) and Visualization Toolkit (VTK). 
{\textcopyright} 2011 Springer-Verlag London Limited.}, author = {Mac{\'{i}}a, Iv{\'{a}}n and Gra{\~{n}}a, Manuel and Paloc, Celine}, doi = {10.1007/s10115-010-0377-x}, issn = {02191377}, journal = {Knowledge and Information Systems}, keywords = {Knowledge representation,Medical image,Vessel analysis}, number = {2}, pages = {457--491}, title = {{Knowledge management in image-based analysis of blood vessel structures}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {30}, year = {2012} } @incollection{Macia2010a, abstract = {We propose the development of a knowledge representation model in the area of Blood Vessel analysis, whose need we feel for the future development of the field and for our own research efforts. It will allow easy reuse of software pieces through appropriate abstractions, facilitating the development of innovative methods, procedures and applications. In this paper we present some key ideas that will be fully developed elsewhere. {\textcopyright} Springer-Verlag 2010.}, author = {Mac{\'{i}}a, I. and Gra{\~{n}}a, M. and Paloc, C.}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, doi = {10.1007/978-3-642-15384-6_9}, isbn = {3642153836}, issn = {03029743}, number = {PART 4}, pages = {80--87}, title = {{Towards a proposal for a vessel knowledge representation model}}, type = {Serial}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-78649239965{\&}doi=10.1007{\%}2F978-3-642-15384-6{\_}9{\&}partnerID=40{\&}md5=0484a83cf98f28846128f6c2d221cffd}, volume = {6279 LNAI}, year = {2010} } @article{Maga2017, abstract = {Laboratory mice are staples for evo/devo and genetics studies. Inbred strains provide a uniform genetic background to manipulate and understand gene–environment interactions, while their crosses have been instrumental in studies of genetic architecture, integration and modularity, and mapping of complex biological traits. Recently, there have been multiple large-scale studies of laboratory mice to further our understanding of the developmental basis, evolution, and genetic control of shape variation in the craniofacial skeleton (i.e. skull and mandible). These experiments typically use micro-computed tomography (micro-CT) to capture the craniofacial phenotype in 3D and rely on manually annotated anatomical landmarks to conduct statistical shape analysis. Although the common choice for imaging modality and phenotyping provides the potential for collaborative research for even larger studies with more statistical power, the investigator (or lab-specific) nature of the data collection hampers these efforts. Investigators are rightly concerned that subtle differences in how anatomical landmarks were recorded will create systematic bias between studies that will eventually influence scientific findings. Even if researchers are willing to repeat landmark annotation on a combined dataset, different lab practices and software choices may create obstacles for standardization beyond the underlying imaging data. Here, we propose a freely available analysis system that could assist in the standardization of micro-CT studies in the mouse. Our proposal uses best practices developed in biomedical imaging and takes advantage of existing open-source software and imaging formats. Our first contribution is the creation of a synthetic template for the adult mouse craniofacial skeleton from 25 inbred strains and five F1 crosses that are widely used in biological research. 
The template contains a fully segmented cranium, left and right hemi-mandibles, endocranial space, and the first few cervical vertebrae. We have been using this template in our lab to segment and isolate cranial structures in an automated fashion from a mixed population of mice, including craniofacial mutants, aged 4–12.5 weeks. As a secondary contribution, we demonstrate an application of nearly automated shape analysis, using symmetric diffeomorphic image registration. This approach, which we call diGPA, closely approximates the popular generalized Procrustes analysis (GPA) but negates the collection of anatomical landmarks. We achieve our goals by using the open-source advanced normalization tools (ANTs) image quantification library, as well as its associated R library (ANTsR) for statistical image analysis. Finally, we make a plea to investigators to commit to using open imaging standards and software in their labs to the extent possible to increase the potential for data exchange and improve the reproducibility of findings. Future work will incorporate more anatomical detail (such as individual cranial bones, turbinals, dentition, middle ear ossicles) and more diversity into the template.}, author = {Maga, A. Murat and Tustison, Nicholas J. and Avants, Brian B.}, doi = {10.1111/joa.12645}, issn = {14697580}, journal = {Journal of Anatomy}, keywords = {geometric morphometrics,image processing,image-based shape analysis,landmarking,microCT,segmentation}, number = {3}, pages = {433--443}, title = {{A population level atlas of Mus musculus craniofacial skeleton and automated image-based shape analysis}}, type = {Journal Article}, volume = {231}, year = {2017} } @inproceedings{Magnain, abstract = {Optical coherence tomography visualizes the structure of the human brain, from the cortical laminar structure to the individual neurons.}, author = {Magnain, Caroline and Augustinack, Jean C. and Konukoglu, Ender and Boas, David and Fischl, Bruce}, booktitle = {Optics and the Brain, BRAIN 2015}, doi = {10.1364/brain.2015.brt4b.5}, isbn = {9781557529541}, title = {{Visualization of the cytoarchitecture of Ex vivo human brain by optical coherence tomography}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84930804493{\&}partnerID=40{\&}md5=a7e7e7e3b1f380d82e3ea7c3f1960a06}, year = {2015} } @article{Magnain2015, abstract = {The cytoarchitecture of the human brain is of great interest in diverse fields: neuroanatomy, neurology, neuroscience, and neuropathology. Traditional histology is a method that has been historically used to assess cell and fiber content in the ex vivo human brain. However, this technique suffers from significant distortions. We used a previously demonstrated optical coherence microscopy technique to image individual neurons in several square millimeters of en-face tissue blocks from layer II of the human entorhinal cortex, over 50 $\mu$m in depth. The same slices were then sectioned and stained for Nissl substance. We registered the optical coherence tomography (OCT) images with the corresponding Nissl stained slices using a nonlinear transformation. The neurons were then segmented in both images and we quantified the overlap. We show that OCT images contain information about neurons that is comparable to what can be obtained from Nissl staining, and thus can be used to assess the cytoarchitecture of the ex vivo human brain with minimal distortion.
With the future integration of a vibratome into the OCT imaging rig, this technique can be scaled up to obtain undistorted volumetric data of centimeter cube tissue blocks in the near term, and entire human hemispheres in the future.}, author = {Magnain, Caroline and Augustinack, Jean C. and Konukoglu, Ender and Frosch, Matthew P. and Sakad{\v{z}}ic, Sava and Varjabedian, Ani and Garcia, Nathalie and Wedeen, Van J. and Boas, David A. and Fischl, Bruce}, doi = {10.1117/1.nph.2.1.015004}, issn = {2329-423X}, journal = {Neurophotonics}, number = {1}, pages = {015004}, title = {{Optical coherence tomography visualizes neurons in human entorhinal cortex}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {2}, year = {2015} } @article{Magnain2019, abstract = {Optical coherence tomography is an optical technique that uses backscattered light to highlight intrinsic structure, and when applied to brain tissue, it can resolve cortical layers and fiber bundles. Optical coherence microscopy (OCM) is higher resolution (i.e., 1.25 µm) and is capable of detecting neurons. In a previous report, we compared the correspondence of OCM acquired imaging of neurons with traditional Nissl stained histology in entorhinal cortex layer II. In the current method-oriented study, we aimed to determine the colocalization success rate between OCM and Nissl in other brain cortical areas with different laminar arrangements and cell packing density. We focused on two additional cortical areas: medial prefrontal, pre-genual Brodmann area (BA) 32 and lateral temporal BA 21. We present the data as colocalization matrices and as quantitative percentages. The overall average colocalization in OCM compared to Nissl was 67{\%} for BA 32 (47{\%} for Nissl colocalization) and 60{\%} for BA 21 (52{\%} for Nissl colocalization), but with a large variability across cases and layers. One source of variability and confounds could be ascribed to an obscuring effect from large and dense intracortical fiber bundles. Other technical challenges, including obstacles inherent to human brain tissue, are discussed. Despite limitations, OCM is a promising semi-high throughput tool for demonstrating detail at the neuronal level, and, with further development, has distinct potential for the automatic acquisition of large databases as are required for the human brain.}, author = {Magnain, Caroline and Augustinack, Jean C. and Tirrell, Lee and Fogarty, Morgan and Frosch, Matthew P. and Boas, David and Fischl, Bruce and Rockland, Kathleen S.}, doi = {10.1007/s00429-018-1777-z}, issn = {18632661}, journal = {Brain Structure and Function}, keywords = {Human brain,Isocortex,Limbic,Neuron,Optical imaging,Tissue,Validation}, number = {1}, pages = {351--362}, title = {{Colocalization of neurons in optical coherence microscopy and Nissl-stained histology in Brodmann's area 32 and area 21}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {224}, year = {2019} } @article{Maier2013, abstract = {Purpose: In the community of x-ray imaging, there is a multitude of tools and applications that are used in scientific practice. Many of these tools are proprietary and can only be used within a certain lab. Often the same algorithm is implemented multiple times by different groups in order to enable comparison. 
In an effort to tackle this problem, the authors created CONRAD, a software framework that provides many of the tools that are required to simulate basic processes in x-ray imaging and perform image reconstruction with consideration of nonlinear physical effects. Methods: CONRAD is a Java-based state-of-the-art software platform with extensive documentation. It is based on platform-independent technologies. Special libraries offer access to hardware acceleration such as OpenCL. There is an easy-to-use interface for parallel processing. The software package includes different simulation tools that are able to generate up to 4D projection and volume data and respective vector motion fields. Well known reconstruction algorithms such as FBP, DBP, and ART are included. All algorithms in the package are referenced to a scientific source. Results: A total of 13 different phantoms and 30 processing steps have already been integrated into the platform at the time of writing. The platform comprises 74.000 nonblank lines of code out of which 19{\%} are used for documentation. The software package is available for download at http://conrad.stanford.edu. To demonstrate the use of the package, the authors reconstructed images from two different scanners, a table top system and a clinical C-arm system. Runtimes were evaluated using the RabbitCT platform and demonstrate state-of-the-art runtimes with 2.5 s for the 256 problem size and 12.4 s for the 512 problem size. Conclusions: As a common software framework, CONRAD enables the medical physics community to share algorithms and develop new ideas. In particular this offers new opportunities for scientific collaboration and quantitative performance comparison between the methods of different groups. {\textcopyright} 2013 American Association of Physicists in Medicine.}, author = {Maier, Andreas and Hofmann, Hannes G. and Berger, Martin and Fischer, Peter and Schwemmer, Chris and Wu, Haibo and M{\"{u}}ller, Kerstin and Hornegger, Joachim and Choi, Jang Hwan and Riess, Christian and Keil, Andreas and Fahrig, Rebecca}, doi = {10.1118/1.4824926}, issn = {00942405}, journal = {Medical Physics}, keywords = {C-arm computed tomography (CT),GPU,cone-beam,hardware acceleration,open-source,software frameworks}, number = {11}, pages = {8}, title = {{CONRAD - A software framework for cone-beam imaging in radiology}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {40}, year = {2013} } @inproceedings{Marisaldi, abstract = {We study the possibility to use Single Photon Avalanche Diodes (SPADs) optically coupled to scintillating fibers as a novel type of gamma-ray detector for space applications. SPADs are silicon devices operating under polarization conditions above the junction breakdown voltage (typical overvoltage of 5V), for which a single photon interacting in the active region is sufficient to trigger a self sustainable avalanche discharge. SPADs can thus be used for the detection of very low light levels with an absolute timing accuracy of about 30 ps for single photon detection, without spectroscopic capabilities. In this presentation we report the preliminary results on large area SPAD (actual results refers to SPADs having 200 $\mu$m diameter, with the aim to grow up to 500 $\mu$m SPADs) coupled to scintillating fibers as the basic module for a particle tracker for space application. 
Dark count rates as low as a few tens of kHz at room temperature, decreasing to a few kHz at -10°C, have been obtained for the 200 $\mu$m devices, in accordance with the basic requirements for the proposed application. Similar instruments based on silicon photomultiplier (SiPM) readout have already been studied, but none based on SPADs has been realized up to now. Moreover, since very little information is available on SPADs for use in a space environment, we performed bulk damage and total dose radiation tests with protons and gamma-rays in order to evaluate their radiation hardness properties and their suitability for application in a Low Earth Orbit (LEO) space mission. With this aim, the SPAD devices have been irradiated using up to 20 krad total dose with gamma-rays and 5 krad with protons. {\textcopyright} 2011 IEEE.}, author = {Marisaldi, Martino and MacCagnani, Piera and Moscatelli, Francesco and Labanti, Claudio and Fuschino, Fabio and Prest, Michela and Berra, Alessandro and Bolognini, Davide and Ghioni, Massimo and Rech, Ivan and Gulinatti, Angelo and Giudice, Andrea and Simmerle, Georg and Rubini, Danilo and Candelori, Andrea and Mattiazzo, Serena}, booktitle = {IEEE Nuclear Science Symposium Conference Record}, doi = {10.1109/NSSMIC.2011.6154465}, isbn = {9781467301183}, issn = {10957863}, pages = {129--134}, title = {{Single Photon Avalanche Diodes for space applications}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84858651704{\&}doi=10.1109{\%}2FNSSMIC.2011.6154465{\&}partnerID=40{\&}md5=9e403fad320eec958f118ac47b0d92e4}, year = {2011} } @article{Mattonen2014, abstract = {Purpose: Benign computed tomography (CT) changes due to radiation induced lung injury (RILI) are common following stereotactic ablative radiotherapy (SABR) and can be difficult to differentiate from tumor recurrence. The authors measured the ability of CT image texture analysis, compared to more traditional measures of response, to predict eventual cancer recurrence based on CT images acquired within 5 months of treatment. Methods: A total of 24 lesions from 22 patients treated with SABR were selected for this study: 13 with moderate to severe benign RILI, and 11 with recurrence. Three-dimensional (3D) consolidative and ground-glass opacity (GGO) changes were manually delineated on all follow-up CT scans. Two size measures of the consolidation regions (longest axial diameter and 3D volume) and nine appearance features of the GGO were calculated: 2 first-order features [mean density and standard deviation of density (first-order texture)], and 7 second-order texture features [energy, entropy, correlation, inverse difference moment (IDM), inertia, cluster shade, and cluster prominence]. For comparison, the corresponding response evaluation criteria in solid tumors measures were also taken for the consolidation regions. Prediction accuracy was determined using the area under the receiver operating characteristic curve (AUC) and two-fold cross validation (CV). Results: For this analysis, 46 diagnostic CT scans scheduled for approximately 3 and 6 months post-treatment were binned based on their recorded scan dates into 2-5 month and 5-8 month follow-up time ranges. At 2-5 months post-treatment, first-order texture, energy, and entropy provided AUCs of 0.79-0.81 using a linear classifier. On two-fold CV, first-order texture yielded 73{\%} accuracy versus 76{\%}-77{\%} with the second-order features.
The size measures of the consolidative region, longest axial diameter and 3D volume, gave two-fold CV accuracies of 60{\%} and 57{\%}, and AUCs of 0.72 and 0.65, respectively. Conclusions: Texture measures of the GGO appearance following SABR demonstrated the ability to predict recurrence in individual patients within 5 months of SABR treatment. Appearance changes were also shown to be more accurately predictive of recurrence, as compared to size measures within the same time period. With further validation, these results could form the substrate for a clinically useful computer-aided diagnosis tool which could provide earlier salvage of patients with recurrence. {\textcopyright} 2014 American Association of Physicists in Medicine.}, author = {Mattonen, Sarah A. and Palma, David A. and Haasbeek, Cornelis J.A. and Senan, Suresh and Ward, Aaron D.}, doi = {10.1118/1.4866219}, issn = {00942405}, journal = {Medical Physics}, keywords = {cancer recurrence,computed tomography,lung,stereotactic radiation therapy,texture analysis}, number = {3}, pages = {14}, title = {{Early prediction of tumor recurrence based on CT texture changes after stereotactic ablative radiotherapy (SABR) for lung cancer}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {41}, year = {2014} } @article{Mattonen2015, abstract = {Benign radiation-induced lung injury (RILI) is not uncommon following stereotactic ablative radiotherapy (SABR) for lung cancer and can be difficult to differentiate from tumor recurrence on follow-up imaging. We previously showed the ability of computed tomography (CT) texture analysis to predict recurrence. The aim of this study was to evaluate and compare the accuracy of recurrence prediction using manual region-of-interest segmentation to that of a semiautomatic approach. We analyzed 22 patients treated for 24 lesions (11 recurrences, 13 RILI). Consolidative and ground-glass opacity (GGO) regions were manually delineated. The longest axial diameter of the consolidative region on each post-SABR CT image was measured. This line segment is routinely obtained as part of the clinical imaging workflow and was used as input to automatically delineate the consolidative region and subsequently derive a periconsolidative region to sample GGO tissue. Texture features were calculated, and at two to five months post-SABR, the entropy texture measure within the semiautomatic segmentations showed prediction accuracies [areas under the receiver operating characteristic curve (AUC): 0.70 to 0.73] similar to those of manual GGO segmentations (AUC: 0.64). After integration into the clinical workflow, this decision support system has the potential to support earlier salvage for patients with recurrence and fewer investigations of benign RILI.}, author = {Mattonen, Sarah A. and Tetar, Shyama and Palma, David A. and Louie, Alexander V. and Senan, Suresh and Ward, Aaron D.}, doi = {10.1117/1.jmi.2.4.041010}, issn = {2329-4302}, journal = {Journal of Medical Imaging}, number = {4}, pages = {041010}, title = {{Imaging texture analysis for automated prediction of lung cancer recurrence after stereotactic radiotherapy}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {2}, year = {2015} } @inproceedings{Maule, abstract = {This article proposes automated approach for whole brain infarction core delineation while using only non-contrast computed tomography and computed tomography angiography. 
The main aim is to provide additional information by measuring infarction core volume, since exceeding a certain level is a contraindication for early recanalization. The process of generating Perfusion Blood Volume maps is described first, followed by a description of the infarction core delineation process. Verification of correctness is based on comparison against follow-up examinations. The discussion and future work summarize weaknesses of the method and steps for improvement.}, author = {Maule, Petr and Kle{\v{c}}kov{\'{a}}, Jana and Rohan, Vladim{\'{i}}r}, booktitle = {KDIR 2011 - Proceedings of the International Conference on Knowledge Discovery and Information Retrieval}, doi = {10.5220/0003651704330437}, isbn = {9789898425799}, keywords = {Acute stroke,Automated infarction core segmentation,Brain ischemia,Perfusion blood volume,Volumetric maps}, pages = {433--437}, title = {{Automated approach for whole brain infarction core delineation: Using non-contrast and computed tomography angiography}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84862235383{\&}partnerID=40{\&}md5=1d80a348add3d1bf8abea3228c5457a1}, year = {2011} } @article{McCormick2011, abstract = {Noise artifacts due to signal decorrelation and reverberation are a considerable problem in ultrasound strain imaging. For block-matching methods, information from neighboring matching blocks has been utilized to regularize the estimated displacements. We apply a recursive Bayesian regularization algorithm developed by Hayton et al. [Artif. Intell., vol. 114, pp. 125-156, 1999] to phase-sensitive ultrasound RF signals to improve displacement estimation. The parameter of regularization is reformulated, and its meaning examined in the context of strain imaging. Tissue-mimicking experimental phantoms and RF data incorporating finite-element models for the tissue deformation and frequency-domain ultrasound simulations are used to compute the optimal parameter with respect to nominal strain and algorithmic iterations. The optimal strain regularization parameter was found to be twice the nominal strain and did not vary significantly with algorithmic iterations. The technique demonstrates superior performance over median filtering in noise reduction at strains of 5{\%} and higher for all quantitative experiments performed. For example, the strain SNR was 11 dB higher than that obtained using a median filter at 7{\%} strain. It has to be noted that for applied deformations lower than 1{\%}, since signal decorrelation errors are minimal, using this approach may degrade the displacement image. {\textcopyright} 2010 IEEE.}, author = {McCormick, Matthew and Rubert, Nicholas and Varghese, Tomy}, doi = {10.1109/TBME.2011.2106500}, issn = {00189294}, journal = {IEEE Transactions on Biomedical Engineering}, keywords = {Bayes procedures,biomedical acoustic imaging,biomedical imaging,displacement measurement,image motion analysis,strain measurement}, number = {6}, pages = {1612--1620}, title = {{Bayesian regularization applied to ultrasound strain imaging}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-79956345107{\&}doi=10.1109{\%}2FTBME.2011.2106500{\&}partnerID=40{\&}md5=6d53735bfa77d8fa5f9b895224795479}, volume = {58}, year = {2011} } @article{McCormick2013, abstract = {Accurate subsample displacement estimation is necessary for ultrasound elastography because of the small deformations that occur and the subsequent application of a derivative operation on local displacements.
Many of the commonly used subsample estimation techniques introduce significant bias errors. This article addresses a reduced bias approach to subsample displacement estimations that consists of a two-dimensional windowed-sinc interpolation with numerical optimization. It is shown that a Welch or Lanczos window with a Nelder-Mead simplex or regular-step gradient-descent optimization is well suited for this purpose. Little improvement results from a sinc window radius greater than four data samples. The strain signal-to-noise ratio (SNR) obtained in a uniformly elastic phantom is compared with other parabolic and cosine interpolation methods; it is found that the strain SNR ratio is improved over parabolic interpolation from 11.0 to 13.6 in the axial direction and 0.7 to 1.1 in the lateral direction for an applied 1{\%} axial deformation. The improvement was most significant for small strains and displacement tracking in the lateral direction. This approach does not rely on special properties of the image or similarity function, which is demonstrated by its effectiveness with the application of a previously described regularization technique. {\textcopyright} The Author(s) 2013.}, author = {McCormick, Matthew M. and Varghese, Tomy}, doi = {10.1177/0161734613476176}, issn = {01617346}, journal = {Ultrasonic Imaging}, keywords = {motion tracking,sinc reconstruction,strain imaging,subsample interpolation}, number = {2}, pages = {76--89}, title = {{An approach to unbiased subsample interpolation for motion tracking}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {35}, year = {2013} } @article{McGee2011, abstract = {The roundworm C. elegans is widely used as an aging model, with hundreds of genes identified that modulate aging (Kaeberlein et al., 2002. Mech. Ageing Dev.123, 1115-1119). The development and bodyplan of the 959 cells comprising the adult have been well described and established for more than 25years (Sulston {\&} Horvitz, 1977. Dev. Biol.56, 110-156; Sulston et al., 1983. Dev. Biol.100, 64-119.). However, morphological changes with age in this optically transparent animal are less well understood, with only a handful of studies investigating the pathobiology of aging. Age-related changes in muscle (Herndon, 2002. Nature419, 808-814), neurons (Herndon, 2002), intestine and yolk granules (Garigan, 2002. Genetics161, 1101-1112; Herndon, 2002), nuclear architecture (Haithcock, 2005. Proc. Natl Acad. Sci. USA102, 16690-16695), tail nuclei (Golden, 2007. Aging Cell6, 179-188), and the germline (Golden, 2007) have been observed via a variety of traditional relatively low-throughput methods. We report here a number of novel approaches to study the pathobiology of aging C. elegans. We combined histological staining of serial-sectioned tissues, transmission electron microscopy, and confocal microscopy with 3D volumetric reconstructions and characterized age-related morphological changes in multiple wild-type individuals at different ages. This enabled us to identify several novel pathologies with age in the C. elegans intestine, including the loss of critical nuclei, the degradation of intestinal microvilli, changes in the size, shape, and cytoplasmic contents of the intestine, and altered morphologies caused by ingested bacteria. The three-dimensional models we have created of tissues and cellular components from multiple individuals of different ages represent a unique resource to demonstrate global heterogeneity of a multicellular organism. {\textcopyright} 2011 The Authors. 
Aging Cell {\textcopyright} 2011 Blackwell Publishing Ltd/Anatomical Society of Great Britain and Ireland.}, author = {McGee, Matthew D. and Weber, Darren and Day, Nicholas and Vitelli, Cathy and Crippen, Danielle and Herndon, Laura A. and Hall, David H. and Melov, Simon}, doi = {10.1111/j.1474-9726.2011.00713.x}, issn = {14749718}, journal = {Aging Cell}, keywords = {Aging,C. elegans,Intestine,Microvilli,Nucleus}, number = {4}, pages = {699--710}, title = {{Loss of intestinal nuclei and intestinal integrity in aging C. elegans}}, type = {Journal Article}, volume = {10}, year = {2011} } @article{Meijs2017, abstract = {A robust method is presented for the segmentation of the full cerebral vasculature in 4-dimensional (4D) computed tomography (CT). The method consists of candidate vessel selection, feature extraction, random forest classification and postprocessing. Image features include, among others, the weighted temporal variance image and parameters, including entropy, of an intensity histogram in a local region at different scales. These histogram parameters proved to be a strong feature in the detection of vessels regardless of shape and size. The method was trained and tested on a large database of 264 patients with suspicion of acute ischemia who underwent 4D CT in our hospital in the period January 2014 to December 2015. Five subvolumes representing different regions of the cerebral vasculature were annotated in each image in the training set by medical assistants. The evaluation was done on 242 patients. A total of 16 ({\textless}8{\%}) patients showed severe under- or over-segmentation and were reported as failures. One out of five subvolumes was randomly annotated in 159 patients and was used for quantitative evaluation. Quantitative evaluation showed a Dice coefficient of 0.91 ± 0.07 and a modified Hausdorff distance of 0.23 ± 0.22 mm. Therefore, robust vessel segmentation in 4D CT is feasible with good accuracy.}, author = {Meijs, Midas and Patel, Ajay and {Van De Leemput}, Sil C. and Prokop, Mathias and {Van Dijk}, Ewoud J. and {De Leeuw}, Frank Erik and Meijer, Frederick J.A. and {Van Ginneken}, Bram and Manniesing, Rashindra}, doi = {10.1038/s41598-017-15617-w}, issn = {20452322}, journal = {Scientific Reports}, number = {1}, title = {{Robust Segmentation of the Full Cerebral Vasculature in 4D CT of Suspected Stroke Patients}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85034432920{\&}doi=10.1038{\%}2Fs41598-017-15617-w{\&}partnerID=40{\&}md5=d67c87465788c1f4c70cc759dd0a0625}, volume = {7}, year = {2017} } @article{Menard2015, abstract = {Purpose: To determine if the integration of diagnostic magnetic resonance (MR) imaging and MR-guided biopsy would improve target delineation for focal salvage therapy in men with prostate cancer. Materials and Methods: Between September 2008 and March 2011, 30 men with biochemical failure after radiation therapy for prostate cancer provided written informed consent and were enrolled in a prospective clinical trial approved by the institutional research ethics board. An integrated diagnostic MR imaging and interventional biopsy procedure was performed with a 1.5-T MR imager by using a prototype table and stereotactic transperineal template. Multiparametric MR imaging (T2-weighted, dynamic contrast material-enhanced, and diffusion-weighted sequences) was followed by targeted biopsy of suspicious regions and systematic sextant sampling.
Biopsy needle locations were imaged and registered to diagnostic images. Two observers blinded to clinical data and the results of prior imaging studies delineated tumor boundaries. Area under the receiver operating characteristic curve (Az) was calculated based on generalized linear models by using biopsy as the reference standard to distinguish benign from malignant lesions. Results: Twenty-eight patients were analyzed. Most patients (n = 22) had local recurrence, with 82{\%} (18 of 22) having unifocal disease. When multiparametric volumes from two observers were combined, it increased the apparent overall tumor volume by 30{\%}; however, volumes remained small (mean, 2.9 mL; range, 0.5-8.3 mL). Tumor target boundaries differed between T2-weighted, dynamic contrast-enhanced, and diffusion-weighted sequences (mean Dice coefficient, 0.13-0.35). Diagnostic accuracy in the identification of tumors improved with a multiparametric approach versus a strictly T2-weighted or dynamic contrast-enhanced approach through an improvement in sensitivity (observer 1, 0.65 vs 0.35 and 0.44, respectively; observer 2, 0.82 vs 0.64 and 0.53, respectively; P {\textless} .05) and improved further with a 5-mm expansion margin (Az = 0.85 vs 0.91 for observer 2). After excluding three patients with fewer than six informative biopsy cores and six patients with inadequately stained margins, MR-guided biopsy enabled more accurate delineation of the tumor target volume by means of exclusion of false-positive results in 26{\%} (five of 19 patients), false-negative results in 11{\%} (two of 19 patients), and by guiding extension of tumor boundaries in 16{\%} (three of 19 patients). Conclusion: The integration of guided biopsy with diagnostic MR imaging is feasible and alters delineation of the tumor target boundary in a substantial proportion of patients considering focal salvage.}, author = {M{\'{e}}nard, Cynthia and Iupati, Douglas and Publicover, Julia and Lee, Jenny and Abed, Jessamine and O'Leary, Gerald and Simeonov, Anna and Foltz, Warren D. and Milosevic, Michael and Catton, Charles and Morton, Gerard and Bristow, Robert and Bayley, Andrew and Atenafu, Eshetu G. and Evans, Andrew J. and Jaffray, David A. and Chung, Peter and Brock, Kristy K. and Haider, Masoom A.}, doi = {10.1148/radiol.14122681}, issn = {15271315}, journal = {Radiology}, number = {1}, pages = {181--191}, title = {{MR-guided prostate biopsy for planning of focal salvage after radiation therapy}}, type = {Journal Article}, volume = {274}, year = {2015} } @article{Mendoza2012, abstract = {In this paper, we propose a self-assessed adaptive region growing segmentation algorithm. In the context of an experimental virtual-reality surgical planning software platform, our method successfully delineates main tissues relevant for reconstructive surgery, such as fat, muscle, and bone. We rely on a self-tuning approach to deal with a great variety of imaging conditions requiring limited user intervention (one seed). The detection of the optimal parameters is managed internally using a measure of the varying contrast of the growing region, and the stopping criterion is adapted to the noise level in the dataset thanks to the sampling strategy used for the assessment function. Sampling is referred to the statistics of a neighborhood around the seed(s), so that the sampling period becomes greater when images are noisier, resulting in the acquisition of a lower frequency version of the contrast function.
Validation is provided for synthetic images, as well as real CT datasets. For the CT test images, validation is referred to manual delineations for 10 cases and to subjective assessment for another 35. High values of sensitivity and specificity, as well as Dice's coefficient and Jaccard's index on one hand, and satisfactory subjective evaluation on the other hand, prove the robustness of our contrast-based measure, even suggesting suitability for calibration of other region-based segmentation algorithms. {\textcopyright} 2010 Springer-Verlag.}, author = {Mendoza, Carlos S. and Acha, Bego{\~{n}}a and Serrano, Carmen and G{\'{o}}mez-C{\'{i}}a, Tom{\'{a}}s}, doi = {10.1007/s00138-010-0274-z}, issn = {09328092}, journal = {Machine Vision and Applications}, keywords = {CT,Region growing,Segmentation,Surgical planning,Virtual reality}, number = {1}, pages = {165--177}, title = {{Fast parameter-free region growing segmentation with application to surgical planning}}, type = {Journal Article}, volume = {23}, year = {2012} } @article{Metz2012, abstract = {State of the art cardiac computed tomography (CT) enables the acquisition of imaging data of the heart over the entire cardiac cycle at concurrent high spatial and temporal resolution. However, in clinical practice, acquisition is increasingly limited to 3-D images. Estimating the shape of the cardiac structures throughout the entire cardiac cycle from a 3-D image is therefore useful in applications such as the alignment of preoperative computed tomography angiography (CTA) to intra-operative X-ray images for improved guidance in coronary interventions. We hypothesize that the motion of the heart is partially explained by its shape and therefore investigate the use of three regression methods for motion estimation from single-phase shape information. Quantitative evaluation on 150 4-D CTA images showed a small, but statistically significant, increase in the accuracy of the predicted shape sequences when using any of the regression methods, compared to shape-independent motion prediction by application of the mean motion. The best results were achieved using principal component regression resulting in point-to-point errors of 2.3 ± 0.5 mm, compared to values of 2.7 ± 0.6 mm for shape-independent motion estimation. Finally, we showed that this significant difference withstands small variations in important parameter settings of the landmarking procedure. {\textcopyright} 2012 IEEE.}, author = {Metz, Coert T. and Baka, Nora and Kirisli, Hortense and Schaap, Michiel and Klein, Stefan and Neefjes, Lisan A. and Mollet, Nico R. and Lelieveldt, Boudewijn and {De Bruijne}, Marleen and Niessen, Wiro J. and {Van Walsum}, Theo}, doi = {10.1109/TMI.2012.2190938}, issn = {02780062}, journal = {IEEE Transactions on Medical Imaging}, keywords = {Cardiac,heart,motion prediction,principal component regression (PCR),shape,statistical models}, number = {6}, pages = {1311--1325}, title = {{Regression-based cardiac motion prediction from single-phase CTA}}, type = {Journal Article}, volume = {31}, year = {2012} } @article{Meyer2014, abstract = {Ediacara fossils often exhibit enigmatic taphonomy that complicates morphological characterization and ecological and phylogenetic interpretation; such is the case with Pteridinium simplex from the late Ediacaran Aar Member in southern Namibia. P.
simplex is often preserved as three-dimensional (3D) casts and molds in coarse-grained quartzites, making detailed morphological characterization difficult. In addition, P. simplex is often transported, distorted, and embedded in gutter fills or channel deposits, further obscuring its morphologies. By utilizing microfocus X-ray computed tomography (microCT) techniques, we are able to trace individual specimens and their vanes in order to digitally restore the 3D morphology of this enigmatic fossil. Our analysis shows that P. simplex has a very flexible integument that can be bent, folded, twisted, stretched, and torn, indicating a certain degree of elasticity. In the analyzed specimens, we find no evidence for vane identity change or penetrative growth that were previously used as evidence to support a fully endobenthic lifestyle of P. simplex; instead, evidence is consistent with the traditional interpretation of a semi-endobenthic or epibenthic lifestyle. This interpretation needs to be further tested through microCT analysis of P. simplex specimens preserved in situ rather than transported ones. The elastic integument of P. simplex is inconsistent with a phylogenetic affinity with xenophyophore protists; instead, its physical property is consistent with the presence of collagen, chitin, and cellulose, an inference that would provide constraints on the phylogenetic affinity of P. simplex. {\textcopyright} 2014 Elsevier B.V.}, author = {Meyer, Mike and Elliott, David and Wood, Andrew D. and Polys, Nicholas F. and Colbert, Matthew and Maisano, Jessica A. and Vickers-Rich, Patricia and Hall, Michael and Hoffman, Karl H. and Schneider, Gabi and Xiao, Shuhai}, doi = {10.1016/j.precamres.2014.04.013}, issn = {03019268}, journal = {Precambrian Research}, keywords = {Aar member,Ediacaran,MicroCT,Namibia,Pteridinium,Taphonomy}, pages = {79--87}, title = {{Three-dimensional microCT analysis of the Ediacara fossil Pteridinium simplex sheds new light on its ecology and phylogenetic affinity}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {249}, year = {2014} } @article{Meyer2017, abstract = {Trace fossils are superb lines of evidence for examining the ancient biologic world because they offer an opportunity to infer behavioral ecology of organisms. However, traces can be difficult to parse from their matrix, which leads to the loss of important morphological and behavioral data. This is especially true for the earliest marine animal traces from the Ediacaran Period (635–541 Ma), which are usually small ({\textless}5 mm in diameter) and simple (mostly small horizontal trails and burrows), and are sometimes difficult to be distinguished from co-existing tubular body fossils. There is also evidence that the prevalence of microbial substrates in Ediacaran oceans may have influenced emerging trace makers in non-actualistic ways from a late Phanerozoic perspective (e.g., microbial mats may have facilitated a strong geochemical gradient across the sediment-water interface). Therefore, the discovery of the relatively large traces of Lamonte trevallis from the Ediacaran Shibantan Member of the Dengying Formation (∼551–541 Ma) in the Yangtze Gorges area of South China provides a unique opportunity to study early bioturbators. These trace fossils are large enough and have sufficient compositional contrast (relative to the matrix) for in situ analysis via X-ray computed tomography (CT) and microcomputed tomography (microCT). Each analytical method has its own advantages and disadvantages.
CT scans can image larger specimens, but cannot adequately resolve small features of interest. MicroCT scans can achieve higher resolution, but can only be used with small samples and may involve more post-processing than CT scans. As demonstrated in this study, X-ray CT and microCT in combination with other 3D imaging techniques and resources have the potential to resolve the 3D morphology of Ediacaran trace fossils. A new Volumetric Bioturbation Intensity (VBI) is also proposed, which quantifies whole rock bioturbation using 3D analysis of subsurface traces. Combined with the ability to examine trace fossils in situ, the VBI can enhance our view of ancient ecologies and life's enduring relationship with sediments.}, author = {Meyer, Mike and Polys, Nick and Yaqoob, Humza and Hinnov, Linda and Xiao, Shuhai}, doi = {10.1016/j.precamres.2017.05.010}, issn = {03019268}, journal = {Precambrian Research}, keywords = {Bioturbation,Ediacaran,Lamonte trevallis,South China,Trace fossil}, pages = {341--350}, title = {{Beyond the stony veil: Reconstructing the Earth's earliest large animal traces via computed tomography X-ray imaging}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {298}, year = {2017} } @article{Meyer2013, abstract = {A demonstrator system for planning neurosurgical procedures was developed based on commercial hardware and software. The system combines an easy-to-use environment for surgical planning with high-end visualization and the opportunity to analyze data sets for research purposes. The demonstrator system is based on the software AMIRA. Specific algorithms for segmentation, elastic registration, and visualization have been implemented and adapted to the clinical workflow. Modules from AMIRA and the image processing library Insight Segmentation and Registration Toolkit (ITK) can be combined to solve various image processing tasks. Customized modules tailored to specific clinical problems can easily be implemented using the AMIRA application programming interface and a self-developed framework for ITK filters. Visualization is done via autostereoscopic displays, which provide a 3D impression without viewing aids. A Spaceball device allows a comfortable, intuitive way of navigation in the data sets. Via an interface to a neurosurgical navigation system, the demonstrator system can be used intraoperatively. The precision, applicability, and benefit of the demonstrator system for planning of neurosurgical interventions and for neurosurgical research were successfully evaluated by neurosurgeons using phantom and patient data sets. {\textcopyright} 2013 Walter de Gruyter GmbH.}, author = {Meyer, Tobias and Ku{\ss}, Julia and Uhlemann, Falk and Wagner, Stefan and Kirsch, Matthias and Sobottka, Stephan B. and Steinmeier, Ralf and Schackert, Gabriele and Morgenstern, Ute}, doi = {10.1515/bmt-2012-0079}, issn = {00135585}, journal = {Biomedizinische Technik}, keywords = {Autostereoscopy,Diffusion tensor imaging,Interaction,Multimodality phantoms,Visualization}, number = {3}, pages = {281--291}, title = {{Autostereoscopic 3D visualization and image processing system for neurosurgery}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {58}, year = {2013} } @article{Meyer2013a, abstract = {Intraoperative optical imaging of intrinsic signals can improve the localization of functional areas of the cortex. On the basis of a review of the current state of technology, a setup was developed and evaluated.
The aim was to implement an easy-to-use and robust imaging setup that can be used in clinical routine with standard hardware equipment (surgical microscope, high-resolution camera, stimulator for peripheral nerve stimulation) and custom-made software for intraoperative and postoperative data analysis. Evaluation of different light sources (halogen, xenon) showed a sufficient temporal behavior of xenon light without using a stabilized power supply. Spatial binning (2 × 2) of the camera reduces temporal variations in the images by preserving a high spatial resolution. The setup was tested in eight patients. Images were acquired continuously for 9 min with alternating 30-s rest and 30-s stimulation conditions. Intraoperative measurement and visualization of high-resolution two-dimensional activity maps could be achieved in 15 min. The detected functional regions corresponded with anatomical and electrophysiological validation. The integration of optical imaging in clinical routine could successfully be achieved using standard hardware, which improves guidance for the surgeon during interventions near the eloquent areas of the brain. {\textcopyright} 2013 Walter de Gruyter GmbH.}, author = {Meyer, Tobias and Sobottka, Stephan B. and Kirsch, Matthias and Schackert, Gabriele and Steinmeier, Ralf and Koch, Edmund and Morgenstern, Ute}, doi = {10.1515/bmt-2012-0072}, issn = {00135585}, journal = {Biomedizinische Technik}, keywords = {Functional imaging,Intrinsic signals,Somatosensory cortex}, number = {3}, pages = {225--236}, title = {{Intraoperative optical imaging of functional brain areas for improved image-guided surgery}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {58}, year = {2013} } @inproceedings{Miller, abstract = {The research presented in this paper represents several novel conceptual contributions to the computer vision literature. In this position paper, our goal is to define the scope of computer vision analysis and discuss a new categorisation of the computer vision problem. We first provide a novel decomposition of computer vision into base components which we term the axioms of vision. These are used to define researcher-level and developer-level access to vision algorithms, in a way which does not require expert knowledge of computer vision. We discuss a new line of thought for computer vision by basing analyses on descriptions of the problem instead of in terms of algorithms. From this an abstraction can be developed to provide a layer above algorithmic details. This is extended to the idea of a formal description language which may be automatically interpreted thus allowing those not familiar with computer vision techniques to utilise sophisticated methods.
{\textcopyright} 2011 IEEE.}, author = {Miller, Gregor and Fels, Sidney and Oldridge, Steve}, booktitle = {Proceedings - 2011 Canadian Conference on Computer and Robot Vision, CRV 2011}, doi = {10.1109/CRV.2011.29}, isbn = {9780769543628}, keywords = {Computer Vision,Vision Development,Vision Systems}, pages = {168--174}, title = {{A conceptual structure for computer vision}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-80051813078{\&}doi=10.1109{\%}2FCRV.2011.29{\&}partnerID=40{\&}md5=dcf66af3aaedbee7cecd406172b38797}, year = {2011} } @article{Mistry2013, abstract = {Purpose: Current implementations of methods based on Hounsfield units to evaluate regional lung ventilation do not directly incorporate tissue-based mass changes that occur over the respiratory cycle. To overcome this, we developed a 4-dimensional computed tomography (4D-CT)-based technique to evaluate fractional regional ventilation (FRV) that uses an individualized ratio of tidal volume to end-expiratory lung volume for each voxel. We further evaluated the effect of different breathing maneuvers on regional ventilation. The results from this work will help elucidate the relationship between global and regional lung function. Methods and Materials: Eight patients underwent 3 sets of 4D-CT scans during 1 session using free-breathing, audiovisual guidance, and active breathing control. FRV was estimated using a density-based algorithm with mass correction. Internal validation between global and regional ventilation was performed by use of the imaging data collected during the use of active breathing control. The impact of breathing maneuvers on FRV was evaluated comparing the tidal volume from 3 breathing methods. Results: Internal validation through comparison between the global and regional changes in ventilation revealed a strong linear correlation (slope of 1.01, R2 of 0.97) between the measured global lung volume and the regional lung volume calculated by use of the "mass corrected" FRV. A linear relationship was established between the tidal volume measured with the automated breathing control system and FRV based on 4D-CT imaging. Consistently larger breathing volumes were observed when coached breathing techniques were used. Conclusions: The technique presented improves density-based evaluation of lung ventilation and establishes a link between global and regional lung ventilation volumes. Furthermore, the results obtained are comparable with those of other techniques of functional evaluation such as spirometry and hyperpolarized-gas magnetic resonance imaging. These results were demonstrated on retrospective analysis of patient data, and further research using prospective data is under way to validate this technique against established clinical tests. {\textcopyright} 2013 Elsevier Inc.}, author = {Mistry, Nilesh N. and Diwanji, Tejan and Shi, Xiutao and Pokharel, Sabin and Feigenberg, Steven and Scharf, Steven M. 
and D'Souza, Warren D.}, doi = {10.1016/j.ijrobp.2013.07.032}, issn = {03603016}, journal = {International Journal of Radiation Oncology Biology Physics}, number = {4}, pages = {825--831}, title = {{Evaluation of fractional regional ventilation using 4D-CT and effects of breathing maneuvers on ventilation}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {87}, year = {2013} } @article{Mohamed2015, abstract = {Purpose: To develop a quality assurance (QA) workflow by using a robust, curated, manually segmented anatomic region-of-interest (ROI) library as a benchmark for quantitative assessment of different image registration techniques used for head and neck radiation therapy-simulation computed tomography (CT) with diagnostic CT coregistration. Materials and Methods: Radiation therapy-simulation CT images and diagnostic CT images in 20 patients with head and neck squamous cell carcinoma treated with curative-intent intensity-modulated radiation therapy between August 2011 and May 2012 were retrospectively retrieved with institutional review board approval. Sixty-eight reference anatomic ROIs with gross tumor and nodal targets were then manually contoured on images from each examination. Diagnostic CT images were registered with simulation CT images rigidly and by using four deformable image registration (DIR) algorithms: atlas based, B-spline, demons, and optical flow. The resultant deformed ROIs were compared with manually contoured reference ROIs by using similarity coefficient metrics (ie, Dice similarity coefficient) and surface distance metrics (ie, 95{\%} maximum Hausdorff distance). The nonparametric Steel test with control was used to compare different DIR algorithms with rigid image registration (RIR) by using the post hoc Wilcoxon signed-rank test for stratified metric comparison. Results: A total of 2720 anatomic and 50 tumor and nodal ROIs were delineated. All DIR algorithms showed improved performance over RIR for anatomic and target ROI conformance, as shown for most comparison metrics (Steel test, P {\textless} .008 after Bonferroni correction). The performance of different algorithms varied substantially with stratification by specific anatomic structures or category and simulation CT section thickness. Conclusion: Development of a formal ROI-based QA workflow for registration assessment demonstrated improved performance with DIR techniques over RIR. After QA, DIR implementation should be the standard for head and neck diagnostic CT and simulation CT alignment, especially for target delineation.}, author = {Mohamed, Abdallah S.R. and Ruangskul, Manee Naad and Awan, Musaddiq J. and Baron, Charles A. and Kalpathy-Cramer, Jayashree and Castillo, Richard and Castillo, Edward and Guerrero, Thomas M. and Kocak-Uzel, Esengul and Yang, Jinzhong and Court, Laurence E. and Kantor, Michael E. and Gunn, G. Brandon and Colen, Rivka R. and Frank, Steven J. and Garden, Adam S. and Rosenthal, David I.
and Fuller, Clifton D.}, doi = {10.1148/radiol.14132871}, issn = {15271315}, journal = {Radiology}, number = {3}, pages = {752--763}, title = {{Quality assurance assessment of diagnostic and radiation therapy-simulation CT image registration for head and neck radiation therapy: Anatomic region of interest-based comparison of rigid and deformable algorithms}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {274}, year = {2015} } @article{Mok2016, abstract = {Background: Previously, we proposed interpolated averaged CT (IACT) for improved attenuation correction (AC) in thoracic PET/CT. This study aims to evaluate its feasibility and effectiveness on cardiac PET/CT. Methods: We simulated 18F-FDG distribution using the XCAT phantom with normal and abnormal cardiac uptake. Average activity and attenuation maps represented static PET and respiration average CT (ACT), respectively, while the attenuation maps of end-inspiration/expiration represented 2 helical CTs (HCT). IACT was obtained by averaging the 2 extreme phases and the interpolated phases generated between them. Later, we recruited 4 patients who were scanned 1 hr post 315-428 MBq 18F-FDG injection. Simulated and clinical PET sinograms were reconstructed with AC using (1) HCT, (2) IACT, and (3) ACT. Polar plots and the 17-segment plots were analyzed. Two regions-of-interest were drawn on lesion and background area to obtain the intensity ratio (IR). Results: Polar plots of PETIACT-AC were more similar to PETACT-AC in both simulation and clinical data. Artifacts were observed in various segments in PETHCT-AC. IR differences of HCT as compared to the phantom were up to {\~{}}20{\%}. Conclusions: IACT-AC reduced respiratory artifacts and improved PET/CT matching similarly to ACT-AC. It is a promising low-dose alternate of ACT for cardiac PET/CT.}, author = {Mok, Greta S.P. and Ho, Cobie Y.T. and Yang, Bang Hung and Wu, Tung Hsin}, doi = {10.1007/s12350-015-0140-5}, issn = {15326551}, journal = {Journal of Nuclear Cardiology}, keywords = {PET/CT,attenuation correction,cardiac imaging,respiratory artifacts}, number = {5}, pages = {1072--1079}, title = {{Interpolated average CT for cardiac PET/CT attenuation correction}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {23}, year = {2016} } @article{Montin2020, abstract = {Survival of pediatric patients with brain tumor has increased over the past 20 years, and increasing evidence of iatrogenic toxicities has been reported. In follow-ups, images are acquired at different time points where substantial changes of brain morphology occur, due to childhood physiological development and treatment effects. To address the image registration complexity, we propose two multi-metric approaches (Mplus, Mdot), combining mutual information (MI) and normalized gradient field filter (NGF). The registration performance of the proposed metrics was assessed on a simulated dataset (Brainweb) and compared with those obtained by MI and NGF separately, using mean magnitude and mean angular errors. The most promising metric (Mplus) was then selected and tested on a retrospective dataset comprising 45 pediatric patients who underwent focal radiotherapy for brain cancer. The quality of the realignment was scored by a radiation oncologist using a perceived misalignment metric (PM). All patients but one were assessed as PM ≤ 2 (good alignment), but the remaining one, severely affected by hydrocephalus and pneumocephalus at the first MRI acquisition, scored PM = 5 (unacceptable). 
These preliminary findings suggest that Mplus might improve the registration accuracy in complex applications such as pediatric oncology, when data are acquired throughout the years of follow-up, and is worth investigating. [Figure not available: see fulltext.].}, author = {Montin, Eros and Belfatto, Antonella and Bologna, Marco and Meroni, Silvia and Cavatorta, Claudia and Pecori, Emilia and Diletto, Barbara and Massimino, Maura and Oprandi, Maria Chiara and Poggi, Geraldina and Arrigoni, Filippo and Peruzzo, Denis and Pignoli, Emanuele and Gandola, Lorenza and Cerveri, Pietro and Mainardi, Luca}, doi = {10.1007/s11517-019-02109-4}, issn = {17410444}, journal = {Medical and Biological Engineering and Computing}, keywords = {Brain MRI,Deformable registration,Image registration,Mutual information,Normalized gradient field,Pediatric brain tumors}, title = {{A multi-metric registration strategy for the alignment of longitudinal brain images in pediatric oncology}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85079384742{\&}doi=10.1007{\%}2Fs11517-019-02109-4{\&}partnerID=40{\&}md5=211d8231bb9224f67092a15ccbb09ce3}, year = {2020} } @article{Moore2017, abstract = {This study considers the computationally determined thermal profile of a finely discretized, heterogeneous human body model, simulating a radiofrequency electromagnetic field (RF-EMF) worker wearing protective clothing subject to RF-EMF exposure, and subject to various environmental conditions including high ambient temperature and high humidity, with full thermoregulatory mechanisms in place. How the human body responds in various scenarios was investigated, and the information was used to consider safety limits in current international RF-EMF safety guidelines and standards. It was found that different environmental conditions had minimal impact on the magnitude of the thermal response due to RF-EMF exposure, and that the current safety factor of 10 applied in international RF-EMF safety guidelines and standards for RF-EMF workers is generally conservative, though it is only narrowly so when workers are subjected to the most adverse environmental conditions. Bioelectromagnetics. 38:356–363, 2017. {\textcopyright} 2017 Wiley Periodicals, Inc.}, author = {Moore, Stephen M. and McIntosh, Robert L. and Iskra, Steve and Lajevardipour, Alireza and Wood, Andrew W.}, doi = {10.1002/bem.22048}, issn = {1521186X}, journal = {Bioelectromagnetics}, keywords = {RF-EMF worker,occupational safety,protective clothing,radiofrequency safety standards,temperature rise}, number = {5}, pages = {356--363}, title = {{Effect of adverse environmental conditions and protective clothing on temperature rise in a human body exposed to radiofrequency electromagnetic fields}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85016441570{\&}doi=10.1002{\%}2Fbem.22048{\&}partnerID=40{\&}md5=9f2b71c5da61855e410d615f4c57dfd3}, volume = {38}, year = {2017} } @article{Mosaliganti2009, abstract = {In this paper, we utilize the N-point correlation functions (N-pcfs) to construct an appropriate feature space for achieving tissue segmentation in histology-stained microscopic images. The N-pcfs estimate microstructural constituent packing densities and their spatial distribution in a tissue sample. We represent the multi-phase properties estimated by the N-pcfs in a tensor structure. 
Using a variant of higher-order singular value decomposition (HOSVD) algorithm, we realize a robust classifier that provides a multi-linear description of the tensor feature space. Validated results of the segmentation are presented in a case-study that focuses on understanding the genetic phenotyping differences in mouse placentae. {\textcopyright} 2008 Elsevier B.V. All rights reserved.}, author = {Mosaliganti, Kishore and Janoos, Firdaus and Irfanoglu, Okan and Ridgway, Randall and Machiraju, Raghu and Huang, Kun and Saltz, Joel and Leone, Gustavo and Ostrowski, Michael}, doi = {10.1016/j.media.2008.06.020}, issn = {13618415}, journal = {Medical Image Analysis}, keywords = {Image segmentation,Microstructure,N-point correlation functions,Phenotyping}, number = {1}, pages = {156--166}, title = {{Tensor classification of N-point correlation function features for histology tissue segmentation}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {13}, year = {2009} } @article{Mosaliganti2019, abstract = {Animals make organs of precise size, shape, and symmetry but how developing embryos do this is largely unknown. Here, we combine quantitative imaging, physical theory, and physiological measurement of hydrostatic pressure and fluid transport in zebrafish to study size control of the developing inner ear. We find that fluid accumulation creates hydrostatic pressure in the lumen leading to stress in the epithelium and expansion of the otic vesicle. Pressure, in turn, inhibits fluid transport into the lumen. This negative feedback loop between pressure and transport allows the otic vesicle to change growth rate to control natural or experimentally-induced size variation. Spatiotemporal patterning of contractility modulates pressure-driven strain for regional tissue thinning. Our work connects molecular-driven mechanisms, such as osmotic pressure driven strain and actomyosin tension, to the regulation of tissue morphogenesis via hydraulic feedback to ensure robust control of organ size.}, author = {Mosaliganti, Kishore R. and Swinburne, Ian A. and Chan, Chon U. and Obholzer, Nikolaus D. and Green, Amelia A. and Tanksale, Shreyas and Mahadevan, L. and Megason, Sean G.}, doi = {10.7554/eLife.39596}, issn = {2050084X}, journal = {eLife}, pages = {30}, title = {{Size control of the inner ear via hydraulic feedback}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {8}, year = {2019} } @article{Mouches2019, abstract = {Magnetic resonance angiography (MRA) can capture the variation of cerebral arteries with high spatial resolution. These measurements include valuable information about the morphology, geometry, and density of brain arteries, which may be useful to identify risk factors for cerebrovascular and neurological diseases at an early time point. However, this requires knowledge about the distribution and morphology of vessels in healthy subjects. The statistical arterial brain atlas described in this work is a free and public neuroimaging resource that can be used to identify vascular morphological changes. The atlas was generated based on 544 freely available multi-center MRA and T1-weighted MRI datasets. The arteries were automatically segmented in each MRA dataset and used for vessel radius quantification. The binary segmentation and vessel size information were non-linearly registered to the MNI brain atlas using the T1-weighted MRI datasets to construct atlases of artery occurrence probability, mean artery radius, and artery radius standard deviation. 
This public neuroimaging resource improves the understanding of the distribution and size of arteries in the healthy human brain.}, author = {Mouches, Pauline and Forkert, Nils D.}, doi = {10.1038/s41597-019-0034-5}, issn = {20524463}, journal = {Scientific data}, number = {1}, pages = {29}, title = {{A statistical atlas of cerebral arteries generated using multi-center MRA datasets from healthy subjects}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {6}, year = {2019} } @article{Moudgalya2019, abstract = {Inner ear disorders such as sensorineural deafness and genetic diseases may one day be treated with local drug delivery to the inner ear. Current pharmacokinetic models have been based on invasive methods to measure drug concentrations, limiting them in spatial resolution, and restricting the research to larger rodents. We developed an intracochlear pharmacokinetic model based on an imaging, learning-prediction (LP) paradigm for learning transport parameters in the murine cochlea. This was achieved using noninvasive micro-computed tomography imaging of the cochlea during in vivo infusion of a contrast agent at the basal end of scala tympani through a cochleostomy. Each scan was registered in 3-D to a cochlear atlas to segment the cochlear regions with high accuracy, enabling concentrations to be extracted along the length of each scala. These spatio-temporal concentration profiles were used to learn a concentration dependent diffusion coefficient, and transport parameters between the major scalae and to clearance. The LP model results are comparable to the current state of the art model, and can simulate concentrations for cases involving different infusion molecules and different drug delivery protocols. Forward simulation results with pulsatile delivery suggest the pharmacokinetic model can be used to optimize drug delivery protocols to reduce total drug delivered and the potential for toxic side effects. While developed in the challenging murine cochlea, the processes are scalable to larger animals and different drug infusion paradigms.}, author = {Moudgalya, Sanketh S. and Wilson, Kevin and Zhu, Xiaoxia and Budzevich, Mikalai M. and Walton, Joseph P. and Cahill, Nathan D. and Frisina, Robert D. and Borkholder, David A.}, doi = {10.1016/j.heares.2019.05.009}, issn = {18785891}, journal = {Hearing Research}, keywords = {3-D image registration,Cochlea,Contrast agent,Inner ear,Micro-computed tomography,Mouse,Optimization,Pharmacokinetics,Segmentation}, pages = {46--59}, title = {{Cochlear pharmacokinetics - Micro-computed tomography and learning-prediction modeling for transport parameter determination}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {380}, year = {2019} } @inproceedings{Mueller, abstract = {Coronary heart disease was the single largest cause of sudden death in Australia in 2002. Computed tomography angiography (CTA) provides high resolution, high contrast images of the thoracic cavity, and as such has emerged as the imaging modality of choice for diagnosing and planning treatment for coronary heart disease. However, radiologists and cardiac surgeons require tools to easily identify possible stenosis (narrowed or constricted coronary vessels) in such CTA datasets. We present a method which allows users to interactively visualise a specific three-dimensional region of interest (ROI). In our example, segmentation methods are applied to isolate the coronary vessels, which in turn are visually enhanced using various perceptual cues. 
The segmentation is achieved using a combination of thresholding, region-growing, and morphological operations. The perceptual enhancement is realized by fusing direct volume rendered images using weighting factors determined by the segmented regions. The user can allow for the easy dissemination of relevant information by adjusting 'transfer functions' to control the degree of ROI enhancement. This approach requires only roughly segmented regions of interest, and allows for the 3D visualisation of calcifications within vessels. This proposed method has significant potential for helping to facilitate the efficient treatment for coronary heart disease. Furthermore, it can be implemented at interactive framerates on comparatively cheap, desktop computing hardware making it readily accessible. {\textcopyright} 2005 IEEE.}, author = {Mueller, Daniel and Maeder, Anthony and O'Shea, Peter}, booktitle = {Proceedings of the Digital Imaging Computing: Techniques and Applications, DICTA 2005}, doi = {10.1109/DICTA.2005.1578115}, isbn = {0769524672}, pages = {110--117}, title = {{Improved direct volume visualisation of the coronary arteries using fused segmented regions}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-33846990428{\&}doi=10.1109{\%}2FDICTA.2005.1578115{\&}partnerID=40{\&}md5=d567baa5b241ec2cbf2f4ffd4a343b46}, volume = {2005}, year = {2005} } @article{Mukherjee2016, abstract = {Purpose: Head motion during PET brain imaging can cause significant degradation of image quality. Several authors have proposed ways to compensate for PET brain motion to restore image quality and improve quantitation. Head restraints can reduce movement but are unreliable; thus the need for alternative strategies such as data-driven motion estimation or external motion tracking. Herein, the authors present a data-driven motion estimation method using a preprocessing technique that allows the usage of very short duration frames, thus reducing the intraframe motion problem commonly observed in the multiple frame acquisition method. Methods: The list mode data for PET acquisition is uniformly divided into 5-s frames and images are reconstructed without attenuation correction. Interframe motion is estimated using a 3D multiresolution registration algorithm and subsequently compensated for. For this study, the authors used 8 PET brain studies that used F-18 FDG as the tracer and contained minor or no initial motion. After reconstruction and prior to motion estimation, known motion was introduced to each frame to simulate head motion during a PET acquisition. To investigate the trade-off in motion estimation and compensation with respect to frames of different length, the authors summed 5-s frames accordingly to produce 10 and 60 s frames. Summed images generated from the motion-compensated reconstructed frames were then compared to the original PET image reconstruction without motion compensation. Results: The authors found that our method is able to compensate for both gradual and step-like motions using frame times as short as 5 s with a spatial accuracy of 0.2 mm on average. Complex volunteer motion involving all six degrees of freedom was estimated with lower accuracy (0.3 mm on average) than the other types investigated. Preprocessing of 5-s images was necessary for successful image registration. Since their method utilizes nonattenuation corrected frames, it is not susceptible to motion introduced between CT and PET acquisitions. 
Conclusions: The authors have shown that they can estimate motion for frames with time intervals as short as 5 s using nonattenuation corrected reconstructed FDG PET brain images. Intraframe motion in 60-s frames causes degradation of accuracy to about 2 mm based on the motion type.}, author = {Mukherjee, J. M. and Lindsay, C. and Mukherjee, A. and Olivier, P. and Shao, L. and King, M. A. and Licho, R.}, doi = {10.1118/1.4946814}, issn = {00942405}, journal = {Medical Physics}, keywords = {PET,motion compensation,motion tracking,reconstruction,registration}, number = {5}, pages = {2443--2454}, title = {{Improved frame-based estimation of head motion in PET brain imaging}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {43}, year = {2016} } @article{Muschelli2019, abstract = {Neuroconductor (https://neuroconductor.org) is an open-source platform for rapid testing and dissemination of reproducible computational imaging software. The goals of the project are to: (i) provide a centralized repository of R software dedicated to image analysis, (ii) disseminate software updates quickly, (iii) train a large, diverse community of scientists using detailed tutorials and short courses, (iv) increase software quality via automatic and manual quality controls, and (v) promote reproducibility of image data analysis. Based on the programming language R (https://www.r-project.org/), Neuroconductor starts with 51 inter-operable packages that cover multiple areas of imaging including visualization, data processing and storage, and statistical inference. Neuroconductor accepts new R package submissions, which are subject to a formal review and continuous automated testing. We provide a description of the purpose of Neuroconductor and the user and developer experience.}, author = {Muschelli, John and Gherman, Adrian and Fortin, Jean Philippe and Avants, Brian and Whitcher, Brandon and Clayden, Jonathan D. and Caffo, Brian S. and Crainiceanu, Ciprian M.}, doi = {10.1093/biostatistics/kxx068}, issn = {14684357}, journal = {Biostatistics}, keywords = {Bioinformatics,Image analysis,Statistical modelling}, number = {2}, pages = {218--239}, title = {{Neuroconductor: An R platform for medical imaging analysis}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {20}, year = {2019} } @article{Mutsvangwa2015, abstract = {This paper presents development of statistical shape models based on robust and rigid-groupwise registration followed by pointset nonrigid registration. The main advantages of the pipeline include automation in that the method does not rely on manual landmarks or a regionalization step; there is no bias in the choice of reference during the correspondence steps and the use of the probabilistic principal component analysis framework increases the domain of the shape variability. A comparison between the widely used expectation maximization-iterative closest point algorithm and a recently reported groupwise method on publicly available data (hippocampus) using the well-known criteria of generality, specificity, and compactness is also presented. The proposed method gives similar values but the curves of generality and specificity are superior to those of the other two methods. 
Finally, the method is applied to the human scapula, which is a known difficult structure, and the human humerus.}, author = {Mutsvangwa, Tinashe and Burdin, Valerie and Schwartz, Cedric and Roux, Christian}, doi = {10.1109/TBME.2014.2368362}, issn = {15582531}, journal = {IEEE Transactions on Biomedical Engineering}, keywords = {humerus,scapula,statistical shape model}, number = {4}, pages = {1098--1107}, title = {{An Automated Statistical Shape Model Developmental Pipeline: Application to the Human Scapula and Humerus}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {62}, year = {2015} } @article{Nagatomi2005, abstract = {Spinal cord injuries (SCI) often lead to severe bladder dysfunctions. Our previous studies have demonstrated that following SCI, rat bladder wall tissue became hypertrophied, significantly more compliant, and changed its mechanical behavior from orthotropic to isotropic. In order to elucidate the link between the tissue microstructure and mechanical properties of the wall, we have developed a novel semi-automated image analysis method to quantify smooth muscle bundle orientation and mass fraction in the bladder wall tissues from normal and 10 day-post-SCI rats. Results of the present study revealed that there were significant (p {\textless} 0.05) increases in smooth muscle area fractions as well as significantly (p {\textless} 0.001) fewer cell nuclei per muscle area in the SCI groups compared to the normal groups. Furthermore, while the normal rat bladders exhibited predominant smooth muscle orientation only in the longitudinal direction, the SCI rat bladders exhibited smooth muscles oriented in both the circumferential and longitudinal directions. These results provide first evidence that bladder smooth muscle cells exhibit hypertrophy rather than hyperplasia and developed a second, orthogonal orientation of smooth muscle bundles following SCI. The results of the present study corroborate our previous mechanical anisotropy data and provide the basis for development of structure-based constitutive models for urinary bladder wall tissue. {\textcopyright} 2005 Biomedical Engineering Society.}, author = {Nagatomi, Jiro and Toosi, K. Khashayar and Grashow, Jonathan S. and Chancellor, Michael B. and Sacks, Michael S.}, doi = {10.1007/s10439-005-5776-x}, issn = {00906964}, journal = {Annals of Biomedical Engineering}, keywords = {Histomorphometery,Image analysis,Mechanical anisotropy,Smooth muscle}, number = {8}, pages = {1078--1089}, title = {{Quantification of bladder smooth muscle orientation in normal and spinal cord injured rats}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-23844453003{\&}doi=10.1007{\%}2Fs10439-005-5776-x{\&}partnerID=40{\&}md5=c94d2c670513b414f8e81daf45d810d7}, volume = {33}, year = {2005} } @article{Namias2016, abstract = {Segmenting structures of interest in medical images is an important step in different tasks such as visualization, quantitative analysis, simulation, and image-guided surgery, among several other clinical applications. Numerous segmentation methods have been developed in the past three decades for extraction of anatomical or functional structures on medical imaging. Deformable models, which include the active contour models or snakes, are among the most popular methods for image segmentation combining several desirable features such as inherent connectivity and smoothness. 
Even though different approaches have been proposed and significant work has been dedicated to the improvement of such algorithms, there are still challenging research directions as the simultaneous extraction of multiple objects and the integration of individual techniques. This paper presents a novel open-source framework called deformable model array (DMA) for the segmentation of multiple and complex structures of interest in different imaging modalities. While most active contour algorithms can extract one region at a time, DMA allows integrating several deformable models to deal with multiple segmentation scenarios. Moreover, it is possible to consider any existing explicit deformable model formulation and even to incorporate new active contour methods, allowing to select a suitable combination in different conditions. The framework also introduces a control module that coordinates the cooperative evolution of the snakes and is able to solve interaction issues toward the segmentation goal. Thus, DMA can implement complex object and multi-object segmentations in both 2D and 3D using the contextual information derived from the model interaction. These are important features for several medical image analysis tasks in which different but related objects need to be simultaneously extracted. Experimental results on both computed tomography and magnetic resonance imaging show that the proposed framework has a wide range of applications especially in the presence of adjacent structures of interest or under intra-structure inhomogeneities giving excellent quantitative results.}, author = {Nam{\'{i}}as, Rafael and D'Amato, Juan Pablo and del Fresno, Mariana and V{\'{e}}nere, Marcelo and Pirr{\'{o}}, Nicola and Bellemare, Marc Emmanuel}, doi = {10.1007/s11517-015-1387-3}, issn = {17410444}, journal = {Medical and Biological Engineering and Computing}, keywords = {Collision control,Complex segmentation,Deformable models,Multi-object segmentation,Segmentation framework}, number = {8}, pages = {1181--1192}, title = {{Multi-object segmentation framework using deformable models for medical imaging analysis}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {54}, year = {2016} } @article{Nantavithya2020, abstract = {Introduction: Patterns of local, regional, and distant failure after stereotactic ablative radiotherapy (SABR) for early-stage non-small cell lung cancer (NSCLC) have been widely reported. However, reliable methods for analyzing causes of local failure are lacking. We describe a method for analyzing and reporting patterns of in-field recurrence after SABR, incorporating dosimetric parameters from initial treatment plan as well as geometric information from diagnostic images at recurrence. Material and methods: Diagnostic CT images at recurrence were registered with initial treatment planning images and radiation dose by deformable image registration. Recurrent gross tumor volume (rGTV) and centroid (geometric center of rGTV) were delineated. In-field failure was classified as centroids originating within the original planning target volume. Dose-volume histograms for each rGTV were used to further classify in-field recurrences as central high-dose (dose to 95{\%} of rGTV [rGTVD95{\%}] ≥95{\%} of dose prescribed to PTV) or peripheral high-dose (rGTVD95{\%} {\textless}95{\%} of dose prescribed to PTV). Results: 634 patients received SABR from 2004 to 2014 with 48 local recurrences. 
35 of these had evaluable images with 16 in-field recurrences: 9 central high-dose, 6 peripheral high-dose, and 1 had both. Time to and volume of recurrence were not statistically different between central versus peripheral high-dose recurrences. However mean rGTV dose, mean centroid dose, and rGTVD95{\%} were higher for central versus peripheral high-dose recurrences. Conclusion: We report a standardized method for analysis and classification of in-field recurrence after SABR. There were more central as opposed to peripheral high-dose recurrences, suggesting biological rather than technical issues underlying majority of in-field failures.}, author = {Nantavithya, Chonnipa and Gomez, Daniel R. and Chang, Joe Y. and Mohamed, Abdallah S.R. and Fuller, C. David and Li, Heng and Brooks, Eric D. and Gandhi, Saumil J.}, doi = {10.1016/j.radonc.2020.01.002}, issn = {18790887}, journal = {Radiotherapy and Oncology}, keywords = {Early-stage non-small cell lung cancer,In-field recurrence,Stereotactic ablative radiotherapy}, pages = {209--214}, title = {{An improved method for analyzing and reporting patterns of in-field recurrence after stereotactic ablative radiotherapy in early-stage non-small cell lung cancer}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85079230198{\&}doi=10.1016{\%}2Fj.radonc.2020.01.002{\&}partnerID=40{\&}md5=fe2e0bf370b46396d4913f6e5289a860}, volume = {145}, year = {2020} } @article{Nardelli2015, abstract = {Background: Computed tomography (CT) helps physicians locate and diagnose pathological conditions. In some conditions, having an airway segmentation method which facilitates reconstruction of the airway from chest CT images can help hugely in the assessment of lung diseases. Many efforts have been made to develop airway segmentation algorithms, but methods are usually not optimized to be reliable across different CT scan parameters. Methods: In this paper, we present a simple and reliable semi-automatic algorithm which can segment tracheal and bronchial anatomy using the open-source 3D Slicer platform. The method is based on a region growing approach where trachea, right and left bronchi are cropped and segmented independently using three different thresholds. The algorithm and its parameters have been optimized to be efficient across different CT scan acquisition parameters. The performance of the proposed method has been evaluated on EXACT'09 cases and local clinical cases as well as on a breathing pig lung phantom using multiple scans and changing parameters. In particular, to investigate multiple scan parameters reconstruction kernel, radiation dose and slice thickness have been considered. Volume, branch count, branch length and leakage presence have been evaluated. A new method for leakage evaluation has been developed and correlation between segmentation metrics and CT acquisition parameters has been considered. Results: All the considered cases have been segmented successfully with good results in terms of leakage presence. Results on clinical data are comparable to other teams' methods, as obtained by evaluation against the EXACT09 challenge, whereas results obtained from the phantom prove the reliability of the method across multiple CT platforms and acquisition parameters. As expected, slice thickness is the parameter affecting the results the most, whereas reconstruction kernel and radiation dose seem not to particularly affect airway segmentation. 
Conclusion: The system represents the first open-source airway segmentation platform. The quantitative evaluation approach presented represents the first repeatable system evaluation tool for like-for-like comparison between different airway segmentation platforms. Results suggest that the algorithm can be considered stable across multiple CT platforms and acquisition parameters and can be considered as a starting point for the development of a complete airway segmentation algorithm.}, author = {Nardelli, Pietro and Khan, Kashif A. and Corv{\`{o}}, Alberto and Moore, Niamh and Murphy, Mary J. and Twomey, Maria and O'Connor, Owen J. and Kennedy, Marcus P. and Est{\'{e}}par, Ra{\'{u}}l San Jos{\'{e}} and Maher, Michael M. and Cantillon-Murphy, P{\'{a}}draig}, doi = {10.1186/s12938-015-0060-2}, issn = {1475925X}, journal = {BioMedical Engineering Online}, keywords = {3D Slicer,Airway segmentation,Computed tomography (CT),ITK,Image processing,Lung,Region growing}, number = {1}, pages = {24}, title = {{Optimizing parameters of an open-source airway segmentation algorithm using different CT images}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {14}, year = {2015} } @article{Narizzano2017, abstract = {Background: In the evaluation of Stereo-Electroencephalography (SEEG) signals, the physicist's workflow involves several operations, including determining the position of individual electrode contacts in terms of both relationship to grey or white matter and location in specific brain regions. These operations are (i) generally carried out manually by experts with limited computer support, (ii) hugely time consuming, and (iii) often inaccurate, incomplete, and prone to errors. Results: In this paper we present SEEG Assistant, a set of tools integrated in a single 3DSlicer extension, which aims to assist neurosurgeons in the analysis of post-implant structural data and hence aid the neurophysiologist in the interpretation of SEEG data. SEEG Assistant consists of (i) a module to localize the electrode contact positions using imaging data from a thresholded post-implant CT, (ii) a module to determine the most probable cerebral location of the recorded activity, and (iii) a module to compute the Grey Matter Proximity Index, i.e. the distance of each contact from the cerebral cortex, in order to discriminate between white and grey matter location of contacts. Finally, exploiting 3DSlicer capabilities, SEEG Assistant offers a Graphical User Interface that simplifies the interaction between the user and the tools. SEEG Assistant has been tested on 40 patients segmenting 555 electrodes, and it has been used to identify the neuroanatomical loci and to compute the distance to the nearest cerebral cortex for 9626 contacts. We also performed manual segmentation and compared the results between the proposed tool and gold-standard clinical practice. As a result, the use of SEEG Assistant decreases the post implant processing time by more than 2 orders of magnitude, improves the quality of results and decreases, if not eliminates, errors in post implant processing. Conclusions: The SEEG Assistant Framework for the first time supports physicists by providing a set of open-source tools for post-implant processing of SEEG data. 
Furthermore, SEEG Assistant has been integrated into 3D Slicer, a software platform for the analysis and visualization of medical images, overcoming limitations of command-line tools.}, author = {Narizzano, Massimo and Arnulfo, Gabriele and Ricci, Serena and Toselli, Benedetta and Tisdall, Martin and Canessa, Andrea and Fato, Marco Massimo and Cardinale, Francesco}, doi = {10.1186/s12859-017-1545-8}, issn = {14712105}, journal = {BMC Bioinformatics}, keywords = {Automatic segmentation,Epilepsy,Epileptic zone detections,GMPI,Medical imaging,SEEG}, number = {1}, pages = {13}, title = {{SEEG assistant: A 3DSlicer extension to support epilepsy surgery}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {18}, year = {2017} } @inproceedings{Nett, abstract = {C-arm based cone-beam CT (CBCT) has evolved into a routine clinical imaging modality to provide three-dimensional tomographic image guidance before, during, and after an interventional procedure. It is often used to update the clinician to the state of the patient anatomy and interventional tool placement. Due to the repeated use of CBCT, the accumulated radiation dose in an interventional procedure has become a concern. There is a strong desire from both patients and health care providers to reduce the radiation exposure required for these exams. The overall objective of this work is to propose and validate a method to significantly reduce the total radiation dose used during a CBCT image guided intervention. The basic concept is that the first cone-beam CT scan acquired at the full dose will be used to constrain the reconstruction of the later CBCT scans acquired at a much lower radiation dose. A recently developed new image reconstruction algorithm, Prior Image Constrained Compressed Sensing (PICCS), was used to reconstruct subsequent CBCT images with lower dose. This application differs from other applications of the PICCS algorithm, such as time-resolved CT or four-dimensional CBCT (4DCBCT), because the patient position may be frequently changed from one CBCT scan to another during the procedure. Thus, an image registration step to account for the change in patient position is indispensable for use of the PICCS image reconstruction algorithm. In this paper, the image registration step is combined with the PICCS algorithm to enable radiation dose reduction in CBCT image guided interventions. Experimental results acquired from a clinical C-arm system using a human cadaver were used to validate the PICCS algorithm based radiation dose reduction scheme. Using the proposed method in this paper, it has been demonstrated that, instead of 300 view angles, this technique requires about 20 cone-beam view angles to reconstruct CBCT angiograms.
This signals a radiation dose reduction by a factor of approximately fifteen for subsequent acquisitions.}, author = {Nett, Brian and Tang, Jie and Aagaard-Kienitz, Beverly and Rowley, Howard and Chen, Guang-Hong}, booktitle = {Medical Imaging 2009: Physics of Medical Imaging}, doi = {10.1117/12.813800}, isbn = {9780819475091}, issn = {16057422}, pages = {725803}, title = {{Low radiation dose C-arm cone-beam CT based on prior image constrained compressed sensing (PICCS): including compensation for image volume mismatch between multiple data acquisitions}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-66749095565{\&}doi=10.1117{\%}2F12.813800{\&}partnerID=40{\&}md5=7427199cbd63c3c0e9c00bf6c4c4ff1e}, volume = {7258}, year = {2009} } @article{Nketia2017, abstract = {Advances in optical microscopy, biosensors and cell culturing technologies have transformed live cell imaging. Thanks to these advances live cell imaging plays an increasingly important role in basic biology research as well as at all stages of drug development. Image analysis methods are needed to extract quantitative information from these vast and complex data sets. The aim of this review is to provide an overview of available image analysis methods for live cell imaging, in particular required preprocessing, image segmentation, cell tracking and data visualisation methods. The potential opportunities recent advances in machine learning, especially deep learning, and computer vision provide are being discussed. This review includes an overview of the different available software packages and toolkits.}, author = {Nketia, Thomas A. and Sailem, Heba and Rohde, Gustavo and Machiraju, Raghu and Rittscher, Jens}, doi = {10.1016/j.ymeth.2017.02.007}, issn = {10959130}, journal = {Methods}, keywords = {Biological image analysis,Cell segmentation,Cell tracking,Live cell imaging,Machine learning,Quantitative biological imaging}, pages = {65--79}, title = {{Analysis of live cell images: Methods, tools and opportunities}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {115}, year = {2017} } @article{Oelschlagel2013, abstract = {Intraoperative optical imaging (IOI) is a localization method for functional areas of the human brain cortex during neurosurgical procedures. The aim of the current work was to develop a new analysis technique for the computation of two-dimensional IOI activity maps that is suited especially for use in clinical routine. The new analysis technique includes a stimulation scheme that comprises 30-s rest and 30-s stimulation conditions, in connection with pixelwise spectral power analysis for activity map calculation. A software phantom was used for verification of the implemented algorithms as well as for the comparison with the commonly used relative difference imaging method. Furthermore, the analysis technique was tested using intraoperative measurements on eight patients. The comparison with the relative difference algorithm revealed an averaged improvement of the signal-to-noise ratio between 95{\%} and 130{\%} for activity maps computed from intraoperatively acquired patient datasets. The results show that the new imaging technique improves the activity map quality of IOI especially under difficult intraoperative imaging conditions and is therefore especially suited for use in clinical routine. {\textcopyright} 2013 Walter de Gruyter GmbH.}, author = {Oelschlagel, Martin and Meyer, Tobias and Wahl, Hannes and Sobottka, Stephan B.
and Kirsch, Matthias and Schackert, Gabriele and Morgenstern, Ute}, doi = {10.1515/bmt-2012-0077}, issn = {00135585}, journal = {Biomedizinische Technik}, keywords = {Data analysis,Intrinsic signals,Optical imaging,Spectral analysis}, number = {3}, pages = {257--267}, title = {{Evaluation of intraoperative optical imaging analysis methods by phantom and patient measurements}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {58}, year = {2013} } @inproceedings{Oliveira, abstract = {This paper presents a method based on level sets to segment the liver using Computer Tomography (CT) images. Initially, the liver boundary is manually set in one slice as an initial solution, and then the method automatically segments the liver in all other slices, sequentially. In each step of iteration it fits a Gaussian curve to the liver histogram to model the speed image in which the level sets propagates. The parameters of our method were estimated using Genetic Algorithms (GA) and a database of reference segmentations. The method was tested using 20 different exams and five different measures of performance, and the results obtained confirm the potential of the method. The cases in which the method presented a poor performance are also discussed in order to instigate further research.}, author = {Oliveira, D{\'{a}}rio A.B. and Feitosa, Raul Q. and Correia, Mauro M.}, booktitle = {VISAPP 2009 - Proceedings of the 4th International Conference on Computer Vision Theory and Applications}, doi = {10.5220/0001787401540159}, isbn = {9789898111692}, keywords = {Computer tomography,Genetic algorithms,Level sets,Liver segmentation,Medical imaging}, pages = {154--159}, title = {{Liver segmentation using level sets and genetic algorithms}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-70349298510{\&}partnerID=40{\&}md5=921f6aa504ccbbbf03902b5d0fbb632b}, volume = {2}, year = {2009} } @article{Oliveira2011, abstract = {Background: Cancer treatments are complex and involve different actions, which include many times a surgical procedure. Medical imaging provides important information for surgical planning, and it usually demands a proper segmentation, i.e., the identification of meaningful objects, such as organs and lesions. This study proposes a methodology to segment the liver, its vessels and nodules from computer tomography images for surgical planning. Methods: The proposed methodology consists of four steps executed sequentially: segmentation of liver, segmentation of vessels and nodules, identification of hepatic and portal veins, and segmentation of Couinaud anatomical segments. Firstly, the liver is segmented by a method based on a deformable model implemented through level sets, of which parameters are adjusted by using a supervised optimization procedure. Secondly, a mixture model is used to segment nodules and vessels through a region growing process. Then, the identification of hepatic and portal veins is performed using liver anatomical knowledge and a vein tracking algorithm. Finally, the Couinaud anatomical segments are identified according to the anatomical liver model proposed by Couinaud. Results: Experiments were conducted using data and metrics brought from the liver segmentation competition held in the Sliver07 conference. A subset of five exams was used for estimation of segmentation parameter values, while 15 exams were used for evaluation.
The method attained a good performance in 17 of the 20 exams, being ranked as the 6th best semi-automatic method when compared to the methods described on the Sliver07 website (2008). It attained visually consistent results for nodule and vein segmentation, and we compiled the results, showing the best, worst, and mean results for the whole dataset. Conclusions: The method for liver segmentation performed well, according to the results of the numerical evaluation implemented, and the segmentation of liver internal structures was consistent with the anatomy of the liver, as confirmed by a specialist. The analysis provided evidence that the method to segment the liver may be applied to segment other organs, especially to those whose distribution of voxel intensities is nearly Gaussian shaped. {\textcopyright} 2011 Oliveira et al.; licensee BioMed Central Ltd.}, author = {Oliveira, D{\'{a}}rio A.B. and Feitosa, Raul Q. and Correia, Mauro M.}, doi = {10.1186/1475-925X-10-30}, issn = {1475925X}, journal = {BioMedical Engineering Online}, title = {{Segmentation of liver, its vessels and lesions from CT images for surgical planning}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-79954573225{\&}doi=10.1186{\%}2F1475-925X-10-30{\&}partnerID=40{\&}md5=c81b11df842390263ed95985f684c387}, volume = {10}, year = {2011} } @article{Ozbolat2016, abstract = {In order to bioprint living tissue and organ constructs, patient-specific anatomical models need to be acquired; however, these models mainly provide external surface information only. The internal architecture of tissue constructs plays a crucial role as it provides a porous environment for media exchange, vascularization, tissue growth and engraftment. This review presents design requirements for bioprinting and discusses currently available medical imaging techniques used in acquisition of anatomical models including magnetic resonance imaging (MRI) and computed tomography (CT), and compares their strengths and limitations. Then, consideration for design architecture is discussed and various approaches in blueprint modeling of tissue constructs are presented for creation of porous architectures. Next, existing toolpath planning approaches for bioprinting of tissues and organs are presented. Design limitations for bioprinting are discussed and future perspectives are provided to the reader.}, author = {Ozbolat, Ibrahim and Gudapati, Hemanth}, doi = {10.1016/j.bprint.2016.11.001}, issn = {24058866}, journal = {Bioprinting}, keywords = {Blueprint modeling,Design requirements for bioprinting,Medical imaging,Toolpath planning}, pages = {1--14}, title = {{A review on design for bioprinting}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85016144668{\&}doi=10.1016{\%}2Fj.bprint.2016.11.001{\&}partnerID=40{\&}md5=be8ae863546f5861c3ed0df09aaee711}, volume = {3}, year = {2016} } @book{Ozbolat2016a, author = {Ozbolat, I T}, pages = {1--342}, series = {3D Bioprinting: Fundamentals, Principles and Applications}, title = {{3D Bioprinting: Fundamentals, Principles and Applications}}, type = {Book}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85014448877{\&}partnerID=40{\&}md5=e0d277bc12f4ddba45e2cdccfbae6cbc}, year = {2016} } @book{Ozbolat2017, abstract = {To bioprint tissue and organ constructs, patient-specific anatomical models need to be obtained; however, these models mainly provide external surface information only.
The internal architecture of tissue constructs plays a crucial role as it provides a porous environment for media exchange, vascularization, tissue growth, and engraftment. This chapter discusses currently available medical imaging techniques used in acquisition of anatomical models, including magnetic resonance imaging, computed tomography, and ultrasound, and compares their strengths and limitations. Then, consideration for design architecture is discussed, and various approaches in blueprint modeling of tissue constructs are presented for creation of porous architectures. Next, existing toolpath planning approaches for bioprinting of tissues and organs are presented. Design limitations for bioprinting are discussed, and future perspectives are provided to the reader.}, address = {London}, author = {Ozbolat, Ibrahim T.}, booktitle = {3D Bioprinting}, doi = {10.1016/b978-0-12-803010-3.00002-0}, isbn = {978-0-12-803030-1; 978-0-12-803010-3}, pages = {13--39}, publisher = {Academic Press Ltd-Elsevier Science Ltd}, series = {3d Bioprinting: Fundamentals, Principles and Applications}, title = {{Design for Bioprinting}}, type = {Book}, url = {{\%}3CGo to}, year = {2017} } @article{Park2017, abstract = {Purpose: We developed an image-guided intervention robot system that can be operated in a magnetic resonance (MR) imaging gantry. The system incorporates a bendable needle intervention robot for breast cancer patients that overcomes the space limitations of the MR gantry. Methods: Most breast coil designs for breast MR imaging have side openings to allow manual localization. However, for many intervention procedures, the patient must be removed from the gantry. A robotic manipulation system with integrated image guidance software was developed. Our robotic manipulator was designed to be slim, so as to fit between the patient's side and the MR gantry wall. Only non-magnetic materials were used, and an electromagnetic shield was employed for cables and circuits. The image guidance software was built using open source libraries. In situ feasibility tests were performed in a 3-T MR system. One target point in the breast phantom was chosen by the clinician for each experiment, and our robot moved the needle close to the target point. Results: Without image-guided feedback control, the needle end could not hit the target point (distance = 5 mm) in the first experiment. Using our robotic system, the needle hits the target lesion of the breast phantom at a distance of 2.3 mm from the same target point using image-guided feedback. The second experiment was performed using other target points, and the distance between the final needle end point and the target point was 0.8 mm. Conclusions: We successfully developed an MR-guided needle intervention robot for breast cancer patients. 
Further research will allow the expansion of these interventions.}, author = {Park, Samuel Byeongjun and Kim, Jung Gun and Lim, Ki Woong and Yoon, Chae Hyun and Kim, Dong Jun and Kang, Han Sung and Jo, Yung Ho}, doi = {10.1007/s11548-017-1528-2}, issn = {18616429}, journal = {International Journal of Computer Assisted Radiology and Surgery}, keywords = {Bendable needle,Breast cancer,Magnetic resonance imaging,Medical robotics,Needle intervention}, number = {8}, pages = {1319--1331}, title = {{A magnetic resonance image-guided breast needle intervention robot system: overview and design considerations}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {12}, year = {2017} } @article{Parsons2017, author = {Parsons, Stephen and Parker, C. Seth and Seales, W. Brent}, doi = {10.1353/mns.2017.0022}, issn = {23801190}, journal = {Manuscript Studies}, number = {2}, pages = {483--498}, title = {{The St. Chad Gospels: Diachronic manuscript registration and visualization}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {2}, year = {2017} } @article{Patel2018, abstract = {Purpose: To assess variability in corneal ulcer measurements between ophthalmologists and reduce clinician-dependent variability using semiautomated segmentation of the ulcer from photographs. Methods: Three ophthalmologists measured 50 patients' eyes for epithelial defects (EDs) and the stromal infiltrate (SI) size using slit-lamp (SL) calipers. SL photographs were obtained. An algorithm was developed for semiautomatic segmenting of the ED and SI in the photographs. Semiautomatic segmentation was repeated 3 times by different users (2 ophthalmologists and 1 trainee). Clinically significant variability was assessed with intraclass correlation coefficients (ICCs) and the percentage of pairwise measurements differing by ≥0.5 mm. Semiautomatic segmentation measurements were compared with manual delineation of the image by a corneal specialist (gold standard) using Dice similarity coefficients. Results: Ophthalmologists' reliability in measurements by SL calipers had an ICC from 0.84 to 0.88 between examiners. Measurements by semiautomatic segmentation had an ICC from 0.96 to 0.98. SL measures of ulcers by clinical versus semiautomatic segmentation measures differed by ≥0.5 mm in 24{\%} to 38{\%} versus 8{\%} to 28{\%} (ED height); 30{\%} to 52{\%} versus 12{\%} to 34{\%} (ED width); 26{\%} to 38{\%} versus 10{\%} to 32{\%} (SI height); and 38{\%} to 58{\%} versus 14{\%} to 34{\%} (SI width), respectively. Average Dice similarity coefficients between manual and repeated semiautomatic segmentation ranged from 0.83 to 0.86 for the ED and 0.78 to 0.83 for the SI. Conclusions: Variability exists when measuring corneal ulcers, even among ophthalmologists. Photography and computerized methods for quantifying the ulcer size could reduce variability while remaining accurate and impact quantitative measurement endpoints.}, author = {Patel, Tapan P. and Prajna, N. Venkatesh and Farsiu, Sina and Valikodath, Nita G. and Niziol, Leslie M. 
and Dudeja, Lakshey and Kim, Kyeong Hwan and Woodward, Maria A.}, doi = {10.1097/ICO.0000000000001488}, issn = {15364798}, journal = {Cornea}, keywords = {corneal ulcer,interexaminer variability,random forest segmentation,semiautomated measurement}, number = {3}, pages = {331--339}, pmid = {29256985}, title = {{Novel Image-Based Analysis for Reduction of Clinician-Dependent Variability in Measurement of the Corneal Ulcer Size}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {37}, year = {2018} } @article{Patterson2018, abstract = {Histology-directed imaging mass spectrometry (IMS) is a spatially targeted IMS acquisition method informed by expert annotation that provides rapid molecular characterization of select tissue structures. The expert annotations are usually determined on digital whole slide images of histological stains where the staining preparation is incompatible with optimal IMS preparation, necessitating serial sections: one for annotation, one for IMS. Registration is then used to align staining annotations onto the IMS tissue section. Herein, we report a next-generation histology-directed platform implementing IMS-compatible autofluorescence (AF) microscopy taken prior to any staining or IMS. The platform enables two histology-directed workflows, one that improves the registration process between two separate tissue sections using automated, computational monomodal AF-to-AF microscopy image registration, and a registration-free approach that utilizes AF directly to identify ROIs and acquire IMS on the same section. The registration approach is fully automated and delivers state of the art accuracy in histology-directed workflows for transfer of annotations (-3-10 $\mu$m based on 4 organs from 2 species) while the direct AF approach is registration-free, allowing targeting of the finest structures visible by AF microscopy. We demonstrate the platform in biologically relevant case studies of liver stage malaria and human kidney disease with spatially targeted acquisition of sparsely distributed (composing less than one tenth of 1{\%} of the tissue section area) malaria infected mouse hepatocytes and glomeruli in the human kidney case study.}, author = {Patterson, Nathan Heath and Tuck, Michael and Lewis, Adam and Kaushansky, Alexis and Norris, Jeremy L. and {Van De Plas}, Raf and Caprioli, Richard M.}, doi = {10.1021/acs.analchem.8b02885}, issn = {15206882}, journal = {Analytical Chemistry}, number = {21}, pages = {12404--12413}, title = {{Next Generation Histology-Directed Imaging Mass Spectrometry Driven by Autofluorescence Microscopy}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {90}, year = {2018} } @article{Paun2018, abstract = {During embryogenesis, a mammalian heart develops from a simple tubular shape into a complex 4-chamber organ, going through four distinct phases: early primitive tubular heart, emergence of trabeculations, trabecular remodeling and development of the compact myocardium. In this paper we propose a framework for standardized and subject-independent 3D regional myocardial complexity analysis, applied to analysis of the development of the mouse left ventricle. 
We propose a standardized subdivision of the myocardium into 3D overlapping regions (in our case 361) and a novel visualization of myocardial complexity, whereupon we: 1) extend the fractal dimension, commonly applied to image slices, to 3D and 2) use volume occupied by the trabeculations in each region together with their surface area, in order to quantify myocardial complexity. The latter provides an intuitive characterization of the complexity, given that compact myocardium will tend to occupy a larger volume with little surface area while high surface area with low volume will correspond to highly trabeculated areas. Using 50 mouse embryo images at 5 different gestational ages (10 subjects per gestational age), we demonstrate how the proposed representation and complexity measures describe the development of LV myocardial complexity. The mouse embryo data was acquired using high resolution episcopic microscopy. The complexity analysis per region was carried out using: 3D fractal dimension, myocardial volume, myocardial surface area and ratio between the two. The analysis of gestational ages was performed on embryos of 14.5, 15.5, 16.5, 17.5 and 18.5 embryonic days, and demonstrated that the regional complexity of the trabeculations increases longitudinally from the base to the apex, with a maximum around the middle. The overall complexity decreases with gestational age, being most complex at 14.5. Circumferentially, at ages 14.5, 15.5 and 16.5, the trabeculations show similar complexity everywhere except for the anteroseptal and inferolateral area of the wall, where it is smaller. At 17.5 days, the regions of high complexity become more localized towards the inferoseptal and anterolateral parts of the wall. At 18.5 days, the high complexity area exhibits further localization at the inferoseptal and anterior part of the wall.}, author = {Paun, Bruno and Bijnens, Bart and Cook, Andrew C. and Mohun, Timothy J. and Butakoff, Constantine}, doi = {10.1016/j.media.2018.08.001}, issn = {13618423}, journal = {Medical Image Analysis}, keywords = {3D fractal analysis,Cardiac embryology,Cardiac morphogenesis,Cardiac trabeculations,High resolution episcopic microscopy}, pages = {89--104}, title = {{Quantification of the detailed cardiac left ventricular trabecular morphogenesis in the mouse embryo}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {49}, year = {2018} } @article{Payan2019, abstract = {Purpose: The aim of this prospective study is to analyze the global tumor blood flow (BF) and its heterogeneity in newly diagnosed breast cancer (BC) according to tumor biological characteristics and molecular subtypes. These perfusion parameters were compared to those classically derived from metabolic studies to investigate links between perfusion and metabolism. Methods: Two hundred seventeen newly diagnosed BC patients underwent a 18F-FDG PET/CT exam before any treatment. A 2-min dynamic acquisition, centered on the chest, was performed immediately after intravenous injection of 3 MBq/kg of 18F-FDG, followed by a two-step static acquisition 90 min later. Tumor BF was calculated (in ml/min/g) using a single compartment kinetic model. In addition to standard PET parameters, texture features (TF) describing the heterogeneity of tumor perfusion and metabolism were extracted. Patients were divided into three groups: Luminal (HR+/HER2-), HER2 (HER2+), and TN (HR-/HER2-). 
Global and TF parameters of BF and metabolism were compared in different groups of patients according to tumor biological characteristics. Results: Tumors with lymph node involvement showed a higher perfusion, whereas no significant differences in SUV{\_}max or SUV{\_}mean were reported. TN tumors had a higher metabolic activity than HER2 and luminal tumors but no significant differences in global BF values were noted. HER2 tumors exhibited a larger tumor heterogeneity of both perfusion and metabolism compared to luminal and TN tumors. Heterogeneity of perfusion appeared well correlated to that of metabolism. Conclusions: The study of breast cancer perfusion shows a higher BF in large tumors and in tumors with lymph node involvement, not paralleled by similar modifications in tumor global metabolism. In addition, the observed correlation between the perfusion heterogeneity and the metabolism heterogeneity suggests that tumor perfusion and consequently the process of tumor angiogenesis might be involved in the metabolism heterogeneity previously shown in BC.}, author = {Payan, Neree and Presles, Benoit and Brunotte, Fran{\c{c}}ois and Coutant, Charles and Desmoulins, Isabelle and Vrigneaud, Jean Marc and Cochet, Alexandre}, doi = {10.1007/s00259-019-04422-4}, issn = {16197089}, journal = {European Journal of Nuclear Medicine and Molecular Imaging}, keywords = {18F-FDG PET/CT,Blood flow,Breast cancer,Heterogeneity,Textural features}, title = {{Biological correlates of tumor perfusion and its heterogeneity in newly diagnosed breast cancer using dynamic first-pass 18F-FDG PET/CT}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85070290987{\&}doi=10.1007{\%}2Fs00259-019-04422-4{\&}partnerID=40{\&}md5=a6426041023e645a74b0ccede063cd2f}, year = {2019} } @article{Peng2008, abstract = {In recent years, the deluge of complicated molecular and cellular microscopic images creates compelling challenges for the image computing community. There has been an increasing focus on developing novel image processing, data mining, database and visualization techniques to extract, compare, search and manage the biological knowledge in these data-intensive problems. This emerging new area of bioinformatics can be called 'bioimage informatics'. This article reviews the advances of this field from several aspects, including applications, key techniques, available tools and resources. Application examples such as high-throughput/high-content phenotyping and atlas building for model organisms demonstrate the importance of bioimage informatics. The essential techniques to the success of these applications, such as bioimage feature identification, segmentation and tracking, registration, annotation, mining, image data management and visualization, are further summarized, along with a brief overview of the available bioimage databases, analysis tools and other resources. {\textcopyright} 2008 The Author(s).}, author = {Peng, Hanchuan}, doi = {10.1093/bioinformatics/btn346}, issn = {13674803}, journal = {Bioinformatics}, number = {17}, pages = {1827--1836}, title = {{Bioimage informatics: A new area of engineering biology}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {24}, year = {2008} } @article{Pennati2014, abstract = {Rationale and Objectives: The assessment of regional ventilation is of critical importance when investigating lung function during disease progression and planning of pulmonary interventions. 
Recently, different computed tomography (CT)-based parameters have been proposed as surrogates of lung ventilation. The aim of the present study was to compare these parameters, namely variations of density ($\delta$HU), specific volume (sVol), and specific gas volume ($\delta$SVg) between different lung volumes, in relation to their topographic distribution within the lung. Materials and Methods: Ten healthy volunteers were scanned via high-resolution CT at residual volume (RV) and total lung capacity (TLC); $\delta$HU, sVol, and $\delta$SVg were mapped voxel by voxel after registering TLC onto RV. Variations of the three parameters along the vertical and horizontal directions were analyzed. Results: Along the vertical direction (from ventral to dorsal regions), a strong dependence on gravity was found in $\delta$HU and sVol, with greater values in the dorsal regions of the lung (P{\textless}.001), whereas $\delta$SVg was more homogeneously distributed within the lung. Conversely, along the caudocranial direction (from lung bases to apexes) where no gravitational gradient is present, the three parameters behaved similarly, with lower values at the apices. Conclusions: $\delta$HU, sVol, and $\delta$SVg behave differently along the gravity direction. As the greater amount of air delivered to the dependent portion of the lung supplies a larger number of alveoli, the amount of gas delivered to alveoli compared to the mass of tissue is not gravity dependent. The minimization of gravity dependence in the distribution of ventilation when using $\delta$SVg suggests that this parameter is more reliable to discriminate healthy from pathologic regions.}, author = {Pennati, Francesca and Salito, Caterina and Baroni, Guido and Woods, Jason and Aliverti, Andrea}, doi = {10.1016/j.acra.2014.05.022}, issn = {18784046}, journal = {Academic Radiology}, keywords = {CT imaging,Gravity,Healthy lung,Ventilation}, number = {10}, pages = {1268--1275}, title = {{Comparison Between Multivolume CT-Based Surrogates of Regional Ventilation in Healthy Subjects}}, type = {Journal Article}, volume = {21}, year = {2014} } @article{Pheiffer2011, abstract = {Modality-independent elastography (MIE) is a method of elastography that reconstructs the elastic properties of tissue using images acquired under different loading conditions and a biomechanical model. Boundary conditions are a critical input to the algorithm and are often determined by time-consuming point correspondence methods requiring manual user input. This study presents a novel method of automatically generating boundary conditions by nonrigidly registering two image sets with a demons diffusion-based registration algorithm. The use of this method was successfully performed in silico using magnetic resonance and X-ray-computed tomography image data with known boundary conditions. These preliminary results produced boundary conditions with an accuracy of up to 80{\%} compared to the known conditions. Demons-based boundary conditions were utilized within a 3-D MIE reconstruction to determine an elasticity contrast ratio between tumor and normal tissue. Two phantom experiments were then conducted to further test the accuracy of the demons boundary conditions and the MIE reconstruction arising from the use of these conditions. Preliminary results show a reasonable characterization of the material properties on this first attempt and a significant improvement in the automation level and viability of the method.
{\textcopyright} 2011 IEEE.}, author = {Pheiffer, Thomas S. and Ou, Jao J. and Ong, Rowena E. and Miga, Michael I.}, doi = {10.1109/TBME.2011.2159791}, issn = {00189294}, journal = {IEEE Transactions on Biomedical Engineering}, keywords = {Boundary conditions,elastography,finite element methods,image registration}, number = {9}, pages = {2607--2616}, title = {{Automatic generation of boundary conditions using demons nonrigid image registration for use in 3-D modality-independent elastography}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {58}, year = {2011} } @article{Phellan2018, abstract = {Objective: Automatic vessel segmentation can be used to process the considerable amount of data generated by four-dimensional arterial spin labeling magnetic resonance angiography (4D ASL MRA) images. Previous segmentation approaches for dynamic series of images propose either reducing the series to a temporal average (tAIP) or maximum intensity projection (tMIP) prior to vessel segmentation, or a separate segmentation of each image. This paper introduces a method that combines both approaches to overcome the specific drawbacks of each technique. Methods: Vessels in the tAIP are enhanced by using the ranking orientation responses of path operators and multiscale vesselness enhancement filters. Then, tAIP segmentation is performed using a seed-based algorithm. In parallel, this algorithm is also used to segment each frame of the series and identify small vessels, which might have been lost in the tAIP segmentation. The results of each individual time frame segmentation are fused using an or boolean operation. Finally, small vessels found only in the fused segmentation are added to the tAIP segmentation. Results: In a quantitative analysis using ten 4D ASL MRA image series from healthy volunteers, the proposed combined approach reached an average Dice coefficient of 0.931, being more accurate than the corresponding tMIP, tAIP, and single time frame segmentation methods with statistical significance. Conclusion : The novel combined vessel segmentation strategy can be used to obtain improved vessel segmentation results from 4D ASL MRA and other dynamic series of images. Significance: Improved vessel segmentation of 4D ASL MRA allows a fast and accurate assessment of cerebrovascular structures.}, author = {Phellan, Renzo and Lindner, Thomas and Helle, Michael and Falcao, Alexandre X. and Forkert, Nils Daniel}, doi = {10.1109/TBME.2017.2759730}, issn = {15582531}, journal = {IEEE Transactions on Biomedical Engineering}, keywords = {Angiography,magnetic resonance angiography,temporal segmentation,vessel enhancement,vessel segmentation}, number = {7}, pages = {1486--1494}, title = {{Automatic Temporal Segmentation of Vessels of the Brain Using 4D ASL MRA Images}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {65}, year = {2018} } @incollection{Phellan2019, abstract = {Medical imaging modalities, such as four-dimensional arterial spin label magnetic resonance angiography (4D ASL MRA), can acquire blood flow data of the cerebrovascular system. These datasets are useful to determine criteria of normality and diagnose, study, and follow-up on the treatment progress of cerebrovascular diseases. In particular, variations in the arterial transit time (ATT) are related to hemodynamic impairment as a consequence of vascular diseases. In order to obtain accurate ATT estimations, the acquisition parameters of the applied image modality need to be properly tuned. 
In case of 4D ASL MRA, two important acquisition parameters are the blood labeling duration and the temporal resolution. This paper evaluates the effect of different settings for the two mentioned parameters on the accuracy of the ATT estimation in 4D ASL MRA datasets. Six 4D ASL MRA datasets of a pipe containing a mixture of glycerine and water, circulated with constant flow rate using a pump, are acquired with different labeling duration and temporal resolution. A mathematical model is then fitted to the observed signal in order to estimate the ATT. The results indicate that the lowest average absolute error between the ground-truth and estimated ATT is achieved when the longest labeling duration of 1000 ms and the highest temporal resolution of 60 ms are used. The insight obtained from the experiments using a flow phantom, under controlled conditions, can be extended to tune acquisition parameters of 4D ASL MRA datasets of human subjects.}, author = {Phellan, Renzo and Lindner, Thomas and Helle, Michael and Falc{\~{a}}o, Alexandre X. and Forkert, Nils D.}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, doi = {10.1007/978-3-030-33327-0_17}, isbn = {9783030333263}, issn = {16113349}, keywords = {Arterial transit time,Blood flow,Hemodynamic analysis,Model fitting}, pages = {141--148}, title = {{The Effect of Labeling Duration and Temporal Resolution on Arterial Transit Time Estimation Accuracy in 4D ASL MRA Datasets - A Flow Phantom Study}}, type = {Serial}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85075765108{\&}doi=10.1007{\%}2F978-3-030-33327-0{\_}17{\&}partnerID=40{\&}md5=34fc3b4595995b6c0ddd47325b464be7}, volume = {11794 LNCS}, year = {2019} } @article{Phillion2008, abstract = {Semisolid tensile testing combined with X-ray microtomography (XMT) was used to characterize the development of internal damage as a function of strain in an aluminum-magnesium alloy, AA5182. Novel techniques were developed to allow the quantification of both the size evolution and orientation of the damage to determine mechanisms controlling the early stage growth and localization. During the initial stages of semisolid deformation, it was observed that strain was accommodated by both the growth of as-cast porosity and the detection of new damage-based voids. As the volume fraction of damage increases, the growth of voids occurs in an orientation perpendicular to the loading direction, both through expansion within the grain boundary liquid and void coalescence. The damage then localizes, causing failure. {\textcopyright} The Minerals, Metals {\&} Materials Society and ASM International 2008.}, author = {Phillion, Andre B. and Lee, P. D. and Maire, E. and Cockcroft, S. L.}, doi = {10.1007/s11661-008-9584-4}, issn = {10735623}, journal = {Metallurgical and Materials Transactions A: Physical Metallurgy and Materials Science}, number = {10}, pages = {2459--2469}, title = {{Quantitative assessment of deformation-induced damage in a Semisolid aluminum alloy via X-ray microtomography}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {39}, year = {2008} } @article{Pichat2018, abstract = {Histology permits the observation of otherwise invisible structures of the internal topography of a specimen. Although it enables the investigation of tissues at a cellular level, it is invasive and breaks topology due to cutting. 
Three-dimensional (3D) reconstruction was thus introduced to overcome the limitations of single-section studies in a dimensional scope. 3D reconstruction finds its roots in embryology, where it enabled the visualisation of spatial relationships of developing systems and organs, and extended to biomedicine, where the observation of individual, stained sections provided only partial understanding of normal and abnormal tissues. However, despite bringing visual awareness, recovering realistic reconstructions is elusive without prior knowledge about the tissue shape. 3D medical imaging made such structural ground truths available. In addition, combining non-invasive imaging with histology unveiled invaluable opportunities to relate macroscopic information to the underlying microscopic properties of tissues through the establishment of spatial correspondences; image registration is one technique that permits the automation of such a process and we describe reconstruction methods that rely on it. It is thereby possible to recover the original topology of histology and lost relationships, gain insight into what affects the signals used to construct medical images (and characterise them), or build high resolution anatomical atlases. This paper reviews almost three decades of methods for 3D histology reconstruction from serial sections, used in the study of many different types of tissue. We first summarise the process that produces digitised sections from a tissue specimen in order to understand the peculiarity of the data, the associated artefacts and some possible ways to minimise them. We then describe methods for 3D histology reconstruction with and without the help of 3D medical imaging, along with methods of validation and some applications. We finally attempt to identify the trends and challenges that the field is facing, many of which are derived from the cross-disciplinary nature of the problem as it involves the collaboration between physicists, histolopathologists, computer scientists and physicians.}, author = {Pichat, Jonas and Iglesias, Juan Eugenio and Yousry, Tarek and Ourselin, S{\'{e}}bastien and Modat, Marc}, doi = {10.1016/j.media.2018.02.004}, issn = {13618423}, journal = {Medical Image Analysis}, keywords = {3D reconstruction,Histology,MRI,Medical imaging,Registration}, pages = {73--105}, title = {{A Survey of Methods for 3D Histology Reconstruction}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {46}, year = {2018} } @article{Pietzsch2012, abstract = {ImgLib2 is an open-source Java library for n-dimensional data representation and manipulation with focus on image processing. It aims at minimizing code duplication by cleanly separating pixelalgebra, data access and data representation in memory. Algorithms can be implemented for classes of pixel types and generic access patterns by which they become independent of the specific dimensionality, pixel type and data representation. ImgLib2 illustrates that an elegant high-level programming interface can be achieved without sacrificing performance. It provides efficient implementations of common data types, storage layouts and algorithms. It is the data model underlying ImageJ2, the KNIME Image Processing toolbox and an increasing number of Fiji-Plugins. 
{\textcopyright} 2012 The Author.}, author = {Pietzsch, Tobias and Preibisch, Stephan and Toman{\v{c}}{\'{a}}k, Pavel and Saalfeld, Stephan}, doi = {10.1093/bioinformatics/bts543}, issn = {14602059}, journal = {Bioinformatics}, number = {22}, pages = {3009--3011}, title = {{ImgLib2 - generic image processing in Java}}, type = {Journal Article}, volume = {28}, year = {2012} } @article{Pinter2019, abstract = {Background and objective: Segmentation is a ubiquitous operation in medical image computing. Various data representations can describe segmentation results, such as labelmap volumes or surface models. Conversions between them are often required, which typically include complex data processing steps. We identified four challenges related to managing multiple representations: conversion method selection, data provenance, data consistency, and coherence of in-memory objects. Methods: A complex data container preserves identity and provenance of the contained representations and ensures data coherence. Conversions are executed automatically on-demand. A graph containing the implemented conversion algorithms determines each execution, ensuring consistency between various representations. The design and implementation of a software library are proposed, in order to provide a readily usable software tool to manage segmentation data in multiple data representations. A low-level core library called PolySeg implemented in the Visualization Toolkit (VTK) manages the data objects and conversions. It is used by a high-level application layer, which has been implemented in the medical image visualization and analysis platform 3D Slicer. The application layer provides advanced visualization, transformation, interoperability, and other functions. Results: The core conversion algorithms comprising the graph were validated. Several applications were implemented based on the library, demonstrating advantages in terms of usability and ease of software development in each case. The Segment Editor application provides fast, comprehensive, and easy-to-use manual and semi-automatic segmentation workflows. Clinical applications for gel dosimetry, external beam planning, and MRI-ultrasound image fusion in brachytherapy were rapidly prototyped, resulting in robust applications that are already in use in clinical research. The conversion algorithms were found to be accurate and reliable using these applications. Conclusions: A generic software library has been designed and developed for automatic management of multiple data formats in segmentation tasks. It enhances both user and developer experience, enabling fast and convenient manual workflows and quicker and more robust software prototyping. The software's BSD-style open-source license allows complete freedom of use of the library.}, author = {Pinter, Csaba and Lasso, Andras and Fichtinger, Gabor}, doi = {10.1016/j.cmpb.2019.02.011}, issn = {18727565}, journal = {Computer Methods and Programs in Biomedicine}, keywords = {3D Slicer,DICOM,Open-source,Segmentation,Software library,Voxelization}, pages = {19--26}, title = {{Polymorph segmentation representation for medical image computing}}, type = {Journal Article}, volume = {171}, year = {2019} } @article{Podlesek2015, abstract = {Introduction: Ultrasound can visualize and update the vessel status in real time during cerebral vascular surgery.
We studied the depiction of parent vessels and aneurysms with a high-resolution 3D intraoperative ultrasound imaging system during aneurysm clipping using rotational digital subtraction angiography as a reference. Methods: We analyzed 3D intraoperative ultrasound in 39 patients with cerebral aneurysms to visualize the aneurysm intraoperatively and the nearby vascular tree before and after clipping. Simultaneous coregistration of preoperative subtraction angiography data with 3D intraoperative ultrasound was performed to verify the anatomical assignment. Results: Intraoperative ultrasound detected 35 of 43 aneurysms (81{\%}) in 39 patients. Thirty-nine intraoperative ultrasound measurements were matched with rotational digital subtraction angiography and were successfully reconstructed during the procedure. In 7 patients, the aneurysm was partially visualized by 3D-ioUS or was not in field of view. Post-clipping intraoperative ultrasound was obtained in 26 and successfully reconstructed in 18 patients (69{\%}) despite clip related artefacts. The overlap between 3D-ioUS aneurysm volume and preoperative rDSA aneurysm volume resulted in a mean accuracy of 0.71 (Dice coefficient). Conclusions: Intraoperative coregistration of 3D intraoperative ultrasound data with preoperative rotational digital subtraction angiography is possible with high accuracy. It allows the immediate visualization of vessels beyond the microscopic field, as well as parallel assessment of blood velocity, aneurysm and vascular tree configuration. Although spatial resolution is lower than for standard angiography, the method provides an excellent vascular overview, advantageous interpretation of 3D-ioUS and immediate intraoperative feedback of the vascular status. A prerequisite for understanding vascular intraoperative ultrasound is image quality and a successful match with preoperative rotational digital subtraction angiography.}, author = {Podlesek, Dino and Meyer, Tobias and Morgenstern, Ute and Schackert, Gabriele and Kirsch, Matthias}, doi = {10.1371/journal.pone.0121345}, issn = {19326203}, journal = {PLoS ONE}, number = {3}, pages = {16}, title = {{Improved visualization of intracranial vessels with intraoperative coregistration of rotational digital subtraction angiography and intraoperative 3D ultrasound}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {10}, year = {2015} } @inbook{Polfliet2015, abstract = {We present a method for automatically estimating prosthesis migration from previous and follow-up CT image data. The method consists of the segmentation of the bone and prosthesis in both images, followed by a registration of both substructures. The migration is found by computing the difference between both transforms. In this work we assess the accuracy of the method for zero migration. The method was applied on data from a mechanical phantom and on patient data, both with zero migration. 
Our experiments show that an accuracy of less than 0.3 mm can be achieved in a clinical setting.}, address = {Berlin}, author = {Polfliet, Mathias and Vandemeulebroucke, Jef and {Van Gompel}, Gert and Buls, Nico and Deklerck, Rudi and Scheerlinck, Thierry}, booktitle = {IFMBE Proceedings}, doi = {10.1007/978-3-319-11128-5_32}, editor = {Lackovic, I and Vasic, D}, isbn = {9783319111278}, issn = {16800737}, keywords = {Hip Prosthesis,Migration,Rigid Registration}, pages = {126--129}, publisher = {Springer-Verlag Berlin}, series = {IFMBE Proceedings}, title = {{Estimation of hip prosthesis migration: A study of zero migration}}, type = {Book Section}, url = {{\%}3CGo to}, volume = {45}, year = {2015} } @article{Poppe2019, abstract = {Magma intrusions grow to their final geometries by deforming the Earth's crust internally and by displacing the Earth's surface. Interpreting the related displacements in terms of intrusion geometry is key to forecasting a volcanic eruption. While scaled laboratory models enable us to study the relationships between surface displacement and intrusion geometry, past approaches entailed limitations regarding imaging of the laboratory model interior or simplicity of the simulated crustal rheology. Here we apply cutting-edge medical wide beam X-ray Computed Tomography (CT) to quantify in 4D the deformation induced in laboratory models by an intrusion of a magma analog (golden syrup) into a rheologically-complex granular host rock analog (sand and plaster). We extract the surface deformation and we quantify the strain field of the entire experimental volume in 3D over time by using Digital Volume Correlation (DVC). By varying the strength and height of the host material, and intrusion velocity, we observe how intrusions of contrasting geometries grow, and induce contrasting strain field characteristics and surface deformation in 4D. The novel application of CT and DVC reveals that distributed strain accommodation and mixed-mode (opening and shear) fracturing dominates in low-cohesion material overburden, and leads to the growth of thick cryptodomes or cup-shaped intrusions. More localized strain accommodation and opening-mode fracturing dominates in high-cohesion material overburden, and leads to the growth of cone sheets or thin dikes. The results demonstrate how the combination of CT and DVC can greatly enhance the utility of optically non-transparent crustal rock analogs in obtaining insights into shallow crustal deformation processes. This unprecedented perspective on the spatio-temporal interaction of intrusion growth coupled with host material deformation provides a conceptual framework that can be tested by field observations at eroded volcanic systems and by the ever increasing spatial and temporal resolution of geodetic data at active volcanoes.}, author = {Poppe, Sam and Holohan, Eoghan P. 
and Galland, Olivier and Buls, Nico and {Van Gompel}, Gert and Keelson, Benyameen and Tournigand, Pierre Yves and Brancart, Joost and Hollis, Dave and Nila, Alex and Kervyn, Matthieu}, doi = {10.3389/feart.2019.00062}, issn = {22966463}, journal = {Frontiers in Earth Science}, keywords = {Analog,Digital volume correlation,Laboratory modeling,Magma intrusion,Surface deformation,X-ray computed tomography}, pages = {20}, title = {{An inside perspective on magma intrusion: Quantifying 3d displacement and strain in laboratory experiments by dynamic X-ray computed tomography}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {7}, year = {2019} } @article{Prasetio2018, abstract = {The Vero linear accelerator delivers dynamic tumor tracking (DTT) treatment using a gimbal motion. However, the availability of treatment planning systems (TPS) to simulate DTT is limited. This study aims to implement and verify the gimbal tracking beam geometry in the dose calculation. Gimbal tracking was implemented by rotating the reference CT outside the TPS according to the ring, gantry, and gimbal tracking position obtained from the tracking log file. The dose was calculated using these rotated CTs. The geometric accuracy was verified by comparing calculated and measured film response using a ball bearing phantom. The dose was verified by comparing calculated 2D dose distributions and film measurements in a ball bearing and a homogeneous phantom using a gamma criterion of 2{\%}/2 mm. The effect of implementing the gimbal tracking beam geometry in a 3D patient data dose calculation was evaluated using dose volume histograms (DVH). Geometrically, the gimbal tracking implementation accuracy was {\textless}0.94 mm. The isodose lines agreed with the film measurement. The largest dose difference of 9.4{\%} was observed at maximum tilt positions with an isocenter and target separation of 17.51 mm. Dosimetrically, gamma passing rates were {\textgreater}98.4{\%}. The introduction of the gimbal tracking beam geometry in the dose calculation shifted the DVH curves by 0.05{\%}-1.26{\%} for the phantom geometry and by 5.59{\%} for the patient CT dataset. This study successfully demonstrates a method to incorporate the gimbal tracking beam geometry into dose calculations. By combining CT rotation and MU distribution according to the log file, the TPS was able to simulate the Vero tracking treatment dose delivery. The DVH analysis from the gimbal tracking dose calculation revealed changes in the dose distribution during gimbal DTT that are not visible with static dose calculations.}, author = {Prasetio, H. and W{\"{o}}lfelschneider, J. and Ziegler, M. and Serpa, M. and Witulla, B. and Bert, C.}, doi = {10.1088/1361-6560/aaa617}, issn = {13616560}, journal = {Physics in Medicine and Biology}, keywords = {MU distribution,dose reconstruction,dynamic tumor tracking,film dosimetry,gimbal tracking}, number = {3}, pages = {16}, title = {{Dose calculation and verification of the Vero gimbal tracking treatment delivery}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {63}, year = {2018} } @article{Price2017, abstract = {Purpose: MR-only treatment planning requires images of high geometric fidelity, particularly for large fields of view (FOV). However, the availability of large FOV distortion phantoms with analysis software is currently limited. 
This work sought to optimize a modular distortion phantom to accommodate multiple bore configurations and implement distortion characterization in a widely implementable solution. Method and Materials: To determine candidate materials, 1.0 T MR and CT images were acquired of twelve urethane foam samples of various densities and strengths. Samples were precision-machined to accommodate 6 mm diameter paintballs used as landmarks. Final material candidates were selected by balancing strength, machinability, weight, and cost. Bore sizes and minimum aperture width resulting from couch position were tabulated from the literature (14 systems, 5 vendors). Bore geometry and couch position were simulated using MATLAB to generate machine-specific models to optimize the phantom build. Previously developed software for distortion characterization was modified for several magnet geometries (1.0 T, 1.5 T, 3.0 T), compared against previously published 1.0 T results, and integrated into the 3D Slicer application platform. Results: All foam samples provided sufficient MR image contrast with paintball landmarks. Urethane foam (compressive strength {\~{}}1000 psi, density {\~{}}20 lb/ft3) was selected for its accurate machinability and weight characteristics. For smaller bores, a phantom version with the following parameters was used: 15 foam plates, 55 × 55 × 37.5 cm3 (L × W × H), 5,082 landmarks, and weight {\~{}}30 kg. To accommodate {\textgreater} 70 cm wide bores, an extended build used 20 plates spanning 55 × 55 × 50 cm3 with 7,497 landmarks and weight {\~{}}44 kg. Distortion characterization software was implemented as an external module into 3D Slicer's plugin framework and results agreed with the literature. Conclusion: The design and implementation of a modular, extendable distortion phantom was optimized for several bore configurations. The phantom and analysis software will be available for multi-institutional collaborations and cross-validation trials to support MR-only planning.}, author = {Price, Ryan G. and Knight, Robert A. and Hwang, Ken Pin and Bayram, Ersin and Nejad-Davarani, Siamak P. and Glide-Hurst, Carri K.}, doi = {10.1002/acm2.12090}, issn = {15269914}, journal = {Journal of Applied Clinical Medical Physics}, keywords = {Distortion,Gradient nonlinearity,MRI,Phantom,Spatial accuracy}, number = {4}, pages = {51--61}, title = {{Optimization of a novel large field of view distortion phantom for MR-only treatment planning}}, type = {Journal Article}, volume = {18}, year = {2017} } @article{Primpke2019, abstract = {The ubiquitous presence of microlitter (ML), precisely microplastics (MP) and microfibres (MF) in the global environment is of growing concern for science, and society in general. Reliable methods are urgently needed for the identification and quantification of these emerging environmental pollutants. Recently a rapid Fourier transform infrared (FTIR) imaging pipeline was developed for automated identification and quantification of MP. However, although the usefulness for the quantification of MP could already be shown in several studies, microfibres could not be targeted so far by the developed analysis pipeline. In this study we present a novel approach for the simultaneous identification and quantification of MP and MF. By concentrating the sample on membrane filters and applying a BaF2 window on top of the filter, all objects, including MF, are fixed in the focal plane of the FTIR microscope.
Furthermore, the analysis pipeline was augmented with algorithms which take into consideration the filamentous structure of MF. The novel analysis pipeline now allows to separate MP and MF via a preselection of fibres from the dataset by object size and shape. MP and MF are subsequently further investigated for specific polymer types and lengths/sizes. After parameter optimization the newly developed analysis approach was applied to archived samples from previous studies on treated waste water. The results were compared with respect to the original detected polymer types and numbers, but also considered MF detection.}, author = {Primpke, S. and Dias, P. A. and Gerdts, G.}, doi = {10.1039/c9ay00126c}, issn = {17599679}, journal = {Analytical Methods}, number = {16}, pages = {2138--2147}, title = {{Automated identification and quantification of microfibres and microplastics}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {11}, year = {2019} } @article{Primpke2017, abstract = {The analysis of imaging data derived from micro-Fourier transform infrared ($\mu$FTIR) microscopy is a powerful tool allowing the analysis of microplastics enriched on membrane filters. In this study we present an automated approach to reduce the time demand currently needed for data analyses. We developed a novel analysis pipeline, based on the OPUS{\textcopyright} Software by Bruker, followed by image analysis with Python and Simple ITK image processing modules. By using this newly developed pipeline it was possible to analyse datasets from focal plane array (FPA) $\mu$FTIR mapping of samples containing up to 1.8 million single spectra. All spectra were compared against a database of different synthetic and natural polymers by various routines followed by benchmark tests with focus on accuracy and quality. The spectral correlation was optimized for high quality data generation, which allowed image analysis. Based on these results an image analysis approach was developed, providing information on particle numbers and sizes for each polymer detected. It was possible to collect all data with relative ease even for complex sample matrices. This approach significantly decreases the time demand for the interpretation of complex FTIR-imaging data and significantly increases the data quality.}, author = {Primpke, S. and Lorenz, C. and Rascher-Friesenhausen, R. and Gerdts, G.}, doi = {10.1039/c6ay02476a}, issn = {17599679}, journal = {Analytical Methods}, number = {9}, pages = {1499--1511}, title = {{An automated approach for microplastics analysis using focal plane array (FPA) FTIR microscopy and image analysis}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {9}, year = {2017} } @article{Primpke2018, abstract = {The identification of microplastics becomes increasingly challenging with decreasing particle size and increasing sample heterogeneity. The analysis of microplastic samples by Fourier transform infrared (FTIR) spectroscopy is a versatile, bias-free tool to succeed at this task. In this study, we provide an adaptable reference database, which can be applied to single-particle identification as well as methods like chemical imaging based on FTIR microscopy. The large datasets generated by chemical imaging can be further investigated by automated analysis, which does, however, require a carefully designed database. The novel database design is based on the hierarchical cluster analysis of reference spectra in the spectral range from 3600 to 1250 cm−1. 
The hereby generated database entries were optimized for the automated analysis software with defined reference datasets. The design was further tested for its customizability with additional entries. The final reference database was extensively tested on reference datasets and environmental samples. Data quality by means of correct particle identification and depiction significantly increased compared to that of previous databases, proving the applicability of the concept and highlighting the importance of this work. Our novel database provides a reference point for data comparison with future and previous microplastic studies that are based on different databases. [Figure not available: see fulltext.].}, author = {Primpke, Sebastian and Wirth, Marisa and Lorenz, Claudia and Gerdts, Gunnar}, doi = {10.1007/s00216-018-1156-x}, issn = {16182650}, journal = {Analytical and Bioanalytical Chemistry}, keywords = {Database,FTIR,Imaging,Infrared,Microplastics,Spectroscopy}, number = {21}, pages = {5131--5141}, title = {{Reference database design for the automated analysis of microplastic samples based on Fourier transform infrared (FTIR) spectroscopy}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {410}, year = {2018} } @incollection{Pryamonosov2019, abstract = {Segmentation of medical scans is the first and fundamental stage of numerical modeling of the human cardiovascular system. In this chapter, we analyze the results of coronary arteries segmentation using our approach for ten contrast-enhanced Computer Tomography Angiography datasets with different image quality and contrast phases. The segmentation is also affected by the patient anatomy, the shape and the scope of images. Our results show that the contrast phase timing is crucial for successful automatic segmentation. These factors form restrictions on the input data for automatic segmentation algorithms. Nevertheless, user guidance such as manual seeding and setting of thresholds can be used to significantly improve segmentation results and weaken the input restrictions.}, author = {Pryamonosov, Roman and Danilov, Alexander}, booktitle = {Smart Innovation, Systems and Technologies}, doi = {10.1007/978-3-030-06228-6_26}, isbn = {9783030062279}, issn = {21903026}, keywords = {Cardiovascular applications,Computed tomography,Contrast enhanced,Coronary arteries,Image segmentation,Personalized medicine}, pages = {331--344}, title = {{Robustness analysis of coronary arteries segmentation}}, type = {Serial}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85060769579{\&}doi=10.1007{\%}2F978-3-030-06228-6{\_}26{\&}partnerID=40{\&}md5=2732253554b40b79957430c0178be429}, volume = {133}, year = {2019} } @article{Punzo2016, abstract = {Soon to be operational H I survey instruments such as APERTIF and ASKAP will produce large datasets. These surveys will provide information about the H I in and around hundreds of galaxies with a typical signal-to-noise ratio of ∼10 in the inner regions and ∼1 in the outer regions. In addition, such surveys will make it possible to probe faint H I structures, typically located in the vicinity of galaxies, such as extra-planar-gas, tails and filaments. These structures are crucial for understanding galaxy evolution, particularly when they are studied in relation to the local environment. Our aim is to find optimized kernels for the discovery of faint and morphologically complex H I structures. Therefore, using H I data from a variety of galaxies, we explore state-of-the-art filtering algorithms. 
We show that the intensity-driven gradient filter, due to its adaptive characteristics, is the optimal choice. In fact, this filter requires only minimal tuning of the input parameters to enhance the signal-to-noise ratio of faint components. In addition, it does not degrade the resolution of the high signal-to-noise component of a source. The filtering process must be fast and be embedded in an interactive visualization tool in order to support fast inspection of a large number of sources. To achieve such interactive exploration, we implemented a multi-core CPU (OpenMP) and a GPU (OpenGL) version of this filter in a 3D visualization environment (SlicerAstro).}, archivePrefix = {arXiv}, arxivId = {1609.03782}, author = {Punzo, D. and van der Hulst, J. M. and Roerdink, J. B.T.M.}, doi = {10.1016/j.ascom.2016.09.002}, eprint = {1609.03782}, issn = {22131337}, journal = {Astronomy and Computing}, keywords = {Radio lines: galaxies,Scientific visualization,Techniques: image processing}, pages = {163--176}, title = {{Finding faint H I structure in and around galaxies: Scraping the barrel}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {17}, year = {2016} } @article{Qiao2006, abstract = {Cardiac and respiratory motion artefacts in PET imaging have been traditionally resolved by acquiring the data in gated mode. However, gated PET images are usually characterized by high noise content due to their low photon statistics. In this paper, we present a novel 4D model for the PET imaging system, which can incorporate motion information to generate a motion-free image with all acquired data. A computer simulation and a phantom study were conducted to test the performance of this approach. The computer simulation was based on a digital phantom that was continuously scaled during data acquisition. The phantom study, on the other hand, used two spheres in a tank of water, all of which were filled with 18F water. One of the spheres was stationary while the other moved in a sinusoidal fashion to simulate tumour motion in the thorax. Data were acquired using both 4D CT and gated PET. Motion information was derived from the 4D CT images and then used in the 4D PET model. Both studies showed that this 4D PET model had a good motion-compensating capability. In the phantom study, this approach reduced quantification error of the radioactivity concentration by 95{\%} when compared to a corresponding static acquisition, while signal-to-noise ratio was improved by 210{\%} when compared to a corresponding gated image. {\textcopyright} 2006 IOP Publishing Ltd.}, author = {Qiao, Feng and Pan, Tinsu and Clark, John W. and Mawlawi, Osama R.}, doi = {10.1088/0031-9155/51/15/012}, issn = {00319155}, journal = {Physics in Medicine and Biology}, number = {15}, pages = {3769--3783}, title = {{A motion-incorporated reconstruction method for gated PET studies}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-33746512521{\&}doi=10.1088{\%}2F0031-9155{\%}2F51{\%}2F15{\%}2F012{\&}partnerID=40{\&}md5=85744da7c87fdff0c273cb687cee3723}, volume = {51}, year = {2006} } @article{Qiao2007, abstract = {A motion-incorporated reconstruction (MIR) method for gated PET imaging has recently been developed by several authors to correct for respiratory motion artifacts in PET imaging. This method however relies on a motion map derived from images (4D PET or 4D CT) of the entire field of view (FOV). 
In this study we present a region of interest (ROI)-based extension to this method, whereby only the motion map of a user-defined ROI is required and motion incorporation during image reconstruction is solely performed within the ROI. A phantom study and an NCAT computer simulation study were performed to test the feasibility of this method. The phantom study showed that the ROI-based MIR produced results that are within 1.26{\%} of those obtained by the full image-based MIR approach when using the same accurate motion information. The NCAT phantom study on the other hand, further verified that motion of features of interest in an image can be estimated more efficiently and potentially more accurately using the ROI-based approach. A reduction of motion estimation time from 450 s to 30 and 73 s was achieved for two different ROIs respectively. In addition, the ROI-based approach showed a reduction in registration error of 43{\%} for one ROI, which effectively reduced quantification bias by 44{\%} and 32{\%} using mean and maximum voxel values, respectively. {\textcopyright} 2007 IOP Publishing Ltd.}, author = {Qiao, Feng and Pan, Tinsu and Clark, John W. and Mawlawi, Osama R.}, doi = {10.1088/0031-9155/52/10/003}, issn = {00319155}, journal = {Physics in Medicine and Biology}, number = {10}, title = {{Region of interest motion compensation for PET image reconstruction}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-34248204627{\&}doi=10.1088{\%}2F0031-9155{\%}2F52{\%}2F10{\%}2F003{\&}partnerID=40{\&}md5=deb8145b579395e0ef985aab49663666}, volume = {52}, year = {2007} } @article{Qu2015, abstract = {Warping images into a standard coordinate space is critical for many image computing related tasks. However, for multi-dimensional and high-resolution images, an accurate warping operation itself is often very expensive in terms of computer memory and computational time. For high-throughput image analysis studies such as brain mapping projects, it is desirable to have high performance image warping tools that are compatible with common image analysis pipelines. In this article, we present LittleQuickWarp, a swift and memory efficient tool that boosts 3D image warping performance dramatically and at the same time has high warping quality similar to the widely used thin plate spline (TPS) warping. Compared to the TPS, LittleQuickWarp can improve the warping speed 2-5 times and reduce the memory consumption 6-20 times. We have implemented LittleQuickWarp as an Open Source plug-in program on top of the Vaa3D system (http://vaa3d.org). The source code and a brief tutorial can be found in the Vaa3D plugin source code repository.}, author = {Qu, Lei and Peng, Hanchuan}, doi = {10.1016/j.ymeth.2014.09.002}, issn = {10959130}, journal = {Methods}, keywords = {B-spline,Image registration,Thin plate spline,Warping}, pages = {38--42}, title = {{LittleQuickWarp: An ultrafast image warping tool}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {73}, year = {2015} } @article{Rak2017, abstract = {Purpose: In interstitial high-dose rate brachytherapy, liver cancer is treated by internal radiation, requiring percutaneous placement of applicators within or close to the tumor. To maximize utility, the optimal applicator configuration is pre-planned on magnetic resonance images. The pre-planned configuration is then implemented via a magnetic resonance-guided intervention. 
Mapping the pre-planning information onto interventional data would reduce the radiologist's cognitive load during the intervention and could possibly minimize discrepancies between optimally pre-planned and actually placed applicators. Methods: We propose a fast and robust two-step registration framework suitable for interventional settings: first, we utilize a multi-resolution rigid registration to correct for differences in patient positioning (rotation and translation). Second, we employ a novel iterative approach alternating between bias field correction and Markov random field deformable registration in a multi-resolution framework to compensate for non-rigid movements of the liver, the tumors and the organs at risk. In contrast to existing pre-correction methods, our multi-resolution scheme can recover bias field artifacts of different extents at marginal computational costs. Results: We compared our approach to deformable registration via B-splines, demons and the SyN method on 22 registration tasks from eleven patients. Results showed that our approach is more accurate than the contenders for liver as well as for tumor tissues. We yield average liver volume overlaps of 94.0 ± 2.7{\%} and average surface-to-surface distances of 2.02 ± 0.87 mm and 3.55 ± 2.19 mm for liver and tumor tissue, respectively. The reported distances are close to (or even below) the slice spacing (2.5 – 3.0 mm) of our data. Our approach is also the fastest, taking 35.8 ± 12.8 s per task. Conclusion: The presented approach is sufficiently accurate to map information available from brachytherapy pre-planning onto interventional data. It is also reasonably fast, providing a starting point for computer-aidance during intervention.}, author = {Rak, Marko and K{\"{o}}nig, Tim and T{\"{o}}nnies, Klaus D. and Walke, Mathias and Ricke, Jens and Wybranski, Christian}, doi = {10.1007/s11548-017-1633-2}, issn = {18616429}, journal = {International Journal of Computer Assisted Radiology and Surgery}, keywords = {Bias field correction,Deformable registration,High-dose rate brachytherapy,Liver intervention,Magnetic resonance imaging}, number = {12}, pages = {2169--2180}, title = {{Joint deformable liver registration and bias field correction for MR-guided HDR brachytherapy}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {12}, year = {2017} } @article{Ramirez2011, abstract = {Subcortical hyperintensities (SH) are a commonly observed phenomenon on MRI of the aging brain (Kertesz et al., 1988). Conflicting behavioral, cognitive and pathological associations reported in the literature underline the need to develop an intracranial volumetric analysis technique to elucidate pathophysiological origins of SH in Alzheimer's disease (AD), vascular cognitive impairment (VCI) and normal aging (De Leeuw et al., 2001; Mayer and Kier, 1991; Pantoni and Garcia, 1997; Sachdev et al., 2008). The challenge is to develop processing tools that effectively and reliably quantify subcortical small vessel disease in the context of brain tissue compartments. Segmentation and brain region parcellation should account for SH subtypes which are often classified as: periventricular (pvSH) and deep white (dwSH), incidental white matter disease or lacunar infarcts and Virchow-Robin spaces. Lesion Explorer (LE) was developed as the final component of a comprehensive volumetric segmentation and parcellation image processing stream built upon previously published methods (Dade et al., 2004; Kovacevic et al., 2002). 
Inter-rater and inter-method reliability was accomplished both globally and regionally. Volumetric analysis showed high inter-rater reliability both globally (ICC = 0.99) and regionally (ICC = 0.98). Pixel-wise spatial congruence was also high (SI = 0.97). Whole brain pvSH volumes yielded high inter-rater reliability (ICC = 0.99). Volumetric analysis against an alternative kNN segmentation revealed high inter-method reliability (ICC = 0.97). Comparison with visual rating scales showed high significant correlations (ARWMC: r = 0.86; CHIPS: r = 0.87). The pipeline yields a comprehensive and reliable individualized volumetric profile for subcortical vasculopathy that includes regionalized (26 brain regions) measures for: GM, WM, sCSF, vCSF, lacunar and non-lacunar pvSH and dwSH. {\textcopyright} 2010 Elsevier Inc.}, author = {Ramirez, J. and Gibson, E. and Quddus, A. and Lobaugh, N. J. and Feinstein, A. and Levine, B. and Scott, C. J.M. and Levy-Cooperman, N. and Gao, F. Q. and Black, S. E.}, doi = {10.1016/j.neuroimage.2010.09.013}, issn = {10538119}, journal = {NeuroImage}, keywords = {Aging,Alzheimer's disease,Brain volume,Lesion analysis,Leukoariosis,MRI,Segmentation,White matter hyperintensities}, number = {2}, pages = {963--973}, title = {{Lesion Explorer: A comprehensive segmentation and parcellation package to obtain regional volumetrics for subcortical hyperintensities and intracranial tissue}}, type = {Journal Article}, volume = {54}, year = {2011} } @article{Ramirez2018, abstract = {Open science, as a common good, opens possibilities for the development of nations, through innovations and collaborative constructions, which help to democratize knowledge. Advances in this area are still emerging, and the open science, co-creation of knowledge and open innovation triangle, is presented as an opportunity to generate an original contribution from research to open educational theory and practices. The study analyzed the articles that addressed this triangle, in order to identify the contexts and challenges that arise in open innovation and the co-creation of knowledge to promote open science. The method was a systematic literature review (SLR) of 168 articles published in open access format, from January 2014 to May 2017 in the Web of Science and Scopus databases. In the validation process, the York University criteria were used: inclusion and exclusion, relevance of the pertinent studies, evaluation of the quality / validity of included studies and description of data / basic studies. The findings showed that the most-widely publicized contexts were in the United States and Brazil, in the business and academic sectors (closely followed by the social sector), and the challenges were open to innovation, opening and research. The research concludes that the context and practices of collaboration are substantial elements for innovation and open science.}, author = {Ram{\'{i}}rez, Mar{\'{i}}a Soledad and Garc{\'{i}}a-Pe{\~{n}}alvo, Francisco Jos{\'{e}}}, doi = {10.3916/C54-2018-01}, issn = {19883293}, journal = {Comunicar}, keywords = {Citizen science,Collaboration,Innovation,Knowledge,Knowledge co-creation,Open science,Openness,Validation}, number = {54}, pages = {9--18}, title = {{Co-creation and open innovation: Systematic literature review}}, type = {Journal Article}, volume = {26}, year = {2018} } @article{Rasmussen2007, abstract = {Objective.
The aims of this study were: 1) To develop protocols for, integration and assessment of the usefulness of high quality fMRI (functional magnetic resonance imaging) and DTI (diffusion tensor imaging) data in an ultrasound-based neuronavigation system. 2) To develop and demonstrate a co-registration method for automatic brain-shift correction of pre-operative MR data using intra-operative 3D ultrasound. Methods. Twelve patients undergoing brain surgery were scanned to obtain structural and fMRI data before the operation. In six of these patients, DTI data was also obtained. The preoperative data was imported into a commercial ultrasound-based navigation system and used for surgical planning and guidance. Intra-operative ultrasound volumes were acquired when needed during surgery and the multimodal data was used for guidance and resection control. The use of the available image information during planning and surgery was recorded. An automatic voxel-based registration method between preoperative MRA and intra-operative 3D ultrasound angiography (Power Doppler) was developed and tested postoperatively. Results. The study showed that it is possible to implement robust, high-quality protocols for fMRI and DTI and that the acquired data could be seamlessly integrated in an ultrasound-based neuronavigation system. Navigation based on fMRI data was found to be important for pre-operative planning in all twelve procedures. In five out of eleven cases the data was also found useful during the resection. DTI data was found to be useful for planning in all five cases where these data were imported into the navigation system. In two out of four cases DTI data was also considered important during the resection (in one case DTI data were acquired but not imported and in another case fMRI and DTI data could only be used for planning). Information regarding the location of important functional areas (fMRI) was more beneficial during the planning phase while DTI data was more helpful during the resection. Furthermore, the surgeon found it more user-friendly and efficient to interpret fMRI and DTI information when shown in a navigation system as compared to the traditional display on a light board or monitor. Updating MRI data for brain-shift using automatic co-registration of preoperative MRI with intra-operative ultrasound was feasible. Conclusion. In the present study we have demonstrated how both fMRI and DTI data can be acquired and integrated into a neuronavigation system for improved surgical planning and guidance. The surgeons reported that the integration of fMRI and DTI data in the navigation system represented valuable additional information presented in a user-friendly way and functional neuronavigation is now in routine use at our hospital. Furthermore, the present study showed that automatic ultrasound-based updates of important pre-operative MRI data are feasible and hence can be used to compensate for brain shift. {\textcopyright} 2007 Springer-Verlag.}, author = {Rasmussen, I. A. and Lindseth, F. and Rygh, O. M. and Berntsen, E. M. and Selbekk, T. and Xu, J. and {Nagelhus Hernes}, T. A. and Harg, E. and H{\aa}berg, A. 
and Unsgaard, G.}, doi = {10.1007/s00701-006-1110-0}, issn = {00016268}, journal = {Acta Neurochirurgica}, keywords = {Brain shift,Diffusion tensor imaging,Functional magnetic resonance imaging,Image guidance,Image registration,Intra-operative 3D ultrasound,Minimally invasive surgery,Multimodal imaging,Neuronavigation}, number = {4}, pages = {365--378}, title = {{Functional neuronavigation combined with intra-operative 3D ultrasound: Initial experiences during surgical resections close to eloquent brain areas and future directions in automatic brain shift compensation of preoperative data}}, type = {Journal Article}, volume = {149}, year = {2007} } @article{Raudaschl2010, abstract = {Purpose: The favored treatment for many hip fractures is a sliding hip screw, and its usage is expected to increase in the future. Failures can be reduced, and complications detected earlier by semi-automated CT image analysis. The most frequent failure is due to the screw cut-out from the femoral head. Methods: An image-based method was developed for early detection of complications and assessment of anchorage quality relative to implant model, bone quality or tip-apex distance (TAD). This method evaluates micro-migration using CT images acquired at different time points (immediately post-op and 3 months later). Serial CT image registration and transformation methods were applied, including point-based registration, to achieve semi-automated evaluations. Results: Qualitative and quantitative validation of the image registration was performed with measurement mean error determination by different observers. The micro-migration evaluation by clinicians compared favorably with semiautomated image-based results. Conclusion: Semi-automatic evaluation of hip screw micromigration using CT images is feasible and can aid observation of convalescence. The method may be amenable to full automation, a future goal for this work. {\textcopyright} CARS 2010.}, author = {Raudaschl, Patrik and Fritscher, Karl and Roth, Tobias and Kammerlander, Christian and Schubert, Rainer}, doi = {10.1007/s11548-010-0498-4}, issn = {18616429}, journal = {International Journal of Computer Assisted Radiology and Surgery}, keywords = {Cut-out,Hip fractures,Micro-migration,Sliding hip screw}, number = {5}, pages = {455--460}, title = {{Analysis of the micro-migration of sliding hip screws by using point-based registration}}, type = {Journal Article}, volume = {5}, year = {2010} } @article{Rautek2014, abstract = {Researchers from many domains use scientific visualization in their daily practice. Existing implementations of algorithms usually come with a graphical user interface (high-level interface), or as software library or source code (low-level interface). In this paper we present a system that integrates domain-specific languages (DSLs) and facilitates the creation of new DSLs. DSLs provide an effective interface for domain scientists avoiding the difficulties involved with low-level interfaces and at the same time offering more flexibility than high-level interfaces. We describe the design and implementation of ViSlang, an interpreted language specifically tailored for scientific visualization. A major contribution of our design is the extensibility of the ViSlang language. Novel DSLs that are tailored to the problems of the domain can be created and integrated into ViSlang.
We show that our approach can be added to existing user interfaces to increase the flexibility for expert users on demand, but at the same time does not interfere with the user experience of novice users. To demonstrate the flexibility of our approach we present new DSLs for volume processing, querying and visualization. We report the implementation effort for new DSLs and compare our approach with Matlab and Python implementations in terms of run-time performance.}, author = {Rautek, Peter and Bruckner, Stefan and Gr{\"{o}}ller, M. Eduard and Hadwiger, Markus}, doi = {10.1109/TVCG.2014.2346318}, issn = {10772626}, journal = {IEEE Transactions on Visualization and Computer Graphics}, keywords = {Domain-specific languages,Volume visualization,Volume visualization framework}, number = {12}, pages = {2388--2396}, title = {{ViSlang: A system for interpreted domain-specific languages for scientific visualization}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {20}, year = {2014} } @article{Reckfort2015, abstract = {Structural connectivity of the brain can be conceptionalized as a multiscale organization. The present study is built on 3D-Polarized Light Imaging (3D-PLI), a neuroimaging technique targeting the reconstruction of nerve fiber orientations and therefore contributing to the analysis of brain connectivity. Spatial orientations of the fibers are derived from birefringence measurements of unstained histological sections that are interpreted by means of a voxel-based analysis. This implies that a single fiber orientation vector is obtained for each voxel, which reflects the net effect of all comprised fibers. We have utilized two polarimetric setups providing an object space resolution of 1.3 $\mu$m/px (microscopic setup) and 64 $\mu$m/px (macroscopic setup) to carry out 3D-PLI and retrieve fiber orientations of the same tissue samples, but at complementary voxel sizes (i.e., scales). The present study identifies the main sources which cause a discrepancy of the measured fiber orientations observed when measuring the same sample with the two polarimetric systems. As such sources the differing optical resolutions and diverging retardances of the implemented waveplates were identified. A methodology was implemented that enables the compensation of measured different systems' responses to the same birefringent sample. This opens up new ways to conduct multiscale analysis in brains by means of 3D-PLI and to provide a reliable basis for the transition between different scales of the nerve fiber architecture.}, author = {Reckfort, Julia and Wiese, Hendrik and Pietrzyk, Uwe and Zilles, Karl and Amunts, Katrin and Axer, Markus}, doi = {10.3389/fnana.2015.00118}, issn = {16625129}, journal = {Frontiers in Neuroanatomy}, keywords = {Brain,Connectome,Fiber orientation,Multiscale approach,Polarized light imaging}, number = {september}, pages = {11}, title = {{A multiscale approach for the reconstruction of the fiber architecture of the human brain based on 3D-PLI}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {9}, year = {2015} } @article{Reitz2008, abstract = {Accurate daily patient localization is becoming increasingly important in external-beam radiotherapy (RT). Mega-voltage cone-beam computed tomography (MV-CBCT) utilizing a therapy beam and an on-board electronic portal imager can be used to localize tumor volumes and verify the patient's position prior to treatment. MV-CBCT produces a static volumetric image and therefore can only account for inter-fractional changes. 
In this work, the feasibility of using the MV-CBCT raw data as a fluoroscopic series of portal images to monitor tumor changes due to e.g. respiratory motion was investigated. A method was developed to read and convert the CB raw data into a cine. To improve the contrast-to-noise ratio on the MV-CB projection data, image post-processing with filtering techniques was investigated. Volumes of interest from the planning CT were projected onto the MV-cine. Because of the small exposure and the varying thickness of the patient depending on the projection angle, soft-tissue contrast was limited. Tumor visibility as a function of tumor size and projection angle was studied. The method was well suited in the upper chest, where motion of the tumor as well as of the diaphragm could be clearly seen. In the cases of patients with non-small cell lung cancer with medium or large tumor masses, we verified that the tumor mass was always located within the PTV despite respiratory motion. However for small tumors the method is less applicable, because the visibility of those targets becomes marginal. Evaluation of motion in non-superior-inferior directions might also be limited for small tumor masses. Viewing MV-CBCT data in a cine mode adds to the utility of MV-CBCT for verification of tumor motion and for deriving individualized treatment margins. {\textcopyright} 2008 Institute of Physics and Engineering in Medicine.}, author = {Reitz, Bodo and Gayou, Olivier and Parda, David S. and Miften, Moyed}, doi = {10.1088/0031-9155/53/4/001}, issn = {00319155}, journal = {Physics in Medicine and Biology}, number = {4}, pages = {823--836}, title = {{Monitoring tumor motion with on-line mega-voltage cone-beam computed tomography imaging in a cine mode}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-39049165539{\&}doi=10.1088{\%}2F0031-9155{\%}2F53{\%}2F4{\%}2F001{\&}partnerID=40{\&}md5=1053838180560cce8431007aee909ef5}, volume = {53}, year = {2008} } @article{Riddle2011, abstract = {Objective: Although magnetic resonance imaging (MRI) is the optimal imaging modality to define cerebral white-matter injury (WMI) in preterm survivors, the histopathological features of MRI-defined chronic lesions are poorly defined. We hypothesized that chronic WMI is related to a combination of delayed oligodendrocyte (OL) lineage cell death and arrested maturation of preoligodendrocytes (preOLs). We determined whether ex vivo MRI can distinguish distinct microglial and astroglial responses related to WMI progression and arrested preOL differentiation. Methods: We employed a preterm fetal sheep model of global cerebral ischemia in which acute WMI results in selective preOL degeneration. We developed novel algorithms to register histopathologically- defined lesions with contrast-weighted and diffusion-weighted high-field ex vivo MRI data. Results: Despite mild delayed preOL degeneration, preOL density recovered to control levels by 7 days after ischemia and was {\^{a}}2 fold greater at 14 days. However, premyelinating OLs were significantly diminished at 7 and 14 days. WMI evolved to mostly gliotic lesions where arrested preOL differentiation was directly proportional to the magnitude of astrogliosis. A reduction in cerebral WM volume was accompanied by four classes of MRI-defined lesions. Each lesion type displayed unique astroglial and microglial responses that corresponded to distinct forms of necrotic or non-necrotic injury. 
High-field MRI defined 2 novel hypointense signal abnormalities on T2-weighted images that coincided with microscopic necrosis or identified astrogliosis with high sensitivity and specificity. Interpretation: These studies support the potential of high-field MRI for early identification of microscopic necrosis and gliosis with preOL maturation arrest, a common form of WMI in preterm survivors. {\textcopyright} 2011 American Neurological Association.}, author = {Riddle, Art and Dean, Justin and Buser, Joshua R. and Gong, Xi and Maire, Jennifer and Chen, Kevin and Ahmad, Tahir and Cai, Victor and Nguyen, Thuan and Kroenke, Christopher D. and Hohimer, A. Roger and Back, Stephen A.}, doi = {10.1002/ana.22501}, issn = {03645134}, journal = {Annals of Neurology}, number = {3}, pages = {493--507}, title = {{Histopathological correlates of magnetic resonance imaging-defined chronic perinatal white matter injury}}, type = {Journal Article}, volume = {70}, year = {2011} } @article{Riedel2010, abstract = {Background and Purpose: We sought to evaluate how accurately length and volume of thrombotic clots occluding cerebral arteries of patients with acute ischemic stroke can be assessed from nonenhanced CT (NECT) scans reconstructed with different slice widths. Methods: NECT image data of 58 patients with acute ischemic stroke with vascular occlusion proven by CT angiography were reconstructed with slice widths of 1.25 mm, 2.5 mm, 3.75 mm, and 5 mm. Thrombus lengths and volumes were quantified based on these NECT images by detecting and segmenting intra-arterial hyperdensities. The results were compared with reference values of thrombus length and volume obtained from CT angiography images using Bland-Altman analysis and predefined levels of tolerance to find NECT slice thicknesses that allow for sufficiently accurate thrombus quantification. Results: Thrombus length can be measured with high accuracy using the hyperdense middle cerebral artery sign detected in NECT images with slice thicknesses of 1.25 mm and 2.5 mm. We found mean deviations from the reference values and limits of agreement of -0.1 mm ± 0.6 mm with slice widths of 1.25 mm and 0.1 mm ± 0.7 mm for slice widths of 2.5 mm. Thrombus length measurements in NECT images with higher slice width and all evaluated thrombus volume measurements exhibited severe dependence on the level and did not match the accuracy criteria. Conclusion: The length of the hyperdense middle cerebral artery sign as detected on thin-slice NECT reconstructions in patients with acute ischemic stroke can be used to quantify thrombotic burden accurately. Thus, it might qualify as a new diagnostic parameter in acute stroke management that indicates and quantifies the extent of vascular obliteration. {\textcopyright} 2010 American Heart Association, Inc.}, author = {Riedel, Christian H.
and Jensen, Ulf and Rohr, Axel and Tietke, Marc and Alfke, Karsten and Ulmer, Stephan and Jansen, Olav}, doi = {10.1161/STROKEAHA.110.580662}, issn = {00392499}, journal = {Stroke}, keywords = {CT,acute care,acute stroke,embolic stroke,embolism,imaging,neuroradiology,stroke care,stroke management,thrombolysis}, number = {8}, pages = {1659--1664}, title = {{Assessment of thrombus in acute middle cerebral artery occlusion using thin-slice nonenhanced computed tomography reconstructions}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {41}, year = {2010} } @article{Rivest2009, abstract = {Daily image guidance for helical tomotherapy prostate patients is based on the registration of pretreatment megavoltage CT (MVCT) images and the original planning CT. The goal of registration, whether manual or automatic, is the overlap of the prostate; otherwise prostate misplacement may compromise the efficacy of treatment or lead to increased toxicity. A previous study demonstrated that without the aid of implanted fiducials, manual registration results in inaccurate prostate positioning. The objective of this work is to quantify prostate misplacement that results from automatic bone matching (BM) and image matching (IM) registration algorithms. 204 MVCT images from eight high-risk tomotherapy prostate patients were incorporated into this retrospective study. BM and IM registration algorithms - based on maximization of mutual information of bony anatomy only and the entire image, respectively - were used to independently register MVCT images to their respective planning images. A correlation coefficient based algorithm that uses known planning CT contour information was used for automatic prostate localization in each MVCT image. Daily prostate misplacement was determined by repositioning as calculated from the BM and the IM algorithms. Mean (± SD) and maximum 3D prostate positioning errors were 3.7 ± 2.1 mm and 11.8 mm for bone matching, and 4.6 ± 2.3 mm and 11.5 mm for image matching. In terms of translational directions, IM would lead to prostate positioning error ≥ 3 mm in any of the LR, AP or SI directions in 62{\%} of treatment fractions. The corresponding value for BM is 51{\%}. The values for positioning errors ≥ 5 mm were 29{\%} and 17{\%} for IM and BM, respectively. This data suggests automatic daily image guidance for tomotherapy prostate patients should be based on bone matching instead of image matching.}, author = {Rivest, D. Ryan C. and Riauka, Terence A. and Murtha, Albert D. and Fallone, B. Gino}, doi = {10.1120/jacmp.v10i4.3071}, issn = {15269914}, journal = {Journal of Applied Clinical Medical Physics}, keywords = {Helical tomotherapy,Image registration,Prostate motion}, number = {4}, pages = {165--176}, title = {{Prostate positioning errors associated with two automatic registration based image guidance strategies}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {10}, year = {2009} } @article{Roach2009, abstract = {Advances in systems biology and bioinformatics have highlighted that no cell population is truly uniform and that stochastic behavior is an inherent property of many biological systems. As a result, bulk measurements can be misleading even when particular care has been taken to isolate a single cell type, and measurements averaged over multiple cell populations in a tissue can be as misleading as the average height at an elementary school. 
There is a growing need for experimental techniques that can provide a combination of single cell resolution, large cell populations, and the ability to track cells over multiple time points. In this article, a microwell array cytometry platform was developed to meet this need and investigate the heterogeneity and stochasticity of cell behavior on a single cell basis. The platform consisted of a microfabricated device with high-density arrays of cell-sized microwells and custom software for automated image processing and data analysis. As a model experimental system, we used primary hepatocytes labeled with fluorescent probes sensitive to mitochondrial membrane potential and free radical generation. The cells were exposed to oxidative stress and the responses were dynamically monitored for each cell. The resulting data was then analyzed using bioinformatics techniques such as hierarchical and k-means clustering to visualize the data and identify interesting features. The results showed that clustering of the dynamic data not only enhanced comparisons between the treatment groups but also revealed a number of distinct response patterns within each treatment group. Heatmaps with hierarchical clustering also provided a data-rich complement to survival curves in a dose response experiment. The microwell array cytometry platform was shown to be powerful, easy to use, and able to provide a detailed picture of the heterogeneity present in cell responses to oxidative stress. We believe that our microwell array cytometry platform will have general utility for a wide range of questions related to cell population heterogeneity, biological stochasticity, and cell behavior under stress conditions. {\textcopyright} 2009 American Institute of Chemical Engineers.}, author = {Roach, Kenneth L. and King, Kevin R. and Uygun, Basak E. and Kohane, Isaac S. and Yarmush, Martin L. and Toner, Mehmet}, doi = {10.1002/btpr.289}, issn = {87567938}, journal = {Biotechnology Progress}, keywords = {Cytometry,Free radicals,Hepatocytes,Membrane potential,Microfabrication,Microwells,Mitochondria}, number = {6}, pages = {1772--1779}, title = {{High throughput single cell bioinformatics}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {25}, year = {2009} } @inproceedings{Rohn, abstract = {Understanding complex biological systems requires data from manifold biological levels. Often this data is analysed in some meaningful context, for example, by integrating it into biological networks. However, spatial data given as 2D images or 3D volumes is commonly not taken into consideration and analysed separately. Here we present a new approach to integrate and analyse complex multimodal biological data in space and time. We present a data structure to manage this kind of data and discuss application examples for different data integration scenarios.}, author = {Rohn, Hendrik and Klukas, Christian and Schreiber, Falk}, booktitle = {GCB 2009 - German Conference on Bioinformatics 2009}, isbn = {9783885792512}, pages = {105--115}, title = {{Integration and visualisation of multimodal biological data}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-79960202320{\&}partnerID=40{\&}md5=c0b665d0351eb138a112ad74c961d17d}, year = {2009} } @article{Rueden2017, abstract = {Background: ImageJ is an image analysis program extensively used in the biological sciences and beyond. 
Due to its ease of use, recordable macro language, and extensible plug-in architecture, ImageJ enjoys contributions from non-programmers, amateur programmers, and professional developers alike. Enabling such a diversity of contributors has resulted in a large community that spans the biological and physical sciences. However, a rapidly growing user base, diverging plugin suites, and technical limitations have revealed a clear need for a concerted software engineering effort to support emerging imaging paradigms, to ensure the software's ability to handle the requirements of modern science. Results: We rewrote the entire ImageJ codebase, engineering a redesigned plugin mechanism intended to facilitate extensibility at every level, with the goal of creating a more powerful tool that continues to serve the existing community while addressing a wider range of scientific requirements. This next-generation ImageJ, called "ImageJ2" in places where the distinction matters, provides a host of new functionality. It separates concerns, fully decoupling the data model from the user interface. It emphasizes integration with external applications to maximize interoperability. Its robust new plugin framework allows everything from image formats, to scripting languages, to visualization to be extended by the community. The redesigned data model supports arbitrarily large, N-dimensional datasets, which are increasingly common in modern image acquisition. Despite the scope of these changes, backwards compatibility is maintained such that this new functionality can be seamlessly integrated with the classic ImageJ interface, allowing users and developers to migrate to these new methods at their own pace. Conclusions: Scientific imaging benefits from open-source programs that advance new method development and deployment to a diverse audience. ImageJ has continuously evolved with this idea in mind; however, new and emerging scientific requirements have posed corresponding challenges for ImageJ's development. The described improvements provide a framework engineered for flexibility, intended to support these requirements as well as accommodate future needs. Future efforts will focus on implementing new algorithms in this framework and expanding collaborations with other popular scientific software suites.}, archivePrefix = {arXiv}, arxivId = {1701.05940}, author = {Rueden, Curtis T. and Schindelin, Johannes and Hiner, Mark C. and DeZonia, Barry E. and Walter, Alison E. and Arena, Ellen T. and Eliceiri, Kevin W.}, doi = {10.1186/s12859-017-1934-z}, eprint = {1701.05940}, issn = {14712105}, journal = {BMC Bioinformatics}, keywords = {Extensibility,Image processing,ImageJ,ImageJ2,Interoperability,N-dimensional,Open development,Open source,Reproducibility}, number = {1}, pages = {26}, title = {{ImageJ2: ImageJ for the next generation of scientific image data}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {18}, year = {2017} } @article{Rupp2006, abstract = {Applications of computer vision and signal processing are often based on a set of basic and commonly accepted ideas and algorithms. Thus, when developing new approaches, reuse plays a decisive role. Unfortunately, scientists are rarely familiar with the powerful concepts that the software engineering community provides in order to develop reusable software nor do they have the appropriate experience to apply these existing techniques right. 
For this reason, we present fundamental design and implementation aspects of component-based software frameworks, which should help to develop component-based software frameworks for a given application field.}, author = {Rupp, Stephan and Daum, Volker}, issn = {11092750}, journal = {WSEAS Transactions on Computers}, keywords = {Plugin-based software framework,Reflexion mechanism,Software components}, number = {2}, pages = {425--432}, title = {{Design and implementation aspects for plugin-base software frameworks}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-33645140919{\&}partnerID=40{\&}md5=8bdab8bf2d3ec049f4709850ec61d0c4}, volume = {5}, year = {2006} } @article{Rusu2014, abstract = {Purpose: In this work, the authors introduce a novel framework, the anatomically constrained registration (AnCoR) scheme, and apply it to create a fused anatomic-disease atlas of the prostate which the authors refer to as the prostatome. The prostatome combines an MRI based anatomic and a histology based disease atlas. Statistical imaging atlases allow for the integration of information across multiple scales and imaging modalities into a single canonical representation, in turn enabling a fused anatomical-disease representation which may facilitate the characterization of disease appearance relative to anatomic structures. While statistical atlases have been extensively developed and studied for the brain, approaches that have attempted to combine pathology and imaging data for study of prostate pathology are not extant. This work seeks to address this gap. Methods: The AnCoR framework optimizes a scoring function composed of two surface (prostate and central gland) misalignment measures and one intensity-based similarity term. This ensures the correct mapping of anatomic regions into the atlas, even when regional MRI intensities are inconsistent or highly variable between subjects. The framework allows for creation of an anatomic imaging and a disease atlas, while enabling their fusion into the anatomic imaging-disease atlas. The atlas presented here was constructed using 83 subjects with biopsy confirmed cancer who had pre-operative MRI (collected at two institutions) followed by radical prostatectomy. The imaging atlas results from mapping the in vivo MRI into the canonical space, while the anatomic regions serve as domain constraints. Elastic co-registration of MRI and corresponding ex vivo histology provides "ground truth" mapping of cancer extent on in vivo imaging for 23 subjects. Results: AnCoR was evaluated relative to alternative construction strategies that use either MRI intensities or the prostate surface alone for registration. The AnCoR framework yielded a central gland Dice similarity coefficient (DSC) of 90{\%}, and prostate DSC of 88{\%}, while the misalignment of the urethra and verumontanum was found to be 3.45 mm, and 4.73 mm, respectively, which were measured to be significantly smaller compared to the alternative strategies. As might have been anticipated from our limited cohort of biopsy confirmed cancers, the disease atlas showed that most of the tumor extent was limited to the peripheral zone. Moreover, central gland tumors were typically larger in size, possibly because they are only discernible at a much later stage. Conclusions: The authors presented the AnCoR framework to explicitly model anatomic constraints for the construction of a fused anatomic imaging-disease atlas.
The framework was applied to constructing a preliminary version of an anatomic-disease atlas of the prostate, the prostatome. The prostatome could facilitate the quantitative characterization of gland morphology and imaging features of prostate cancer. These techniques, may be applied on a large sample size data set to create a fully developed prostatome that could serve as a spatial prior for targeted biopsies by urologists. Additionally, the AnCoR framework could allow for incorporation of complementary imaging and molecular data, thereby enabling their careful correlation for population based radio-omics studies. {\textcopyright} 2014 American Association of Physicists in Medicine.}, author = {Rusu, Mirabela and Bloch, B. Nicolas and Jaffe, Carl C. and Genega, Elizabeth M. and Lenkinski, Robert E. and Rofsky, Neil M. and Feleppa, Ernest and Madabhushi, Anant}, doi = {10.1118/1.4881515}, issn = {00942405}, journal = {Medical Physics}, keywords = {anatomic imaging atlas,guided biopsy,image processing,in vivo imaging,prostate cancer}, number = {7}, pages = {12}, title = {{Prostatome: A combined anatomical and disease based MRI atlas of the prostate}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {41}, year = {2014} } @article{Savolainen2013, abstract = {Boron Neutron Capture Therapy (BNCT) is a binary radiotherapy method developed to treat patients with certain malignant tumours. To date, over 300 treatments have been carried out at the Finnish BNCT facility in various on-going and past clinical trials. In this technical review, we discuss our research work in the field of medical physics to form the groundwork for the Finnish BNCT patient treatments, as well as the possibilities to further develop and optimize the method in the future. Accordingly, the following aspects are described: neutron sources, beam dosimetry, treatment planning, boron imaging and determination, and finally the possibilities to detect the efficacy and effects of BNCT on patients. {\textcopyright} 2012 Associazione Italiana di Fisica Medica.}, author = {Savolainen, Sauli and Kortesniemi, Mika and Timonen, Marjut and Reijonen, Vappu and Kuusela, Linda and Uusi-Simola, Jouni and Salli, Eero and Koivunoro, Hanna and Sepp{\"{a}}l{\"{a}}, Tiina and L{\"{o}}nnroth, Nadja and V{\"{a}}lim{\"{a}}ki, Petteri and Hyv{\"{o}}nen, Heini and Kotiluoto, Petri and Ser{\'{e}}n, Tom and Kuronen, Antti and Heikkinen, Sami and Kosunen, Antti and Auterinen, Iiro}, doi = {10.1016/j.ejmp.2012.04.008}, issn = {11201797}, journal = {Physica Medica}, keywords = {Beam dosimetry,Boron imaging and determination,Diffusion imaging,Dose calculation,Image registration,Neutron dosimetry,Neutron sources,Treatment planning}, number = {3}, pages = {233--248}, title = {{Boron neutron capture therapy (BNCT) in Finland: Technological and physical prospects after 20 years of experiences}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {29}, year = {2013} } @inbook{Scheenstra2007, abstract = {In biological image processing the segmentation of a volume is, although tedious, required for many applications, like the comparison of structures and annotation purposes. To automate this process, we present a segmentation method for various structures of the mouse brain. The segmentation consists of two parts; first a rough affine atlas-based registration was performed and second, the edges between structures were refined by an adapted Markov random field clustering approach. 
The segmentations results were compared to manual segmentations from two experts. The presented automatic segmentation method is quick, intuitive and suitable for registration purposes, but also for biological objectives, like comparison and annotation.}, address = {Bellingham}, author = {Scheenstra, Alize E. H. and Dijkstra, Jouke and van de Ven, Rob C. G. and van der Weerd, Louise and Reiber, Johan H. C.}, booktitle = {Medical Imaging 2007: Physiology, Function, and Structure from Medical Images}, doi = {10.1117/12.708867}, editor = {Manduca, A and Hu, X P}, isbn = {0819466298}, issn = {16057422}, pages = {651106}, publisher = {Spie-Int Soc Optical Engineering}, series = {Proceedings of SPIE}, title = {{Automated segmentation of the ex vivo mouse brain}}, type = {Book Section}, url = {{\%}3CGo to}, volume = {6511}, year = {2007} } @incollection{Scheenstra2009, abstract = {Non-rigid registration of MR images to a common reference image results in deformation fields, from which anatomical differences can be statistically assessed, within and between populations. Without further assumptions, nonparametric tests are required and currently the analysis of deformation fields is performed by permutation tests. For deformation fields, often the vector magnitude is chosen as test statistic, resulting in a loss of information. In this paper, we consider the three dimensional Moore-Rayleigh test as an alternative for permutation tests. This nonparametric test offers two novel features: first, it incorporates both the directions and magnitude of the deformation vectors. Second, as its distribution function is available in closed form, this test statistic can be used in a clinical setting. Using synthetic data that represents variations as commonly encountered in clinical data, we show that the Moore-Rayleigh test outperforms the classical permutation test. {\textcopyright} 2009 Springer Berlin Heidelberg.}, author = {Scheenstra, Alize E.H. and Muskulus, Michael and Staring, Marius and {Van Den Maagdenberg}, Arn M.J.V. and {Verduyn Lunel}, Sjoerd and Reiber, J. Hans C. and {Van Der Weerd}, Louise and Dijkstra, Jouke}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, doi = {10.1007/978-3-642-02498-6_47}, isbn = {3642024971}, issn = {03029743}, pages = {564--575}, title = {{The 3D moore-rayleigh test for the quantitative groupwise comparison of MR brain images}}, type = {Serial}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-70349313285{\&}doi=10.1007{\%}2F978-3-642-02498-6{\_}47{\&}partnerID=40{\&}md5=6667d3c02eda486716d2cfb5e09e139d}, volume = {5636 LNCS}, year = {2009} } @article{Scheenstra2009a, abstract = {Segmentation of magnetic resonance imaging (MRI) data is required for many applications, such as the comparison of different structures or time points, and for annotation purposes. Currently, the gold standard for automated image segmentation is nonlinear atlas-based segmentation. However, these methods are either not sufficient or highly time consuming for mouse brains, owing to the low signal to noise ratio and low contrast between structures compared with other applications. We present a novel generic approach to reduce processing time for segmentation of various structures of mouse brains, in vivo and ex vivo. The segmentation consists of a rough affine registration to a template followed by a clustering approach to refine the rough segmentation near the edges. 
Compared with manual segmentations, the presented segmentation method has an average kappa index of 0.7 for 7 of 12 structures in in vivo MRI and 11 of 12 structures in ex vivo MRI. Furthermore, we found that these results were equal to the performance of a nonlinear segmentation method, but with the advantage of being 8 times faster. The presented automatic segmentation method is quick and intuitive and can be used for image registration, volume quantification of structures, and annotation. {\textcopyright} 2009 BC Decker Inc.}, author = {Dijkstra, Jouke and Scheenstra, Alize E.H. and {Van De Ven}, Rob C.G. and {Van Weerd}, Louise Der and {Van Den Maagdenberg}, Arn M.J.M. and Reiber, Johan H.C.}, doi = {10.2310/7290.2009.00004}, issn = {15353508}, journal = {Molecular Imaging}, number = {1}, pages = {35--44}, title = {{Automated segmentation of in vivo and ex vivo mouse brain magnetic resonance images}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {8}, year = {2009} } @article{Schindelin2015, abstract = {Technology in microscopy advances rapidly, enabling increasingly affordable, faster, and more precise quantitative biomedical imaging, which necessitates correspondingly more-advanced image processing and analysis techniques. A wide range of software is available-from commercial to academic, special-purpose to Swiss army knife, small to large-but a key characteristic of software that is suitable for scientific inquiry is its accessibility. Open-source software is ideal for scientific endeavors because it can be freely inspected, modified, and redistributed; in particular, the open-software platform ImageJ has had a huge impact on the life sciences, and continues to do so. From its inception, ImageJ has grown significantly due largely to being freely available and its vibrant and helpful user community. Scientists as diverse as interested hobbyists, technical assistants, students, scientific staff, and advanced biology researchers use ImageJ on a daily basis, and exchange knowledge via its dedicated mailing list. Uses of ImageJ range from data visualization and teaching to advanced image processing and statistical analysis. The software's extensibility continues to attract biologists at all career stages as well as computer scientists who wish to effectively implement specific image-processing algorithms. In this review, we use the ImageJ project as a case study of how open-source software fosters its suites of software tools, making multitudes of image-analysis technology easily accessible to the scientific community. We specifically explore what makes ImageJ so popular, how it impacts the life sciences, how it inspires other projects, and how it is self-influenced by coevolving projects within the ImageJ ecosystem.}, author = {Schindelin, Johannes and Rueden, Curtis T. and Hiner, Mark C. and Eliceiri, Kevin W.}, doi = {10.1002/mrd.22489}, issn = {10982795}, journal = {Molecular Reproduction and Development}, number = {7-8}, pages = {518--529}, title = {{The ImageJ ecosystem: An open platform for biomedical image analysis}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {82}, year = {2015} } @article{Schmitz2017, abstract = {Three-dimensional multicellular aggregates such as spheroids provide reliable in vitro substitutes for tissues. Quantitative characterization of spheroids at the cellular level is fundamental. 
We present the first pipeline that provides three-dimensional, high-quality images of intact spheroids at cellular resolution and a comprehensive image analysis that completes traditional image segmentation by algorithms from other fields. The pipeline combines light sheet-based fluorescence microscopy of optically cleared spheroids with automated nuclei segmentation (F score: 0.88) and concepts from graph analysis and computational topology. Incorporating cell graphs and alpha shapes provided more than 30 features of individual nuclei, the cellular neighborhood and the spheroid morphology. The application of our pipeline to a set of breast carcinoma spheroids revealed two concentric layers of different cell density for more than 30,000 cells. The thickness of the outer cell layer depends on a spheroid's size and varies between 50{\%} and 75{\%} of its radius. In differently-sized spheroids, we detected patches of different cell densities ranging from 5 × 10$^{5}$ to 1 × 10$^{6}$ cells/mm$^{3}$. Since cell density affects cell behavior in tissues, structural heterogeneities need to be incorporated into existing models. Our image analysis pipeline provides a multiscale approach to obtain the relevant data for a system-level understanding of tissue architecture.}, author = {Schmitz, Alexander and Fischer, Sabine C. and Mattheyer, Christian and Pampaloni, Francesco and Stelzer, Ernst H.K.}, doi = {10.1038/srep43693}, issn = {20452322}, journal = {Scientific Reports}, pages = {13}, title = {{Multiscale image analysis reveals structural heterogeneity of the cell microenvironment in homotypic spheroids}}, type = {Journal Article}, volume = {7}, year = {2017} } @article{Schmitz2018, abstract = {3D-Polarized Light Imaging (3D-PLI) enables high-resolution three-dimensional mapping of the nerve fiber architecture in unstained histological brain sections based on the intrinsic birefringence of myelinated nerve fibers. The interpretation of the measured birefringent signals comes with conjointly measured information about the local fiber birefringence strength and the fiber orientation. In this study, we present a novel approach to disentangle both parameters from each other based on a weighted least squares routine (ROFL) applied to oblique polarimetric 3D-PLI measurements. This approach was compared to a previously described analytical method on simulated and experimental data obtained from a post mortem human brain. Analysis of the simulations revealed in case of ROFL a distinctly increased level of confidence to determine steep and flat fiber orientations with respect to the brain sectioning plane. Based on analysis of histological sections of a human brain dataset, it was demonstrated that ROFL provides a coherent characterization of cortical, subcortical, and white matter regions in terms of fiber orientation and birefringence strength, within and across sections. Oblique measurements combined with ROFL analysis opens up new ways to determine physical brain tissue properties by means of 3D-PLI microscopy.}, author = {Schmitz, Daniel and Muenzing, Sascha E.A.
and Schober, Martin and Schubert, Nicole and Minnerop, Martina and Lippert, Thomas and Amunts, Katrin and Axer, Markus}, doi = {10.3389/fnana.2018.00075}, issn = {16625129}, journal = {Frontiers in Neuroanatomy}, keywords = {3D-PLI,Fiber architecture,Modeling,Neuroimaging,White matter anatomy}, pages = {15}, title = {{Derivation of Fiber Orientations From Oblique Views Through Human Brain Sections in 3D-Polarized Light Imaging}}, type = {Journal Article}, volume = {12}, year = {2018} } @inproceedings{Schreiber, abstract = {This article describes a method for the automatic detection of the proximal femur in radiographs using a template-based mutual information registration method. It will be part of a planned, larger system for automated estimation of osteoporosis in the femoral neck. Our multi-step optimization process achieves a successful registration rate of 70{\%} to 95{\%}.}, author = {Schreiber, Jan and Schubert, Rainer and Kuhn, Volker}, booktitle = {Informatik aktuell}, doi = {10.1007/3-540-32137-3_23}, isbn = {9783540321361}, issn = {1431472X}, pages = {111--115}, title = {{Femur detection in radiographs using template-based registration}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-77949583057{\&}partnerID=40{\&}md5=71c5eb7ea5d62e5a2e9b0450a8d52b56}, year = {2006} } @incollection{Shah2014, abstract = {Although various modalities are used in prostate cancer imaging, transrectal ultrasound (TRUS) guided biopsy remains the gold standard for diagnosis. However, TRUS suffers from low sensitivity, leading to an elevated rate of false negative results. Magnetic Resonance Imaging (MRI) on the other hand provides currently the most accurate image-based evaluation of the prostate. Thus, TRUS/MRI fusion image-guided biopsy has evolved to be the method of choice to circumvent the limitations of TRUS-only biopsy. Most commercial frameworks that offer such a solution rely on rigid TRUS/MRI fusion and rarely use additional information from other modalities such as Positron Emission Tomography (PET). Other frameworks require long interaction times and are complex to integrate with the clinical workflow. Available solutions are not fully able to meet the clinical requirements of speed and high precision at low cost simultaneously. We introduce an open source fusion biopsy framework that is low cost, simple to use and has minimal overhead in clinical workflow. Hence, it is ideal as a research platform for the implementation and rapid bench to bedside translation of new image registration and visualization approaches.
We present the current status of the framework that uses pre-interventional PET and MRI rigidly registered with 3D TRUS for prostate biopsy guidance and discuss results from the first clinical cases.}, author = {Shah, Amit and Zettinig, Oliver and Maurer, Tobias and Precup, Cristina and {Schulte Zu Berge}, Christian and Weiss, Jakob and Frisch, Benjamin and Navab, Nassir}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, doi = {10.1007/978-3-319-13909-8_1}, issn = {16113349}, keywords = {MRI,Multimodal image-guided biopsy,Open source software,PET,Prostate cancer,TRUS}, title = {{An Open Source Multimodal Image-Guided Prostate Biopsy Framework}}, type = {Serial}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84921489612{\&}doi=10.1007{\%}2F978-3-319-13909-8{\_}1{\&}partnerID=40{\&}md5=dda55817f88614188e848aa38b528c3b}, volume = {8680}, year = {2014} } @inproceedings{Shah, abstract = {Studies have shown that the vascular structure of a solitary pulmonary nodule (SPN) can give insight into the diagnosis of the nodule. The purpose of this study is to investigate the utility of texture analysis as a quantitative measure of the vascular structure of a nodule. A contrast CT study was conducted for 29 patients with an indeterminate SPN. For each patient, the post-contrast series at maximum enhancement was volumetrically registered to the pre-contrast series. The two registered series were subtracted to form difference images of the nodule and each voxel was color-coded into 7 bins. Initially, a representative image of each nodule was subjectively rated on a five-point scale by a radiologist as to the magnitude, extent, and heterogeneity of the enhancement. From the initial analysis the heterogeneity of the nodule was found to be significantly different for benign versus malignant nodules (p{\textless}0.01), while the other two ratings were found not to be significant. We then attempted to quantify this subjective rating of heterogeneity by calculating 14 textural features based on co-occurrence matrices. These features included various measures of contrast, entropy, energy, etc. Dimension reduction techniques such as principal component and factor analysis were applied to the features to reduce the 14 variables to one factor. The mean of this factor was significantly different for malignant versus benign nodules (p=0.010). Texture analysis of contrast enhancement maps appears to be a useful tool to characterize SPNs.}, author = {Shah, Sumit K. and McNitt-Gray, Michael F. and Petkovska, Iva and Kim, Hyun Jun and DeZoysa, Kheshini R. and Goldin, Jonathan G. and Suh, Robert D. and Aberle, Denise R.}, booktitle = {Medical Imaging 2005: Image Processing}, doi = {10.1117/12.595874}, issn = {16057422}, pages = {1950}, title = {{Solitary pulmonary nodule characterization on CT by use of contrast enhancement maps}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-23844470662{\&}doi=10.1117{\%}2F12.595874{\&}partnerID=40{\&}md5=cb9ec8d5c13ffb11b3fdaa3d4b5bb75d}, volume = {5747}, year = {2005} } @article{Shams2018, abstract = {Image guidance has become the standard of care for patient positioning in radiotherapy, where image registration is often a critical step to help manage patient motion.
However, in practice, verification of registration quality is often adversely affected by difficulty in manual inspection of 3-D images and time constraint, thus affecting the therapeutic outcome. Therefore, we proposed to employ both bootstrapping and the supervised learning methods of linear discriminant analysis and random forest to help robustly assess registration quality in ultrasound-guided radiotherapy. We validated both approaches using phantom and real clinical ultrasound images, and showed that both performed well for the task. While learning-based techniques offer better accuracy and shorter evaluation time, bootstrapping requires no prior training and has a higher sensitivity.}, author = {Shams, Roozbeh and Xiao, Yiming and Hebert, Francois and Abramowitz, Matthew and Brooks, Rupert and Rivaz, Hassan}, doi = {10.1109/TMI.2017.2755695}, issn = {1558254X}, journal = {IEEE Transactions on Medical Imaging}, keywords = {Radiotherapy,bootstrapping,image registration,motion management,quality management,supervised learning}, number = {2}, pages = {428--437}, title = {{Assessment of Rigid Registration Quality Measures in Ultrasound-Guided Radiotherapy}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85030780937{\&}doi=10.1109{\%}2FTMI.2017.2755695{\&}partnerID=40{\&}md5=c0b386f770628888db0e4e521b9905ba}, volume = {37}, year = {2018} } @article{Shao2014, abstract = {Intensity-based 2D/3D registration is a key technique using digitally reconstructed radiographs (DRRs) to register the preoperative volume to the patient setup during the operation. Although DRR-based method provides a high accuracy, the small capture range hinders its clinical use. In this paper, such problem was addressed by a robust and fast initialization method using a two-level scheme including automatic tracking-based initialization (Level I) and multiresolution estimation based on central-slice theorem and phase correlation (Level II). It provided almost optimal transformation parameters for intensity-based registration. Experiments using a public gold standard data set and a spinal phantom have been conducted. The mean target registration error (mTRE) was limited in the range from 2.12 mm to 22.57 mm after tracking-based initialization. The capture range based on level II only was 20.1 mm and the mTRE in this capture range was 2.92 ± 2.21 mm. The intensity-based 2D/3D registration using proposed two-level initialization achieved the successful rate of 84.8{\%} with the average error of 2.36 mm. The experimental results showed that the proposed method yielded the robust and fast initialization for intensity-based registration methods. In a similar way, it can be applied to other registration methods to enable a larger capture range and robust implementation. {\textcopyright} 2014 Zhenzhou Shao et al.}, author = {Shao, Zhenzhou and Han, Jianda and Liang, Wei and Tan, Jindong and Guan, Yong}, doi = {10.1155/2014/989254}, issn = {16878140}, journal = {Advances in Mechanical Engineering}, pages = {12}, title = {{Robust and fast initialization for intensity-based 2D/3D registration}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {2014}, year = {2014} } @inproceedings{Shen, abstract = {This paper describes the Medical Visualizer, a real-time visualization system for analyzing medical volumetric data in various virtual environments, such as autostereoscopic displays, dual-projector screens and immersive environments such as the CAVE. 
Direct volume rendering is used for visualizing the details of medical volumetric data sets without intermediate geometric representations. By interactively manipulating the color and transparency functions through the friendly user interface, radiologists can either inspect the data set as a whole or focus on a specific region. In our system, 3D texture hardware is employed to accelerate the rendering process. The system is designed to be platform independent, as all virtual reality functions are separated from kernel functions. Due to its modular design, our system can be easily extended to other virtual environments, and new functions can be incorporated rapidly. {\textcopyright} 2008 IEEE.}, author = {Shen, Rui and Boulanger, Pierre and Noga, Michelle}, booktitle = {Proceedings - 5th International Conference BioMedical Visualization, Information Visualization in Medical and Biomedical Informatics, MediVis 2008}, doi = {10.1109/MediVis.2008.10}, isbn = {9780769532844}, pages = {63--70}, title = {{Med vis: A real-time immersive visualization environment for the exploration of medical volumetric data}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-57849137509{\&}doi=10.1109{\%}2FMediVis.2008.10{\&}partnerID=40{\&}md5=11410f98c18a5d2bb0c6538a8af8ee5e}, year = {2008} } @inproceedings{Shena, abstract = {Nowadays, the effective utilization of open-source software could significantly boost both clinical research and practices, especially in resource-poor countries. However, the plethora of open-source clinical software has left many people unable to quickly locate the appropriate one for their needs. Commonly available software quality metrics and software documentation, such as downloads, forks, stars, and readme files, are useful selection criteria, but they only indicate the software quality from the perspective of IT experts. This paper proposes a method that offers additional insights on the performance and effectiveness of clinical software. It links open-source clinical software with relevant scientific literature, such as papers that use case studies of clinical software to reveal the strength and weakness of a given software from the clinical perspective. To interactively present the open-source clinical software and their related literature, we have developed the LOCATE web application that enables users to explore related literature for a given opensource clinical software. Moreover, the peer-review cycle of the application allows users to improve the application by confirming, adding or removing related literature. 
An evaluation experiment of the five most popular open-source clinical tools demonstrates the potential usefulness of LOCATE.}, author = {Shen, Zhengru and Spruit, Marco}, booktitle = {HEALTHINF 2019 - 12th International Conference on Health Informatics, Proceedings; Part of 12th International Joint Conference on Biomedical Engineering Systems and Technologies, BIOSTEC 2019}, doi = {10.5220/0007378702940301}, isbn = {9789897583537}, keywords = {Github Repository,Literature,Open-source Clinical Software,Web Application}, pages = {294--301}, title = {{LOCATE: A web application to link open-source clinical software with literature}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85064637892{\&}partnerID=40{\&}md5=e5fa660669c558180b42f873a3ef3683}, year = {2019} } @article{Sjoberg2013, abstract = {Label fusion multi-atlas approaches for image segmentation can give better segmentation results than single atlas methods. We present a multi-atlas label fusion strategy based on probabilistic weighting of distance maps. Relationships between image similarities and segmentation similarities are estimated in a learning phase and used to derive fusion weights that are proportional to the probability for each atlas to improve the segmentation result. The method was tested using a leave-one-out strategy on a database of 21 pre-segmented prostate patients for different image registrations combined with different image similarity scorings. The probabilistic weighting yields results that are equal or better compared to both fusion with equal weights and results using the STAPLE algorithm. Results from the experiments demonstrate that label fusion by weighted distance maps is feasible, and that probabilistic weighted fusion improves segmentation quality more the stronger the individual atlas segmentation quality depends on the corresponding registered image similarity. The regions used for evaluation of the image similarity measures were found to be more important than the choice of similarity measure. {\textcopyright} 2013 Elsevier Ireland Ltd.}, author = {Sj{\"{o}}berg, C. and Ahnesj{\"{o}}, A.}, doi = {10.1016/j.cmpb.2012.12.006}, issn = {01692607}, journal = {Computer Methods and Programs in Biomedicine}, keywords = {Atlas based segmentation,Deformable registration,Label fusion,Multi-atlas segmentation,Radiotherapy prostate,Segmentation}, number = {3}, pages = {308--319}, title = {{Multi-atlas based segmentation using probabilistic label fusion with adaptive weighting of image similarity measures}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {110}, year = {2013} } @article{Song2020, abstract = {To overcome the mechanical drawback of bioink, we proposed a supporter model to enhance the mechanical strength of bioprinted 3D constructs, in which a unit-assembly idea was involved. Based on Computed Tomography images of critical-sized rabbit bone defect, the 3D re-construction was accomplished by a sequenced process using Mimics 17.0, BioCAM and BioCAD software. 3D constructs were bioprinted using polycaprolactone (PCL) ink for the outer supporter under extrusion mode, and cell-laden tricalcium phosphate (TCP)/alginate bioink for the inner filler under air pressure dispensing mode. The relationship of viscosity of bioinks, 3D bioprinting pressure, TCP/alginate ratio and cell survival were investigated by the shear viscosities analysis, live/dead cell test and cell-counting kit 8 measurement. 
The viscosity of bioinks at 1.0 s−1-shear rate could be adjusted within the range of 1.75 ± 0.29 Pa{\textperiodcentered}s to 155.65 ± 10.86 Pa{\textperiodcentered}s by changing alginate concentration, corresponding to 10 kPa–130 kPa of printing pressure. This design with PCL supporter could significantly enhance the compressive strength and compressive modulus of standardized 3D mechanical testing specimens up to 2.15 ± 0.14 MPa to 2.58 ± 0.09 MPa, and 42.83 ± 4.75 MPa to 53.12 ± 1.19 MPa, respectively. Cells could maintain the high viability (over 80{\%}) under the given printing pressure but cell viability declined with the increase of TCP content. Cell survival after experiencing 7 days of cell culture could be achieved when the ratio of TCP/alginate was 1 : 4. All data supported the feasibility of the supporter and unit-assembly model to enhance mechanical properties of bioprinted 3D constructs.}, author = {Song, Jie Liang and Fu, Xin Ye and Raza, Ali and Shen, Nai An and Xue, Ya Qi and Wang, Hua Jie and Wang, Jin Ye}, doi = {10.1016/j.jmbbm.2019.103533}, issn = {18780180}, journal = {Journal of the Mechanical Behavior of Biomedical Materials}, keywords = {Alginate-TCP bioink,Bioprinting,Bone defect,PCL supporter,Unit-assembly model}, title = {{Enhancement of mechanical strength of TCP-alginate based bioprinted constructs}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85074942779{\&}doi=10.1016{\%}2Fj.jmbbm.2019.103533{\&}partnerID=40{\&}md5=5a9d1f1c5ade6a354c2e6bf7adb9613f}, volume = {103}, year = {2020} } @article{Song2009, abstract = {Objective: To establish a virtual surgery system based on three-dimensional visualization technique which can help the doctors to plot the surgery. Methods: A curve was created on the 3D space, and some points added on the curve adjusted by controlling the points which could be added and deleted. The angle could be computed according to the final curves, thus to place the scalpel to give convenience to the real surgery. Results: The virtual surgery system was used in hospital, and the results were satisfied. Conclusion: The virtual surgery system added to medical image process system is helpful to decrease risk of surgical operation.}, author = {Song, Li Mei and Luo, Jing and Wen, Yu Hua}, issn = {10033289}, journal = {Chinese Journal of Medical Imaging Technology}, keywords = {Image process,Virtual segmentation,Virtual surgery,Visualization}, number = {8}, pages = {1481--1484}, title = {{Virtual surgery based on medical images}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-78650586605{\&}partnerID=40{\&}md5=047142ba89c1419e7f5474c726cfdc20}, volume = {25}, year = {2009} } @book{Song2009a, abstract = {Objective: To establish a virtual surgery system based on three-dimensional visualization technique which can help the doctors to plot the surgery. Methods: A curve was created on the 3D space, and some points added on the curve adjusted by controlling the points which could be added and deleted. The angle could be computed according to the final curves, thus to place the scalpel to give convenience to the real surgery. Results: The virtual surgery system was used in hospital, and the results were satisfied. 
Conclusion: The virtual surgery system added to medical image process system is helpful to decrease risk of surgical operation.}, address = {New York}, author = {Song, Li Mei and Luo, Jing and Wen, Yu Hua}, booktitle = {Chinese Journal of Medical Imaging Technology}, isbn = {978-1-4244-2901-1}, issn = {10033289}, keywords = {Image process,Virtual segmentation,Virtual surgery,Visualization}, number = {8}, pages = {1481--1484}, publisher = {Ieee}, series = {2009 3rd International Conference on Bioinformatics and Biomedical Engineering, Vols 1-11}, title = {{Virtual surgery based on medical images}}, type = {Book}, url = {{\%}3CGo to}, volume = {25}, year = {2009} } @article{Soomro2019, abstract = {The main goal of this work is to automatically segment colorectal tumors in 3D T2-weighted (T2w) MRI with reasonable accuracy. For such a purpose, a novel deep learning-based algorithm suited for volumetric colorectal tumor segmentation is proposed. The proposed CNN architecture, based on densely connected neural network, contains multiscale dense interconnectivity between layers of fine and coarse scales, thus leveraging multiscale contextual information in the network to get better flow of information throughout the network. Additionally, the 3D level-set algorithm was incorporated as a postprocessing task to refine contours of the network predicted segmentation. The method was assessed on T2-weighted 3D MRI of 43 patients diagnosed with locally advanced colorectal tumor (cT3/T4). Cross validation was performed in 100 rounds by partitioning the dataset into 30 volumes for training and 13 for testing. Three performance metrics were computed to assess the similarity between predicted segmentation and the ground truth (i.e., manual segmentation by an expert radiologist/oncologist), including Dice similarity coefficient (DSC), recall rate (RR), and average surface distance (ASD). The above performance metrics were computed in terms of mean and standard deviation (mean ± standard deviation). The DSC, RR, and ASD were 0.8406 ± 0.0191, 0.8513 ± 0.0201, and 2.6407 ± 2.7975 before postprocessing, and these performance metrics became 0.8585 ± 0.0184, 0.8719 ± 0.0195, and 2.5401 ± 2.402 after postprocessing, respectively. We compared our proposed method to other existing volumetric medical image segmentation baseline methods (particularly 3D U-net and DenseVoxNet) in our segmentation tasks. The experimental results reveal that the proposed method has achieved better performance in colorectal tumor segmentation in volumetric MRI than the other baseline techniques.}, author = {Soomro, Mumtaz Hussain and Coppotelli, Matteo and Conforto, Silvia and Schmid, Maurizio and Giunta, Gaetano and {Del Secco}, Lorenzo and Neri, Emanuele and Caruso, Damiano and Rengo, Marco and Laghi, Andrea}, doi = {10.1155/2019/1075434}, issn = {20402309}, journal = {Journal of Healthcare Engineering}, pages = {11}, title = {{Automated segmentation of colorectal tumor in 3D MRI Using 3D multiscale densely connected convolutional neural network}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {2019}, year = {2019} } @article{Soudah2017, abstract = {In the last few years, wall shear stress (WSS) has arisen as a new diagnostic indicator in patients with arterial disease. There is a substantial evidence that the WSS plays a significant role, together with hemodynamic indicators, in initiation and progression of the vascular diseases. 
Estimation of WSS values, therefore, may be of clinical significance and the methods employed for its measurement are crucial for the clinical community. Recently, four-dimensional (4D) flow cardiovascular magnetic resonance (CMR) has been widely used in a number of applications for visualization and quantification of blood flow, and although the sensitivity to blood flow measurement has increased, it is not yet able to provide an accurate three-dimensional (3D) WSS distribution. The aim of this work is to evaluate the aortic blood flow features and the associated WSS by the combination of 4D flow cardiovascular magnetic resonance (4D CMR) and computational fluid dynamics technique. In particular, in this work, we used the 4D CMR to obtain the spatial domain and the boundary conditions needed to estimate the WSS within the entire thoracic aorta using computational fluid dynamics. Similar WSS distributions were found for the simulated cases. A sensitivity analysis was done to check the accuracy of the method. 4D CMR begins to be a reliable tool to estimate the WSS within the entire thoracic aorta using computational fluid dynamics. The combination of both techniques may provide the ideal tool to help tackle these and other problems related to wall shear estimation.}, author = {Soudah, E. and Casacuberta, J. and Gamez-Montero, P. J. and P{\'{e}}rez, J. S. and Rodr{\'{i}}guez-Cancio, M. and Raush, G. and Li, C. H. and Carreras, F. and Castilla, R.}, doi = {10.1142/S0219519417500464}, issn = {02195194}, journal = {Journal of Mechanics in Medicine and Biology}, keywords = {Phase-contrast MRI,blood flow patterns,computational fluid dynamics,velocity mapping,wall shear stress}, number = {3}, pages = {16}, title = {{Estimation of wall shear stress using 4D flow cardiovascular MRI and computational fluid dynamics}}, type = {Journal Article}, volume = {17}, year = {2017} } @article{Spadea2014, abstract = {Methods and Materials: Six lung cancer patients' CT scans were preprocessed by masking out the gross tumor volume (GTV), and digitally reconstructed radiographs along the planned beam's eye view (BEV) were generated, for a total of 27 projections. Proton radiographies (PR) were also computed for the same BEV through Monte Carlo simulations. The digitally reconstructed radiograph was subtracted from the corresponding proton image, resulting in a contrast-enhanced proton radiography (CEPR). Michelson contrast analysis was performed both on PR and CEPR. The tumor region was then automatically segmented on CEPR and compared to the ground truth (GT) provided by physicians in terms of Dice coefficient, accuracy, precision, sensitivity, and specificity. Purpose: To obtain a contrasted image of the tumor region during the setup for proton therapy in lung patients, by using proton radiography and x-ray computed tomography (CT) prior knowledge. Results: Contrast on CEPR was, on average, 4 times better than on PR. For 10 lateral projections (±45{\textdegree} off of 90{\textdegree} or 270{\textdegree}), although it was not possible to distinguish the tumor region in the PR, CEPR offers excellent GTV visibility. The median ± quartile values of Dice, precision, and accuracy indexes were 0.86 ± 0.03, 0.86 ± 0.06, and 0.88 ± 0.02, respectively, thus confirming the reliability of the method in highlighting tumor boundaries. Sensitivity and specificity analysis demonstrated that there is no systematic over- or underestimation of the tumor region.
Identification of the tumor boundaries using CEPR resulted in a more accurate and precise definition of GTV compared to that obtained from pretreatment CT. Conclusions: In most proton centers, the current clinical protocol is to align the patient using kV imaging with bony anatomy as a reference. We demonstrated that CEPR can significantly improve tumor visualization, allowing better patient set-up and permitting image guided proton therapy (IGPT).}, author = {Spadea, Maria Francesca and Fassi, Aurora and Zaffino, Paolo and Riboldi, Marco and Baroni, Guido and Depauw, Nicolas and Seco, Joao}, doi = {10.1016/j.ijrobp.2014.06.057}, issn = {1879355X}, journal = {International Journal of Radiation Oncology Biology Physics}, number = {3}, pages = {628--636}, title = {{Contrast-enhanced proton radiography for patient set-up by using x-ray CT prior knowledge}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {90}, year = {2014} } @article{Spanakis2019, abstract = {Evolutionary algorithms have been used recently as an alternative in image registration, especially in cases where the similarity function is non-convex with many local optima. However, their drawback is that they tend to be computationally expensive. Trying to avoid local minima can increase the computational cost. The purpose of authors' research is to minimise the duration of the image registration process. This paper presents a method to minimise the computational cost by introducing a machine learning-based variant of Harmony Search. To this end, a series of machine-learning regression methods are tested in order to find the most appropriate that minimises the cost without degrading the quality of the results. The best regression method is then incorporated in the optimisation process and is compared with two well-known ITK image registration methods. The comparison of authors' image registration method with ITK concerns both the quality of the results and the duration of the registration experiments. The comparison is done on a set of random image pairs of various sources (e.g. medical or satellite images), and the encouraging results strongly indicate that authors' method can be used in a variety of image registration applications producing quality results in significantly less time.}, author = {Spanakis, Constantinos and Mathioudakis, Emmanouil and Kampanis, Nikos and Tsiknakis, Manolis and Marias, Kostas}, doi = {10.1049/iet-ipr.2018.5389}, issn = {17519659}, journal = {IET Image Processing}, number = {5}, pages = {843--849}, title = {{Machine-learning regression in evolutionary algorithms and image registration}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {13}, year = {2019} } @article{Sparks2015, abstract = {Purpose: Transrectal ultrasound (TRUS)-guided needle biopsy is the current gold standard for prostate cancer diagnosis. However, up to 40{\%} of prostate cancer lesions appears isoechoic on TRUS. Hence, TRUS-guided biopsy has a high false negative rate for prostate cancer diagnosis. Magnetic resonance imaging (MRI) is better able to distinguish prostate cancer from benign tissue. However, MRI-guided biopsy requires special equipment and training and a longer procedure time. MRI-TRUS fusion, where MRI is acquired preoperatively and then aligned to TRUS, allows for advantages of both modalities to be leveraged during biopsy. MRI-TRUS-guided biopsy increases the yield of cancer positive biopsies. 
In this work, the authors present multiattribute probabilistic prostate elastic registration (MAPPER) to align prostate MRI and TRUS imagery. Methods: MAPPER involves (1) segmenting the prostate on MRI, (2) calculating a multiattribute probabilistic map of prostate location on TRUS, and (3) maximizing overlap between the prostate segmentation on MRI and the multiattribute probabilistic map on TRUS, thereby driving registration of MRI onto TRUS. MAPPER represents a significant advancement over the current state-of-the-art as it requires no user interaction during the biopsy procedure by leveraging texture and spatial information to determine the prostate location on TRUS. Although MAPPER requires manual interaction to segment the prostate on MRI, this step is performed prior to biopsy and will not substantially increase biopsy procedure time. Results: MAPPER was evaluated on 13 patient studies from two independent datasets: Dataset 1 has 6 studies acquired with a side-firing TRUS probe and a 1.5 T pelvic phased-array coil MRI; Dataset 2 has 7 studies acquired with a volumetric end-firing TRUS probe and a 3.0 T endorectal coil MRI. MAPPER has a root-mean-square error (RMSE) for expert-selected fiducials of 3.36±1.10 mm for Dataset 1 and 3.14±0.75 mm for Dataset 2. State-of-the-art MRI-TRUS fusion methods report RMSE of 3.06±2.07 mm. Conclusions: MAPPER aligns MRI and TRUS imagery without manual intervention, ensuring efficient, reproducible registration. MAPPER has a similar RMSE to state-of-the-art methods that require manual intervention.}, author = {Sparks, Rachel and {Nicolas Bloch}, B. and Feleppa, Ernest and Barratt, Dean and Moses, Daniel and Ponsky, Lee and Madabhushi, Anant}, doi = {10.1118/1.4905104}, issn = {00942405}, journal = {Medical Physics}, keywords = {MRI-ultrasound fusion,image registration,image-guided biopsy,prostate cancer,prostate imaging}, number = {3}, pages = {1153--1163}, title = {{Multiattribute probabilistic prostate elastic registration (MAPPER): Application to fusion of ultrasound and magnetic resonance imaging}}, type = {Journal Article}, volume = {42}, year = {2015} } @article{Srinivasan2018, abstract = {Purpose: In this study, a 3D fat-based deformable registration algorithm was developed for registering dynamic contrast-enhanced breast images. Methods: The mutual information similarity measure with free-form deformation motion correction in rapidly enhancing lesions can introduce motion. However, in Dixon-based fat-water separated acquisitions, the nonenhancing fat signal can directly be used to estimate deformable motion, which can be later used to deform the water images. Qualitative comparison of the fat-based registration method to a water-based registration method, and to the unregistered images, was performed by two experienced readers. Quantitative analysis of the registration was evaluated by estimating the mean-squared signal difference on the fat images. Results: Using a scale of 0 (no motion) to 2 ({\textgreater} 4 voxels of motion), the average image quality score of the fat-based registered images was 0.5 ± 0.6, water-based registration was 0.8 ± 0.8, and the unregistered dataset was 1.6 ± 0.6. The mean-squared-signal-difference metric on the fat images was significantly lower for fat-based registered images compared with both water-based registered and unregistered images.
Conclusions: Fat-based registration of breast dynamic contrast-enhanced images is a promising technique for performing deformable motion correction of breast without introducing new motion. Magn Reson Med 79:2408–2414, 2018. {\textcopyright} 2017 International Society for Magnetic Resonance in Medicine.}, author = {Srinivasan, Subashini and Hargreaves, Brian A. and Daniel, Bruce L.}, doi = {10.1002/mrm.26851}, issn = {15222594}, journal = {Magnetic Resonance in Medicine}, keywords = {breast deformable motion,fat-based registration}, number = {4}, pages = {2408--2414}, title = {{Fat-based registration of breast dynamic contrast enhanced water images}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {79}, year = {2018} } @article{Stegmaier2014, abstract = {Automated analysis of multi-dimensional microscopy images has become an integral part of modern research in life science. Most available algorithms that provide sufficient segmentation quality, however, are infeasible for a large amount of data due to their high complexity. In this contribution we present a fast parallelized segmentation method that is especially suited for the extraction of stained nuclei from microscopy images, e.g., of developing zebrafish embryos. The idea is to transform the input image based on gradient and normal directions in the proximity of detected seed points such that it can be handled by straightforward global thresholding like Otsu's method. We evaluate the quality of the obtained segmentation results on a set of real and simulated benchmark images in 2D and 3D and show the algorithm's superior performance compared to other state-of-the-art algorithms. We achieve an up to ten-fold decrease in processing times, allowing us to process large data sets while still providing reasonable segmentation results. {\textcopyright} 2014 Stegmaier et al.}, author = {Stegmaier, Johannes and Otte, Jens C. and Kobitski, Andrei and Bartschat, Andreas and Garcia, Ariel and Nienhaus, G. Ulrich and Str{\"{a}}hle, Uwe and Mikut, Ralf}, doi = {10.1371/journal.pone.0090036}, issn = {19326203}, journal = {PLoS ONE}, number = {2}, pages = {11}, title = {{Fast segmentation of stained nuclei in terabyte-scale, time resolved 3D microscopy image stacks}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {9}, year = {2014} } @inproceedings{Stevensona, abstract = {Recent advances in medical research hypothesize that certain body fat, in addition to having a classical role of energy storage, may also have mechanical function. In particular, we analyzed the infrapatellar fat pad of Hoffa using 3D CT images of the knee at multiple angles to determine how the fat pad changes shape as the knee bends and whether the fat pad provides cushioning in the knee joint. The images were initially processed using a median filter then segmented using a region growing technique to isolate the fat pad from the rest of the knee. Next, rigid registration was performed to align the series of images to match the reference image. Finally, multi-resolution FEM registration was completed between the aligned images. The resulting displacements fields were used to determine the local volume change of the fat pad as the knee bends from extension to flexion through different angles. 
This multi-angle analysis provides a finer description of the intermediate deformations compared to earlier work, where only a pair of images (full extension and flexion) was analyzed.}, author = {Stevenson, Kevin and Schweitzer, Mark and Hamarneh, Ghassan}, booktitle = {Medical Imaging 2006: Physiology, Function, and Structure from Medical Images}, doi = {10.1117/12.654301}, isbn = {0819461865}, issn = {16057422}, pages = {614329}, title = {{Multi-angle deformation analysis of Hoffa's fat pad}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-33745353456{\&}doi=10.1117{\%}2F12.654301{\&}partnerID=40{\&}md5=595f4e12f1f8758a3702a5ddb70cd256}, volume = {6143}, year = {2006} } @article{Stoyanova2016, abstract = {Prostate cancer exhibits intra-tumoral heterogeneity that we hypothesize to be the leading confounding factor contributing to the underperformance of the current pre-treatment clinical-pathological and genomic assessment. These limitations impose an urgent need to develop better computational tools to identify men with low risk of prostate cancer versus others that may be at risk for developing metastatic cancer. The patient stratification will directly translate to patient treatments, wherein decisions regarding active surveillance or intensified therapy are made. Multiparametric MRI (mpMRI) provides the platform to investigate tumor heterogeneity by mapping the individual tumor habitats. We hypothesize that quantitative assessment (radiomics) of these habitats results in distinct combinations of descriptors that reveal regions with different physiologies and phenotypes. Radiogenomics, a discipline connecting tumor morphology described by radiomic and its genome described by the genomic data, has the potential to derive "radio phenotypes" that both correlate to and complement existing validated genomic risk stratification biomarkers. In this article we first describe the radiomic pipeline, tailored for analysis of prostate mpMRI, and in the process we introduce our particular implementations of radiomics modules. We also summarize the efforts in the radiomics field related to prostate cancer diagnosis and assessment of aggressiveness. Finally, we describe our results from radiogenomic analysis, based on mpMRI-Ultrasound (MRI-US) biopsies and discuss the potential of future applications of this technique. The mpMRI radiomics data indicate that the platform would significantly improve the biopsy targeting of prostate habitats through better recognition of indolent versus aggressive disease, thereby facilitating a more personalized approach to prostate cancer management. The expectation to non-invasively identify habitats with high probability of housing aggressive cancers would result in directed biopsies that are more informative and actionable. Conversely, providing evidence for lack of disease would reduce the incidence of non-informative biopsies. In radiotherapy of prostate cancer, dose escalation has been shown to reduce biochemical failure. Dose escalation only to determinate prostate habitats has the potential to improve tumor control with less toxicity than when the entire prostate is dose escalated.}, author = {Stoyanova, Radka and Takhar, Mandeep and Tschudi, Yohann and Ford, John C. and Sol{\'{o}}rzano, Gabriel and Erho, Nicholas and Balagurunathan, Yoganand and Punnen, Sanoj and Davicioni, Elai and Gillies, Robert J. 
and Pollack, Alan}, doi = {10.21037/tcr.2016.06.20}, issn = {22196803}, journal = {Translational Cancer Research}, keywords = {Gene expression,MRI-targeted biopsies,Multiparametric MRI,Prostate cancer,Radiogenomics,Radiomics}, number = {4}, pages = {432--447}, pmid = {29188191}, title = {{Prostate cancer radiomics and the promise of radiogenomics}}, type = {Journal Article}, volume = {5}, year = {2016} } @incollection{Stredney2003, abstract = {We report on efforts to provide high-level intuitive tools that exploit commodity-based computing to facilitate real-time and distributed interactions with volumetric data. These efforts include an open source volume-rendering library, a portable volume visualization application framework, and parallel volume-rendering exploiting commodity-based hardware. We present our design and implementations, as well as examples of some of the various groups currently utilizing these tools, and discuss the tradeoffs of our developments versus existing techniques.}, author = {Stredney, D. and Bryan, J. and Sessanna, D. and Kerwin, T.}, booktitle = {Studies in Health Technology and Informatics}, doi = {10.3233/978-1-60750-938-7-329}, isbn = {1586033204}, issn = {18798365}, pages = {329--335}, title = {{Facilitating real-time volume interaction}}, type = {Serial}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-6344254574{\&}doi=10.3233{\%}2F978-1-60750-938-7-329{\&}partnerID=40{\&}md5=a00d000d033135abebb47756881fd5ff}, volume = {94}, year = {2003} } @article{Streicher2012, abstract = {Object: The temperature dependence of the proton resonance frequency (PRF) is often used in MR thermometry. However, this method is prone to even very small changes in local magnetic field strength. Here, we report on the effects of susceptibility changes of surrounding air on the magnetic field inside an object and their inferred effect on the measured MR temperature. Materials and methods: MR phase thermometry was performed on spherical agar phantoms enclosed in cylindrical containers at 7T. The air susceptibility inside the cylindrical container was changed by both heating the air and changing the gas composition. Results: Changing the temperature of surrounding air from 23 to 69{\textdegree}C caused an apparent MR temperature error of 2 K. When ambient air was displaced by 100{\%} oxygen, the MR temperature error increased to 40 K. The magnetic field shift and therefore error in inferred MR temperature scales linearly with volume susceptibility change and has a strong and nontrivial dependence on the experimental configuration. Conclusion: Air susceptibility changes associated with oxygen concentration changes greatly affect PRF MR thermometry measurements. Air temperature changes can also affect these measurements, but to a smaller degree. For uncalibrated MR thermometry, air susceptibility changes may be a significant source of error. {\textcopyright} ESMRMB 2011.}, author = {Streicher, Markus N.
and Sch{\"{a}}fer, Andreas and Reimer, Enrico and Dhital, Bibek and Trampel, Robert and Ivanov, Dimo and Turner, Robert}, doi = {10.1007/s10334-011-0249-8}, issn = {09685243}, journal = {Magnetic Resonance Materials in Physics, Biology and Medicine}, keywords = {Air,PRF,PRFS-based MR thermometry,Susceptibility,Temperature}, number = {1}, pages = {41--47}, title = {{Effects of air susceptibility on proton resonance frequency MR thermometry}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {25}, year = {2012} } @article{Sultana2019, abstract = {Purpose: We propose a segmentation methodology for brainstem cranial nerves using statistical shape model (SSM)-based deformable 3D contours from T2 MR images. Methods: We create shape models for ten pairs of cranial nerves. High-resolution T2 MR images are segmented for nerve centerline using a 1-Simplex discrete deformable 3D contour model. These segmented centerlines comprise training datasets for the shape model. Point correspondence for the training dataset is performed using an entropy-based energy minimization framework applied to particles located on the centerline curve. The shape information is incorporated into the 1-Simplex model by introducing a shape-based internal force, making the deformation stable against low resolution and image artifacts. Results: The proposed method is validated through extensive experiments using both synthetic and patient MRI data. The robustness and stability of the proposed method are experimented using synthetic datasets. SSMs are constructed independently for ten pairs (CNIII–CNXII) of brainstem cranial nerves using ten non-pathological image datasets of the brainstem. The constructed ten SSMs are assessed in terms of compactness, specificity and generality. In order to quantify the error distances between segmented results and ground truths, two metrics are used: mean absolute shape distance (MASD) and Hausdorff distance (HD). MASD error using the proposed shape model is 0.19 ± 0.13 (mean ± std. deviation) mm and HD is 0.21 mm which are sub-voxel accuracy given the input image resolution. Conclusion: This paper described a probabilistic digital atlas of the ten brainstem-attached cranial nerve pairs by incorporating a statistical shape model with the 1-Simplex deformable contour. The integration of shape information as a priori knowledge results in robust and accurate centerline segmentations from even low-resolution MRI data, which is essential in neurosurgical planning and simulations for accurate and robust 3D patient-specific models of critical tissues including cranial nerves.}, author = {Sultana, Sharmin and Agrawal, Praful and Elhabian, Shireen and Whitaker, Ross and Blatt, Jason E. and Gilles, Benjamin and Cetas, Justin and Rashid, Tanweer and Audette, Michel A.}, doi = {10.1007/s11548-019-02014-z}, issn = {18616429}, journal = {International Journal of Computer Assisted Radiology and Surgery}, keywords = {3D contour models,Brainstem,Centerline,Cranial nerves,Deformable models,MRI,Segmentation,Statistical shape models,Surgical guidance}, number = {11}, pages = {1955--1967}, title = {{Medial axis segmentation of cranial nerves using shape statistics-aware discrete deformable models}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {14}, year = {2019} } @article{Sun2013, abstract = {Purpose: The temporal mismatch between PET and standard helical CT (HCT) causes substantial respiratory artifacts in PET reconstructed images when using HCT as the attenuation map. 
Previously we developed an interpolated average CT (IACT) method for attenuation correction (AC) and demonstrated its merits in simulations. In this study we aim to apply IACT in patients with thoracic lesions using an active breathing controller (ABC). Methods: Under local ethics approval, we recruited 15 patients with a total of 18 lesions in different thoracic regions: left upper lobe (2), right upper lobe (4), right hilum (3), right lower lobe (3), left hilum (2), and esophagus (4). All patients underwent whole body PET scans 1 h after 300-480 MBq 18F-FDG injection, depending on the patients' weight. The PET sinograms were reconstructed with AC using: (i) standard HCT [120 kV, smart mA (30-150 mA), 0.984:1 pitch] and (ii) IACT obtained from end-inspiration and end-expiration breath-hold HCTs (120 kV, 10 mA, 0.984:1 pitch) aided by ABC. IACT was obtained by averaging the intensity of two extreme phases and the interpolated phases between them, where the nonlinear interpolation was obtained by B-spline registration and with an empirical sinusoidal function. The SUVmax, SUVmean, and the differences of centroid-of-lesion (d) between PET and different CT schemes were measured for each lesion. Results: From visual inspection, the respiratory artifacts and blurring were generally reduced in the thoracic region for PET$_{IACT}$. Matching between CT and PET improved for PET$_{IACT}$, with an average decrease of d of 1.34 ± 1.79 mm as compared to PET$_{HCT}$. The SUVmax and SUVmean were consistently higher for PET$_{IACT}$ versus PET$_{HCT}$ for all lesions, with (30.95 ± 18.63){\%} and (22.39 ± 15.91){\%} average increase, respectively. Conclusions: IACT-ABC reduces respiratory artifacts and PET/CT misregistration and enhances lesion quantitation. This technique is a robust and low dose AC protocol for clinical oncology applications, especially in the thoracic region. {\textcopyright} 2013 American Association of Physicists in Medicine.}, author = {Sun, Tao and Wu, Tung Hsin and Wang, Shyh Jen and Yang, Bang Hung and Wu, Nien Yun and Mok, Greta S.P.}, doi = {10.1118/1.4820976}, issn = {00942405}, journal = {Medical Physics}, keywords = {Active breathing controller,Attenuation correction,PET/CT,Respiratory artifacts}, number = {10}, pages = {9}, title = {{Low dose interpolated average CT for thoracic PET/CT attenuation correction using an active breathing controller}}, type = {Journal Article}, volume = {40}, year = {2013} } @article{Tasserie2020, abstract = {Non-human primate functional MRI (fMRI) is a growing field in neuroscience. However, there is no standardized method for monkey fMRI data analysis, specifically for data preprocessing. The preprocessing of monkey fMRI data is challenged by several technical and experimental specificities of monkey research such as artifacts related to body movements or to intracranial leads. Here we propose to address these challenges by developing a new versatile pipeline for macaque fMRI preprocessing. We developed a Python module, Pypreclin, to process raw images using state-of-the-art algorithms embedded in a fully automatic pipeline. To evaluate its robustness, we applied Pypreclin to fMRI data acquired at 3T in both awake and anesthetized macaques, with or without iron oxide contrast agent, using single loop or multichannel phased-array coils, combined or not with intracranial implanted electrodes. We performed both resting-state and auditory evoked fMRI and compared the results of Pypreclin to a previously employed preprocessing pipeline.
Pypreclin successfully achieved the registration of the fMRI data to the macaque brain template in all the experimental conditions. Moreover, Pypreclin enables more accurate locations of auditory evoked activations in relation to the gray matter at corrected level in the awake fMRI condition. Finally, using the Primate neuroimaging Data-Exchange open access platform, we could further validate Pypreclin for monkey fMRI images that were acquired at ultra-high fields, from other institutions and using different protocols. Pypreclin is a validated preprocessing tool that adapts to diverse experimental and technical situations of monkey fMRI. Pypreclin code is available on open source data sharing platform.}, author = {Tasserie, Jordy and Grigis, Antoine and Uhrig, Lynn and Dupont, Morgan and Amadon, Alexis and Jarraya, B{\'{e}}chir}, doi = {10.1016/j.neuroimage.2019.116353}, issn = {10959572}, journal = {NeuroImage}, keywords = {Automatic,Macaque,Motion artifact,Movement artifact,Non-human primate,Preprocessing,fMRI}, pages = {15}, title = {{Pypreclin: An automatic pipeline for macaque functional MRI preprocessing}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {207}, year = {2020} } @article{Taylor2019, abstract = {Despite general acceptance that the retinotopic organisation of human V4 (hV4) takes the form of a single, uninterrupted ventral hemifield, measured retinotopic maps of this visual area are often incomplete. Here, we test hypotheses that artefact from draining veins close to hV4 cause inverted BOLD responses that may serve to obscure a portion of the lower visual quarterfield-including the lower vertical meridian-in some hemispheres. We further test whether correcting such responses can restore the 'missing' retinotopic coverage in hV4. Subjects (N = 10) viewed bowtie, ring, drifting bar and full field flash stimuli. Functional EPIs were acquired over approximately 1.5h and analysed to reveal retinotopic maps of early visual cortex, including hV4. Normalised mean maps (which show the average EPI signal amplitude) were constructed by voxel-wise averaging of the EPI time course and used to locate venous eclipses, which can be identified by a decrease in the EPI signal caused by deoxygenated blood. Inverted responses are shown to cluster in these regions and correcting these responses improves maps of hV4 in some hemispheres, including restoring a complete hemifield map in one. A leftwards bias was found whereby 6/10 left hemisphere hV4 maps were incomplete, while this was the case in only 1/10 right hemisphere maps. Incomplete hV4 maps did not correspond with venous artefact in every instance, with incomplete maps being present in the absence of a venous eclipse and complete maps coexisting with a proximate venous eclipse. We also show that mean maps of upper surfaces (near the boundary between cortical grey matter and CSF) provide highly detailed maps of veins on the cortical surface. Results suggest that venous eclipses and inverted voxels can explain some incomplete hV4 maps, but cannot explain the remainder nor the leftwards bias in hV4 coverage reported here.}, author = {{Boyd Taylor}, H. G. and Puckett, A. M. and Isherwood, Z. J. and Schira, M. 
M.}, doi = {10.1371/journal.pone.0204388}, issn = {19326203}, journal = {PLoS ONE}, number = {6}, pages = {32}, title = {{Vascular effects on the BOLD response and the retinotopic mapping of hV4}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {14}, year = {2019} } @article{Thomas2016, abstract = {Positron emission tomography (PET) images are degraded by a phenomenon known as the partial volume effect (PVE). Approaches have been developed to reduce PVEs, typically through the utilisation of structural information provided by other imaging modalities such as MRI or CT. These methods, known as partial volume correction (PVC) techniques, reduce PVEs by compensating for the effects of the scanner resolution, thereby improving the quantitative accuracy. The PETPVC toolbox described in this paper comprises a suite of methods, both classic and more recent approaches, for the purposes of applying PVC to PET data. Eight core PVC techniques are available. These core methods can be combined to create a total of 22 different PVC techniques. Simulated brain PET data are used to demonstrate the utility of toolbox in idealised conditions, the effects of applying PVC with mismatched point-spread function (PSF) estimates and the potential of novel hybrid PVC methods to improve the quantification of lesions. All anatomy-based PVC techniques achieve complete recovery of the PET signal in cortical grey matter (GM) when performed in idealised conditions. Applying deconvolution-based approaches results in incomplete recovery due to premature termination of the iterative process. PVC techniques are sensitive to PSF mismatch, causing a bias of up to 16.7{\%} in GM recovery when over-estimating the PSF by 3 mm. The recovery of both GM and a simulated lesion was improved by combining two PVC techniques together. The PETPVC toolbox has been written in C++, supports Windows, Mac and Linux operating systems, is open-source and publicly available.}, author = {Thomas, Benjamin A. and Cuplov, Vesna and Bousse, Alexandre and Mendes, Adriana and Thielemans, Kris and Hutton, Brian F. and Erlandsson, Kjell}, doi = {10.1088/0031-9155/61/22/7975}, issn = {13616560}, journal = {Physics in Medicine and Biology}, keywords = {PET/MR,partial volume correction,partial volume effects,positron emission tomography}, number = {22}, pages = {7975--7993}, title = {{PETPVC: A toolbox for performing partial volume correction techniques in positron emission tomography}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {61}, year = {2016} } @article{Tixier2019, abstract = {Background: Glioblastoma (GBM) is the most common malignant central nervous system tumor, and MGMT promoter hypermethylation in this tumor has been shown to be associated with better prognosis. We evaluated the capacity of radiomics features to add complementary information to MGMT status, to improve the ability to predict prognosis. Methods: 159 patients with untreated GBM were included in this study and divided into training and independent test sets. 286 radiomics features were extracted from the magnetic resonance images acquired prior to any treatments. A least absolute shrinkage selection operator (LASSO) selection followed by Kaplan-Meier analysis was used to determine the prognostic value of radiomics features to predict overall survival (OS). The combination of MGMT status with radiomics was also investigated and all results were validated on the independent test set. 
Results: LASSO analysis identified 8 out of the 286 radiomic features to be relevant, which were then used for determining association to OS. One feature (edge descriptor) remained significant on the external validation cohort after multiple testing (p=0.04) and the combination with MGMT identified a group of patients with the best prognosis with a survival probability of 0.61 after 43 months (p=0.0005). Conclusion: Our results suggest that combining radiomics with MGMT is more accurate in stratifying patients into groups of different survival risks when compared with using these predictors in isolation. We identified two subgroups within patients who have methylated MGMT: one with a similar survival to unmethylated MGMT patients and the other with a significantly longer OS.}, author = {Tixier, Florent and Um, Hyemin and Bermudez, Dalton and Iyer, Aditi and Apte, Aditya and Graham, Maya S. and Nevel, Kathryn S. and Deasy, Joseph O. and Young, Robert J. and Veeraraghavan, Harini}, doi = {10.18632/oncotarget.26578}, issn = {19492553}, journal = {Oncotarget}, keywords = {Glioblastoma,MGMT,Magnetic resonance imaging,Radiomics,Survival analysis}, number = {6}, pages = {660--672}, title = {{Preoperative MRI-radiomics features improve prediction of survival in glioblastoma patients over MGMT methylation status alone}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85060153455{\&}doi=10.18632{\%}2Foncotarget.26578{\&}partnerID=40{\&}md5=fc3514c9fba21e53a37d3020503c236d}, volume = {10}, year = {2019} } @article{Tong2016, abstract = {Purpose: To elucidate the molecular pathogenesis of age-related macular degeneration (AMD) and interpretation of fundus autofluorescence imaging, the authors identified spectral autofluorescence characteristics of drusen and retinal pigment epithelium (RPE) in donor eyes with AMD. Methods: Macular RPE/Bruch membrane flat mounts were prepared from 5 donor eyes with AMD. In 12 locations (1-3 per eye), hyperspectral autofluorescence images in 10-nm wavelength steps were acquired at 2 excitation wavelengths ($\lambda_{ex}$ 436, 480 nm). A nonnegative tensor factorization algorithm was used to recover 5 abundant emission spectra and their corresponding spatial localizations. Results: At $\lambda_{ex}$ 436 nm, the authors consistently localized a novel spectrum (SDr) with a peak emission near 510 nm in drusen and sub-RPE deposits. Abundant emission spectra seen previously (S0 in Bruch membrane and S1, S2, and S3 in RPE lipofuscin/melanolipofuscin, respectively) also appeared in AMD eyes, with the same shapes and peak wavelengths as in normal tissue. Lipofuscin/melanolipofuscin spectra localizations in AMD eyes varied widely in their overlap with drusen, ranging from none to complete. Conclusion: An emission spectrum peaking at $\sim$510 nm ($\lambda_{ex}$ 436 nm) appears to be sensitive and specific for drusen and sub-RPE deposits. One or more abundant spectra from RPE organelles exhibit characteristic relationships with drusen.}, author = {Tong, Yuehong and Ami, Tal Ben and Hong, Sungmin and Heintzmann, Rainer and Gerig, Guido and Ablonczy, Zsolt and Curcio, Christine A. and Ach, Thomas and Smith, R.
Theodore}, doi = {10.1097/IAE.0000000000001325}, issn = {15392864}, journal = {Retina}, keywords = {Age-related macular degeneration,Autofluorescence,Bruch membrane,Drusen,Fluorophores,Hyperspectral imaging,Lipofuscin,Nonnegative tensor factorization,Retinal pigment epithelium,Sub-retinal pigment epithelium deposits}, number = {12}, pages = {S127--S136}, title = {{Hyperspectral autofluorescence imaging of drusen and retinal pigment epithelium in donor eyes with age-related macular degeneration}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {36}, year = {2016} } @article{Tourbier2015, abstract = {Although fetal anatomy can be adequately viewed in new multi-slice MR images, many critical limitations remain for quantitative data analysis. To this end, several research groups have recently developed advanced image processing methods, often denoted by super-resolution (SR) techniques, to reconstruct from a set of clinical low-resolution (LR) images, a high-resolution (HR) motion-free volume. It is usually modeled as an inverse problem where the regularization term plays a central role in the reconstruction quality. Literature has been quite attracted by Total Variation energies because of their ability in edge preserving but only standard explicit steepest gradient techniques have been applied for optimization. In a preliminary work, it has been shown that novel fast convex optimization techniques could be successfully applied to design an efficient Total Variation optimization algorithm for the super-resolution problem. In this work, two major contributions are presented. Firstly, we will briefly review the Bayesian and Variational dual formulations of current state-of-the-art methods dedicated to fetal MRI reconstruction. Secondly, we present an extensive quantitative evaluation of our SR algorithm previously introduced on both simulated fetal and real clinical data (with both normal and pathological subjects). Specifically, we study the robustness of regularization terms in front of residual registration errors and we also present a novel strategy for automatically select the weight of the regularization as regards the data fidelity term. Our results show that our TV implementation is highly robust in front of motion artifacts and that it offers the best trade-off between speed and accuracy for fetal MRI recovery as in comparison with state-of-the art methods.}, author = {Tourbier, S{\'{e}}bastien and Bresson, Xavier and Hagmann, Patric and Thiran, Jean Philippe and Meuli, Reto and Cuadra, Meritxell Bach}, doi = {10.1016/j.neuroimage.2015.06.018}, issn = {10959572}, journal = {NeuroImage}, keywords = {Fast convex optimization,Fetal brain MRI,Super-resolution,Total variation}, pages = {584--597}, pmid = {26072252}, title = {{An efficient total variation algorithm for super-resolution in fetal brain MRI with adaptive regularization}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84940440329{\&}doi=10.1016{\%}2Fj.neuroimage.2015.06.018{\&}partnerID=40{\&}md5=58941cfc11c891f903f91a0bf8aa005c}, volume = {118}, year = {2015} } @article{Tourbier2017, abstract = {Most fetal brain MRI reconstruction algorithms rely only on brain tissue-relevant voxels of low-resolution (LR) images to enhance the quality of inter-slice motion correction and image reconstruction. Consequently the fetal brain needs to be localized and extracted as a first step, which is usually a laborious and time consuming manual or semi-automatic task. 
We have proposed in this work to use age-matched template images as prior knowledge to automatize brain localization and extraction. This has been achieved through a novel automatic brain localization and extraction method based on robust template-to-slice block matching and deformable slice-to-template registration. Our template-based approach has also enabled the reconstruction of fetal brain images in standard radiological anatomical planes in a common coordinate space. We have integrated this approach into our new reconstruction pipeline that involves intensity normalization, inter-slice motion correction, and super-resolution (SR) reconstruction. To this end we have adopted a novel approach based on projection of every slice of the LR brain masks into the template space using a fusion strategy. This has enabled the refinement of brain masks in the LR images at each motion correction iteration. The overall brain localization and extraction algorithm has shown to produce brain masks that are very close to manually drawn brain masks, showing an average Dice overlap measure of 94.5{\%}. We have also demonstrated that adopting a slice-to-template registration and propagation of the brain mask slice-by-slice leads to a significant improvement in brain extraction performance compared to global rigid brain extraction and consequently in the quality of the final reconstructed images. Ratings performed by two expert observers show that the proposed pipeline can achieve similar reconstruction quality to reference reconstruction based on manual slice-by-slice brain extraction. The proposed brain mask refinement and reconstruction method has shown to provide promising results in automatic fetal brain MRI segmentation and volumetry in 26 fetuses with gestational age range of 23 to 38 weeks.}, author = {Tourbier, S{\'{e}}bastien and Velasco-Annis, Clemente and Taimouri, Vahid and Hagmann, Patric and Meuli, Reto and Warfield, Simon K. and {Bach Cuadra}, Meritxell and Gholipour, Ali}, doi = {10.1016/j.neuroimage.2017.04.004}, issn = {10959572}, journal = {NeuroImage}, keywords = {B-Spline deformation,Block matching,Brain localization,Fetal brain MRI,Slice-by-slice brain extraction,Slice-to-template registration,Super-resolution reconstruction}, pages = {460--472}, title = {{Automated template-based brain localization and extraction for fetal brain MRI reconstruction}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {155}, year = {2017} } @article{Tournier2019, abstract = {MRtrix3 is an open-source, cross-platform software package for medical image processing, analysis and visualisation, with a particular emphasis on the investigation of the brain using diffusion MRI. It is implemented using a fast, modular and flexible general-purpose code framework for image data access and manipulation, enabling efficient development of new applications, whilst retaining high computational performance and a consistent command-line interface between applications. In this article, we provide a high-level overview of the features of the MRtrix3 framework and general-purpose image processing applications provided with the software.}, author = {Tournier, J. 
Donald and Smith, Robert and Raffelt, David and Tabbara, Rami and Dhollander, Thijs and Pietsch, Maximilian and Christiaens, Daan and Jeurissen, Ben and Yeh, Chun Hung and Connelly, Alan}, doi = {10.1016/j.neuroimage.2019.116137}, issn = {10959572}, journal = {NeuroImage}, keywords = {Image,MRI,Processing,Software,Visualisation}, pages = {17}, title = {{MRtrix3: A fast, flexible and open software framework for medical image processing and visualisation}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {202}, year = {2019} } @article{Townsend2017, abstract = {The ability to perform non-destructive areal surface analysis of the internal surfaces of additively manufactured (AM) components would be advantageous during product development, process control and product acceptance. Currently, industrial X-ray computed tomography (XCT) is the only practical method for imaging the internal surfaces of AM components. A viable method of extracting useable areal surface texture data from XCT scans has now been developed and this paper reports on three measurement and data processing factors affecting the value of areal parameters per ISO 25178-2 generated from XCT volume data using this novel technique.}, author = {Townsend, Andrew and Pagani, Luca and Blunt, Liam and Scott, Paul J. and Jiang, Xiangqian}, doi = {10.1016/j.cirp.2017.04.074}, issn = {17260604}, journal = {CIRP Annals - Manufacturing Technology}, keywords = {Additive manufacturing,Metrology,X-ray}, number = {1}, pages = {547--550}, title = {{Factors affecting the accuracy of areal surface texture data extraction from X-ray CT}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {66}, year = {2017} } @inbook{Tung2009, abstract = {Three dimensional (3D) image processing and visualisation methods were applied in craniomaxillofacial surgery for preoperative surgical procedures and surgery planning. Each patient differed in their formation of cranium and facial bones, hence requiring customised reconstruction to identify the defect area and to plan procedural steps. This paper explores the processing and visualisation of patients' data into 3D form, constructed from flat two dimensional (2D) Computed Tomography (CT) images. Depth perception has been useful to identify certain regions of interest (ROI) elusive in 2D CT slices. We have noted that the 3D models have exemplified the depth perception with the provision of additional cues of perspective, motion, texture and stereopsis. This has led to the improvement of treatment design and implementation for patients in this study.
{\textcopyright} 2009 Springer-Verlag.}, address = {Berlin}, author = {Tung, Tan Su and Rathinam, Alwin Kumar and Kumar, Yuwaraj and Rahman, Zainal Ariff Abdul}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, doi = {10.1007/978-3-642-05036-7_15}, editor = {Zaman, H B and Robinson, P and Petrou, M and Olivier, P and Schroder, H and Shih, T K}, isbn = {3642050352}, issn = {03029743}, keywords = {3D modelling,3D stereo visualisation,Medical image processing,VRC-UM,Virtual reality}, pages = {148--155}, publisher = {Springer-Verlag Berlin}, series = {Lecture Notes in Computer Science}, title = {{Additional cues derived from three dimensional image processing to aid customised reconstruction for medical applications}}, type = {Book Section}, url = {{\%}3CGo to}, volume = {5857 LNCS}, year = {2009} } @incollection{Tward2019, abstract = {The neuroimaging field is moving toward micron scale and molecular features in digital pathology and animal models. These require mapping to common coordinates for annotation, statistical analysis, and collaboration. An important example, the BRAIN Initiative Cell Census Network, is generating 3D brain cell atlases in mouse, and ultimately primate and human. We aim to establish RNAseq profiles from single neurons and nuclei across the mouse brain, mapped to the Allen Common Coordinate Framework (CCF). Imaging includes approximately 500 tape-transfer cut 20 μm thick Nissl-stained slices per brain. In key areas, 100 μm thick slices with 0.5–2 mm diameter circular regions punched out for snRNAseq are imaged. These contain abnormalities including contrast changes and missing tissue, two challenges not jointly addressed in diffeomorphic image registration. Existing methods for mapping 3D images to histology require manual steps unacceptable for high throughput, or are sensitive to damaged tissue. Our approach jointly: registers 3D CCF to 2D slices, models contrast changes, estimates abnormality locations. Our registration uses 4 unknown deformations: 3D diffeomorphism, 3D affine, 2D diffeomorphism per-slice, 2D rigid per-slice. Contrast changes are modeled using unknown cubic polynomials per-slice. Abnormalities are estimated using Gaussian mixture modeling. The Expectation Maximization algorithm is used iteratively, with E step: compute posterior probabilities of abnormality, M step: registration and intensity transformation minimizing posterior-weighted sum-of-square-error. We produce per-slice anatomical labels using Allen Institute's ontology, and publicly distribute results online, with several typical and abnormal slices shown here.
This work has further applications in digital pathology, and 3D brain mapping with stroke, multiple sclerosis, or other abnormalities.}, author = {Tward, Daniel and Li, Xu and Huo, Bingxing and Lee, Brian and Mitra, Partha and Miller, Michael}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, doi = {10.1007/978-3-030-33226-6_18}, isbn = {9783030332259}, issn = {16113349}, keywords = {Histology,Image registration,Neuroimaging}, pages = {162--173}, title = {{3D Mapping of Serial Histology Sections with Anomalies Using a Novel Robust Deformable Registration Algorithm}}, type = {Serial}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85075564799{\&}doi=10.1007{\%}2F978-3-030-33226-6{\_}18{\&}partnerID=40{\&}md5=09c3ae4bdd0efa79e25ca5a9e196c9eb}, volume = {11846 LNCS}, year = {2019} } @article{Um2019, abstract = {Recent advances in radiomics have enhanced the value of medical imaging in various aspects of clinical practice, but a crucial component that remains to be investigated further is the robustness of quantitative features to imaging variations and across multiple institutions. In the case of MRI, signal intensity values vary according to the acquisition parameters used, yet no consensus exists on which preprocessing techniques are favorable in reducing scanner-dependent variability of image-based features. Hence, the purpose of this study was to assess the impact of common image preprocessing methods on the scanner dependence of MRI radiomic features in multi-institutional glioblastoma multiforme (GBM) datasets. Two independent GBM cohorts were analyzed: 50 cases from the TCGA-GBM dataset and 111 cases acquired in our institution, and each case consisted of 3 MRI sequences viz. FLAIR, T1-weighted, and T1-weighted post-contrast. Five image preprocessing techniques were examined: 8-bit global rescaling, 8-bit local rescaling, bias field correction, histogram standardization, and isotropic resampling. A total of 420 features divided into eight categories representing texture, shape, edge, and intensity histogram were extracted. Two distinct imaging parameters were considered: scanner manufacturer and scanner magnetic field strength. Wilcoxon tests identified features robust to the considered acquisition parameters under the selected image preprocessing techniques. A machine learning-based strategy was implemented to measure the covariate shift between the analyzed datasets using features computed using the aforementioned preprocessing methods. Finally, radiomic scores (rad-scores) were constructed by identifying features relevant to patients' overall survival after eliminating those impacted by scanner variability. These were then evaluated for their prognostic significance through Kaplan-Meier and Cox hazards regression analyses. Our results demonstrate that overall, histogram standardization contributes the most in reducing radiomic feature variability as it is the technique to reduce the covariate shift for three feature categories and successfully discriminate patients into groups of different survival risks.}, author = {Um, Hyemin and Tixier, Florent and Bermudez, Dalton and Deasy, Joseph O. and Young, Robert J. 
and Veeraraghavan, Harini}, doi = {10.1088/1361-6560/ab2f44}, issn = {13616560}, journal = {Physics in Medicine and Biology}, keywords = {covariate shift,glioblastoma multiforme (GBM),image preprocessing,magnetic resonance imaging (MRI),multi-institution,radiomics,scanner variations}, number = {16}, pages = {12}, title = {{Impact of image preprocessing on the scanner dependence of multi-parametric MRI radiomic features and covariate shift in multi-institutional glioblastoma datasets}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {64}, year = {2019} } @article{Urish2013, abstract = {Objective: Although conventional radiography is used to assess osteoarthritis in a clinical setting, it has limitations, including an inability to stage early cartilage degeneration. There is a growing interest in using quantitative magnetic resonance imaging to identify degenerative changes in articular cartilage, including the large multicentered study, the Osteoarthritis Initiative (OAI). There is a demand for suitable image registration and segmentation software to complete this analysis. The objective of this study was to develop and validate the open source software, ImageK, that registers 3 T MRI T2 mapping and double echo steady state (DESS) knee MRI sequences acquired in the OAI protocol. Methods: A C++ library, the insight toolkit, was used to develop open source software to register DESS and T2 mapping image MRI sequences using Mattes's Multimodality Mutual information metric. Results: Registration was assessed using three separate methods. A checkerboard layout demonstrated acceptable visual alignment. Fiducial markers placed in cadaveric knees measured a registration error of 0.85 voxels. Measuring the local variation in Mattes's Mutual Information metric in the local area of the registered solution showed precision within 1 pixel. In this group, the registered solution required a transform of 56 voxels in translation and 1 degree of rotation. Conclusion: The software we have developed, ImageK, provides free, open source image analysis software that registers DESS and T2 mapping sequences of knee articular cartilage within 1 voxel accuracy. This image registration software facilitates quantitative MRI analyses of knee articular cartilage. {\textcopyright} The Author(s) 2013.}, author = {Urish, Kenneth L. and Williams, Ashley A. and Durkin, John R. and Chu, Constance R.}, doi = {10.1177/1947603512451745}, issn = {19476035}, journal = {Cartilage}, keywords = {DESS,MRI,T2,cartilage,registration}, number = {1}, pages = {20--27}, title = {{Registration of Magnetic Resonance Image Series for Knee Articular Cartilage Analysis: Data from the Osteoarthritis Initiative}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {4}, year = {2013} } @article{Uthoff2019, abstract = {Background: This study explores whether objective, quantitative radiomic biomarkers derived from magnetic resonance (MR), positron emission tomography (PET), and computed tomography (CT) may be useful in reliably distinguishing malignant peripheral nerve sheath tumors (MPNST) from benign plexiform neurofibromas (PN). Methods: A registration and segmentation pipeline was established using a cohort of NF1 patients with histopathological diagnosis of PN or MPNST, and medical imaging of the PN including MR and PET-CT. The corrected MR datasets were registered to the corresponding PET-CT via landmark-based registration. 
PET standard-uptake value (SUV) thresholds were used to guide segmentation of volumes of interest: MPNST-associated PET-hot regions (SUV ≥ 3.5) and PN-associated PET-elevated regions (2.0 {\textless} SUV {\textless} 3.5). Quantitative imaging features were extracted from the MR, PET, and CT data and compared for statistical differences. Intensity histogram features included mean, median, maximum, variance, full width at half maximum, entropy, kurtosis, and skewness, while image texture was quantified using Laws' texture energy measures, grey-level co-occurrence matrices, and neighborhood grey-tone difference matrices. Results: For each of the 20 NF1 subjects, a total of 320 features were extracted from the image data. Feature reduction and statistical testing identified 9 independent radiomic biomarkers from the MR data (4 intensity and 5 texture) and 4 from the PET data (2 intensity and 2 texture) that were different between the PET-hot and PET-elevated volumes of interest. Conclusions: Our data suggest that imaging features can be used to distinguish malignancy in NF1-related tumors, which could improve MPNST risk assessment and positively impact clinical management of NF1 patients.}, author = {Uthoff, J. and {De Stefano}, F. A. and Panzer, K. and Darbro, B. W. and Sato, T. S. and Khanna, R. and Quelle, D. E. and Meyerholz, D. K. and Weimer, J. and Sieren, J. C.}, doi = {10.1016/j.neurad.2018.05.006}, issn = {17730406}, journal = {Journal of Neuroradiology}, keywords = {Magnetic resonance imaging,Malignant peripheral nerve sheath tumor,Plexiform neurofibroma,Positron emission tomography,Quantitative feature extraction}, number = {3}, pages = {179--185}, title = {{Radiomic biomarkers informative of cancerous transformation in neurofibromatosis-1 plexiform tumors}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {46}, year = {2019} } @article{Vanquin2019, abstract = {Purpose: Magnetic resonance imaging (MRI) plays an increasing role in radiotherapy dose planning. Indeed, MRI offers superior soft tissue contrast compared to computerized tomography (CT) and therefore could provide a better delineation of target volumes and organs at risk than CT for radiotherapy. Furthermore, an MRI-only radiotherapy workflow would suppress registration errors inherent to the registration of MRI with CT. However, the estimation of the electronic density of tissues using MRI images is still a challenging issue. The purpose of this work was to design and evaluate a pseudo-CT generation method for prostate cancer treatments. Materials and methods: A pseudo-CT was generated for ten prostate cancer patients using an elastic deformation based method. For each patient, dose delivered to the patient was calculated using both the planning CT and the pseudo-CT. Dose differences between CT and pseudo-CT were investigated. Results: Mean dose relative difference in the planning target volume is 0.9{\%} on average and ranges from 0.1{\%} to 1.7{\%}. In organs at risk, this value is 1.8{\%}, 0.8{\%}, 0.8{\%} and 1{\%} on average in the rectum, the right and left femoral heads, and the bladder, respectively. Conclusion: The dose calculated using the pseudo-CT is very close to the dose calculated using the CT for both organs at risk and PTV. These results confirm that pseudo-CT images generated using the proposed method could be used to calculate radiotherapy treatment doses on MRI images.}, author = {Vanquin, L. and Boydev, C. and Korhonen, J. and Rault, E. and Crop, F. and Lacornerie, T. and Wagner, A. and Laffarguette, J.
and Pasquier, D. and Reynaert, N.}, doi = {10.1016/j.canrad.2018.09.005}, issn = {17696658}, journal = {Cancer/Radiotherapie}, keywords = {Elastic registration,MRI,Pseudo-CT (Pseudo-Computerized Tomography),Radiotherapy}, number = {4}, pages = {281--289}, title = {{Radiotherapy treatment planning of prostate cancer using magnetic resonance imaging}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {23}, year = {2019} } @inbook{Veress2014, abstract = {In 1961, scientists from biology, geology, and material sciences convened a small meeting at a mountaintop retreat in Germany. Their purpose was to discuss an obscure problem common to all of their research: how to quantify the morphological features of arbitrary-shaped 3-D objects based on their 2-D appearance on cut surfaces. To describe this topic, they selected the term stereology, from the Greek stereos for “the study of objects in three dimensions,” and a year later formed the International Society for Stereology (ISS). After more than a decade of debating the relative merits of different stereology approaches in their respective fields of study, the ISS categorically rejected older methods based on assumption-and model-based Euclidean geometry in favor of innovative new methods designed to avoid methodological bias. Today unbiased or design-based stereology is the sine qua non for the reliable quantification of structure in many fields of biological and biomedical research. This chapter examines key concepts of design-based stereology using systematic random sampling and objective geometry probes in comparison with biased methods. To exemplify an application to neurosciences (neurostereology), we review the results and discussion from our recently published study to quantify differences in postmortem brains from autistic and normal children. Continuing advances in computer-assisted microscopy, image segmentation, and whole slide scanning support the growing trend toward higher throughput and greater efficiency in computerized stereology, with the promise of fully automatic and accurate stereoanalysis of tissue sections and in vivo images in the near future.}, author = {Mouton, Peter R.}, booktitle = {Handbook of Imaging in Biological Mechanics}, doi = {10.1201/b17566}, isbn = {9781466588141}, pages = {217--228}, title = {{Quantitative anatomy using design-based stereology}}, type = {Book Section}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85054614475{\&}doi=10.1201{\%}2Fb17566{\&}partnerID=40{\&}md5=c910ea6ed3c6c6ab22591fe45986254f}, year = {2014} } @article{Vijayavenkataraman2018, abstract = {3D bioprinting is a pioneering technology that enables fabrication of biomimetic, multiscale, multi-cellular tissues with highly complex tissue microenvironment, intricate cytoarchitecture, structure-function hierarchy, and tissue-specific compositional and mechanical heterogeneity. Given the huge demand for organ transplantation, coupled with limited organ donors, bioprinting is a potential technology that could solve this crisis of organ shortage by fabrication of fully-functional whole organs. Though organ bioprinting is a far-fetched goal, there has been a considerable and commendable progress in the field of bioprinting that could be used as transplantable tissues in regenerative medicine. 
This paper presents a first-time review of 3D bioprinting in regenerative medicine, where the current status and contemporary issues of 3D bioprinting pertaining to the eleven organ systems of the human body including skeletal, muscular, nervous, lymphatic, endocrine, reproductive, integumentary, respiratory, digestive, urinary, and circulatory systems were critically reviewed. The implications of 3D bioprinting in drug discovery, development, and delivery systems are also briefly discussed, in terms of in vitro drug testing models, and personalized medicine. While there is a substantial progress in the field of bioprinting in the recent past, there is still a long way to go to fully realize the translational potential of this technology. Computational studies for study of tissue growth or tissue fusion post-printing, improving the scalability of this technology to fabricate human-scale tissues, development of hybrid systems with integration of different bioprinting modalities, formulation of new bioinks with tuneable mechanical and rheological properties, mechanobiological studies on cell-bioink interaction, 4D bioprinting with smart (stimuli-responsive) hydrogels, and addressing the ethical, social, and regulatory issues concerning bioprinting are potential futuristic focus areas that would aid in successful clinical translation of this technology.}, author = {Vijayavenkataraman, Sanjairaj and Yan, Wei Cheng and Lu, Wen Feng and Wang, Chi Hwa and Fuh, Jerry Ying Hsi}, doi = {10.1016/j.addr.2018.07.004}, issn = {18728294}, journal = {Advanced Drug Delivery Reviews}, keywords = {3D bioprinting,3D printing,Bioprinting,Organ printing,Regenerative medicine,Tissue engineering}, pages = {296--332}, title = {{3D bioprinting of tissues and organs for regenerative medicine}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {132}, year = {2018} } @article{Vikal2010, abstract = {Motivation: Image-guided percutaneous (through the skin) needle-based surgery has become part of routine clinical practice in performing procedures such as biopsies, injections and therapeutic implants. A novice physician typically performs needle interventions under the supervision of a senior physician; a slow and inherently subjective training process that lacks objective, quantitative assessment of the surgical skill and performance. Shortening the learning curve and increasing procedural consistency are important factors in assuring high-quality medical care. Methods: This paper describes a laboratory validation system, called Perk Station, for standardized training and performance measurement under different assistance techniques for needle-based surgical guidance systems. The initial goal of the Perk Station is to assess and compare different techniques: 2D image overlay, biplane laser guide, laser protractor and conventional freehand. The main focus of this manuscript is the planning and guidance software system developed on the 3D Slicer platform, a free, open source software package designed for visualization and analysis of medical image data. Results: The prototype Perk Station has been successfully developed, the associated needle insertion phantoms were built, and the graphical user interface was fully implemented. The system was inaugurated in undergraduate teaching and a wide array of outreach activities. Initial results, experiences, ongoing activities and future plans are reported. {\textcopyright} 2009 Elsevier Ltd. 
All rights reserved.}, author = {Vikal, Siddharth and U-Thainual, Paweena and Carrino, John A. and Iordachita, Iulian and Fischer, Gregory S. and Fichtinger, Gabor}, doi = {10.1016/j.compmedimag.2009.05.001}, issn = {08956111}, journal = {Computerized Medical Imaging and Graphics}, keywords = {Augmented reality,Image guidance,Needle placement,Surgical training}, number = {1}, pages = {19--32}, title = {{Perk Station-Percutaneous surgery training and performance measurement platform}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {34}, year = {2010} } @article{VonFalck2008, abstract = {OBJECTIVES: To analyze the effects of the sliding-thin-slab averaging algorithm on low-contrast performance in MDCT imaging and to find reasonable parameters for clinical routine work. MATERIALS AND METHODS: A low-contrast phantom simulating hypodense lesions (20 HU object contrast) was scanned with a 16-slice spiral CT scanner using different mAs-settings of 25, 50, 100, and 195 mAs. Other scan parameters were as follows: tube voltage = 120 kVp, slice collimation = 0.625 mm, pitch = 1.375 (high speed), reconstruction interval = 0.5 mm. Images were reconstructed with soft, standard, and bone algorithms, resulting in a total of 12 datasets. A sliding-thin-slab averaging algorithm was applied to these primary datasets, systematically varying the slab thickness between 0.5 and 5.0 mm. The low-contrast performance of the resulting datasets was semi-automatically analyzed using a statistical reader-independent approach: A size-dependent analysis of the image noise within the phantom was used to empirically generate a contrast discrimination function (CDF). The ratio between the actual contrast and the minimum contrast necessary for the detection (as given by the CDF) was calculated for all lesions in each dataset and used to evaluate the low-contrast detectability of the different lesions at increasing slab thickness. The results were compared with the original datasets to calculate the improvement in low-contrast detectability. RESULTS: Using the sliding-thin-slab algorithm, low-contrast performance was increased by a factor between 1.1 and 1.7 when compared with the primary dataset. The improvement of the visibility index at optimal slab thickness when compared with the original slice thickness (0.625 mm) was statistically significant (P {\textless} 0.05, Student t test) for the following datasets: 8 mm: all datasets; 6 mm: 25 mAs/soft, 195 mAs/bone, 25 mAs/bone; 5 mm: 25 mAs/soft, 25 mAs/bone. The ideal slab thickness over all datasets was 43{\%} (±3{\%}) of the diameter of the lesion to be detected. CONCLUSIONS: The use of an interactive sliding-thin-slab averaging algorithm can be readily applied to optimize low-contrast detectability in thin-collimated CT datasets. As a general rule for daily routine, a slice thickness of approximately 2.5 to 3.0 mm can be regarded as a reasonable preset, resulting in an optimized detectability of lesions with a diameter of 5 mm and above. 
{\textcopyright} 2008 Lippincott Williams {\&} Wilkins, Inc.}, author = {{Von Falck}, Christian and Hartung, Alexander and Berndzen, Frank and King, Benjamin and Galanski, Michael and Shin, Hoen Oh}, doi = {10.1097/RLI.0b013e3181614f2d}, issn = {00209996}, journal = {Investigative Radiology}, keywords = {Contrast,Imaging phantom,Spiral CT}, number = {4}, pages = {229--235}, title = {{Optimization of low-contrast detectability in thin-collimated modern multidetector CT using an interactive sliding-thin-slab averaging algorithm}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-40949127029{\&}doi=10.1097{\%}2FRLI.0b013e3181614f2d{\&}partnerID=40{\&}md5=787eeed3860e71b1c776f64c81144821}, volume = {43}, year = {2008} } @article{Falck2010, abstract = {Rationale and Objectives: To develop and evaluate a novel algorithm for semiautomated segmentation and volumetry of pleural effusions in multidetector computed tomography (MDCT) datasets. Materials and Methods: A seven-step algorithm for semiautomated segmentation of pleural effusions in MDCT datasets was developed, mainly using algorithms from the ITK image processing library. Semiautomated segmentation of pleural effusions was performed in 40 MDCT datasets of the chest (males = 22, females = 18, mean age: 56.7 ± 19.3 years). The accuracy of the semiautomated segmentation as compared with a manual segmentation approach was quantified based on the differences of the segmented volumes, the degree of over-/undersegmentation, and the Hausdorff distance. The time needed for the semiautomated and the manual segmentation process were recorded and compared. Results: The mean volume of the pleural effusions was 557.30 mL (± 477.27 mL) for the semiautomated and 553.19 (± 473.49 mL) for the manual segmentation. The difference was not statistically significant (Student t-test, P = .133). Regression analysis confirmed a strong relationship between the semiautomated algorithm and the gold standard (r2 = 0.998). Mean overlap of the segmented areas was 79{\%} (± 9.3{\%}) over all datasets with moderate oversegmentation (22{\%} ± 9.3{\%}) and undersegmentation (21{\%} ± 9.7{\%}). The mean Hausdorff distance was 17.2 mm (± 8.35 mm). The mean duration of the semiautomated segmentation process with user interaction was 8.4 minutes (± 2.6 minutes) as compared to 32.9 minutes (± 17.4 minutes) for manual segmentation. Conclusion: The semiautomated algorithm for segmentation and volumetry of pleural effusions in MDCT datasets shows a high diagnostic accuracy when compared with manual segmentation. {\textcopyright} 2010 AUR.}, author = {von Falck, Christian and Meier, Simone and J{\"{o}}rdens, Steffen and King, Benjamin and Galanski, Michael and oh Shin, Hoen}, doi = {10.1016/j.acra.2010.02.011}, issn = {10766332}, journal = {Academic Radiology}, keywords = {MDCT,Pleural effusion,diaphragm,segmentation,volumetry}, number = {7}, pages = {841--848}, title = {{Semiautomated Segmentation of Pleural Effusions in MDCT Datasets}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-77952884852{\&}doi=10.1016{\%}2Fj.acra.2010.02.011{\&}partnerID=40{\&}md5=af8867fcfe3518e57c7b6076890f3032}, volume = {17}, year = {2010} } @inproceedings{Weber, abstract = {Image segmentation plays a major role in medical imaging. Especially in radiology, the detection and development of tumors and other diseases can be supported by image segmentation applications. 
Tools that provide image segmentation and calculation of segmentation scores are not available at any time for every device due to the size and scope of functionalities they offer. These tools need huge periodic updates and do not properly work on old or weak systems. However, medical use-cases often require fast and accurate results. A complex and slow software can lead to additional stress and thus unnecessary errors. The aim of this contribution is the development of a cross-platform tool for medical segmentation use-cases. The goal is a device-independent and always available possibility for medical imaging including manual segmentation and metric calculation. The result is Studierfenster (studierfenster.at), a web-tool for manual segmentation and segmentation metric calculation. In this contribution, the focus lies on the segmentation metric calculation part of the tool. It provides the functionalities of calculating directed and undirected Hausdorff Distance (HD) and Dice Similarity Coefficient (DSC) scores for two uploaded volumes, filtering for specific values, searching for specific values in the calculated metrics and exporting filtered metric lists in different file formats.}, author = {Weber, Maximilian and Wild, Daniel and Wallner, Jurgen and Egger, Jan}, booktitle = {Proceedings of the Annual International Conference of the IEEE Engineering in Medicine and Biology Society, EMBS}, doi = {10.1109/EMBC.2019.8856481}, isbn = {9781538613115}, issn = {1557170X}, pages = {3463--3467}, title = {{A Client/Server based Online Environment for the Calculation of Medical Segmentation Scores}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85077880308{\&}doi=10.1109{\%}2FEMBC.2019.8856481{\&}partnerID=40{\&}md5=289fafd6a7830c42d4e57271141c9bce}, year = {2019} } @article{Weis2015, abstract = {{\textcopyright} 2015 Society of Photo-Optical Instrumentation Engineers (SPIE).Cancer progression has been linked to mechanics. Therefore, there has been recent interest in developing noninvasive imaging tools for cancer assessment that are sensitive to changes in tissue mechanical properties. We have developed one such method, modality independent elastography (MIE), that estimates the relative elastic properties of tissue by fitting anatomical image volumes acquired before and after the application of compression to biomechanical models. The aim of this study was to assess the accuracy and reproducibility of the method using phantoms and a murine breast cancer model. Magnetic resonance imaging data were acquired, and the MIE method was used to estimate relative volumetric stiffness. Accuracy was assessed using phantom data by comparing to gold-standard mechanical testing of elasticity ratios. Validation error was {\textless}12{\%}. Reproducibility analysis was performed on animal data, and within-subject coefficients of variation ranged from 2 to 13{\%} at the bulk level and 32{\%} at the voxel level. To our knowledge, this is the first study to assess the reproducibility of an elasticity imaging metric in a preclinical cancer model. Our results suggest that the MIE method can reproducibly generate accurate estimates of the relative mechanical stiffness and provide guidance on the degree of change needed in order to declare biological changes rather than experimental error in future therapeutic studies.}, author = {Weis, Jared A. and Flint, Katelyn M. and Sanchez, Violeta and Yankeelov, Thomas E. 
and Miga, Michael I.}, doi = {10.1117/1.jmi.2.3.036001}, issn = {2329-4302}, journal = {Journal of Medical Imaging}, number = {3}, pages = {036001}, title = {{Assessing the accuracy and reproducibility of modality independent elastography in a murine model of breast cancer}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {2}, year = {2015} } @article{Wellein2011, abstract = {In various clinical or research scenarios, such as neurosurgical intervention planning, diagnostics, or clinical studies concerning neurological diseases, cortex segmentation can be of great value. As, e.g., the visualization of the cortical surface along with target and risk structures enables conservative access planning and gives context information about the patient-specific anatomy. We present an interactive cortex segmentation pipeline (CSP) for T1-weighted MR images, utilizing watershed and level set methods. It is designed to allow the user to adjust the intermediate results at any stage of the segmentation process. Particular attention is paid to the appropriate visualization of the segmentation in the context of the original data for verification and to different interaction methods (manual editing, parameter tuning, morphological operations). Evaluation of the interactive CSP is performed with the Segmentation Validation Engine (SVE) by Shattuck et al. (NeuroImage 45(2):431-439, 2009). The segmentation quality of our method is comparable to the best results of three different established methods: the brain extraction tool (BET), brain surface extractor (BSE), and hybrid watershed algorithm (HWA). Being designed for interaction, the CSP integrates the users' expertise by allowing him to perform correction at any stage of the pipeline, enabling him to easily achieve a segmentation fulfilling his specific needs. {\textcopyright} 2010 Springer-Verlag.}, author = {Wellein, Daniela and Born, Silvia and Pfeifle, Matthias and Duffner, Frank and Bartz, Dirk}, doi = {10.1007/s00450-010-0130-4}, issn = {18652034}, journal = {Computer Science - Research and Development}, keywords = {Cortex segmentation,Level set method,Neurosurgical intervention planning,Segmentation evaluation,User interaction,Watershed algorithm}, number = {1-2}, pages = {87--96}, title = {{A pipeline for interactive cortex segmentation}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-79251618712{\&}doi=10.1007{\%}2Fs00450-010-0130-4{\&}partnerID=40{\&}md5=63eb5e1f357ccd1387341c04ab3f7522}, volume = {26}, year = {2011} } @inproceedings{Wetzel, abstract = {The detailed reconstruction of neural anatomy for connectomics studies requires a combination of resolution and large three-dimensional data capture provided by serial section electron microscopy (ssEM). The convergence of high throughput ssEM imaging and improved tissue preparation methods now allows ssEM capture of complete specimen volumes up to cubic millimeter scale. The resulting multi-terabyte image sets span thousands of serial sections and must be precisely registered into coherent volumetric forms in which neural circuits can be traced and segmented. This paper introduces a Signal Whitening Fourier Transform Image Registration approach (SWiFT-IR) under development at the Pittsburgh Supercomputing Center and its use to align mouse and zebrafish brain datasets acquired using the wafer mapper ssEM imaging technology recently developed at Harvard University. 
Unlike other methods now used for ssEM registration, SWiFT-IR modifies its spatial frequency response during image matching to maximize a signal-to-noise measure used as its primary indicator of alignment quality. This alignment signal is more robust to rapid variations in biological content and unavoidable data distortions than either phase-only or standard Pearson correlation, thus allowing more precise alignment and statistical confidence. These improvements in turn enable an iterative registration procedure based on projections through multiple sections rather than more typical adjacent-pair matching methods. This projection approach, when coupled with known anatomical constraints and iteratively applied in a multi-resolution pyramid fashion, drives the alignment into a smooth form that properly represents complex and widely varying anatomical content such as the full cross-section zebrafish data.}, archivePrefix = {arXiv}, arxivId = {1612.04787}, author = {Wetzel, Arthur W. and Bakal, Jennifer and Dittrich, Markus and Hildebrand, David G.C. and Morgan, Josh L. and Lichtman, Jeff W.}, booktitle = {Proceedings - Applied Imagery Pattern Recognition Workshop}, doi = {10.1109/AIPR.2016.8010595}, eprint = {1612.04787}, isbn = {9781509032846}, issn = {21642516}, keywords = {connectomics,electron microscopy,image registration,neural circuit reconstruction,signal whitening}, title = {{Registering large volume serial-section electron microscopy image sets for neural circuit reconstruction using FFT signal whitening}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85028755924{\&}doi=10.1109{\%}2FAIPR.2016.8010595{\&}partnerID=40{\&}md5=688016e1bcc150f30e8230da73803097}, year = {2017} } @article{Whitmarsh2011, abstract = {The accurate diagnosis of osteoporosis has gained increasing importance due to the aging of our society. Areal bone mineral density (BMD) measured by dual-energy X-ray absorptiometry (DXA) is an established criterion in the diagnosis of osteoporosis. This measure, however, is limited by its two-dimensionality. This work presents a method to reconstruct both the 3D bone shape and 3D BMD distribution of the proximal femur from a single DXA image used in clinical routine. A statistical model of the combined shape and BMD distribution is presented, together with a method for its construction from a set of quantitative computed tomography (QCT) scans. A reconstruction is acquired in an intensity-based 3D-2D registration process whereby an instance of the model is found that maximizes the similarity between its projection and the DXA image. Reconstruction experiments were performed on the DXA images of 30 subjects, with a model constructed from a database of QCT scans of 85 subjects. The accuracy was evaluated by comparing the reconstructions with the same subject QCT scans. The method presented here can potentially improve the diagnosis of osteoporosis and fracture risk assessment from the low radiation dose and low cost DXA devices currently used in clinical routine. {\textcopyright} 2006 IEEE.}, author = {Whitmarsh, Tristan and Humbert, Ludovic and {De Craene}, Mathieu and {Del Rio Barquero}, Luis M.
and Frangi, Alejandro F.}, doi = {10.1109/TMI.2011.2163074}, issn = {02780062}, journal = {IEEE Transactions on Medical Imaging}, keywords = {Active appearance model,active shape model,deformable models,image reconstruction,image registration}, number = {12}, pages = {2101--2114}, title = {{Reconstructing the 3D shape and bone mineral density distribution of the proximal femur from dual-energy x-ray absorptiometry}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {30}, year = {2011} } @article{Wibmer2015, abstract = {Objectives: To investigate Haralick texture analysis of prostate MRI for cancer detection and differentiating Gleason scores (GS). Methods: One hundred and forty-seven patients underwent T2- weighted (T2WI) and diffusion-weighted prostate MRI. Cancers ≥0.5 ml and non-cancerous peripheral (PZ) and transition (TZ) zone tissue were identified on T2WI and apparent diffusion coefficient (ADC) maps, using whole-mount pathology as reference. Texture features (Energy, Entropy, Correlation, Homogeneity, Inertia) were extracted and analysed using generalized estimating equations. Results: PZ cancers (n = 143) showed higher Entropy and Inertia and lower Energy, Correlation and Homogeneity compared to non-cancerous tissue on T2WI and ADC maps (p-values: {\textless}.0001–0.008). In TZ cancers (n = 43) we observed significant differences for all five texture features on the ADC map (all p-values: {\textless}.0001) and for Correlation (p = 0.041) and Inertia (p = 0.001) on T2WI. On ADC maps, GS was associated with higher Entropy (GS 6 vs. 7: p = 0.0225; 6 vs. {\textgreater}7: p = 0.0069) and lower Energy (GS 6 vs. 7: p = 0.0116, 6 vs. {\textgreater}7: p = 0.0039). ADC map Energy (p = 0.0102) and Entropy (p = 0.0019) were significantly different in GS ≤3 + 4 versus ≥4 + 3 cancers; ADC map Entropy remained significant after controlling for the median ADC (p = 0.0291). Conclusion: Several Haralick-based texture features appear useful for prostate cancer detection and GS assessment. Key Points: • Several Haralick texture features may differentiate non-cancerous and cancerous prostate tissue. • Tumour Energy and Entropy on ADC maps correlate with Gleason score. • T2w-image-derived texture features are not associated with the Gleason score.}, author = {Wibmer, Andreas and Hricak, Hedvig and Gondo, Tatsuo and Matsumoto, Kazuhiro and Veeraraghavan, Harini and Fehr, Duc and Zheng, Junting and Goldman, Debra and Moskowitz, Chaya and Fine, Samson W. and Reuter, Victor E. and Eastham, James and Sala, Evis and Vargas, Hebert Alberto}, doi = {10.1007/s00330-015-3701-8}, issn = {14321084}, journal = {European Radiology}, keywords = {Adenocarcinoma,Computer-assisted,Gleason grading,Image processing,Magnetic resonance imaging,Prostatic neoplasm}, number = {10}, pages = {2840--2850}, title = {{Haralick texture analysis of prostate MRI: utility for differentiating non-cancerous prostate from prostate cancer and differentiating prostate cancers with different Gleason scores}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {25}, year = {2015} } @article{Wienert2013, abstract = {Background: Automated image analysis methods are becoming more and more important to extract and quantify image features in microscopy-based biomedical studies and several commercial or open-source tools are available. 
However, most of the approaches rely on pixel-wise operations, a concept that has limitations when high-level object features and relationships between objects are studied and if user-interactivity on the object-level is desired. Results: In this paper we present an open-source software that facilitates the analysis of content features and object relationships by using objects as the basic processing unit instead of individual pixels. Our approach also enables users without programming knowledge to compose "analysis pipelines" that exploit the object-level approach. We demonstrate the design and use of example pipelines for the immunohistochemistry-based cell proliferation quantification in breast cancer and two-photon fluorescence microscopy data about bone-osteoclast interaction, which underline the advantages of the object-based concept. Conclusions: We introduce an open source software system that offers object-based image analysis. The object-based concept allows for a straightforward development of object-related interactive or fully automated image analysis solutions. The presented software may therefore serve as a basis for various applications in the field of digital image analysis. {\textcopyright} 2013 Wienert et al.; licensee BioMed Central Ltd.}, author = {Wienert, Stephan and Heim, Daniel and Kotani, Manato and Lindequist, Bj{\"{o}}rn and Stenzinger, Albrecht and Ishii, Masaru and Hufnagl, Peter and Beil, Michael and Dietel, Manfred and Denkert, Carsten and Klauschen, Frederick}, doi = {10.1186/1746-1596-8-34}, issn = {17461596}, journal = {Diagnostic Pathology}, keywords = {Image analysis,Object-based image analysis,Open source,Software}, number = {1}, pages = {8}, title = {{CognitionMaster: An object-based image analysis framework}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {8}, year = {2013} } @article{Wolfelschneider2017, abstract = {Purpose: The aim of this study was to evaluate a surrogate-driven motion model based on four-dimensional computed tomography that is able to predict CT volumes corresponding to arbitrary respiratory phases. Furthermore, the comparison of three different driving surrogates is examined and the feasibility of using the model for 4D dose re-calculation will be discussed. Methods: The study is based on repeated 4DCTs of twenty patients treated for bronchial carcinoma and metastasis. The motion model was estimated from the planning 4DCT through deformable image registration. To predict a certain phase of a follow-up 4DCT, the model considers inter-fractional variations (baseline correction) and intra-fractional respiratory parameters (amplitude and phase) derived from surrogates. The estimated volumes resulting from the model were compared to ground-truth clinical 4DCTs using absolute HU differences in the lung region and landmarks localized using the Scale Invariant Feature Transform. Finally, the γ-index was used to evaluate the dosimetric effects of the intensity differences measured between the estimated and the ground-truth CT volumes. Results: The results show absolute HU differences between estimated and ground-truth images with median value (± standard deviation) of (61.3 ± 16.7) HU. Median 3D distances, measured on about 400 matching landmarks in each volume, were (2.9 ± 3.0) mm. 3D errors up to 28.2 mm were found for CT images with artifacts or reduced quality. Pass rates for all surrogate approaches were above 98.9{\%} with a γ-criterion of 2{\%}/2 mm.
Conclusion: The results depend mainly on the image quality of the initial 4DCT and the deformable image registration. All investigated surrogates can be used to estimate follow-up 4DCT phases; however, uncertainties decrease for volumetric approaches. Application of the model for 4D dose calculations is feasible.}, author = {Wolfelschneider, Jens and Seregni, Matteo and Fassi, Aurora and Ziegler, Marc and Baroni, Guido and Fietkau, Rainer and Riboldi, Marco and Bert, Christoph}, doi = {10.1002/mp.12243}, issn = {00942405}, journal = {Medical Physics}, keywords = {4D dose reconstruction,4DCT,Respiratory motion model,Scale invariant feature transform,Surrogates}, number = {6}, pages = {2066--2076}, title = {{Examination of a deformable motion model for respiratory movements and 4D dose calculations using different driving surrogates}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {44}, year = {2017} } @article{Wollny2013, abstract = {Background: Gray scale images make up the bulk of data in bio-medical image analysis, and hence, the main focus of many image processing tasks lies in the processing of these monochrome images. With ever improving acquisition devices, spatial and temporal image resolution increases, and data sets become very large. Various image processing frameworks exist that make the development of new algorithms easy by using high level programming languages or visual programming. These frameworks are also accessible to researchers who have little or no background in software development because they take care of otherwise complex tasks. Specifically, the management of working memory is taken care of automatically, usually at the price of requiring more of it. As a result, processing large data sets with these tools becomes increasingly difficult on workstation-class computers. One alternative to using these high level processing tools is the development of new algorithms in a language like C++, which gives the developer full control over how memory is handled, but the resulting workflow for the prototyping of new algorithms is rather time intensive, and also not appropriate for a researcher with little or no knowledge in software development. Another alternative is using command line tools that run image processing tasks, use the hard disk to store intermediate results, and provide automation by using shell scripts. Although not as convenient as, e.g., visual programming, this approach is still accessible to researchers without a background in computer science. However, only a few tools exist that provide this kind of processing interface; they are usually quite task specific and don't provide a clear approach when one wants to shape a new command line tool from a prototype shell script. Results: The proposed framework, MIA, provides a combination of command line tools, plug-ins, and libraries that make it possible to run image processing tasks interactively in a command shell and to prototype by using the according shell scripting language. Since the hard disk becomes the temporary storage, memory management is usually a non-issue in the prototyping phase. By using string-based descriptions for filters, optimizers, and the like, the transition from shell scripts to full-fledged programs implemented in C++ is also made easy.
In addition, its design based on atomic plug-ins and single-task command line tools makes it easy to extend MIA, usually without the requirement to touch or recompile existing code. Conclusion: In this article, we describe the general design of MIA, a general-purpose framework for gray scale image processing. We demonstrated the applicability of the software with example applications from three different research scenarios, namely motion compensation in myocardial perfusion imaging, the processing of high resolution image data that arises in virtual anthropology, and retrospective analysis of treatment outcome in orthognathic surgery. With MIA, prototyping algorithms by using shell scripts that combine small, single-task command line tools is a viable alternative to the use of high level languages, an approach that is especially useful when large data sets need to be processed. {\textcopyright} 2013 Wollny et al.; licensee BioMed Central Ltd.}, author = {Wollny, Gert and Kellman, Peter and Ledesma-Carbayo, Mar{\'{i}}a Jesus and Skinner, Matthew M. and Hublin, Jean Jaques and Hierl, Thomas}, doi = {10.1186/1751-0473-8-20}, issn = {17510473}, journal = {Source Code for Biology and Medicine}, title = {{MIA - A free and open source software for gray scale medical image analysis}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84885357120{\&}doi=10.1186{\%}2F1751-0473-8-20{\&}partnerID=40{\&}md5=67c49f93d1bf780334ae506f97a71ff2}, volume = {8}, year = {2013} } @article{Wollny2010, abstract = {Free-breathing image acquisition is desirable in first-pass gadolinium-enhanced magnetic resonance imaging (MRI), but the breathing movements hinder the direct automatic analysis of the myocardial perfusion and qualitative readout by visual tracking. Nonrigid registration can be used to compensate for these movements but needs to deal with local contrast and intensity changes with time. We propose an automatic registration scheme that exploits the quasiperiodicity of free breathing to decouple movement from intensity change. First, we identify and register a subset of the images corresponding to the same phase of the breathing cycle. This registration step deals with small differences caused by movement but maintains the full range of intensity change. The remaining images are then registered to synthetic references that are created as a linear combination of images belonging to the already registered subset. Because of the quasiperiodic respiratory movement, the subset images are distributed evenly over time and, therefore, the synthetic references exhibit intensities similar to their corresponding unregistered images. Thus, this second registration step needs to account only for the movement. Validation experiments were performed on data obtained from six patients, three slices per patient, and the automatically obtained perfusion profiles were compared with profiles obtained by manually segmenting the myocardium. The results show that our automatic approach is well suited to compensate for the free-breathing movement and that it achieves a significant improvement in the average Pearson correlation coefficient between manually and automatically obtained perfusion profiles before (0.87±0.18) and after (0.96±0.09) registration. {\textcopyright} 2010 IEEE.}, author = {Wollny, Gert and Ledesma-Carbayo, Maria J.
and Kellman, Peter and Santos, Andres}, doi = {10.1109/TMI.2010.2049270}, issn = {02780062}, journal = {IEEE Transactions on Medical Imaging}, keywords = {Heart,image registration,myocardial perfusion}, number = {8}, pages = {1516--1527}, title = {{Exploiting quasiperiodicity in motion correction of free-breathing myocardial perfusion MRI}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {29}, year = {2010} } @article{Wu2012, abstract = {True 3D CT dataset for treatment planning of an oversized patient is difficult to acquire due to the bore size and field of view (FOV) reconstruction. This project aims to provide a simple approach to reconstruct true CT data for oversize patients using CT scanner with limited FOV by acquiring double partial CT (left and right side) images. An efficient line profile-based method has been developed to minimize the difference of the CT numbers in the overlapping region between the right and left images and to generate a complete true 3D CT dataset in the natural state. New image processing modules have been developed and integrated to the Insight Segmentation {\&} Registration Toolkit (ITK 3.6) package. For example, different modules for image cropping, line profile generation, line profile matching, and optimized partial image fusion have been developed. The algorithm has been implemented for images containing the bony structure of the spine and tested on 3D CT planning datasets from both phantom and real patients with satisfactory results in both cases. The proposed optimized line profile-based partial registration method provides a simple and accurate method for acquiring a complete true 3D CT dataset for an oversized patient using CT scanning with small bore size, that can be used for accurate treatment planning.}, author = {Wu, Huanmei and Zhao, Qingya and Cao, Minsong and Das, Indra}, doi = {10.1120/jacmp.v13i2.3629}, issn = {15269914}, journal = {Journal of Applied Clinical Medical Physics}, keywords = {Double scanning,Oversized patients,Partial registration,Planning CT}, number = {2}, pages = {20--31}, title = {{A line profile-based double partial fusion method for acquiring planning CT of oversized patients in radiation treatment}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {13}, year = {2012} } @article{Wunderling2017, abstract = {Secondary growth occurs in dicotyledons and gymnosperms, and results in an increased girth of plant organs. It is driven primarily by the vascular cambium, which produces thousands of cells throughout the life of several plant species. For instance, even in the small herbaceous model plant Arabidopsis, manual quantification of this massive process is impractical. Here, we provide a comprehensive overview of current methods used to measure radial growth. We discuss the issues and problematics related to its quantification. 
We highlight recent advances and tools developed for automated cellular phenotyping and its future applications.}, author = {Wunderling, Anna and Targem, Mehdi Ben and {De Reuille}, Pierre Barbier and Ragni, Laura and Turner, Simon}, doi = {10.1093/jxb/erw450}, issn = {14602431}, journal = {Journal of Experimental Botany}, keywords = {Arabidopsis,Automated cellular phenotyping,Machine learning,Quantitative histology,Secondary growth}, number = {1}, pages = {89--95}, title = {{Novel tools for quantifying secondary growth}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {68}, year = {2016} } @article{Xu2013, abstract = {Purpose: To assess retrospectively the clinical accuracy of an magnetic resonance imaging-guided robotic prostate biopsy system that has been used in the US National Cancer Institute for over 6 years. Methods: Series of 2D transverse volumetric MR image slices of the prostate both pre (high-resolution T2-weighted)- and post (low-resolution)- needle insertions were used to evaluate biopsy accuracy. A three-stage registration algorithm consisting of an initial two-step rigid registration followed by a B-spline deformable alignment was developed to capture prostate motion during biopsy. The target displacement (distance between planned and actual biopsy target), needle placement error (distance from planned biopsy target to needle trajectory), and biopsy error (distance from actual biopsy target to needle trajectory) were calculated as accuracy assessment. Results: A total of 90 biopsies from 24 patients were studied. The registrations were validated by checking prostate contour alignment using image overlay, and the results were accurate to within 2 mm. The mean target displacement, needle placement error, and clinical biopsy error were 5.2, 2.5, and 4.3 mm, respectively. Conclusion: The biopsy error reported suggests that quantitative imaging techniques for prostate registration and motion compensation may improve prostate biopsy targeting accuracy. {\textcopyright} 2013 CARS.}, author = {Xu, Helen and Lasso, Andras and Guion, Peter and Krieger, Axel and Kaushal, Aradhana and Singh, Anurag K. and Pinto, Peter A. and Coleman, Jonathan and Grubb, Robert L. and Lattouf, Jean Baptiste and Menard, Cynthia and Whitcomb, Louis L. and Fichtinger, Gabor}, doi = {10.1007/s11548-013-0831-9}, issn = {18616429}, journal = {International Journal of Computer Assisted Radiology and Surgery}, keywords = {Accuracy validation,Image registration,MRI-guidance,Prostate biopsy}, number = {6}, pages = {937--944}, title = {{Accuracy analysis in MRI-guided robotic prostate biopsy}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {8}, year = {2013} } @article{Xu2015, abstract = {Purpose: Prostate needle biopsy is a commonly performed procedure since it is the most definitive form of cancer diagnosis. Magnetic resonance imaging (MRI) allows target-specific biopsies to be performed. However, needle placements are often inaccurate due to intra-operative prostate motion and the lack of motion compensation techniques. This paper detects and determines the extent of tissue displacement during an MRI-guided biopsy so that the needle insertion plan can be adjusted accordingly. Methods: A multi-slice-to-volume registration algorithm was developed to align the pre-operative planning image volume with three intra-operative orthogonal image slices of the prostate acquired immediately before needle insertion. The algorithm consists of an initial rigid transformation followed by a deformable step. 
Results: A total of 14 image sets from 10 patients were studied. Based on prostate contour alignment, the registrations were accurate to within 2 mm. Conclusion: This algorithm can be used to increase the needle targeting accuracy by alerting the clinician if the biopsy target has moved significantly prior to needle insertion. The proposed method demonstrated feasibility of intra-operative target localization and motion compensation for MRI-guided prostate biopsy.}, author = {Xu, Helen and Lasso, Andras and Fedorov, Andriy and Tuncali, Kemal and Tempany, Clare and Fichtinger, Gabor}, doi = {10.1007/s11548-014-1108-7}, issn = {18616429}, journal = {International Journal of Computer Assisted Radiology and Surgery}, keywords = {Image registration,MRI-guidance,Prostate biopsy,Target localization}, number = {5}, pages = {563--572}, title = {{Multi-slice-to-volume registration for MRI-guided transperineal prostate biopsy}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {10}, year = {2015} } @article{Xu2015a, abstract = {Structural abnormalities of the microvasculature can impair perfusion and function. Conventional histology provides good spatial resolution with which to evaluate the microvascular structure but affords no 3-dimensional information; this limitation could lead to misinterpretations of the complex microvessel network in health and disease. The objective of this study was to develop and evaluate an accurate, fully automated 3D histology reconstruction method to visualize the arterioles and venules within the mouse hind-limb. Sections of the tibialis anterior muscle from C57BL/J6 mice (both normal and subjected to femoral artery excision) were reconstructed using pairwise rigid and affine registrations of 5 $\mu$m-thick, paraffin-embedded serial sections digitized at 0.25 $\mu$m/pixel. Low-resolution intensity-based rigid registration was used to initialize the nucleus landmark-based registration, and conventional high-resolution intensity-based registration method. The affine nucleus landmark-based registration was developed in this work and was compared to the conventional affine high-resolution intensity-based registration method. Target registration errors were measured between adjacent tissue sections (pairwise error), as well as with respect to a 3D reference reconstruction (accumulated error, to capture propagation of error through the stack of sections). Accumulated error measures were lower (p{\textless}0.01) for the nucleus landmark technique and superior vasculature continuity was observed. These findings indicate that registration based on automatic extraction and correspondence of small, homologous landmarks may support accurate 3D histology reconstruction. This technique avoids the otherwise problematic "banana-into-cylinder" effect observed using conventional methods that optimize the pairwise alignment of salient structures, forcing them to be section-orthogonal. This approach will provide a valuable tool for high-accuracy 3D histology tissue reconstructions for analysis of diseased microvasculature.}, author = {Xu, Yiwen and Pickering, J. 
Geoffrey and Nong, Zengxuan and Gibson, Eli and Arpino, John Michael and Yin, Hao and Ward, Aaron D.}, doi = {10.1371/journal.pone.0126817}, issn = {19326203}, journal = {PLoS ONE}, number = {5}, pages = {24}, title = {{A method for 3D histopathology reconstruction supporting mouse microvasculature analysis}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {10}, year = {2015} } @article{Yan2018, abstract = {Bioprinted skin tissue has the potential for aiding drug screening, formulation development, clinical transplantation, chemical and cosmetic testing, as well as basic research. Limitations of conventional skin tissue engineering approaches have driven the development of biomimetic skin equivalent via 3D bioprinting. A key hope for bioprinting skin is the improved tissue authenticity over conventional skin equivalent construction, enabling the precise localization of multiple cell types and appendages within a construct. The printing of skin faces challenges broadly associated with general 3D bioprinting, including the selection of cell types and biomaterials, and additionally requires in vitro culture formats that allow for growth at an air-liquid interface. This paper provides a thorough review of current 3D bioprinting technologies used to engineer human skin constructs and presents the overall pipelines of designing a biomimetic artificial skin via 3D bioprinting from the design phase (i.e. pre-processing phase) through the tissue maturation phase (i.e. post-processing) and into final product evaluation for drug screening, development, and drug delivery applications.}, author = {Yan, Wei Cheng and Davoodi, Pooya and Vijayavenkataraman, Sanjairaj and Tian, Yuan and Ng, Wei Cheng and Fuh, Jerry Y.H. and Robinson, Kim Samirah and Wang, Chi Hwa}, doi = {10.1016/j.addr.2018.07.016}, issn = {18728294}, journal = {Advanced Drug Delivery Reviews}, keywords = {3D bioprinting,Artificial skin,Skin tissue engineering,Tissue engineering}, pages = {270--295}, title = {{3D bioprinting of skin tissue: From pre-processing to final product evaluation}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {132}, year = {2018} } @article{Yang2015, abstract = {Purpose: Accurate visualization of lung motion is important in many clinical applications, such as radiotherapy of lung cancer. Advancement in imaging modalities [e.g., computed tomography (CT) and MRI] has allowed dynamic imaging of lung and lung tumor motion. However, each imaging modality has its advantages and disadvantages. The study presented in this paper aims at generating synthetic 4D-CT dataset for lung cancer patients by combining both continuous three-dimensional (3D) motion captured by 4D-MRI and the high spatial resolution captured by CT using the author's proposed approach.Methods: A novel hybrid approach based on deformable image registration (DIR) and finite element method simulation was developed to fuse a static 3D-CT volume (acquired under breath-hold) and the 3D motion information extracted from 4D-MRI dataset, creating a synthetic 4D-CT dataset. Results: The study focuses on imaging of lung and lung tumor. Comparing the synthetic 4D-CT dataset with the acquired 4D-CT dataset of six lung cancer patients based on 420 landmarks, accurate results (average error {\textless}2 mm) were achieved using the authors' proposed approach. Their hybrid approach achieved a 40{\%} error reduction (based on landmarks assessment) over using only DIR techniques. 
Conclusions: The synthetic 4D-CT dataset generated has high spatial resolution, has excellent lung details, and is able to show movement of lung and lung tumor over multiple breathing cycles.}, author = {Yang, Y. X. and Teo, S. K. and {Van Reeth}, E. and Tan, C. H. and Tham, I. W.K. and Poh, C. L.}, doi = {10.1118/1.4923167}, issn = {00942405}, journal = {Medical Physics}, keywords = {DIR,FEM,image fusion,lung cancer,radiotherapy}, number = {8}, pages = {4484--4496}, title = {{A hybrid approach for fusing 4D-MRI temporal information with 3D-CT for the study of lung and lung tumor motion}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {42}, year = {2015} } @article{Yin2006, abstract = {Oligometastases refer to metastases that are limited in number and location and are amenable to regional treatment. The majority of these metastases appear in the brain, lung, liver, and bone. Although the focus of interest in the past within radiation oncology has been on the treatment of intracranial metastases, there has been growing interest in extracranial sites such as the liver and lung. This is largely because of the rapid development of targeting techniques for oligometastases such as intensity-modulated and image-guided radiation therapy, which has made it possible to deliver single or a few fractions of high-dose radiation treatments, highly conformal to the target. The clinical decision to use radiation to treat oligometastases is based on both radiobiological and physics considerations. The radiobiological considerations involve improvement of treatment schema for time, dose, and volume. Areas of interests are hypofractionation, tumor and normal tissue tolerance, and hypoxia. The physics considerations for oligometastases treatment are focused mainly on ensuring treatment accuracy and precision. This article discusses the physics and imaging aspects involved in each step of the radiation treatment process for oligometastases, including target definition, treatment simulation, treatment planning, pretreatment target localization, radiation delivery, treatment verification, and treatment evaluation. {\textcopyright} 2006 Elsevier Inc. All rights reserved.}, author = {Yin, Fang Fang and Das, Shiva and Kirkpatrick, John and Oldham, Mark and Wang, Zhiheng and Zhou, Su Min}, doi = {10.1016/j.semradonc.2005.12.004}, issn = {10534296}, journal = {Seminars in Radiation Oncology}, number = {2}, pages = {85--101}, title = {{Physics and imaging for targeting of oligometastases}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-33645088409{\&}doi=10.1016{\%}2Fj.semradonc.2005.12.004{\&}partnerID=40{\&}md5=1274c24db6658fb341f2b0b1f475f7ce}, volume = {16}, year = {2006} } @article{Zaffino2016, abstract = {Purpose: Multiatlas based segmentation is largely used in many clinical and research applications. Due to its good performances, it has recently been included in some commercial platforms for radiotherapy planning and surgery guidance. Anyway, to date, a software with no restrictions about the anatomical district and image modality is still missing. In this paper we introduce PLASTIMATCH MABS, an open source software that can be used with any image modality for automatic segmentation. Methods: PLASTIMATCH MABS workflow consists of two main parts: (1) an offline phase, where optimal registration and voting parameters are tuned and (2) an online phase, where a new patient is labeled from scratch by using the same parameters as identified in the former phase. 
Several registration strategies, as well as different voting criteria can be selected. A flexible atlas selection scheme is also available. To prove the effectiveness of the proposed software across anatomical districts and image modalities, it was tested on two very different scenarios: head and neck (H{\&}N) CT segmentation for radiotherapy application, and magnetic resonance image brain labeling for neuroscience investigation. Results: For the neurological study, minimum dice was equal to 0.76 (investigated structures: left and right caudate, putamen, thalamus, and hippocampus). For head and neck case, minimum dice was 0.42 for the most challenging structures (optic nerves and submandibular glands) and 0.62 for the other ones (mandible, brainstem, and parotid glands). Time required to obtain the labels was compatible with a real clinical workflow (35 and 120 min). Conclusions: The proposed software fills a gap in the multiatlas based segmentation field, since all currently available tools (both for commercial and for research purposes) are restricted to a well specified application. Furthermore, it can be adopted as a platform for exploring MABS parameters and as a reference implementation for comparing against other segmentation algorithms.}, author = {Zaffino, Paolo and Raudaschl, Patrik and Fritscher, Karl and Sharp, Gregory C. and Spadea, Maria Francesca}, doi = {10.1118/1.4961121}, issn = {00942405}, journal = {Medical Physics}, keywords = {CT,MRI,automatic segmentation,multiatlas based segmentation,open source}, number = {9}, pages = {5155--5160}, title = {{Technical Note: Plastimatch mabs, an open source tool for automatic image segmentation}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {43}, year = {2016} } @article{Zhang2019, abstract = {Purpose: Respiratory gated four-dimensional (4D) single photon emission computed tomography (SPECT) with phase-matched CT reduces respiratory blurring and attenuation correction (AC) artifacts in cardiac SPECT. This study aims to develop and investigate the effectiveness of an interpolated CT (ICT) method for improved cardiac SPECT AC using simulations. Methods: We used the 4D XCAT phantom to simulate a population of ten patients varied in gender, anatomy, 99mTc-sestamibi distribution, respiratory patterns, and disease states. Simulated 120 SPECT projection data were rebinned into six equal count gates. Activity and attenuation maps in each gate were averaged as gated SPECT and CT (GCT). Three helical CTs were simulated at end-inspiration (HCT-IN), end-expiration (HCT-EX), and mid-respiration (HCT-MID). The ICTs were obtained from HCT-EX and HCT-IN using the motion vector field generated between them from affine plus b-spline registration. Projections were reconstructed by OS-EM method, using GCT, ICT, and three HCTs for AC. Reconstructed images of each gate were registered to end-expiration and averaged to generate the polar plots. Relative difference for each segment and relative defect size were computed using images of GCT AC as reference. Results: The average of maximum relative difference through ten phantoms was 7.93 ± 4.71{\%}, 2.50 ± 0.98{\%}, 3.58 ± 0.74{\%}, and 2.14 ± 0.56{\%} for noisy HCT-IN, HCT-MID, HCT-EX, and ICT AC data, respectively. The ICT showed closest defect size to GCT while the differences from HCTs can be over 40{\%}. Conclusion: We conclude that the performance of ICT is similar to GCT. 
It improves the image quality and quantitative accuracy for respiratory-gated cardiac SPECT as compared to conventional HCT, while it can potentially further reduce the radiation dose of GCT.}, author = {Zhang, Duo and Ghaly, Michael and Mok, Greta S.P.}, doi = {10.1002/mp.13513}, issn = {00942405}, journal = {Medical Physics}, keywords = {attenuation correction,cardiac SPECT/CT,respiratory gating}, number = {6}, pages = {2621--2628}, title = {{Interpolated CT for attenuation correction on respiratory gating cardiac SPECT/CT — A simulation study}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {46}, year = {2019} } @article{Zhang2018, abstract = {Background: Respiratory gating reduces motion blurring in cardiac SPECT. Here we aim to evaluate the performance of three respiratory gating strategies using a population of digital phantoms with known truth and clinical data. Methods: We analytically simulated 60 projections for 10 XCAT phantoms with 99mTc-sestamibi distributions using three gating schemes: equal amplitude gating (AG), equal count gating (CG), and equal time gating (TG). Clinical list-mode data for 10 patients who underwent 99mTc-sestamibi scans were also processed using the 3 gating schemes. Reconstructed images in each gate were registered to a reference gate, averaged and reoriented to generate the polar plots. For simulations, image noise, relative difference (RD) of averaged count for each of the 17 segment, and relative defect size difference (RSD) were analyzed. For clinical data, image intensity profile and FWHM were measured across the left ventricle wall. Results: For simulations, AG and CG methods showed significantly lower RD and RSD compared to TG, while noise variation was more non-uniform through different gates for AG. In the clinical study, AG and CG had smaller FWHM than TG. Conclusions: AG and CG methods show better performance for motion reduction and are recommended for clinical respiratory gating SPECT implementation.}, author = {Zhang, Duo and Pretorius, P. Hendrik and Ghaly, Michael and Zhang, Qi and King, Michael A. and Mok, Greta S.P.}, doi = {10.1007/s12350-018-1392-7}, issn = {15326551}, journal = {Journal of Nuclear Cardiology}, keywords = {Cardiac perfusion,Respiratory gating,SPECT/CT,Simulation}, title = {{Evaluation of different respiratory gating schemes for cardiac SPECT}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85051860976{\&}doi=10.1007{\%}2Fs12350-018-1392-7{\&}partnerID=40{\&}md5=58fd21a00fccc01830e89c4949b3bcee}, year = {2018} } @article{Zheng2012, abstract = {The longitudinal coverage of a LINAC-mounted CBCT scan is limited to the corresponding dimensional limits of its flat panel detector, which is often shorter than the length of the treatment field. These limits become apparent when fields are designed to encompass wide regions, as when providing nodal coverage. Therefore, we developed a novel protocol to acquire double orbit CBCT images using a commercial system, and combine the images to extend the longitudinal coverage for image-guided adaptive radiotherapy (IGART). The protocol acquires two CBCT scans with a couch shift similar to the "step-and-shoot" cine CT acquisition, allowing a small longitudinal overlap of the two reconstructed volumes. An in-house DICOM reading/writing software was developed to combine the two image sets into one. 
Three different approaches were explored to handle the possible misalignment between the two image subsets: simple stacking, averaging the overlapped volumes, and a 3D-3D image registration with the three translational degrees of freedom. Using thermoluminescent dosimeters and custom-designed holders for a CTDI phantom set, dose measurements were carried out to assess the resultant imaging dose of the technique and its geometric distribution. Deformable registration was tested on patient images generated with the double-orbit protocol, using both the planning FBCT and the artificially deformed CBCT as source images. The protocol was validated on phantoms and has been employed clinically for IRB-approved IGART studies for head and neck and prostate cancer patients.}, author = {Zheng, Dandan and Lu, Jun and Jefferson, Ariel and Zhang, Cheng and Wu, Jian and Sleeman, William and Weiss, Elisabeth and Dogan, Nesrin and Song, Shiyu and Williamson, Jeffrey}, doi = {10.1120/jacmp.v13i4.3796}, issn = {15269914}, journal = {Journal of Applied Clinical Medical Physics}, keywords = {Cbct,Field of view,Igart,Image registration}, number = {4}, pages = {141--151}, title = {{A protocol to extend the longitudinal coverage of on-board cone-beam CT}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {13}, year = {2012} } @article{Zhuge2006, abstract = {We present a system for segmenting the human aortic aneurysm in CT angiograms (CTA), which, in turn, allows measurements of volume and morphological aspects useful for treatment planning. The system estimates a rough "initial surface," and then refines it using a level set segmentation scheme augmented with two external analyzers: The global region analyzer, which incorporates a priori knowledge of the intensity, volume, and shape of the aorta and other structures, and the local feature analyzer, which uses voxel location, intensity, and texture features to train and drive a support vector machine classifier. Each analyzer outputs a value that corresponds to the likelihood that a given voxel is part of the aneurysm, which is used during level set iteration to control the evolution of the surface. We tested our system using a database of 20 CTA scans of patients with aortic aneurysms. The mean and worst case values of volume overlap, volume error, mean distance error, and maximum distance error relative to human tracing were 95.3{\%}±1.4{\%} (s.d.); worst case=92.9{\%}, 3.5{\%}±2.5{\%} (s.d.); worst case=7.0{\%}, 0.6±0.2 mm (s.d.); worst case=1.0 mm, and 5.2±2.3mm (s.d.); worstcase=9.6 mm, respectively. When implemented on a 2.8 GHz Pentium IV personal computer, the mean time required for segmentation was 7.4±3.6min (s.d.). We also performed experiments that suggest that our method is insensitive to parameter changes within 10{\%} of their experimentally determined values. This preliminary study proves feasibility for an accurate, precise, and robust system for segmentation of the abdominal aneurysm from CTA data, and may be of benefit to patients with aortic aneurysms. {\textcopyright} 2006 American Association of Physicists in Medicine.}, author = {Zhuge, Feng and Rubin, Geoffrey D. 
and Sun, Shaohua and Napel, Sandy}, doi = {10.1118/1.2193247}, issn = {00942405}, journal = {Medical Physics}, keywords = {Abdominal aortic aneurysm,CT angiography,Deformable model}, number = {5}, pages = {1440--1453}, title = {{An abdominal aortic aneurysm segmentation method: Level set with region and statistical information}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-33646440020{\&}doi=10.1118{\%}2F1.2193247{\&}partnerID=40{\&}md5=b99c7457d5ae6ae677679f84b3f61548}, volume = {33}, year = {2006} } @article{Ziegeler2012, abstract = {A common problem with modern numerical oceanographic models is spatial displacement, including misplacement and misshapenness of ocean circulation features. Traditional error metrics, such as least squares methods, are ineffective in many such cases; for example, only small errors in the location of a frontal pattern are translated to large differences in least squares of intensities. Such problems are common in meteorological forecast verification as well, so the application of spatial error metrics have been a recently popular topic there. Spatial error metrics separate model error into a displacement component and an intensity component, providing a more reliable assessment of model biases and a more descriptive portrayal of numerical model prediction skill. The application of spatial error metrics to oceanographic models has been sparse, and further advances for both meteorology and oceanography exist in the medical imaging field. These advances are presented, along with modifications necessary for oceanographic model output. Standard methods and options for those methods in the literature are explored, and where the best arrangements of options are unclear, comparison studies are conducted. These trials require the reproduction of synthetic displacements in conjunction with synthetic intensity perturbations across 480 Navy Coastal Ocean Model (NCOM) temperature fields from various regions of the globe throughout 2009. Study results revealed the success of certain approaches novel to both meteorology and oceanography, including B-spline transforms and mutual information. That, combined with other common methods, such as quasi-Newton optimization and land masking, could best recover the synthetic displacements under various synthetic intensity changes. {\textcopyright} 2012 American Meteorological Society.}, author = {Ziegeler, Sean B. and Dykes, James D. and Shriver, Jay F.}, doi = {10.1175/JTECH-D-11-00109.1}, issn = {07390572}, journal = {Journal of Atmospheric and Oceanic Technology}, keywords = {Error analysis,Forecast verification,Ocean models}, number = {2}, pages = {260--266}, title = {{Spatial error metrics for oceanographic model verification}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {29}, year = {2012} } @article{Ziegler2019, abstract = {The Vero system can treat intra-fractionally moving tumors with gimbaled dynamic tumor tracking (DTT) by rotating the treatment beam so that it follows the motion of the tumor. However, the changes in the beam geometry and the constant breathing motion of the patient influence the dose applied to the patient. This study aims to perform a full 4D dose reconstruction for thirteen patients treated with DTT at the Vero system at the Universit{\"{a}}tsklinikum Erlangen and investigates the temporal resolution required to perform an accurate 4D dose reconstruction. 
For all patients, a 4DCT was used to train a 4D motion model, which is able to calculate pseudo-CT images for arbitrary breathing phases. A new CT image was calculated for every 100 ms of treatment and a dose calculation was performed according to the current beam geometry (i.e. the rotation of the treatment beam at this moment in time) by rotating according to the momentary beam rotation, which is extracted from log-files. The resulting dose distributions were accumulated on the planning CT and characteristic parameters were extracted and compared. $\gamma$-evaluations of dose accumulations with different spatiotemporal resolutions were performed to determine the minimal required resolution. In total 173 700 dose calculations were performed. The accumulated 4D dose distributions show a reduced mean GTV dose of 0.77{\%} compared to the static treatment plan. For some patients larger deviations were observed, especially in the presence of a poor 4DCT quality. The $\gamma$-evaluation showed that a temporal resolution of 500 ms is sufficient for an accurate dose reconstruction. If the tumor motion is regarded as well, a spatiotemporal sampling of 1400 ms and 2 mm yields accurate results, which reduces the workload by 84{\%}.}, author = {Ziegler, Marc and Brandt, Tobias and Lettmaier, Sebastian and Fietkau, Rainer and Bert, Christoph}, doi = {10.1088/1361-6560/ab4e51}, issn = {13616560}, journal = {Physics in Medicine and Biology}, keywords = {4D dose calculation,Tumor tracking,motion modelling,real-time monitoring,stereotactic body radiation therapy}, number = {22}, pages = {12}, title = {{Method for a motion model based automated 4D dose calculation}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {64}, year = {2019} } @article{Ziegler2019a, abstract = {Purpose: With the introduction of dynamic tumor tracking in radiotherapy, it is possible to irradiate moving targets with minimal safety margins. However, most dynamic tumor tracking techniques rely on changing the beam geometry by, for example, adapting the multileaf collimator (MLC) positions or rotating the LINAC head. These changes are relative to a reference position which is determined by a specific breathing phase. Since these changes in the beam path also influence the delivered dose, choosing a different reference position based on a different breathing phase impacts the applied dose to the patient. This work investigates the influence of choosing different reference breathing phases on the dose distribution. Methods: The Vero system tracks the moving target by performing a pan and tilt rotation of the LINAC head. For 13 patients, the target position was extracted from every phase of a four-dimensional computed tomography (4DCT) and the pan and tilt values were determined with respect to three different reference phases. These reference phases were inspiration, expiration, and the midventilation. For all reference phases, a 4D dose calculation was performed on the 4DCT regarding the respective pan and tilt values. Furthermore, the applied dose to the target and surrounding organs at risk was calculated. To accumulate the dose distribution, weights from the actual patient breathing motion were determined. The weights were calculated from the breathing motions from different days to investigate the impact of daily variations in the breathing motion onto the accumulated dose distribution. All obtained values were then compared to the static treatment plan.
Results: The mean and maximum doses applied to the target or surrounding organs at risk show no general behavior depending on the different reference phases. Nevertheless, for some patients, large differences (approx. 30{\%}) in the applied dose to certain organs at risk could be observed, whereas the applied dose to the target shows no dependency on the different reference phases. However, the mean target dose is in all cases approx. 1.5{\%} below the reference value from the static treatment plan. Conclusion: Although no general dependency of the applied dose on the selected reference phase could be found, the choice of the reference phase can have great impact on the organ at risk dose for some patients. Thus, the choice of the reference phase used for patient positioning should be considered during treatment planning since it can be seen as a new degree of freedom of a treatment based on tracking.}, author = {Ziegler, Marc and Lettmaier, Sebastian and Fietkau, Rainer and Bert, Christoph}, doi = {10.1002/mp.13654}, issn = {00942405}, journal = {Medical Physics}, keywords = {4D dose calculation,dynamic tumor tracking,liver cancer,lung cancer}, number = {8}, pages = {3371--3377}, title = {{Choosing a reference phase for a dynamic tumor tracking treatment: A new degree of freedom?}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {46}, year = {2019} } @article{Zukic2018, abstract = {Multicenter clinical trials that use positron emission tomography (PET) imaging frequently rely on stable bias in imaging biomarkers to assess drug effectiveness. Many well-documented factors cause variability in PET intensity values. Two of the largest scanner-dependent errors are scanner calibration and reconstructed image resolution variations. For clinical trials, an increase in measurement error significantly increases the number of patient scans needed. We aim to provide a robust quality assurance system using portable PET/computed tomography "pocket" phantoms and automated image analysis algorithms with the goal of reducing PET measurement variability. A set of the "pocket" phantoms was scanned with patients, affixed to the underside of a patient bed. Our software analyzed the obtained images and estimated the image parameters. The analysis consisted of 2 steps, automated phantom detection and estimation of PET image resolution and global bias. Performance of the algorithm was tested under variations in image bias, resolution, noise, and errors in the expected sphere size. A web-based application was implemented to deploy the image analysis pipeline in a cloud-based infrastructure to support multicenter data acquisition, under Software-as-a-Service (SaaS) model. The automated detection algorithm localized the phantom reliably. Simulation results showed stable behavior when image properties and input parameters were varied. The PET "pocket" phantom has the potential to reduce and/or check for standardized uptake value measurement errors.}, author = {Zuki{\'{c}}, D{\v{z}}enan and Byrd, Darrin W. and Kinahan, Paul E. 
and Enquobahrie, Andinet}, doi = {10.18383/j.tom.2018.00020}, issn = {2379139X}, journal = {Tomography (Ann Arbor, Mich.)}, keywords = {PET imaging,bias,calibration,correction,phantom}, number = {3}, pages = {148--158}, title = {{Calibration Software for Quantitative PET/CT Imaging Using Pocket Phantoms}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {4}, year = {2018} } @article{Willfuhr2015, abstract = {The lung parenchyma provides a maximal surface area of blood-containing capillaries that are in close contact with a large surface area of the air-containing alveoli. Volume and surface area of capillaries are the classic stereological parameters to characterize the alveolar capillary network (ACN) and have provided essential structure-function information of the lung. When loss (rarefaction) or gain (angiogenesis) of capillaries occurs, these parameters may not be sufficient to provide mechanistic insight. Therefore, it would be desirable to estimate the number of capillaries, as it contains more distinct and mechanistically oriented information. Here, we present a new stereological method to estimate the number of capillary loops in the ACN. One advantage of this method is that it is independent of the shape, size, or distribution of the capillaries. We used consecutive, 1 $\mu$m-thick sections from epoxy resin-embedded material as a physical disector. The Euler-Poincar{\'{e}} characteristic of capillary networks can be estimated by counting the easily recognizable topological constellations of “islands,” “bridges,” and “holes.” The total number of capillary loops in the ACN can then be calculated from the Euler-Poincar{\'{e}} characteristic. With the use of the established estimator of alveolar number, it is possible to obtain the mean number of capillary loops per alveolus. In conclusion, estimation of alveolar capillaries by design-based stereology is an efficient and unbiased method to characterize the ACN and may be particularly useful for studies on emphysema, pulmonary hypertension, or lung development.}, author = {Willf{\"{u}}hr, Alper and Brandenberger, Christina and Piatkowski, Tanja and Grothausmann, Roman and Nyengaard, Jens Randel and Ochs, Matthias and M{\"{u}}hlfeld, Christian}, doi = {10.1152/ajplung.00410.2014}, issn = {15221504}, journal = {American Journal of Physiology - Lung Cellular and Molecular Physiology}, keywords = {Capillary number,Euler number,Stereology}, number = {11}, pages = {L1286--L1293}, title = {{Estimation of the number of alveolar capillaries by the Euler number (Euler-Poincar{\'{e}} characteristic)}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {309}, year = {2015} } @inbook{Alaytsev2018, abstract = {Computed tomography is a great source of biomedical data because it allows a detailed exploration of complex anatomical structures. Some structures are not visible on CT scans, and some are hard to distinguish due to partial volume effect. CT datasets require preprocessing before using them as anatomical models in a simulation system. The work describes segmentation and data transformation methods for an anatomical model creation from the CT data. The result models may be used for visual and haptic rendering and drilling simulation in a virtual surgery system. {\textcopyright} 2018 SPIE.}, address = {Bellingham}, author = {Danilova, Tatyana V. and Manturov, Alexey O. and Mareev, Gleb O. and Mareev, Oleg V.
and Alaytsev, Innokentiy K.}, booktitle = {Saratov Fall Meeting 2017: Laser Physics and Photonics Xviii; and Computational Biophysics and Analysis of Biomedical Data Iv}, doi = {10.1117/12.2309318}, editor = {Derbov, V L and Postnov, D E}, isbn = {9781510620032}, issn = {16057422}, pages = {18}, publisher = {Spie-Int Soc Optical Engineering}, series = {Proceedings of SPIE}, title = {{Creation of anatomical models from CT data}}, type = {Book Section}, url = {{\%}3CGo to}, volume = {10717}, year = {2018} } @inbook{Alic2010, abstract = {Spatial correspondence between histology and multi sequence MRI can provide information about the capabilities of non-invasive imaging to characterize cancerous tissue. However, shrinkage and deformation occurring during the excision of the tumor and the histological processing complicate the co registration of MR images with histological sections. This work proposes a methodology to establish a detailed 3D relation between histology sections and in vivo MRI tumor data. The key features of the methodology are a very dense histological sampling (up to 100 histology slices per tumor), mutual information based non-rigid B-spline registration, the utilization of the whole 3D data sets, and the exploitation of an intermediate ex vivo MRI. In this proof of concept paper, the methodology was applied to one tumor. We found that, after registration, the visual alignment of tumor borders and internal structures was fairly accurate. Utilizing the intermediate ex vivo MRI, it was possible to account for changes caused by the excision of the tumor: we observed a tumor expansion of 20{\%}. Also the effects of fixation, dehydration and histological sectioning could be determined: 26{\%} shrinkage of the tumor was found. The annotation of viable tissue, performed in histology and transformed to the in vivo MRI, matched clearly with high intensity regions in MRI. With this methodology, histological annotation can be directly related to the corresponding in vivo MRI. This is a vital step for the evaluation of the feasibility of multi-spectral MRI to depict histological ground-truth. {\textcopyright} 2010 Copyright SPIE - The International Society for Optical Engineering.}, address = {Bellingham}, author = {Alic, Lejla and Haeck, Joost C. and Klein, Stefan and Bol, Karin and van Tiel, Sandra T. and Wielopolski, Piotr A. and Bijster, Magda and Niessen, Wiro J. and Bernsen, Monique and Veenland, Jifke F. and de Jong, Marion}, booktitle = {Medical Imaging 2010: Biomedical Applications in Molecular, Structural, and Functional Imaging}, doi = {10.1117/12.844123}, editor = {Molthen, R C and Weaver, J B}, isbn = {9780819480279}, issn = {16057422}, pages = {762603}, publisher = {Spie-Int Soc Optical Engineering}, series = {Proceedings of SPIE}, title = {{Multi-modal image registration: matching MRI with histology}}, type = {Book Section}, url = {{\%}3CGo to}, volume = {7626}, year = {2010} } @article{Bieri2013, abstract = {Purpose The speed limit for three-dimensional Fourier-encoded steady state free precession (SSFP) imaging is explored on a clinical whole body system and pushed toward a pulse repetition time (TR) close to or even below the 1 ms regime; in the following referred to as ultra-fast SSFP imaging. 
Methods To this end, contemporary optimization strategies, such as efficient gradient switching patterns, partial echoes, ramp sampling techniques, and a target-related design of excitation pulses were applied to explore the lower boundaries in TR for SSFP-based Cartesian imaging. Results Generally, minimal TR was limited in vivo by peripheral nerve stimulation, allowing a TR ∼1 ms for isotropic resolutions down to about 2 mm. As a result, ultra-fast balanced SSFP provides artifact-free images even for targets with severe susceptibility variations, and native high-resolution structural and functional in vivo 1H imaging of the human lung is demonstrated at 1.5 T. Conclusion On clinical whole body MRI systems, the TR of SSFP-based Cartesian imaging can be pushed toward the 1 ms regime. As a result, ultra-fast SSFP protocols might represent a promising new powerful approach for SSFP-based imaging, not only for lung but also in a variety of clinical and scientific applications. Copyright {\textcopyright} 2013 Wiley Periodicals, Inc.}, author = {Bieri, Oliver}, doi = {10.1002/mrm.24858}, issn = {07403194}, journal = {Magnetic Resonance in Medicine}, keywords = {SSFP,imaging,lung,steady state,ultra-fast}, number = {3}, pages = {657--663}, title = {{Ultra-fast steady state free precession and its application to in vivo 1H morphological and functional lung imaging at 1.5 tesla}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84883229895{\&}doi=10.1002{\%}2Fmrm.24858{\&}partnerID=40{\&}md5=8542d776ecb40faea69bd7bf6e63ebf1}, volume = {70}, year = {2013} } @article{Ceranka2020, abstract = {Purpose: To improve multi-atlas segmentation of the skeleton from whole-body MRI. In particular, we study the effect of employing the atlas segmentations to iteratively mask tissues outside of the region of interest to improve the atlas alignment and subsequent segmentation. Methods: An improved atlas registration scheme is proposed. Starting from a suitable initial alignment, the alignment is refined by introducing additional stages of deformable registration during which the image sampling is limited to the dilated atlas segmentation label mask. The performance of the method was demonstrated using leave-one-out cross-validation using atlases of 10 whole-body 3D-T1 images of prostate cancer patients with bone metastases and healthy male volunteers, and compared to existing state of the art. Both registration accuracy and resulting segmentation quality, using four commonly used label fusion strategies, were evaluated. Results: The proposed method showed significant improvement in registration and segmentation accuracy with respect to the state of the art for all validation criteria and label fusion strategies, resulting in a Dice coefficient of 0.887 (STEPS label fusion). The average Dice coefficient for the multi-atlas segmentation showed over 11{\%} improvement with a decrease of false positive rate from 28.3{\%} to 13.2{\%}. For this application, repeated application of the background masking did not lead to significant improvement of the segmentation result. Conclusions: A registration strategy, relying on the use of atlas segmentations as mask during image registration was proposed and evaluated for multi-atlas segmentation of whole-body MRI. 
The approach significantly improved registration and final segmentation accuracy and may be applicable to other structures of interest.}, author = {Ceranka, Jakub and Verga, Sabrina and Kvasnytsia, Maryna and Lecouvet, Fr{\'{e}}d{\'{e}}ric and Michoux, Nicolas and de Mey, Johan and Raeymaekers, Hubert and Metens, Thierry and Absil, Julie and Vandemeulebroucke, Jef}, doi = {10.1002/mrm.28042}, issn = {15222594}, journal = {Magnetic Resonance in Medicine}, keywords = {atlas-based segmentation,bone segmentation,image registration,whole-body MRI}, number = {5}, pages = {1851--1862}, title = {{Multi-atlas segmentation of the skeleton from whole-body MRI—Impact of iterative background masking}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {83}, year = {2020} } @inproceedings{Chakraborty2016, abstract = {{\textcopyright} 2016 SPIE. Image registration techniques using free-form deformation models have shown promising results for 3D myocardial strain estimation from ultrasound. However, the use of this technique has mostly been limited to research institutes due to the high computational demand, which is primarily due to the computational load of the regularization term ensuring spatially smooth cardiac strain estimates. Indeed, this term typically requires evaluating derivatives of the transformation field numerically in each voxel of the image during every iteration of the optimization process. In this paper, we replace this time-consuming step with a closed-form solution directly associated with the transformation field resulting in a speed up factor of ∼10-60,000, for a typical 3D B-mode image of 250$^3$ and 500$^3$ voxels, depending upon the size and the parametrization of the transformation field. The performance of the numeric and the analytic solutions was contrasted by computing tracking and strain accuracy on two realistic synthetic 3D cardiac ultrasound sequences, mimicking two ischemic motion patterns. Mean and standard deviation of the displacement errors over the cardiac cycle for the numeric and analytic solutions were 0.68±0.40 mm and 0.75±0.43 mm respectively. Correlations for the radial, longitudinal and circumferential strain components at end-systole were 0.89, 0.83 and 0.95 versus 0.90, 0.88 and 0.92 for the numeric and analytic regularization respectively. The analytic solution matched the performance of the numeric solution as no statistically significant differences (p {\textgreater} 0.05) were found when expressed in terms of bias or limits-of-agreement.}, address = {Bellingham}, author = {Chakraborty, Bidisha and Heyde, Brecht and Alessandrini, Martino and D'hooge, Jan}, booktitle = {Medical Imaging 2016: Ultrasonic Imaging and Tomography}, doi = {10.1117/12.2216781}, editor = {Duric, N and Heyde, B}, isbn = {9781510600256}, issn = {16057422}, pages = {979006}, publisher = {Spie-Int Soc Optical Engineering}, series = {Proceedings of SPIE}, title = {{Fast myocardial strain estimation from 3D ultrasound through elastic image registration with analytic regularization}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84976497384{\&}doi=10.1117{\%}2F12.2216781{\&}partnerID=40{\&}md5=7e796a05f80c2deec426ba41b27b2284}, volume = {9790}, year = {2016} } @inproceedings{Chu2013, abstract = {Whole-body bone scintigraphy (or bone scan) is a highly sensitive method for visualizing bone metastases and is the accepted standard imaging modality for detection of metastases and assessment of treatment outcomes.
The development of a quantitative biomarker using computer-aided detection on bone scans for treatment response assessment may have a significant impact on the evaluation of novel oncologic drugs directed at bone metastases. One of the challenges to lesion segmentation on bone scans is the non-specificity of the radiotracer, manifesting as high activity related to non-malignant processes like degenerative joint disease, sinuses, kidneys, thyroid and bladder. In this paper, we developed an automated bone scan lesion segmentation method that implements intensity normalization, a two-threshold model, and automated detection and removal of areas consistent with non-malignant processes from the segmentation. The two-threshold model serves to account for outlier bone scans with elevated and diffuse intensity distributions. Parameters to remove degenerative joint disease were trained using a multi-start Nelder-Mead simplex optimization scheme. The segmentation reference standard was constructed manually by a panel of physicians. We compared the performance of the proposed method against a previously published method. The results of a two-fold cross validation show that the overlap ratio improved in 67.0{\%} of scans, with an average improvement of 5.1{\%} points. {\textcopyright} 2013 SPIE.}, address = {Bellingham}, author = {Chu, Gregory H. and Lo, Pechin and Kim, Hyun J. and Auerbach, Martin and Goldin, Jonathan and Henkel, Keith and Banola, Ashley and Morris, Darren and Coy, Heidi and Brown, Matthew S.}, booktitle = {Medical Imaging 2013: Computer-Aided Diagnosis}, doi = {10.1117/12.2008082}, editor = {Novak, C L and Aylward, S}, isbn = {9780819494443}, issn = {0277786X}, pages = {867007}, publisher = {Spie-Int Soc Optical Engineering}, series = {Proceedings of SPIE}, title = {{Preliminary results of automated removal of degenerative joint disease in bone scan lesion segmentation}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84878418964{\&}doi=10.1117{\%}2F12.2008082{\&}partnerID=40{\&}md5=a9e7952d5bc2b133959d4ad734f5d4ac}, volume = {8670}, year = {2013} } @inproceedings{Chu2012, abstract = {Quantification of overall tumor area on bone scans may be a potential biomarker for treatment response assessment and has, to date, not been investigated. Segmentation of bone metastases on bone scans is a fundamental step for this response marker. In this paper, we propose a fully automated computerized method for the segmentation of bone metastases on bone scans, taking into account characteristics of different anatomic regions. A scan is first segmented into anatomic regions via an atlas-based segmentation procedure, which involves non-rigidly registering a labeled atlas scan to the patient scan. Next, an intensity normalization method is applied to account for varying levels of radiotracer dosing levels and scan timing. Lastly, lesions are segmented via anatomic regionspecific intensity thresholding. Thresholds are chosen by receiver operating characteristic (ROC) curve analysis against manual contouring by board certified nuclear medicine physicians. A leave-one-out cross validation of our method on a set of 39 bone scans with metastases marked by 2 board-certified nuclear medicine physicians yielded a median sensitivity of 95.5{\%}, and specificity of 93.9{\%}. Our method was compared with a global intensity thresholding method. 
The results show a comparable sensitivity and significantly improved overall specificity, with a p-value of 0.0069.}, address = {Bellingham}, author = {Chu, Gregory H. and Lo, Pechin and Kim, Hyun J. and Lu, Peiyun and Ramakrishna, Bharath and Gjertson, David and Poon, Cheryce and Auerbach, Martin and Goldin, Jonathan and Brown, Matthew S.}, booktitle = {Medical Imaging 2012: Computer-Aided Diagnosis}, doi = {10.1117/12.911462}, editor = {VanGinneken, B and Novak, C L}, isbn = {9780819489647}, issn = {16057422}, pages = {83150F}, publisher = {Spie-Int Soc Optical Engineering}, series = {Proceedings of SPIE}, title = {{Automated segmentation of tumors on bone scans using anatomy-specific thresholding}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84874884929{\&}doi=10.1117{\%}2F12.911462{\&}partnerID=40{\&}md5=892c7bf1991afd54a3da43cf68fda6d8}, volume = {8315}, year = {2012} } @inbook{Corradi2009, abstract = {The XTENS (eXTensible Environment for NeuroScience) platform consists in an highly extensible environment for collaborative work that improve repeatability of experiment and provides data storage and analysis capabilities. The platform is divided in repository and application domains, branched in services with different purpose. The first domain is the central component of the platform and consists in a multimodal repository with a client-server architecture. The second one provides remote tools for image and signal visualization and analysis. The main issue for such a platform is not only to provide an extensible collaborative environment, but also to build a development platform for testing models and algorithms in neuroscience. For these reasons a Grid approach has been considered. Both computational and data Grids infrastructures can be exploited to analyze and share large datasets of distributed data. The architecture has been deployed to support surgical planning for patients affected by drug resistant epilepsy. In that scenario, a complex analysis for a fully multimodal dataset including different image modalities, EEG and video is required to localize the origin of the ictal discharge and critical brain areas. As first results, prototype versions of both repository and application domain components are presented. {\textcopyright} 2009 The authors and IOS Press. All rights reserved.}, address = {Amsterdam}, author = {Corradi, Luca and Arnulfo, Gabriele and Schenone, Andrea and Porro, Ivan and Fato, Marco}, booktitle = {Studies in Health Technology and Informatics}, doi = {10.3233/978-1-60750-027-8-127}, editor = {Solomonides, T and HofmannApitius, M and Freudigmann, M and Semler, S C and Legre, Y and Kratz, M}, isbn = {9781607500278}, issn = {18798365}, keywords = {Collaborative environment,Data integration,Epilepsy,Grid,Multimodal and multiscale analysis,Remote visualization,Surgical planning}, pages = {127--136}, publisher = {Ios Press}, series = {Studies in Health Technology and Informatics}, title = {{XTENS - An eXTensible Environment for NeuroScience}}, type = {Book Section}, url = {{\%}3CGo to}, volume = {147}, year = {2009} } @article{Crombe2019, abstract = {Background: Standard of care for patients with high-grade soft-tissue sarcoma (STS) are being redefined since neoadjuvant chemotherapy (NAC) has demonstrated a positive effect on patients' outcome. Yet response evaluation in clinical trials still relies on RECIST criteria. 
Purpose: To investigate the added value of a Delta-radiomics approach for early response prediction in patients with STS undergoing NAC. Study Type: Retrospective. Population: Sixty-five adult patients with newly-diagnosed, locally-advanced, histologically proven high-grade STS of trunk and extremities. All were treated by anthracycline-based NAC followed by surgery and had available MRI at baseline and after two chemotherapy cycles. Field Strength/Sequence: Pre- and postcontrast enhanced T1-weighted imaging (T1-WI), turbo spin echo T2-WI at 1.5 T. Assessment: A threshold of {\textless}10{\%} viable cells on surgical specimens defined good response (Good-HR). Two senior radiologists performed a semantic analysis of the MRI. After 3D manual segmentation of tumors at baseline and early evaluation, and standardization of voxel-sizes and intensities, absolute changes in 33 texture and shape features were calculated. Statistical Tests: Classification models based on logistic regression, support vector machine, k-nearest neighbors, and random forests were elaborated using crossvalidation (training and validation) on 50 patients ("training cohort") and was validated on 15 other patients ("test cohort"). Results: Sixteen patients were good-HR. Neither RECIST status (P = 0.112) nor semantic radiological variables were associated with response (range of P-values: 0.134–0.490) except an edema decrease (P = 0.003), although 14 shape and texture features were (range of P-values: 0.002–0.037). On the training cohort, the highest diagnostic performances were obtained with random forests built on three features: $\Delta${\_}Histogram{\_}Entropy, $\Delta${\_}Elongation, $\Delta${\_}Surrounding{\_}Edema, which provided: area under the curve the receiver operating characteristic = 0.86, accuracy = 88.1{\%}, sensitivity = 94.1{\%}, and specificity = 66.3{\%}. On the test cohort, this model provided an accuracy of 74.6{\%} but 3/5 good-HR were systematically ill-classified. Data Conclusion: A T2-based Delta-radiomics approach might improve early response assessment in STS patients with a limited number of features. Level of Evidence: 3. Technical Efficacy: Stage 2. J. Magn. Reson. Imaging 2019;50:497–510.}, author = {Cromb{\'{e}}, Amandine and P{\'{e}}rier, Cynthia and Kind, Mich{\`{e}}le and {De Senneville}, Baudouin Denis and {Le Loarer}, Fran{\c{c}}ois and Italiano, Antoine and Buy, Xavier and Saut, Olivier}, doi = {10.1002/jmri.26589}, issn = {15222586}, journal = {Journal of Magnetic Resonance Imaging}, number = {2}, pages = {497--510}, title = {{T2-based MRI Delta-radiomics improve response prediction in soft-tissue sarcomas treated by neoadjuvant chemotherapy.}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {50}, year = {2019} } @inbook{Santos2018, abstract = {Evolutions in image-scanning technology have led to vast improvements in the fetal assessment. Ultrasound (US) is the main technology for fetal evaluation. Magnetic resonance imaging (MRI) is generally used when US cannot provide high-quality images. 
This paper presents an interactive bidirectional actuated human-machine interface proposal developed by the combination of a haptic device system (force-feedback technology) and a non-invasive medical image technology.}, address = {Cham}, author = {{Roberto Lopes dos Santos}, Jorge and Werner, Heron and Raposo, Alberto and Hurtado, Jan and Arcoverde, Vinicius and Ribeiro, Gerson}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, doi = {10.1007/978-3-319-91397-1_40}, editor = {Duffy, V G}, isbn = {9783319913964}, issn = {16113349}, keywords = {Fetus,Haptics,Interaction,MRI,Ultrasound}, pages = {502--512}, publisher = {Springer International Publishing Ag}, series = {Lecture Notes in Computer Science}, title = {{A Proposal for Combining Ultrasound, Magnetic Resonance Imaging and Force Feedback Technology, During the Pregnancy, to Physically Feel the Fetus}}, type = {Book Section}, url = {{\%}3CGo to}, volume = {10917 LNCS}, year = {2018} } @inbook{Fallavollita2010b, address = {Bellingham}, author = {Fallavollita, P. and KarimAghaloo, Z. and Burdette, E. C. and Song, D. Y. and Abolmaesumi, P. and Fichtinger, G.}, booktitle = {Medical Imaging 2010: Visualization, Image-Guided Procedures, and Modeling}, doi = {10.1117/12.844015}, editor = {Wong, K H and Miga, M I}, isbn = {9780819480262}, issn = {16057422}, pages = {762519}, publisher = {Spie-Int Soc Optical Engineering}, series = {Proceedings of SPIE}, title = {{Localization of brachytherapy seeds in ultrasound by registration to fluoroscopy}}, type = {Book Section}, url = {{\%}3CGo to}, volume = {7625}, year = {2010} } @article{Gaddy2015, abstract = {Background: Features of the tumor microenvironment influence the efficacy of cancer nanotherapeutics. The ability to directly radiolabel nanotherapeutics offers a valuable translational tool to obtain biodistribution and tumor deposition data, testing the hypothesis that the extent of delivery predicts therapeutic outcome. In support of a first in-human clinical trial with 64Cu-labeled HER2-targeted liposomal doxorubicin (64Cu-MM-302), a preclinical dosimetric analysis was performed. Methods: Whole-body biodistribution and pharmacokinetic data were obtained in mice that received 64Cu-MM-302 and used to estimate absorbed radiation doses in normal human organs. PET/CT imaging revealed non-uniform distribution of 64Cu signal in mouse kidneys. Kidney micro-dosimetry analysis was performed in mice and squirrel monkeys, using a physiologically based pharmacokinetic model to estimate the full dynamics of the 64Cu signal in monkeys. Results: Organ-level dosimetric analysis of mice receiving 64Cu-MM-302 indicated that the heart was the organ receiving the highest radiation absorbed dose, due to extended liposomal circulation. However, PET/CT imaging indicated that 64Cu-MM-302 administration resulted in heterogeneous exposure in the kidney, with a focus of 64Cu activity in the renal pelvis. This result was reproduced in primates. Kidney micro-dosimetry analysis illustrated that the renal pelvis was the maximum exposed tissue in mice and squirrel monkeys, due to the highly concentrated signal within the small renal pelvis surface area. Conclusions: This study was used to select a starting clinical radiation dose of 64Cu-MM-302 for PET/CT in patients with advanced HER2-positive breast cancer. 
Organ-level dosimetry and kidney micro-dosimetry results predicted that a radiation dose of 400 MBq of 64Cu-MM-302 should be acceptable in patients.}, author = {Gaddy, Daniel F. and Lee, Helen and Zheng, Jinzi and Jaffray, David A. and Wickham, Thomas J. and Hendriks, Bart S.}, doi = {10.1186/s13550-015-0096-0}, issn = {2191219X}, journal = {EJNMMI Research}, keywords = {Copper-64,Dosimetry,Nanotherapeutics,Positron emission tomography}, number = {1}, pages = {10}, title = {{Whole-body organ-level and kidney micro-dosimetric evaluations of 64Cu-loaded HER2/ErbB2-targeted liposomal doxorubicin (64Cu-MM-302) in rodents and primates}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84928392739{\&}doi=10.1186{\%}2Fs13550-015-0096-0{\&}partnerID=40{\&}md5=9be19ccea7a7aa4683c93949d1fae115}, volume = {5}, year = {2015} } @inbook{Gamarra2017, abstract = {The advance technology in microscopy and computing has allowed the development of cell image analysis. Cloud Computing offers services, software and computing infrastructure to manage cell images' big data. However the usability of these platforms is adequate to expert users only. Many software tools are oriented to expert users in image processing, likewise the use of bioinformatics require a basic knowledge in programming. In this paper we present a framework to develop a software solution with a Service-Oriented Architecture (SOA) applied to the analysis of cell images using cloud computing.}, address = {Berlin}, author = {Gamarra, Margarita and Zurek, Eduardo and Nieto, Wilson and Jimeno, Miguel and Sierra, Deibys}, booktitle = {Advances in Intelligent Systems and Computing}, doi = {10.1007/978-3-319-56535-4_71}, editor = {Rocha, A and Correia, A M and Adeli, H and Reis, L P and Costanzo, S}, isbn = {9783319565347}, issn = {21945357}, keywords = {Bioinformatic,Cloud computing,Image processing,SOA}, pages = {724--734}, publisher = {Springer-Verlag Berlin}, series = {Advances in Intelligent Systems and Computing}, title = {{A service-oriented architecture for bioinformatics: An application in cell image analysis}}, type = {Book Section}, url = {{\%}3CGo to}, volume = {569}, year = {2017} } @inbook{Gillies2018, abstract = {Prostate cancer has the second highest noncutaneous cancer incidence in men. Three-dimensional (3D) transrectal ultrasound (TRUS) fused with a magnetic resonance image (MRI) is used to guide prostate biopsy as an alternative technique to conventional 2D TRUS sextant biopsy. The TRUS-MRI fusion technique can provide intraoperative needle guidance to suspicious cancer tissues identified on MRI, increasing the targeting capabilities of a physician. Currently, 3D TRUS-MR guided biopsy suffers from image and target misalignment caused by various forms of prostate motion. Thus, we previously developed a real-time motion compensation algorithm to align 2D and 3D TRUS images with an update rate around an ultrasound system frame rate. During clinical implementation, observations of image misalignment occurred when obtaining tissue samples near the left and right boundaries of the prostate. To minimize transducer translation on the rectal wall and avoid prostate motion and deformation, we are proposing the use of a 3D model-based ring navigation procedure. This navigation keeps the transducer positioned towards the centroid of the prostate when guiding the tracked biopsy gun to targets. Prostate biopsy was performed on three patients while using real-time motion compensation in the background. 
Our navigation approach was compared to a conventional 2D TRUS-guided procedure using approximately 20 2D and 3D TRUS image pairs and resulted in median {\{}[{\}}first quartile, third quartile] registration errors of 2.0 {\{}[{\}}1.3,2.5] mm and 3.4 {\{}[{\}}1.5, 8.2] mm, respectively. Using our navigation approach, registration error and variability were reduced, potentially suggesting a more robust technique when performing continuous motion compensation.}, address = {Bellingham}, author = {Tessier, David and Gillies, Derek J. and Gardi, Lori and Mercado, Ashley and Fenster, Aaron}, booktitle = {Medical Imaging 2018: Image-Guided Procedures, Robotic Interventions, and Modeling}, doi = {10.1117/12.2292922}, editor = {Fei, B and Webster, R J}, isbn = {9781510616417}, issn = {16057422}, pages = {52}, publisher = {Spie-Int Soc Optical Engineering}, series = {Proceedings of SPIE}, title = {{Ring navigation: an ultrasound-guided technique using real-time motion compensation for prostate biopsies}}, type = {Book Section}, url = {{\%}3CGo to}, volume = {10576}, year = {2018} } @inbook{Gillies2017a, abstract = {{\textcopyright} 2017 SPIE. During image-guided prostate biopsy, needles are targeted at suspicious tissues to obtain specimens that are later examined histologically for cancer. Patient motion causes inaccuracies when using MR-transrectal ultrasound (TRUS) image fusion approaches used to augment the conventional biopsy procedure. Motion compensation using a single, user initiated correction can be performed to temporarily compensate for prostate motion, but a real-time continuous registration offers an improvement to clinical workflow by reducing user interaction and procedure time. An automatic motion compensation method, approaching the frame rate of a TRUS-guided system, has been developed for use during fusion-based prostate biopsy to improve image guidance. 2D and 3D TRUS images of a prostate phantom were registered using an intensitybased algorithm utilizing normalized cross-correlation and Powell's method for optimization with user initiated and continuous registration techniques. The user initiated correction performed with observed computation times of 78 ± 35 ms, 74 ± 28 ms, and 113 ± 49 ms for in-plane, out-of-plane, and roll motions, respectively, corresponding to errors of 0.5 ± 0.5 mm, 1.5 ± 1.4 mm, and 1.5 ± 1.6°. The continuous correction performed significantly faster (p {\textless} 0.05) than the user initiated method, with observed computation times of 31 ± 4 ms, 32 ± 4 ms, and 31 ± 6 ms for in-plane, out-of-plane, and roll motions, respectively, corresponding to errors of 0.2 ± 0.2 mm, 0.6 ± 0.5 mm, and 0.8 ± 0.4°.}, address = {Bellingham}, author = {Gillies, Derek J. and Gardi, Lori and Zhao, Ren and Fenster, Aaron}, booktitle = {Medical Imaging 2017: Image-Guided Procedures, Robotic Interventions, and Modeling}, doi = {10.1117/12.2255006}, editor = {Webster, R J and Fei, B}, isbn = {9781510607156}, issn = {16057422}, pages = {101351F}, publisher = {Spie-Int Soc Optical Engineering}, series = {Proceedings of SPIE}, title = {{Optimization of real-time rigid registration motion compensation for prostate biopsies using 2D/3D ultrasound}}, type = {Book Section}, url = {{\%}3CGo to}, volume = {10135}, year = {2017} } @inbook{Haak2012a, address = {Bellingham}, author = {Haak, Alexander and van Stralen, Marijn and van Burken, Gerard and Klein, Stefan and Pluim, Josien P. W. and de Jong, Nico and van der Steen, Antonius F. W. 
and Bosch, Johan G.}, booktitle = {Medical Imaging 2012: Ultrasonic Imaging, Tomography, and Therapy}, doi = {10.1117/12.905893}, editor = {Bosch, J G and Doyley, M M}, isbn = {9780819489692}, issn = {16057422}, pages = {832007}, publisher = {Spie-Int Soc Optical Engineering}, series = {Proceedings of SPIE}, title = {{Comparison of spatiotemporal interpolators for 4D image reconstruction from 2D transesophageal ultrasound}}, type = {Book Section}, volume = {8320}, year = {2012} } @inbook{Harris2018, abstract = {{\textcopyright} 2018 SPIE. Dilatation of the cerebral ventricles is a common condition in preterm neonates with intraventricular hemorrhage (IVH). Post Hemorrhagic Ventricular Dilatation (PHVD) can lead to lifelong neurological impairment caused by ischemic injury due to increased intracranial pressure, and without treatment can lead to death. Previously, we have developed and validated a 3D ultrasound (US) system to monitor the progression of ventricle volumes (VV) in IVH patients; however, many patients with severe PHVD have ventricles so large they cannot be imaged within a single 3D US image. This limits the utility of atlas based segmentation algorithms required to measure VV as parts of the ventricles are in separate 3D US images, and thus, an already challenging segmentation becomes increasingly difficult to solve. Without a more automated segmentation, the clinical utility of 3D US ventricle volumes cannot be fully realized due to the large number of images and patients required to validate the technique in clinical trials. Here, we describe the initial results of an automated 'stitching' algorithm used to register and combine multiple 3D US images of the ventricles of patients with PHVD. Our registration results show that we were able to register these images with an average target registration error (TRE) of 4.25±1.95 mm.}, address = {Bellingham}, author = {Harris, Andrew and Kishimoto, Jessica and Fenster, Aaron and de Ribaupierre, Sandrine and Gardi, Lori}, booktitle = {Medical Imaging 2018: Ultrasonic Imaging and Tomography}, doi = {10.1117/12.2292925}, editor = {Duric, N and Byram, B C}, isbn = {9781510616493}, issn = {16057422}, pages = {42}, publisher = {Spie-Int Soc Optical Engineering}, series = {Proceedings of SPIE}, title = {{Automated registration and stitching of multiple 3D ultrasound images for monitoring neonatal intraventricular hemorrhage}}, type = {Book Section}, volume = {10580}, year = {2018} } @article{Hesse2014, abstract = {Osteonecrosis of the jaw, in association with bisphosphonates (BRONJ) used for treating osteoporosis or cancer, is a severe and most often irreversible side effect whose underlying pathophysiological mechanisms remain largely unknown. Osteocytes are involved in bone remodeling and mineralization where they orchestrate the delicate equilibrium between osteoclast and osteoblast activity and through the active process called osteocytic osteolysis. Here, we hypothesized that (i) changes of the mineralized tissue matrix play a substantial role in the pathogenesis of BRONJ, and (ii) the osteocyte lacunar morphology is altered in BRONJ. Synchrotron $\mu$CT with phase contrast is an appropriate tool for assessing both the 3D morphology of the osteocyte lacunae and the bone matrix mass density. Here, we used this technique to investigate the mass density distribution and 3D osteocyte lacunar properties at the sub-micrometer scale in human bone samples from the jaw, femur and tibia.
First, we compared healthy human jaw bone to human tibia and femur in order to assess the specific differences and address potential explanations of why the jaw bone is exclusively targeted by the necrosis as a side effect of BP treatment. Second, we investigated the differences between BRONJ and control jaw bone samples to detect potential differences which could aid an improved understanding of the course of BRONJ. We found that the apparent mass density of jaw bone was significantly smaller compared to that of tibia, consistent with a higher bone turnover in the jaw bone. The variance of the lacunar volume distribution was significantly different depending on the anatomical site. The comparison between BRONJ and control jaw specimens revealed no significant increase in mineralization after BP. We found a significant decrease in osteocyte-lacunar density in the BRONJ group compared to the control jaw. Interestingly, the osteocyte-lacunar volume distribution was not altered after BP treatment. {\textcopyright} 2014 Hesse et al.}, author = {Hesse, Bernhard and Langer, Max and Varga, Peter and Pacureanu, Alexandra and Dong, Pei and Schrof, Susanne and Man̈nicke, Nils and Suhonen, Heikki and Olivier, Cecile and Maurer, Peter and Kazakia, Galateia J. and Raum, Kay and Peyrin, Francoise}, doi = {10.1371/journal.pone.0088481}, issn = {19326203}, journal = {PLoS ONE}, number = {2}, pages = {11}, title = {{Alterations of mass density and 3D osteocyte lacunar properties in bisphosphonate-related osteonecrotic human jaw bone, a synchrotron $\mu$CT study}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {9}, year = {2014} } @inbook{Hoehme2017, abstract = {In this chapter, we illustrate how three-dimensional liver tissue models can be created from experimental image modalities by utilizing a well-established processing chain of experiments, microscopic imaging, image processing, image analysis and model construction. We describe how key features of liver tissue architecture are quantified and translated into model parameterizations, and show how a systematic iteration of experiments and model simulations often leads to a better understanding of biological phenomena in systems biology and systems medicine.}, address = {Totowa}, author = {Hoehme, Stefan and Friebel, Adrian and Hammad, Seddik and Drasdo, Dirk and Hengstler, Jan G.}, booktitle = {Methods in Molecular Biology}, doi = {10.1007/978-1-4939-6506-9_22}, editor = {Stock, P and Christ, B}, isbn = {978-1-4939-6506-9; 978-1-4939-6504-5}, issn = {10643745}, keywords = {2D/3D microscopy,Confocal scanning microscopy,Hepatocyte transplantation,Liver architecture,Liver tissue model,Spatiotemporal model,Systems biology,Systems medicine,TiQuant software}, pages = {319--362}, publisher = {Humana Press Inc}, series = {Methods in Molecular Biology}, title = {{Creation of three-dimensional liver tissue models from experimental images for systems medicine}}, type = {Book Section}, url = {{\%}3CGo to}, volume = {1506}, year = {2017} } @inbook{Joshi2017, address = {Bellingham}, author = {Joshi, K. D. and Marchant, T. E. and Moore, C. 
J.}, booktitle = {Medical Imaging 2017: Physics of Medical Imaging}, doi = {10.1117/12.2254035}, editor = {Flohr, T G and Lo, J Y and Schmidt, T G}, isbn = {9781510607095}, issn = {16057422}, pages = {101322A}, publisher = {Spie-Int Soc Optical Engineering}, series = {Proceedings of SPIE}, title = {{Shading correction algorithm for cone-beam CT in radiotherapy: extensive clinical validation of image quality improvement}}, type = {Book Section}, volume = {10132}, year = {2017} } @article{Kim2014a, abstract = {Spatiotemporal regulation of cell contractility coordinates cell shape change to construct tissue architecture and ultimately directs the morphology and function of the organism. Here we show that contractility responses to spatially and temporally controlled chemical stimuli depend much more strongly on intercellular mechanical connections than on biochemical cues in both stimulated tissues and adjacent cells. We investigate how the cell contractility is triggered within an embryonic epithelial sheet by local ligand stimulation and coordinates a long-range contraction response. Our custom microfluidic control system allows spatiotemporally controlled stimulation with extracellular ATP, which results in locally distinct contractility followed by mechanical strain pattern formation. The stimulation-response circuit exposed here provides a better understanding of how morphogenetic processes integrate responses to stimulation and how intercellular responses are transmitted across multiple cells. These findings may enable one to create a biological actuator that actively drives morphogenesis.}, author = {Kim, Yong Tae and Hazar, Melis and Vijayraghavan, Deepthi S. and Song, Jiho and Jackson, Timothy R. and Joshi, Sagar D. and Messner, William C. and Davidson, Lance A. and LeDuc, Philip R.}, doi = {10.1073/pnas.1405209111}, issn = {10916490}, journal = {Proceedings of the National Academy of Sciences of the United States of America}, keywords = {Mechanotransduction,Microfluidics,Multicellular,Signaling}, number = {40}, pages = {14366--14371}, title = {{Mechanochemical actuators of embryonic epithelial contractility}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85047696684{\&}doi=10.1073{\%}2Fpnas.1405209111{\&}partnerID=40{\&}md5=21fb602c98b9aba0f290a278894f3d81}, volume = {111}, year = {2014} } @article{Neumann2017, abstract = {Objectives This work proposes a modular, anthropomorphic MR and CT thorax phantom that enables the comparison of experimental studies for quantitative evaluation of deformable, multimodal image registration algorithms and realistic multi-nuclear MR imaging techniques. Methods A human thorax phantom was developed with insertable modules representing lung, liver, ribs and additional tracking spheres. The quality of human tissue mimicking characteristics was evaluated for 1H and 23Na MR as well as CT imaging. The position of landmarks in the lung lobes was tracked during CT image acquisition at several positions during breathing cycles. 1H MR measurements of the liver were repeated after seven months to determine long term stability. Results The modules possess HU, T1 and T2 values comparable to human tissues (lung module: −756 ± 148 HU, artificial ribs: 218 ± 56 HU (low CaCO3 concentration) and 339 ± 121 HU (high CaCO3 concentration), liver module: T1 = 790 ± 28 ms, T2 = 65 ± 1 ms). Motion analysis showed that the landmarks in the lung lobes follow a 3D trajectory similar to human breathing motion.
The tracking spheres are well detectable in both CT and MRI. The parameters of the tracking spheres can be adjusted in the following ranges to result in a distinct signal: HU values from 150 to 900 HU, T1 relaxation time from 550 ms to 2000 ms, T2 relaxation time from 40 ms to 200 ms. Conclusion The presented anthropomorphic multimodal thorax phantom fulfills the demands of a simple, inexpensive system with interchangeable components. In future, the modular design allows for complementing the present set up with additional modules focusing on specific research targets such as perfusion studies, 23Na MR quantification experiments and an increasing level of complexity for motion studies.}, author = {Neumann, Wiebke and Lietzmann, Florian and Schad, Lothar R. and Z{\"{o}}llner, Frank G.}, doi = {10.1016/j.zemedi.2016.07.004}, issn = {18764436}, journal = {Zeitschrift fur Medizinische Physik}, keywords = {Anthropomorphic phantom,multi-nuclear MRI,multimodal imaging,quantification,thorax phantom}, number = {2}, pages = {124--131}, title = {{Design eines multimodalen (1H/23Na MR/CT) anthropomorphen Thorax-Phantoms}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {27}, year = {2017} } @article{Nyholm2018, abstract = {Purpose: We describe a public dataset with MR and CT images of patients performed in the same position with both multiobserver and expert consensus delineations of relevant organs in the male pelvic region. The purpose was to provide means for training and validation of segmentation algorithms and methods to convert MR to CT like data, i.e., so called synthetic CT (sCT). Acquisition and validation methods: T1-and T2-weighted MR images as well as CT data were collected for 19 patients at three different departments. Five experts delineated nine organs for each patient based on the T2-weighted MR images. An automatic method was used to fuse the delineations. Starting from each fused delineation, a consensus delineation was agreed upon by the five experts for each organ and patient. Segmentation overlap between user delineations with respect to the consensus delineations was measured to describe the spread of the collected data. Finally, an open-source software was used to create deformation vector fields describing the relation between MR and CT images to further increase the usability of the dataset. Data format and usage notes: The dataset has been made publically available to be used for academic purposes, and can be accessed from https://zenodo.org/record/583096. Potential applications: The dataset provides a useful source for training and validation of segmentation algorithms as well as methods to convert MR to CT-like data (sCT). To give some examples: The T2-weighted MR images with their consensus delineations can directly be used as a template in an existing atlas-based segmentation engine; the expert delineations are useful to validate the performance of a segmentation algorithm as they provide a way to measure variability among users which can be compared with the result of an automatic segmentation; and the pairwise deformably registered MR and CT images can be a source for an atlas-based sCT algorithm or for validation of sCT algorithm.}, author = {Nyholm, Tufve and Svensson, Stina and Andersson, Sebastian and Jonsson, Joakim and Sohlin, Maja and Gustafsson, Christian and Kjell{\'{e}}n, Elisabeth and S{\"{o}}derstr{\"{o}}m, Karin and Albertsson, Per and Blomqvist, Lennart and Zackrisson, Bj{\"{o}}rn and Olsson, Lars E. 
and Gunnlaugsson, Adalsteinn}, doi = {10.1002/mp.12748}, issn = {00942405}, journal = {Medical Physics}, keywords = {CT,MRI,open dataset,organs at risk,radiotherapy}, number = {3}, pages = {1295--1300}, title = {{MR and CT data with multiobserver delineations of organs in the pelvic area-Part of the Gold Atlas project:}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {45}, year = {2018} } @article{Pennati2014a, abstract = {Purpose: To introduce a method based on multivolume proton (hydrogen [1H]) magnetic resonance (MR) imaging for the regional assessment of lung ventilatory function, investigating its use in healthy volunteers and patients with obstructive lung disease and comparing the outcome with the outcome of the research standard helium 3 (3He) MR imaging. Materials and Methods: The institutional review board approved the HIPAA-compliant protocol, and informed written consent was obtained from each subject. Twenty-six subjects, including healthy volunteers (n = 6) and patients with severe asthma (n = 11) and mild (n = 6) and severe (n = 3) emphysema, were imaged with a 1.5-T whole-body MR unit at four lung volumes (residual volume [RV], functional residual capacity [FRC], 1 L above FRC [FRC+1 L], total lung capacity [TLC]) with breath holds of 10-11 seconds, by using volumetric interpolated breath-hold examination. Each pair of volumes were registered, resulting in maps of 1H signal change between the two lung volumes. 3He MR imaging was performed at FRC+1 L by using a two-dimensional gradient-echo sequence. 1H signal change and 3He signal were measured and compared in corresponding regions of interest selected in ventral, intermediate, and dorsal areas. Results: In all volunteers and patients combined, proton signal difference between TLC and RV correlated positively with 3He signal (correlation coefficient R2 = 0.64, P {\textless} .001). Lower (P {\textless} .001) but positive correlation results from 1H signal difference between FRC and FRC+1 L (R2 = 0.44, P {\textless} .001). In healthy volunteers, 1H signal changes show a higher median and interquartile range compared with patients with obstructive disease and significant differences between nondependent and dependent regions. Conclusion: Findings in this study demonstrate that multivolume 1H MR imaging, without contrast material, can be used as a biomarker for regional ventilation, both in healthy volunteers and patients with obstructive lung disease.}, author = {Pennati, Francesca and Quirk, James D. and Yablonskiy, Dmitriy A. 
and Castro, Mario and Aliverti, Andrea and Woods, Jason C.}, doi = {10.1148/radiol.14132470}, issn = {15271315}, journal = {Radiology}, number = {2}, pages = {580--590}, title = {{Assessment of regional lung function with multivolume 1H MR imaging in health and obstructive lung Disease: Comparison with 3He MR imaging}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84910050100{\&}doi=10.1148{\%}2Fradiol.14132470{\&}partnerID=40{\&}md5=75e8d2666ef74110c8fda5fcbf082a75}, volume = {273}, year = {2014} } @article{Pietzsch2013, author = {Pietzsch, Tobias and Preibisch, Stephan and Toman{\v{c}}{\'{a}}k, Pavel and Saalfeld, Stephan}, doi = {10.1093/bioinformatics/bts685}, issn = {13674803}, journal = {Bioinformatics}, number = {2}, pages = {298}, title = {{Erratum: ImgLib2 - Generic image processing in Java (Bioinformatics (2012) 28:22 (3009-3011) DOI: 10.1093/bioinformatics/bts543)}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {29}, year = {2013} } @inbook{Rittscher2010, abstract = {The systems-level analysis of complex biological processes requires methods that enable the quantification of a broad range of phenotypical alterations, the precise localization of signaling events, and the ability to correlate such signaling events in the context of the spatial organization of the biological specimen. The goal of this review is to illustrate that, when combined with modern imaging platforms and labeling techniques, automated image analysis methods can provide such quantitative information. The article attempts to review necessary image analysis techniques as well as applications that utilize these techniques to provide the data that will enable systems-level biology. The text includes a review of image registration and image segmentation methods, as well as algorithms that enable the analysis of cellular architecture, cell morphology, and tissue organization. Various methods that enable the analysis of dynamic events are also presented.}, address = {Palo Alto}, author = {Rittscher, Jens}, booktitle = {Annual Review of Biomedical Engineering}, doi = {10.1146/annurev-bioeng-070909-105235}, editor = {Yarmush, M L and Duncan, J S and Gray, M L}, isbn = {978-0-8243-3512-0}, issn = {1523-9829}, number = {1}, pages = {315--344}, publisher = {Annual Reviews}, series = {Annual Review of Biomedical Engineering}, title = {{Characterization of Biological Processes through Automated Image Analysis}}, type = {Book Section}, url = {{\%}3CGo to}, volume = {12}, year = {2010} } @inbook{Rusu2013, abstract = {Statistical imaging atlases allow for integration of information from multiple patient studies collected across different image scales and modalities, such as multi-parametric (MP) MRI and histology, providing population statistics regarding a specific pathology within a single canonical representation. Such atlases are particularly valuable in the identification and validation of meaningful imaging signatures for disease characterization in vivo within a population. Despite the high incidence of prostate cancer, an imaging atlas focused on different anatomic structures of the prostate, i.e. an anatomic atlas, has yet to be constructed. In this work we introduce a novel framework for MRI atlas construction that uses an iterative, anatomically constrained registration (AnCoR) scheme to enable the proper alignment of the prostate (Pr) and central gland (CG) boundaries. 
Our current implementation uses endorectal, 1.5T or 3T, T2-weighted MRI from 51 patients with biopsy confirmed cancer; however, the prostate atlas is seamlessly extensible to include additional MRI parameters. In our cohort, radical prostatectomy is performed following MP-MR image acquisition; thus ground truth annotations for prostate cancer are available from the histological specimens. Once mapped onto MP-MRI through elastic registration of histological slices to corresponding T2-w MRI slices, the annotations are utilized by the AnCoR framework to characterize the 3D statistical distribution of cancer per anatomic structure. Such distributions are useful for guiding biopsies toward regions of higher cancer likelihood and understanding imaging profiles for disease extent in vivo. We evaluate our approach via the Dice similarity coefficient (DSC) for different anatomic structures (delineated by expert radiologists): Pr, CG and peripheral zone (PZ). The AnCoR-based atlas had a CG DSC of 90.36{\%}, and Pr DSC of 89.37{\%}. Moreover, we evaluated the deviation of anatomic landmarks, the urethra and verumontanum, and found deviations of 3.64 mm and 4.31 mm, respectively. Alternative strategies that use only the T2-w MRI or the prostate surface to drive the registration were implemented as comparative approaches. The AnCoR framework outperformed the alternative strategies by providing the lowest landmark deviations. {\textcopyright} 2013 SPIE.}, address = {Bellingham}, author = {Rusu, Mirabela and Bloch, B. Nicolas and Jaffe, Carl C. and Rofsky, Neil M. and Genega, Elizabeth M. and Feleppa, Ernest and Lenkinski, Robert E. and Madabhushi, Anant}, booktitle = {Medical Imaging 2013: Image Processing}, doi = {10.1117/12.2006941}, editor = {Ourselin, S and Haynor, D R}, isbn = {9780819494436}, issn = {0277-786X}, pages = {866913}, publisher = {Spie-Int Soc Optical Engineering}, series = {Proceedings of SPIE}, title = {{Statistical 3D prostate imaging atlas construction via anatomically constrained registration}}, type = {Book Section}, volume = {8669}, year = {2013} } @article{Santini2015, abstract = {Purpose To develop a novel sequence for simultaneous quantification of T1 and T2 relaxation times in the myocardium based on the transient phase of the balanced steady-state free precession. Methods A new prototype sequence, named "cardiac balanced-SSFP inversion recovery with interleaved sampling acquisition" (CABIRIA) was developed based on a single-shot bSSFP readout following an inversion pulse. With this method, T1 and T2 values can be calculated from the analysis of signal evolution. The scan duration for a single slice in vivo was 8 heartbeats, thus feasible in a breath-hold. The sequence was validated both in vitro by comparing it to conventional inversion recovery and multi-echo spin-echo methods and in 5 healthy volunteers by comparing it to the Modified Look-Locker Inversion Recovery (MOLLI) sequence and to a T2 quantification sequence based on multi-T2-prepared bSSFP. Results The method showed good agreement with conventional methods for both T1 and T2 measurements (concordance correlation coefficient ≥ 0.99) in vitro. In healthy volunteers the measured T1 values were 1227 ± 68 ms and T2 values 37.9 ± 2.4 ms, with similar inter- and intrasubject variability with respect to existing methods. Conclusion The proposed CABIRIA method enables simultaneous quantification of myocardial T1 and T2 values with good accuracy and precision.
Magn Reson Med 74:365-371, 2015.}, author = {Santini, Francesco and Kawel-Boehm, N. and Greiser, A. and Bremerich, J. and Bieri, O.}, doi = {10.1002/mrm.25402}, issn = {15222594}, journal = {Magnetic Resonance in Medicine}, keywords = {balanced steady-state free precession,cardiac MRI,cardiac relaxometry,inversion recovery}, number = {2}, pages = {365--371}, title = {{Simultaneous T1 and T2 quantification of the myocardium using cardiac balanced-SSFP inversion recovery with interleaved sampling acquisition (CABIRIA)}}, type = {Journal Article}, url = {{\%}3CGo to}, volume = {74}, year = {2015} } @book{Schober2015, abstract = {Anatomical reference brains are indispensable tools in human brain mapping, enabling the integration of multimodal data or the alignment of a series of adjacent histological brain sections into an anatomically realistic space. This study describes a robust and efficient method for an automatic 3D reconstruction of blockface images taken from postmortem brains during cutting as a prerequisite for high-quality 3D reconstruction of brain sections. The refinement technique used in this registration method is applicable for a broad range of pre-registered histological stacks.}, address = {Berlin}, author = {Schober, Martin and Schl{\"{o}}mer, Philipp and Cremer, Markus and Mohlberg, Hartmut and Huynh, Anh Minh and Schubert, Nicole and Kirlangic, Mehmet E. and Amunts, Katrin and Axer, Markus}, booktitle = {Informatik aktuell}, doi = {10.1007/978-3-662-46224-9_26}, isbn = {9783662462232}, issn = {1431472X}, pages = {143--148}, publisher = {Springer-Verlag Berlin}, series = {Bildverarbeitung Fur Die Medizin 2015: Algorithmen - Systeme - Anwendungen}, title = {{Reference volume generation for subsequent 3D reconstruction of histological sections}}, type = {Book}, url = {{\%}3CGo to}, year = {2015} } @inproceedings{Sibley2016, address = {Bellingham}, author = {Sibley, Adam R. and Markiewicz, Erica and Mustafi, Devkumar and Fan, Xiaobing and Conzen, Suzanne and Karczmar, Greg and Giger, Maryellen L.}, booktitle = {Medical Imaging 2016: Biomedical Applications in Molecular, Structural, and Functional Imaging}, doi = {10.1117/12.2217425}, editor = {Gimi, B and Krol, A}, isbn = {9781510600232}, issn = {16057422}, pages = {97882M}, publisher = {Spie-Int Soc Optical Engineering}, series = {Proceedings of SPIE}, title = {{Computerized segmentation algorithm with personalized atlases of murine MRIs in a SV40 large T-antigen mouse mammary cancer model}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84978919989{\&}doi=10.1117{\%}2F12.2217425{\&}partnerID=40{\&}md5=df4d24295f5fc4323b0407742579e008}, volume = {9788}, year = {2016} } @article{Sobottka2013, abstract = {Object. Intraoperative optical imaging (IOI) is an experimental technique used for visualizing functional brain areas after surgical exposure of the cerebral cortex. This technique identifies areas of local changes in blood volume and oxygenation caused by stimulation of specific brain functions. The authors describe a new IOI method, including innovative data analysis, that can facilitate intraoperative functional imaging on a routine basis. To evaluate the reliability and validity of this approach, they used the new IOI method to demonstrate visualization of the median nerve area of the somatosensory cortex. Methods. 
In 41 patients with tumor lesions adjacent to the postcentral gyrus, lesions were surgically removed by using IOI during stimulation of the contralateral median nerve. Optical properties of the cortical tissue were measured with a sensitive camera system connected to a surgical microscope. Imaging was performed by using 9 cycles of alternating prolonged stimulation and rest periods of 30 seconds. Intraoperative optical imaging was based on blood volume changes detected by using a filter at an isosbestic wavelength ($\lambda$ = 568 nm). A spectral analysis algorithm was used to improve computation of the activity maps. Movement artifacts were compensated for by an elastic registration algorithm. For validation, intraoperative conduction of the phase reversal over the central sulcus and postoperative evaluation of the craniotomy site were used. Results. The new method and analysis enabled significant differentiation (p {\textless} 0.005) between functional and nonfunctional tissue. The identification and visualization of functionally intact somatosensory cortex was highly reliable; sensitivity was 94.4{\%} and specificity was almost 100{\%}. The surgeon was provided with a 2D high-resolution activity map within 12 minutes. No method-related side effects occurred in any of the 41 patients. Conclusions. The authors' new approach makes IOI a contact-free and label-free optical technique that can be used safely in a routine clinical setup. Intraoperative optical imaging can be used as an alternative to other methods for the identification of sensory cortex areas and offers the added benefit of a high-resolution map of functional activity. It has great potential for visualizing and monitoring additional specific functional brain areas such as the visual, motor, and speech cortex. A prospective national multicenter clinical trial is currently being planned. {\textcopyright} AANS, 2013.}, author = {Sobottka, Stephan B. and Meyer, Tobias and Kirsch, Matthias and Koch, Edmund and Steinmeier, Ralf and Morgenstern, Ute and Schackert, Gabriele}, doi = {10.3171/2013.5.JNS122155}, issn = {00223085}, journal = {Journal of Neurosurgery}, keywords = {Brain tumors,Functional brain mapping,Functional imaging,Functional neurosurgery,Image-guided surgery,Intraoperative optical imaging,Intrinsic signals,Oncology,Somatosensory cortex}, number = {4}, pages = {853--863}, title = {{Intraoperative optical imaging of intrinsic signals: A reliable method for visualizing stimulated functional brain areas during surgery}}, type = {Journal Article}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84881515898{\&}doi=10.3171{\%}2F2013.5.JNS122155{\&}partnerID=40{\&}md5=8ed4109f65f8364384e1830b43ef2107}, volume = {119}, year = {2013} } @inbook{Sparks2013, abstract = {In this work, we present a novel, automated, registration method to fuse magnetic resonance imaging (MRI) and transrectal ultrasound (TRUS) images of the prostate. Our methodology consists of: (1) delineating the prostate on MRI, (2) building a probabilistic model of prostate location on TRUS, and (3) aligning the MRI prostate segmentation to the TRUS probabilistic model. TRUS-guided needle biopsy is the current gold standard for prostate cancer (CaP) diagnosis. Up to 40{\%} of CaP lesions appear isoechoic on TRUS, hence TRUS-guided biopsy cannot reliably target CaP lesions and is associated with a high false negative rate. MRI is better able to distinguish CaP from benign prostatic tissue, but requires special equipment and training. 
MRI-TRUS fusion, whereby MRI is acquired pre-operatively and aligned to TRUS during the biopsy procedure, allows for information from both modalities to be used to help guide the biopsy. The use of MRI and TRUS in combination to guide biopsy at least doubles the yield of positive biopsies. Previous work on MRI-TRUS fusion has involved aligning manually determined fiducials or prostate surfaces to achieve image registration. The accuracy of these methods is dependent on the reader's ability to determine fiducials or prostate surfaces with minimal error, which is a difficult and time-consuming task. Our novel, fully automated MRI-TRUS fusion method represents a significant advance over the current state-of-the-art because it does not require manual intervention after TRUS acquisition. All necessary preprocessing steps (i.e. delineation of the prostate on MRI) can be performed offline prior to the biopsy procedure. We evaluated our method on seven patient studies, with B-mode TRUS and a 1.5 T surface coil MRI. Our method has a root mean square error (RMSE) for expertly selected fiducials (consisting of the urethra, calcifications, and the centroids of CaP nodules) of 3 .39 ± 0.85 mm. {\textcopyright} 2013 SPIE.}, address = {Bellingham}, author = {Sparks, Rachel and Bloch, B. Nicholas and Feleppa, Ernest and Barratt, Dean and Madabhushi, Anant}, booktitle = {Medical Imaging 2013: Image-Guided Procedures, Robotic Interventions, and Modeling}, doi = {10.1117/12.2007610}, editor = {Holmes, D R and Yaniv, Z R}, isbn = {9780819494450}, issn = {0277-786X}, pages = {86710A}, publisher = {Spie-Int Soc Optical Engineering}, series = {Proceedings of SPIE}, title = {{Fully automated prostate magnetic resonance imaging and transrectal ultrasound fusion via a probabilistic registration metric}}, type = {Book Section}, url = {{\%}3CGo to}, volume = {8671}, year = {2013} } @inbook{Toledo2015, address = {Bellingham}, author = {{Velasco Toledo}, Nelson and {Romero Castro}, Eduardo}, booktitle = {10th International Symposium on Medical Information Processing and Analysis}, doi = {10.1117/12.2073319}, editor = {Romero, E and Lepore, N}, isbn = {9781628413625}, issn = {16057422}, pages = {92871C}, publisher = {Spie-Int Soc Optical Engineering}, series = {Proceedings of SPIE}, title = {{Fast high resolution reconstruction in multi-slice and multi-view cMRI}}, type = {Book Section}, url = {{\%}3CGo to}, volume = {9287}, year = {2015} } @inbook{Toledo2013, abstract = {Acquisition of proper cardiac MR images is highly limited by continued heart motion and apnea periods. A typical acquisition results in volumes with inter-slice separations of up to 8 mm. This paper presents a super-resolution strategy that estimates a high-resolution image from a set of low-resolution image series acquired in different non- orthogonal orientations. The proposal is based on a Bayesian approach that implements a Maximum a Posteriori (MAP) estimator combined with a Wiener filter. A pre-processing stage was also included, to correct or eliminate differences in the image intensities and to transform the low-resolution images to a common spatial reference system. The MAP estimation includes an observation image model that represents the different contributions to the voxel intensities based on a 3D Gaussian function. 
A quantitative and qualitative assessment was performed using synthetic and real images, showing that the proposed approach produces a high-resolution image with significant improvements (about 3dB in PSNR) with respect to a simple trilinear interpolation. The Wiener filter shows little contribution to the final result, demonstrating that the MAP uniformity prior is able to filter out a large amount of the acquisition noise. {\textcopyright} 2013 SPIE.}, address = {Bellingham}, author = {{Velasco Toledo}, Nelson and Rueda, Andrea and {Santa Marta}, Cristina and Romero, Eduardo}, booktitle = {Medical Imaging 2013: Image Processing}, doi = {10.1117/12.2007074}, editor = {Ourselin, S and Haynor, D R}, isbn = {9780819494436}, issn = {16057422}, pages = {866932}, publisher = {Spie-Int Soc Optical Engineering}, series = {Proceedings of SPIE}, title = {{Super-resolution in cardiac MRI using a Bayesian approach}}, type = {Book Section}, url = {{\%}3CGo to}, volume = {8669}, year = {2013} } @inproceedings{Tourbier2015a, abstract = {{\textcopyright} 2015 SPIE. In fetal brain MRI, most of the high-resolution reconstruction algorithms rely on brain segmentation as a preprocessing step. Manual brain segmentation is however highly time-consuming and therefore not a realistic solution. In this work, we assess on a large dataset the performance of Multiple Atlas Fusion (MAF) strategies to automatically address this problem. Firstly, we show that MAF significantly increase the accuracy of brain segmentation as regards single-atlas strategy. Secondly, we show that MAF compares favorably with the most recent approach (Dice above 0.90). Finally, we show that MAF could in turn provide an enhancement in terms of reconstruction quality.}, address = {Bellingham}, author = {Tourbier, S{\'{e}}bastien and Hagmann, Patric and Cagneaux, Maud and Guibaud, Laurent and Gorthi, Subrahmanyam and Schaer, Marie and Thiran, Jean-Philippe and Meuli, Reto and {Bach Cuadra}, Meritxell}, booktitle = {Medical Imaging 2015: Image Processing}, doi = {10.1117/12.2081777}, editor = {Ourselin, S and Styner, M A}, isbn = {9781628415032}, issn = {16057422}, pages = {94130Y}, publisher = {Spie-Int Soc Optical Engineering}, series = {Proceedings of SPIE}, title = {{Automatic brain extraction in fetal MRI using multi-atlas-based segmentation}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84943378886{\&}doi=10.1117{\%}2F12.2081777{\&}partnerID=40{\&}md5=1e5911cbeee0e1cf86d55e9988c697b9}, volume = {9413}, year = {2015} } @inbook{Tran2017, abstract = {{\textcopyright} 2017 SPIE. Although it is well documented that abnormal levels of either intraocular (IOP) or intracranial pressure (ICP) can lead to potentially blinding conditions, such as glaucoma and papilledema, little is known about how the pressures actually affect the eye. Even less is known about potential interplay between their effects, namely how the level of one pressure might alter the effects of the other. Our goal was to measure in-vivo the pressure-induced stretch and compression of the lamina cribrosa due to acute changes of IOP and ICP. The lamina cribrosa is a structure within the optic nerve head, in the back of the eye. It is important because it is in the lamina cribrosa that the pressure-induced deformations are believed to initiate damage to neural tissues leading to blindness. 
An eye of a rhesus macaque monkey was imaged in-vivo with optical coherence tomography while IOP and ICP were controlled through cannulas in the anterior chamber and lateral ventricle, respectively. The image volumes were analyzed with a newly developed digital image correlation technique. The effects of both pressures were highly localized, nonlinear and non-monotonic, with strong interactions. Pressure variations from the baseline normal levels caused substantial stretch and compression of the neural tissues in the posterior pole, sometimes exceeding 20{\%}. Chronic exposure to such high levels of biomechanical insult would likely lead to neural tissue damage and loss of vision. Our results demonstrate the power of digital image correlation technique based on non-invasive imaging technologies to help understand how pressures induce biomechanical insults and lead to vision problems.}, address = {Bellingham}, author = {Tran, H. and Grimm, J. and Wang, B. and Smith, M. A. and Gogola, A. and Nelson, S. and Tyler-Kabara, E. and Schuman, J. and Wollstein, G. and Sigal, I. A.}, booktitle = {Optical Elastography and Tissue Biomechanics IV}, doi = {10.1117/12.2257360}, editor = {Larin, K V and Sampson, D D}, isbn = {9781510605756}, issn = {0277-786X}, pages = {100670B}, publisher = {Spie-Int Soc Optical Engineering}, series = {Proceedings of SPIE}, title = {{Mapping in-vivo optic nerve head strains caused by intraocular and intracranial pressures}}, type = {Book Section}, url = {{\%}3CGo to}, volume = {10067}, year = {2017} } @inbook{Vimort2018, abstract = {{\textcopyright} 2018 SPIE. To date, there is no single sign, symptom, or test that can clearly diagnose early stages of Temporomandibular Joint Osteoarthritis (TMJ OA). However, it has been observed that changes in the bone occur in early stages of this disease, involving structural changes both in the texture and morphometry of the bone marrow and the subchondral cortical plate. In this paper we present a tool to detect and highlight subtle variations in subchondral bone structure obtained from high resolution Cone Beam Computed Tomography (hr-CBCT) in order to help with detecting early TMJ OA. The proposed tool was developed in ITK and 3DSlicer and it has been disseminated as open-source software tools. We have validated both our texture analysis and morphometry analysis biomarkers for detection of TMJ OA comparing hr-CBCT to $\mu$CT. Our initial statistical results using the multidimensional features computed with our tool indicate that it is possible to classify areas of demonstrated loss of trabecular bone in both $\mu$CT and hr-CBCT. This paper describes the first steps to alleviate the current inability of radiological changes to diagnose TMJ OA before morphological changes are too advanced by quantifying subchondral bone biomarkers. This paper indicates that texture based and morphometry based biomarkers have the potential to identify OA patients at risk for further bone destruction.}, address = {Bellingham}, author = {Prothero, Jack and McCormick, Matthew and Paniagua, Beatriz and Vimort, Jean-Baptiste and Ruellas, Antonio Carlos and Marron, J.S. 
and Cevidanes, Lucia and Benavides, Erika}, booktitle = {Medical Imaging 2018: Biomedical Applications in Molecular, Structural, and Functional Imaging}, doi = {10.1117/12.2293654}, editor = {Gimi, B and Krol, A}, isbn = {9781510616455}, issn = {0277-786X}, pages = {25}, publisher = {Spie-Int Soc Optical Engineering}, series = {Proceedings of SPIE}, title = {{Detection of bone loss via subchondral bone analysis}}, type = {Book Section}, url = {{\%}3CGo to}, volume = {10578}, year = {2018} } @inbook{Zhou2014, abstract = {A lot of image registration algorithms are proposed in recent year, among these algorithms, which one is better or faster than the other can be only validated by experiments. In this paper, ITK (Insight Segmentation and Registration Toolkit) is used for verifying different algorithms as a framework. ITK framework requires the following components: a fixed image, a moving image, a transform, a metric, an interpolator and an optimizer. Dozens of classical algorithms are tested under the same conditions and their experimental results are demonstrated with different metrics, interpolators or optimizers. By comparison of registration time and accuracy, those practical and useful algorithms are selected for developing software in image analysis. These kinds of experiments are very valuable for software engineering, they can shorten the cycle of software development and greatly reduce the development costs. {\textcopyright} (2014) Trans Tech Publications, Switzerland.}, address = {Durnten-Zurich}, author = {Zhou, Zhen Huan}, booktitle = {Applied Mechanics and Materials}, doi = {10.4028/www.scientific.net/AMM.442.515}, editor = {Yang, G}, isbn = {9783037859018}, issn = {16609336}, keywords = {Algorithms comparison,ITK,Image registration,Registration framework}, pages = {515--519}, publisher = {Trans Tech Publications Ltd}, series = {Applied Mechanics and Materials}, title = {{Comparison and assessment of different image registration algorithms based on ITK}}, type = {Book Section}, url = {{\%}3CGo to}, volume = {442}, year = {2014} } @inproceedings{Kugu2013, abstract = {Satellite imaging is being the most attractive source of information for the governmental agencies and the commercial companies in last decade. The quality of the images is very important especially for the military or the police forces to pick the valuable information from the details. Satellite images may have unwanted signals called as noise in addition to useful information for several reasons such as heat generated electrons, bad sensor, wrong ISO settings, vibration and clouds. There are several image enhancement algorithms to reduce the effects of noise over the image to see the details and gather meaningful information. Many of these algorithms accept several parameters from the user to reach the best results. In the process of denoising, there is always a competition between the noise reduction and the fine preservation. If there is a competition between the objectives then an evolutionary multi objective optimization (EMO) is needed. In this work, the parameters of the image denoising algorithms have been optimized to minimize the trade-off by using improved Strength Pareto Evolutionary Algorithm (SPEA2). SPEA2 differs from the other EMO algorithms with the fitness assignment, the density estimation and the archive truncation processes. There is no single optimal solution in a multi objective problems instead there is a set of solutions called as Pareto efficient. 
Four objective functions, namely Mean Square Error (MSE), Entropy, Structural SIMilarity (SSIM) and Second Derivative of the image, have been used in this work. MSE is calculated by taking the square of the difference between the noise free image and the denoised image. Entropy is a measure of randomness of the content of the difference image. The lower the entropy, the better. The second derivative of an image can be achieved by convolving the image with the Laplacian Mask. The SSIM algorithm is based on the similarities of the structures on the noise free image and the structures of the denoised image. For the image enhancement algorithms, Insight Segmentation and Registration Toolkit (ITK) is selected. ITK is an open source project and it is being developed in C++ to provide developers with a rich set of applications for image analysis. It includes tens of image filters for the registration and segmentation purposes. In this work, Bilateral Image Filter is evaluated in the field of satellite imaging for the noise removal process. The evaluated filter receives two parameters from the user side within their predefined ranges. Here, the SPEA2 algorithm takes the responsibility to optimize these parameters to reach the best noise free image results. The SPEA2 algorithm was implemented in Matlab and executable files of the image filter were called in the Matlab environment. The results of the work were represented graphically to show the effectiveness of the selected method. {\textcopyright} 2013 IEEE.}, address = {New York}, author = {Kugu, Emin}, booktitle = {RAST 2013 - Proceedings of 6th International Conference on Recent Advances in Space Technologies}, doi = {10.1109/RAST.2013.6581204}, isbn = {9781467363938}, keywords = {Image denoising,SPEA2,bilateral filter,parameter optimization}, pages = {217--223}, publisher = {Ieee}, series = {Proceedings of 6th International Conference on Recent Advances in Space Technologies}, title = {{Satellite image denoising using Bilateral Filter with SPEA2 optimized parameters}}, type = {Book}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84883888988{\&}doi=10.1109{\%}2FRAST.2013.6581204{\&}partnerID=40{\&}md5=eba34fda9e6d5ba7ef3f98fd189e8ed5}, year = {2013} } @inproceedings{Ponzio2017, abstract = {US-guided neuronavigation exploits the simplicity of use and minimal invasiveness of Ultrasound (US) imaging and the high tissue resolution and signal-to-noise ratio of Magnetic Resonance Imaging (MRI) to guide brain surgeries. More specifically, the intra-operative 3D US images are combined with pre-operative MR images to accurately localise the course of instruments in the operative field with minimal invasiveness. Multi-modal image registration of 3D US and MR images is an essential part of such a system. In this paper, we present a complete software framework that enables the registration of US and MR brain scans based on a multi resolution deformable transform, tackling elastic deformations (i.e. brain shifts) possibly occurring during the surgical procedure. The framework also supports simpler and faster registration techniques, based on rigid or affine transforms, and enables the interactive visualisation and rendering of the overlaid US and MRI volumes.
The registration was experimentally validated on a public dataset of realistic brain phantom images, at different levels of artificially induced deformations.}, address = {Setubal}, author = {Ponzio, Francesco and Macii, Enrico and Ficarra, Elisa and {Di Cataldo}, Santa}, booktitle = {BIOIMAGING 2017 - 4th International Conference on Bioimaging, Proceedings; Part of 10th International Joint Conference on Biomedical Engineering Systems and Technologies, BIOSTEC 2017}, doi = {10.5220/0006239201140121}, isbn = {9789897582158}, keywords = {Image Processing,MR-US Image Integration,Multi-modal Image Registration,Neuroimaging,US-based Neuronavigation}, pages = {114--121}, publisher = {Scitepress}, series = {Proceedings of the 10th International Joint Conference on Biomedical Engineering Systems and Technologies, Vol 2: Bioimaging}, title = {{A multi-modal brain image registration framework for US-guided neuronavigation systems integrating MR and US for minimally invasive neuroimaging}}, type = {Book}, url = {{\%}3CGo to https://www.scopus.com/inward/record.uri?eid=2-s2.0-85049240676{\&}partnerID=40{\&}md5=0189c956d91dec04b477ed5846ecb930}, volume = {2017-Janua}, year = {2017} } @inproceedings{Mandl, abstract = {Functional electrical stimulation (FES) of longterm denervated, degenerated human skeletal muscle has proven to be an effective method for improving a number of physiological parameters. In order to derive suitable stimulation configurations (electrode position and size) for certain muscle specific training tasks, the activation pattern induced by a given configuration must be known. The probability of activation can be estimated by activating functions which depend on the distribution of the externally applied electrical field. We thus chose to create both, a finite element (FE) and a finite difference (FD) model of the field distribution to simulate and study activation patterns and to compare their efficiency and feasibility. First preliminary results show good agreement between the two modeling approaches.}, address = {La Laguna}, author = {Mandl, Thomas and Martinek, Johannes and Mayr, Winfried and Rattay, Frank and Reichel, Martin and Moser, Ewald}, booktitle = {21st European Modeling and Simulation Symposium, EMSS 2009}, isbn = {978-84-692-5415-8}, keywords = {Finite difference method,Finite element method,Functional electrical stimulation,Patient specific model}, pages = {209--+}, publisher = {Univ De La Laguna}, series = {Emss 2009: 21st European Modeling and Simulation Symposium, Vol Ii}, title = {{Towards a numerical 3D model of functional electrical stimulation of denervated, degenerated human skeletal muscle}}, type = {Book}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84874185461{\&}partnerID=40{\&}md5=a9627e50e42db43cbfa2f5ca618bdad4 {\%}3CGo to}, year = {2009} } @inproceedings{Medina2017, abstract = {Cardiovascular diseases are the main cause of death in the World. This fact has motivated different actions for prevention, diagnosis and monitoring of cardiovascular diseases. In this work, the accuracy of a connected confidence left ventricle segmentation method is performed. This task is accomplished using a software platform for left ventricle segmentation of 3-D cardiac Multi-Slice Computerized Tomography (MSCT) images that is also described. The software platform has as a goal performing research about efficient methods for cardiac image segmentation and quantification. 
The accuracy assessment of the segmentation method is performed by comparing the estimated segmentation with respect to segmentations manually traced by cardiologists. Results show that the segmentation method provides Dice Similarity coefficients higher than 0.90 with low computational cost. The obtained segmentation is able to include within the left ventricular lumen the papillary trabeculae muscles, enabling further accurate estimation of the left ventricular mass.}, address = {New York}, author = {Medina, Ruben and Bautista, Sebastian and Morocho, Villie}, booktitle = {2017 IEEE 2nd Ecuador Technical Chapters Meeting, ETCM 2017}, doi = {10.1109/ETCM.2017.8247499}, isbn = {9781538638941}, keywords = {Connected confidence,Left ventricle segmentation,Multi-slice computerized tomography,Software platform}, pages = {1--6}, publisher = {Ieee}, series = {2017 Ieee Second Ecuador Technical Chapters Meeting}, title = {{Accuracy of connected confidence left ventricle segmentation in 3-D multi-slice computerized tomography images}}, type = {Book}, url = {{\%}3CGo to https://www.scopus.com/inward/record.uri?eid=2-s2.0-85045738426{\&}doi=10.1109{\%}2FETCM.2017.8247499{\&}partnerID=40{\&}md5=31a12ff7f15e6264e843b9c8cc503a7c}, volume = {2017-Janua}, year = {2018} } @inproceedings{Pastorelli2014, abstract = {The paper investigates the beneficial contribution of visual feedback in the development of an algorithm for the automatized analysis of fibre orientations in short fibre reinforced composites. Of special interest was steel fibre reinforced concrete (SFRC), a multi-disciplinary research area involving material sciences, physics and civil engineering. More in detail, this paper explains how scientific visualization techniques, employed on a Virtual Reality environment, contribute to the understanding of the SFRC properties, both for research and educational aims. Furthermore, the analysis algorithm to obtain fibre orientation distributions from noisy tomography scans is presented.}, address = {New York}, author = {Pastorelli, Emiliano and Herrmann, Heiko}, booktitle = {Proceedings of the Biennial Baltic Electronics Conference, BEC}, doi = {10.1109/BEC.2014.7320591}, isbn = {9781467395397}, issn = {17363705}, pages = {201--204}, publisher = {Ieee}, series = {2014 Proceedings of the 14th Biennial Baltic Electronics Conference}, title = {{Virtual Reality visualization for short fibre orientation analysis}}, type = {Book}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84971310302{\&}doi=10.1109{\%}2FBEC.2014.7320591{\&}partnerID=40{\&}md5=3476efbc0430ba44724fcc8006637b70 {\%}3CGo to}, volume = {2015-Novem}, year = {2014} } @inproceedings{Haaka, abstract = {For interventional monitoring, we aim at 4D ultrasound reconstructions of structures in the beating heart from 2D transesophageal echo images by fast scan plane rotation, unsynchronized to the heart rate. For such sparsely and irregularly sampled 2D images, a special spatiotemporal interpolation approach is desired. We have previously shown the potential of spatiotemporal interpolation by normalized convolution (NC). In this work we optimized NC for our application and compared it to nearest neighbor interpolation (NN) and to temporal binning followed by linear spatial interpolation (LTB). The test datasets consisted of 600, 1350, and 1800 2D images and were derived by slicing a 4D echocardiography data sets at random rotation angle ($\theta$, range: 0-180°) and random normalized cardiac phase ($\tau$, range: 0-1). 
A Gaussian kernel was used for NC and optimal kernel sizes ($\sigma_\tau$ and $\sigma_\theta$) were found by performing an exhaustive search. The RMS gray value error (RMSE) of the reconstructed images was computed for all interpolation methods. The estimated optimal kernels were $\sigma_\theta$=3.24°/$\sigma_\tau$=0.048, $\sigma_\theta$=2.34°/$\sigma_\tau$=0.026, and $\sigma_\theta$=1.89°/$\sigma_\tau$=0.023 for 600, 1350, and 1800 input images, respectively. The minimum RMSE for NC was 13.8, 10.4, and 9.4 for 600, 1350, and 1800 input images, respectively. The NN/LTB reconstruction had an RMSE of 17.8/16.4, 13.9/15.1, and 12.0/14.7 for 600, 1350, and 1800 2D input images, respectively. We showed that NC outperforms NN and LTB. For a small number of input images the advantage of NC is more pronounced. {\textcopyright} 2011 IEEE.}, address = {New York}, author = {Haak, Alexander and {Van Stralen}, Marijn and {Van Burken}, Gerard and Klein, Stefan and Pluim, Josien P.W. and {De Jong}, Nico and {Van Der Steen}, Antonius F.W. and Bosch, Johannes G.}, booktitle = {IEEE International Ultrasonics Symposium, IUS}, doi = {10.1109/ULTSYM.2011.0038}, isbn = {9781457712531}, issn = {19485719}, pages = {152--155}, publisher = {Ieee}, series = {IEEE International Ultrasonics Symposium}, title = {{Spatiotemporal interpolation by normalized convolution for 4D transesophageal echocardiography}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84869068068{\&}doi=10.1109{\%}2FULTSYM.2011.0038{\&}partnerID=40{\&}md5=e5a5718535ae51415891100024bf51a5}, year = {2011} } @incollection{Zhang2014, abstract = {Subject-specific models of the musculoskeletal system are capable of accurately estimating function and loads and show promise for clinical use. However, creating subject-specific models is time-consuming and requires high levels of expertise. To address these issues, we have developed the open source Musculoskeletal Atlas Project (MAP) Client software. The MAP Client provides a user-friendly interface for creating musculoskeletal modelling workflows using community-created plug-ins. In this paper, we discuss the design of the MAP Client, its plug-in architecture and its integration with the Physiome Model Repository. We demonstrate the use of MAP Client with a subject-specific femur modeling workflow using a set of modular open source plug-ins for image segmentation, landmark prediction, model registration and customisation. Our long-term goal is to foster a community of MAP users and plug-in developers to accelerate the clinical use of computational models.}, address = {Cham}, author = {Zhang, Ju and Sorby, Hugh and Clement, John and Thomas, C. David L.
and Hunter, Peter and Nielsen, Poul and Lloyd, David and Taylor, Mark and Besier, Thor}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, doi = {10.1007/978-3-319-12057-7_21}, editor = {Bello, F and Cotin, S}, isbn = {9783319120560}, issn = {16113349}, keywords = {Biomechanical modelling,Musculoskeletal atlas project,Musculoskeletal modelling,Open source software,Personalised simulation,Pipelines,workflows}, pages = {182--192}, publisher = {Springer International Publishing Ag}, series = {Lecture Notes in Computer Science}, title = {{The MAP client: User-friendly musculoskeletal modelling workflows}}, type = {Serial}, url = {{\%}3CGo to https://www.scopus.com/inward/record.uri?eid=2-s2.0-84911390478{\&}partnerID=40{\&}md5=d52525096590e132a0f2bdfbee3e3a8e}, volume = {8789}, year = {2014} } @inproceedings{Andrade2018, abstract = {The large variety of medical image modalities (e.g. Computed Tomography, Magnetic Resonance Imaging, and Positron Emission Tomography) acquired from the same body region of a patient together with recent advances in computer architectures with faster and larger CPUs and GPUs allows a new, exciting, and unexplored world for image registration area. A precise and accurate registration of images makes possible understanding the etiology of diseases, improving surgery planning and execution, detecting otherwise unnoticed health problem signals, and mapping functionalities of the brain. The goal of this paper is to present a review of the state-of-the-art in medical image registration starting from the preprocessing steps, covering the most popular methodologies of the literature and finish with the more recent advances and perspectives from the application of Deep Learning architectures.}, address = {New York}, author = {Andrade, Natan and Faria, Fabio Augusto and Cappabianco, F{\'{a}}bio Augusto Menocci}, booktitle = {Proceedings - 31st Conference on Graphics, Patterns and Images, SIBGRAPI 2018}, doi = {10.1109/SIBGRAPI.2018.00066}, isbn = {9781538692646}, keywords = {Deep Learning,Image Registration,Medical Imaging}, pages = {463--470}, publisher = {Ieee}, series = {SIBGRAPI - Brazilian Symposium on Computer Graphics and Image Processing}, title = {{A Practical Review on Medical Image Registration: From Rigid to Deep Learning Based Approaches}}, type = {Book Section}, url = {{\%}3CGo to https://www.scopus.com/inward/record.uri?eid=2-s2.0-85062231809{\&}doi=10.1109{\%}2FSIBGRAPI.2018.00066{\&}partnerID=40{\&}md5=14da8e5ef491a8566fbbd2a58e32c72b}, year = {2019} } @inproceedings{Meesters, abstract = {Intracranial depth electrodes are commonly used to identify the regions of the brain that are responsible for epileptic seizures. Knowledge of the exact location of the electrodes is important as to properly interpret the EEG in relation to the anatomy. In order to provide fast and accurate identification of these electrodes, a procedure has been developed for automatic detection and localization in computed tomography data. Results indicate that in the vast majority of cases the depth electrodes can be automatically found. The localization of the electrodes versus the anatomy showed an acceptably small error when compared to manual positioning. 
Furthermore, interactive visualization software is developed to show the detected electrodes together with pre-operative MRI images, which enables the physician to confirm that the electrode is placed at the expected anatomical location.}, address = {New York}, author = {Meesters, Stephan and Ossenblok, Pauly and Colon, Albert and Schijns, Olaf and Florack, Luc and Boon, Paul and Wagner, Louis and Fuster, Andrea}, booktitle = {Proceedings - International Symposium on Biomedical Imaging}, doi = {10.1109/ISBI.2015.7164034}, isbn = {9781479923748}, issn = {19458452}, keywords = {Computed tomography (CT),Computer-aided detection and diagnosis (CAD),Visualization}, pages = {976--979}, publisher = {Ieee}, series = {IEEE International Symposium on Biomedical Imaging}, title = {{Automated identification of intracranial depth electrodes in computed tomography data}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84944328676{\&}doi=10.1109{\%}2FISBI.2015.7164034{\&}partnerID=40{\&}md5=3989bdde301290c2ae9d67f2142f6235 {\%}3CGo to}, volume = {2015-July}, year = {2015} } @inproceedings{AlDhamari, abstract = {Creating patient-specific simulation models helps to make customised implant or treatment plans. To create such models, exact locations of the Origin and Insertion Points of the Ligaments (OIPL) are required. Locating these OIPL is usually done manually through a time-consuming procedure.A fast method to detect these OIPL automatically using spine atlas-based segmentation is proposed in this paper. The average detection rate is 96.16{\%} with a standard deviation of 3.45. The required time to detect these points is approximately 5 seconds. The proposed method can be generalised to detect any other important points or features related to a specific vertebra.The method is implemented as an open-source plugin for 3D Slicer. The method and the datasets can be download for free from a public server.}, address = {New York}, author = {Al-Dhamari, Ibraheem and Bauer, Sabine and Keller, Eva and Paulus, Dietrich}, booktitle = {Proceedings - International Symposium on Biomedical Imaging}, doi = {10.1109/ISBI.2019.8759223}, isbn = {9781538636411}, issn = {19458452}, keywords = {Acir,Asgd,Atlas-segmentation,Detection,Origin and insertion points,Registration,Spine}, pages = {48--51}, publisher = {Ieee}, series = {IEEE International Symposium on Biomedical Imaging}, title = {{Automatic detection of cervical spine ligaments origin and insertion points}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85073909464{\&}doi=10.1109{\%}2FISBI.2019.8759223{\&}partnerID=40{\&}md5=1c62e6194c979866476e3863b6d3f513 {\%}3CGo to}, volume = {2019-April}, year = {2019} } @inproceedings{Suter2018, abstract = {The cortical thickness and curvature of the human brain have proven to be valuable markers to detect and monitor neurodegenerative diseases [1]. Since the computational burden of currently available tools for brain morphometry is very high, this analysis often is only used for retrospective studies and not routinely in the clinics. A first attempt at a clinical use of cortical morphology is reported in [2]. We present an experiment for fast morphometry estimations using Random Forest (RF) regression [3] directly from MR imaging data. 
An uncertainty-aware voxel-wise, parcellation-wise, and multioutput model was built to estimate the thickness and mean curvature of the human cerebral cortex in 15 minutes instead of many hours for mesh-based tools. Preliminary results on a healthy controls database with 315 subjects show a substantial bias for the voxel-wise prediction, but high scan-rescan robustness, the proposed multi-output-parcellation prediction demonstrates the feasibility of the approach.}, address = {New York}, author = {Suter, Yannick and Rummel, Christian and Wiest, Roland and Reyes, Mauricio}, booktitle = {Proceedings - International Symposium on Biomedical Imaging}, doi = {10.1109/ISBI.2018.8363752}, isbn = {9781538636367}, issn = {19458452}, keywords = {Cortical curvature,Cortical thickness,Human brain morphometry,Machine learning,Random forest regression}, pages = {1052--1055}, publisher = {Ieee}, series = {IEEE International Symposium on Biomedical Imaging}, title = {{Fast and uncertainty-aware cerebral cortex morphometry estimation using random forest regression}}, type = {Book Section}, url = {{\%}3CGo to https://www.scopus.com/inward/record.uri?eid=2-s2.0-85048099086{\&}doi=10.1109{\%}2FISBI.2018.8363752{\&}partnerID=40{\&}md5=e22ed68e255aa6c6af3b32100df7276a}, volume = {2018-April}, year = {2018} } @inproceedings{Ali2007, abstract = {Segmentation of transparent cells in brightfield microscopy images could facilitate the quantitative analysis of corresponding fluorescence images. However, this presents a challenge due to irregular morphology and weak intensity variation, particularly in ultra-thin regions. A boundary detection technique is applied to a series of variable focus images whereby a level set contour is initialised on a defocused image with improved intensity contrast, and subsequently evolved towards the correct boundary using images of improving focus. Local phase coherence is used to identify features within the images, driving contour evolution particularly in near-focus images which lack intensity contrast. Preliminary results demonstrate the effectiveness of this approach in segmenting the main cell body regions. {\textcopyright} 2007 IEEE.}, address = {New York}, author = {Ali, Rehan and Gooding, Mark and Christlieb, Martin and Brady, Michael}, booktitle = {2007 4th IEEE International Symposium on Biomedical Imaging: From Nano to Macro - Proceedings}, doi = {10.1109/ISBI.2007.356787}, isbn = {1424406722}, keywords = {Biomedical image processing,Image segmentation,Microscopy}, pages = {57--60}, publisher = {Ieee}, series = {IEEE International Symposium on Biomedical Imaging}, title = {{Phase-based segmentation of cells from brightfield microscopy}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-36348985016{\&}doi=10.1109{\%}2FISBI.2007.356787{\&}partnerID=40{\&}md5=dd565221c35ae17147c212afbaded4f7 {\%}3CGo to}, year = {2007} } @inproceedings{Rezaei2013, abstract = {To date, attenuation correction of gated PET emission data remains a challenge. Joint activity and attenuation estimation methods may contribute to solving this challenge. In this work, we demonstrate a framework in which the gated PET activity and attenuation images are jointly reconstructed and then registered to a reference frame by a joint registration approach. The method is studied and compared to common approaches by means of 2D and 3D simulations. 
{\textcopyright} 2013 IEEE.}, address = {New York}, author = {Rezaei, Ahmadreza and Nuyts, Johan}, booktitle = {IEEE Nuclear Science Symposium Conference Record}, doi = {10.1109/NSSMIC.2013.6829031}, isbn = {9781479905348}, issn = {10957863}, publisher = {Ieee}, series = {IEEE Nuclear Science Symposium and Medical Imaging Conference}, title = {{Joint registration of attenuation and activity images in gated TOF-PET}}, type = {Book Section}, url = {{\%}3CGo to https://www.scopus.com/inward/record.uri?eid=2-s2.0-84904204340{\&}doi=10.1109{\%}2FNSSMIC.2013.6829031{\&}partnerID=40{\&}md5=9a3c352e39a5cde2bb765fec1156cfb1}, year = {2013} } @inproceedings{Adluru2006, abstract = {Segmentation of the myocardium in dynamic contrast enhanced MR short axis images is an important step towards the estimation of semi-quantitative or quantitative parameters to determine the perfusion to the tissue regions. The perfusion indices of the tissue are obtained by dividing the tissue into regions of interest and estimating perfusion to each region. A fast automatic segmentation method based on level sets has been developed that makes use of the spatial and temporal information available in the dynamic images. The algorithm is validated on cardiac data qualitatively and quantitatively by comparing against regional flow indices from manually segmented tissue regions. {\textcopyright} 2006 IEEE.}, address = {New York}, author = {Adluru, Ganesh and Dibella, Edward V.R. and Whitaker, Ross T.}, booktitle = {2006 3rd IEEE International Symposium on Biomedical Imaging: From Nano to Macro - Proceedings}, doi = {10.1109/isbi.2006.1624870}, isbn = {0780395778}, pages = {133--136}, publisher = {Ieee}, series = {IEEE International Symposium on Biomedical Imaging}, title = {{Automatic segmentation of cardiac short axis slices in perfusion MRI}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-33750957116{\&}partnerID=40{\&}md5=94b184992f39bcb89fce66f4291f1585 {\%}3CGo to}, volume = {2006}, year = {2006} } @inproceedings{Stevenson2011, abstract = {Studying the surface of an internal organ such as the placenta using three-dimensional ultrasound (3D US) is difficult. Image data from the surrounding tissue makes accurate identification of the interface technically challenging. The placental/maternal interface (basal plate) is thought to be the location of significant vascular pathology causing major maternal and fetal morbidity. We propose a new method for identifying this interface which combined with parameterisation (flattening) offers a novel way to study the vasculature of the developing placenta. {\textcopyright} 2011 IEEE.}, address = {New York}, author = {Stevenson, Gordon N. and Collins, Sally L. and Impey, Lawrence and Noble, J. 
Alison}, booktitle = {Proceedings - International Symposium on Biomedical Imaging}, doi = {10.1109/ISBI.2011.5872547}, isbn = {9781424441280}, issn = {19457928}, keywords = {Doppler measurements,Ultrasonography,anatomical structure,biomedical image processing,pregnancy}, pages = {891--894}, publisher = {Ieee}, series = {IEEE International Symposium on Biomedical Imaging}, title = {{Surface parameterisation of the utero/placental interface using 3D power doppler ultrasound}}, type = {Book Section}, url = {{\%}3CGo to https://www.scopus.com/inward/record.uri?eid=2-s2.0-80055033565{\&}doi=10.1109{\%}2FISBI.2011.5872547{\&}partnerID=40{\&}md5=906b8369dc14ae8e1bb4597aa31b1ec7}, year = {2011} } @incollection{Tourbier2014, abstract = {Fetal MRI reconstruction aims at finding a high-resolution image given a small set of low-resolution images. It is usually modeled as an inverse problem where the regularization term plays a central role in the reconstruction quality. Literature has considered several regularization terms s.a. Dirichlet/Laplacian energy [1], Total Variation (TV)-based energies [2,3] and more recently non-local means [4]. Although TV energies are quite attractive because of their ability in edge preservation, standard explicit steepest gradient techniques have been applied to optimize fetal-based TV energies. The main contribution of this work lies in the introduction of a well-posed TV algorithm from the point of view of convex optimization. Specifically, our proposed TV optimization algorithm for fetal reconstruction is optimal w.r.t. the asymptotic and iterative convergence speeds O(1/n 2) and O(1/√$\epsilon$), while existing techniques are in O(1/n) and O(1/$\epsilon$). We apply our algorithm to (1) clinical newborn data, considered as ground truth, and (2) clinical fetal acquisitions. Our algorithm compares favorably with the literature in terms of speed and accuracy. {\textcopyright} 2014 Springer International Publishing.}, address = {Berlin}, author = {Tourbier, S{\'{e}}bastien and Bresson, Xavier and Hagmann, Patric and Thiran, Jean Philippe and Meuli, Reto and Cuadra, Meritxell Bach}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, doi = {10.1007/978-3-319-10470-6_32}, editor = {Golland, P and Hata, N and Barillot, C and Hornegger, J and Howe, R}, isbn = {9783319104690}, issn = {16113349}, number = {PART 2}, pages = {252--259}, publisher = {Springer-Verlag Berlin}, series = {Lecture Notes in Computer Science}, title = {{Efficient total variation algorithm for fetal brain MRI reconstruction}}, type = {Serial}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84906979454{\&}doi=10.1007{\%}2F978-3-319-10470-6{\_}32{\&}partnerID=40{\&}md5=d90bc6bfffc46271b10c0f02d0ea2f2a {\%}3CGo to}, volume = {8674 LNCS}, year = {2014} } @incollection{Seidel2013a, abstract = {The segmentation of three-dimensional microscopic images of cardiac tissues provides important parameters for characterizing cardiac diseases and modeling of tissue function. Segmenting these images is, however, challenging. Currently only time-consuming manual approaches have been developed for this purpose. Here, we introduce an efficient approach for the semi-automatic segmentation (SAS) of cardiomyocytes and the extracellular space in image stacks obtained from confocal microscopy. The approach is based on a morphological watershed algorithm and iterative creation of watershed seed points on a distance map. 
Results of SAS were consistent with results from manual segmentation (Dice similarity coefficient: 90.8±2.6{\%}). Cell volume was 4.6±6.5{\%} higher in SAS cells, which mainly resulted from cell branches and membrane protrusions neglected by manual segmentation. We suggest that the novel approach constitutes an important tool for characterizing normal and diseased cardiac tissues. Furthermore, the approach is capable of providing crucial parameters for modeling of tissue structure and function. {\textcopyright} 2013 Springer-Verlag.}, address = {Berlin}, author = {Seidel, Thomas and Draebing, Thomas and Seemann, Gunnar and Sachse, Frank B.}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, doi = {10.1007/978-3-642-38899-6_36}, editor = {Ourselin, S and Rueckert, D and Smith, N}, isbn = {9783642388989}, issn = {03029743}, keywords = {algorithm,cardiac tissue,confocal microscopy,segmentation,three-dimensional}, pages = {300--307}, publisher = {Springer-Verlag Berlin}, series = {Lecture Notes in Computer Science}, title = {{A semi-automatic approach for segmentation of three-dimensional microscopic image stacks of cardiac tissue}}, type = {Serial}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84879835400{\&}doi=10.1007{\%}2F978-3-642-38899-6{\_}36{\&}partnerID=40{\&}md5=f7e591d9366417855cbf78f95bd7e1d2 {\%}3CGo to}, volume = {7945 LNCS}, year = {2013} } @inproceedings{Haak2012, abstract = {A transesophageal echocardiography (TEE) micro-probe is suitable for monitoring long minimally invasive interventions in the heart, because it is well tolerated by patients. To visualize complex 3D structures of the beating heart, a 4D-image reconstruction derived from irregularly and sparsely sampled 2D images is needed. We previously showed that normalized convolution (NC) with optimized kernels performs better than nearest-neighbor or linear interpolation. In order to use NC for image reconstructions we need to be able to predict optimal kernel sizes. We therefore present an advanced optimization scheme, and estimate optimal NC kernel sizes for five different patient-data sets. From the optimization results we derive a model for estimating optimal NC kernel sizes. As ground truth (GT), we used five full-volume 4D TEE patient scans, acquired with the X7-2t matrix transducer. To simulate 2D data acquisition, the GT datasets were sliced at random rotation angles and at random normalized cardiac phases. Data sets containing 400, 600, 900, 1350, and 1800 2D images were created for all patients, producing a total of 25 data sets. A 2D Gaussian function was used as NC kernel, and optimal kernel sizes were obtained with a quasi-Newton optimizer. A power law model was fitted to the optimal kernels estimated. We conclude that optimal kernel sizes for NC can be successfully predicted by a model at the cost of a relatively small increase in the reconstruction error. {\textcopyright} 2012 IEEE.}, address = {New York}, author = {Haak, Alexander and Klein, Stefan and {Van Burken}, Gerard and {De Jong}, Nico and {Van Der Steen}, Antonius F.W. and Bosch, Johannes G. 
and {Van Stralen}, Marijn and Pluim, Josien P.W.}, booktitle = {IEEE International Ultrasonics Symposium, IUS}, doi = {10.1109/ULTSYM.2012.0175}, isbn = {9781467345613}, issn = {19485719}, keywords = {kernel optimization,normalized convolution,quasi newton}, pages = {703--706}, publisher = {Ieee}, series = {2012 Ieee International Ultrasonics Symposium}, title = {{Optimal kernel sizes for 4D image reconstruction using normalized convolution from sparse fast-rotating transesophageal 2D ultrasound images}}, type = {Book}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84882446800{\&}doi=10.1109{\%}2FULTSYM.2012.0175{\&}partnerID=40{\&}md5=2eab7eaf8d8fbce0ab5d12917b0183ab {\%}3CGo to}, year = {2012} } @inproceedings{Akbarzadeh, abstract = {Diagnosis , staging and treatment of disease depends on the morphological and functional information obtained from multimodality molecular imaging systems. The combination of functional and morphological information is now routinely performed to overcome the limitations of each individual modality. Attenuation of photons in the object under study is one of the main limitations of quantitative PET imaging. Attenuation correction plays a pivotal role in PET imaging. However, the availability of CT data on hybrid PET/CT scanners made it possible to build an accurate attenuation map. One of the well-known methods for generation of the attenuation map on PE/MRI systems is MR-based attenuation correction (MRAC) where image segmentation is used to classify MRI into several classes corresponding to different attenuation factors. In this study we investigate the effect of using different numbers of classes for the generation of attenuation maps on the accuracy of attenuation correction of PET data. The study was carried out using simulations of the XCAT phantom and 10 clinical studies. For the later, CT and PET images of 10 patients were used with CT-based attenuation correction assumed as reference. MRI was classified into different classes to produce two, three and four-class attenuation maps using the ITK library. The relative error showed that the lower number of classes will increase the global error over 8{\%}. The elimination of bony structures from the attenuation map will cause a local error over 3{\%}. In clinical studies, SUV mean and SUVmax were calculated for each AC method. The results seem to indicate an underestimation of 11{\%} because of neglecting bone. {\textcopyright} 2011 IEEE.}, address = {New York}, author = {Akbarzadeh, A. and Ay, M. R. and Ahmadian, A. and {Riahi Alam}, N. and Zaidi, H.}, booktitle = {IEEE Nuclear Science Symposium Conference Record}, doi = {10.1109/NSSMIC.2011.6152682}, isbn = {9781467301183}, issn = {10957863}, pages = {2524--2530}, publisher = {Ieee}, series = {IEEE Nuclear Science Symposium and Medical Imaging Conference}, title = {{Impact of using different tissue classes on the accuracy of MR-based attenuation correction in PET-MRI}}, type = {Book Section}, url = {{\%}3CGo to https://www.scopus.com/inward/record.uri?eid=2-s2.0-84858681408{\&}doi=10.1109{\%}2FNSSMIC.2011.6152682{\&}partnerID=40{\&}md5=cb5e411de14cb1afbdf27ca41e4b2746}, year = {2011} } @inproceedings{Fishbaugh2018a, abstract = {Statistical shape analysis captures the geometric properties of a given set of shapes, obtained from medical images, by means of statistical methods. Orthognathic surgery is a type of craniofacial surgery that is aimed at correcting severe skeletal deformities in the mandible and maxilla. 
Methods assuming spherical topology cannot represent the class of anatomical structures exhibiting complex geometries and topologies, including the mandible. In this paper we propose methodology based on non-rigid deformations of 3D geometries to be applied to objects with thin, complex structures. We are able to accurately and quantitatively characterize bone healing at the osteotomy site as well as condylar remodeling for three orthognathic surgery cases, demonstrating the effectiveness of the proposed methodology.}, address = {New York}, author = {Fishbaugh, James and Pascal, Laura and Fischer, Luke and Nguyen, Tung and Boen, Celso and Goncalves, Joao and Gerig, Guido and Paniagua, Beatriz}, booktitle = {Proceedings - International Symposium on Biomedical Imaging}, doi = {10.1109/ISBI.2018.8363742}, isbn = {9781538636367}, issn = {19458452}, keywords = {Complex topology,Diffeomorphic shape registration,Orthognathic surgery,Statistical shape analysis,Statistical shape modeling}, month = {apr}, pages = {1010--1013}, publisher = {Ieee}, series = {IEEE International Symposium on Biomedical Imaging}, title = {{Estimating shape correspondence for populations of objects with complex topology}}, type = {Book Section}, url = {https://ieeexplore.ieee.org/document/8363742/ {\%}3CGo to}, volume = {2018-April}, year = {2018} } @inproceedings{Schiwarth2018, abstract = {Fiber-reinforced polymers (FRPs) are of great importance in various industries because of their superior properties as compared to conventional materials, their versatile processing, and their wide application possibilities. To fulfil the high-quality standards in its respective applications, industrial 3D X-ray computed tomography (XCT) is increasingly used. It enables an accurate, non-destructive characterization of material features such as inclusions, voids, fibers, or other reinforcements, which is of core importance for material and component design. In this work we present FeatureAnalyzer, a generalization of the previously introduced PorosityAnalyzer tool, which allows to analyze dataset series as generated for exploring the parameter space of image processing workflows (including pre-filtering, segmentation, postprocessing or quantification) applied to XCT datasets of fiber-reinforced polymers. With a scatter plot matrix (SPLOM), the characteristics of the features of interest may be examined in more detail regarding the used input and output parameters. Individual results may be selected in the SPLOM and analyzed using 2D slice views and 3D renderings. For this work, three different samples (sample {\#}1 - {\#}3) were scanned by means of XCT and were evaluated by using FeatureAnalyzer. The samples {\#}1 and {\#}2 have a porosity value of approximately 1.7 vol. {\%}. By using the FeatureAnalyzer in combination with SPLOM, the threshold parameters could be analyzed before the over-segmentation of voids occurs. Additional evaluations by parallel coordinates clearly show, that sample {\#}2 has a higher number of spherical voids in the center of the specimen compared to sample {\#}1. By evaluating the resin content of sample {\#}3, the individual layer thickness could be measured. The source code of the tool is available on Github: https://github.com/3dct/open-iA/}, address = {Bristol}, author = {Schiwarth, M. and Weissenb{\"{o}}ck, J. and Plank, B. and Fr{\"{o}}hler, B. and Heinzl, C. 
and Kastner, J.}, booktitle = {IOP Conference Series: Materials Science and Engineering}, doi = {10.1088/1757-899X/406/1/012014}, issn = {1757899X}, number = {1}, publisher = {Iop Publishing Ltd}, series = {IOP Conference Series-Materials Science and Engineering}, title = {{Visual analysis of void and reinforcement characteristics in X-ray computed tomography dataset series of fiber-reinforced polymers}}, type = {Book Section}, url = {{\%}3CGo to https://www.scopus.com/inward/record.uri?eid=2-s2.0-85054221856{\&}doi=10.1088{\%}2F1757-899X{\%}2F406{\%}2F1{\%}2F012014{\&}partnerID=40{\&}md5=e1757458a87eb396d4743ad19f658d5a}, volume = {406}, year = {2018} } @article{johnson2015itk, abstract = {The Insight Toolkit (ITK) is an open-source software toolkit for performing registration and segmen- tation. Segmentation is the process of identifying and classifying data found in a digitally sampled representation. Typically the sampled representation is an image acquired fromsuchmedical instru- mentation as CT orMRI scanners. Registration is the task of aligning or developing correspondences between data. For example, in themedical environment, a CT scan may be aligned with aMRI scan in order to combine the information contained in both.}, author = {Johnson, Hans J. and McCormick, Matthew M and Ibanez, Luis}, isbn = {978-1930934276}, journal = {Kitware, Inc.(January 2015)}, keywords = {Guide,Registration,Segmentation}, publisher = {Kitware, Inc.}, title = {{The ITK Software Guide Book 1: Introduction and Development Guidelines Fourth Edition Updated for ITK version 4.7}}, url = {https://itk.org/}, year = {2015} } @article{Johnson2018, abstract = {A robust fully automated algorithm for identifying an arbitrary number of landmark points in the human brain is described and validated. The proposed method combines statistical shape models with trained brain morphometric measures to estimate midbrain landmark positions reliably and accurately. Gross morphometric constraints provided by automatically identified eye centers and the center of the head mass are shown to provide robust initialization in the presence of large rotations in the initial head orientation. Detection of primary midbrain landmarks are used as the foundation from which extended detection of an arbitrary set of secondary landmarks in different brain regions by applying a linear model estimation and principle component analysis. This estimation model sequentially uses the knowledge of each additional detected landmark as an improved foundation for improved prediction of the next landmark location. The accuracy and robustness of the presented method was evaluated by comparing the automatically generated results to two manual raters on 30 identified landmark points extracted from each of 30 T1-weighted magnetic resonance images. For the landmarks with unambiguous anatomical definitions, the average discrepancy between the algorithm results and each human observer differed by less than 1 mm from the average inter-observer variability when the algorithm was evaluated on imaging data collected from the same site as the model building data. Similar results were obtained when the same model was applied to a set of heterogeneous image volumes from seven different collection sites representing 3 scanner manufacturers. 
This method is reliable for general application in large-scale multi-site studies that consist of a variety of imaging data with different orientations, spacings, origins, and field strengths.}, author = {Ghayoor, Ali and Vaidya, Jatin G. and Johnson, Hans J.}, doi = {10.1016/j.neuroimage.2017.04.012}, file = {:Users/johnsonhj/Documents/Mendeley Desktop/Ghayoor, Vaidya, Johnson/NeuroImage/Ghayoor, Vaidya, Johnson - 2018 - Robust automated constellation-based landmark detection in human brain imaging.pdf:pdf}, issn = {10959572}, journal = {NeuroImage}, keywords = {Automated landmark detection,Morphometric measures,Principle component analysis,Statistical shape models}, pages = {471--481}, title = {{Robust automated constellation-based landmark detection in human brain imaging}}, volume = {170}, year = {2018} } @incollection{Avants2012, abstract = {Publicly available scientific resources help establish evaluation standards, provide a platform for teaching and may improve reproducibility. Version 4 of the Insight ToolKit ( ITK ) seeks to establish new standards in publicly available image registration methodology. In this work, we provide an overview and preliminary evaluation of the revised toolkit against registration based on the previous major ITK version (3.20). Furthermore, we propose a nomenclature that may be used to discuss registration frameworks via schematic representations. In total, the ITK contribution is intended as a structure to support reproducible research practices, will provide a more extensive foundation against which to evaluate new work in image registration and also enable application level programmers a broad suite of tools on which to build. {\textcopyright} 2012 Springer-Verlag.}, annote = {{\#}{\#}CONTRIBUTIONS: As a member of ITK development team, I worked closely to develop, test, and implement the registration frameworks described in this work.:{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Conference :{\#}{\#}}, author = {Avants, Brian B. and Tustison, Nicholas J. and Song, Gang and Wu, Baohua and Stauffer, Michael and McCormick, Matthew M.
and Johnson, Hans J. and Gee, James C.}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, chapter = {A Unified}, doi = {10.1007/978-3-642-31340-0_28}, edition = {5th Intern}, editor = {{Dawant, B.; Christensen, G.E.; Fitzpatrick, J.M.; Rueckert}, D (Eds.}, file = {:Users/johnsonhj/Documents/Mendeley Desktop/Avants et al/Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)/Avants et al. - 2012 - A unified image registration framework for ITK.pdf:pdf}, isbn = {9783642313394}, issn = {03029743}, number = {LNCS 7359}, organization = {Springer Berlin Heidelberg}, pages = {266--275}, publisher = {Springer Berlin Heidelberg}, title = {{A unified image registration framework for ITK}}, url = {http://link.springer.com/10.1007/978-3-642-31340-0{\_}28 http://link.springer.com/chapter/10.1007{\%}2F978-3-642-31340-0{\_}28 http://link.springer.com/chapter/10.1007{\%}252F978-3-642-31340-0{\_}28}, volume = {7359 LNCS}, year = {2012} } @article{forbes2016open, abstract = {The creation of high-quality medical imaging reference atlas datasets with consistent dense anatomical region labels is a challenging task. Reference atlases have many uses in medical image applications and are essential components of atlas-based segmentation tools commonly used for producing personalized anatomical measurements for individual subjects. The process of manual identification of anatomical regions by experts is regarded as a so-called gold standard; however, it is usually impractical because of the labor-intensive costs. Further, as the number of regions of interest increases, these manually created atlases often contain many small inconsistently labeled or disconnected regions that need to be identified and corrected. This project proposes an efficient process to drastically reduce the time necessary for manual revision in order to improve atlas label quality. We introduce the LabelAtlasEditor tool, a SimpleITK-based open-source label atlas correction tool distributed within the image visualization software 3D Slicer. LabelAtlasEditor incorporates several 3D Slicer widgets into one consistent interface and provides label-specific correction tools, allowing for rapid identification, navigation, and modification of the small, disconnected erroneous labels within an atlas. The technical details for the implementation and performance of LabelAtlasEditor are demonstrated using an application of improving a set of 20 Huntingtons Disease-specific multi-modal brain atlases. Additionally, we present the advantages and limitations of automatic atlas correction. After the correction of atlas inconsistencies and small, disconnected regions, the number of unidentified voxels for each dataset was reduced on average by 68.48{\%}.}, annote = {From Duplicate 1 (An Open-Source Label Atlas Correction Tool and Preliminary Results on Huntingtons Disease Whole-Brain MRI Atlases. - Forbes, Jessica L; Kim, Regina E Y; Paulsen, Jane S; Johnson, Hans J) {\#}{\#}CONTRIBUTIONS: I was the primary mentor for all aspects of this project. I secondarily responsible for this paper. I had contributions to the software methods development, interpretation of validation results for this work. 
I was the primary author of the manuscript and oversaw revising the intellectual content of the medical imaging methods applied, and their interpretation with respect to the multi-site nature of the data collection process. :{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Journal :{\#}{\#}}, author = {Forbes, Jessica L. and Kim, Regina E.Y. and Paulsen, Jane S. and Johnson, Hans J.}, doi = {10.3389/fninf.2016.00029}, file = {:Users/johnsonhj/Library/Application Support/Mendeley Desktop/Downloaded/Forbes et al. - 2016 - An Open-Source Label Atlas Correction Tool and Preliminary Results on Huntingtons Disease Whole-Brain MRI Atlases.pdf:pdf}, issn = {16625196}, journal = {Frontiers in Neuroinformatics}, keywords = {Brain MRI,Huntingtons Disease,ITK,Label atlas,Multi-atlas,Multi-modal,Open-source}, month = {aug}, number = {AUG}, pages = {1--11}, pmid = {27536233}, publisher = {Frontiers Media SA}, title = {{An open-source label atlas correction tool and preliminary results on huntingtons disease whole-brain MRI atlases}}, url = {http://journal.frontiersin.org/Article/10.3389/fninf.2016.00029/abstract}, volume = {10}, year = {2016} } @article{RID:1020151228438-28, abstract = {The BRAINS (Brain Research: Analysis of Images, Networks, and Systems) image analysis software has been in use, and in constant development, for over 20 years. The original neuroimage analysis pipeline using BRAINS was designed as a semiautomated procedure to measure volumes of the cerebral lobes and subcortical structures, requiring manual intervention at several stages in the process. Through use of advanced image processing algorithms the need for manual intervention at stages of image realignment, tissue sampling, and mask editing have been eliminated. In addition, inhomogeneity correction, intensity normalization, and mask cleaning routines have been added to improve the accuracy and consistency of the results. The fully automated method, AutoWorkup, is shown in this study to be more reliable (ICC ≥ 0.96, Jaccard index ≥ 0.80, and Dice index ≥ 0.89 for all tissues in all regions) than the average of 18 manual raters. On a set of 1130 good quality scans, the failure rate for correct realignment was 1.1{\%}, and manual editing of the brain mask was required on 4{\%} of the scans. In other tests, AutoWorkup is shown to produce measures that are reliable for data acquired across scanners, scanner vendors, and across sequences.
Application of AutoWorkup for the analysis of data from the 32-site, multivendor PREDICT-HD study yield estimates of reliability to be greater than or equal to 0.90 for all tissues and regions. {\textcopyright} 2010 Elsevier Inc.}, annote = {{\#}{\#}CONTRIBUTIONS: I developed custom analysis software to achieve the desired interpretation of results. I had substantial contributions to the software methods development, interpretation of validation results for this work. I assisted with critically reviewing and revising the intellectual content of the medical imaging methods applied, and their interpretation with respect to the multi-site nature of the data collection process. :{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Journal :{\#}{\#}}, author = {Pierson, Ronald and Johnson, Hans and Harris, Gregory and Keefe, Helen and Paulsen, Jane S. and Andreasen, Nancy C. and Magnotta, Vincent A.}, doi = {10.1016/j.neuroimage.2010.06.047}, file = {:Users/johnsonhj/Documents/Mendeley Desktop/Pierson et al/NeuroImage/Pierson et al. - 2011 - Fully automated analysis using BRAINS AutoWorkup.pdf:pdf}, isbn = {1095-9572 (Electronic)$\backslash$n1053-8119 (Linking)}, issn = {10538119}, journal = {NeuroImage}, keywords = {Automated image analysis,BRAINS,Morphometry,Pipeline,Segmentation,Volumetric analysis}, month = {jan}, number = {1}, pages = {328--336}, pmid = {20600977}, publisher = {Elsevier Inc.}, title = {{Fully automated analysis using BRAINS: AutoWorkup}}, url = {http://www.sciencedirect.com/science/article/pii/S1053811910009055 http://www.ncbi.nlm.nih.gov/pubmed/20600977}, volume = {54}, year = {2011} } @inproceedings{Gerard2016, abstract = {Lung segmentation is a critical initial step in planning radiation therapy interventions for lung cancer patients. Achieving robust automatic segmentation of lungs with large tumors is challenging due to large variations in lung morphology, tumor location, and tumor shape between subjects.
We present an automatic method to segment lungs with large tumors in CT images using an initial intensity based segmentation followed by alpha shape construction and graph search. We evaluated our method by comparing automated segmentations to manual segmentations on twelve subjects. Computed metrics for segmentation quality include average surface distance of 0.727 mm and average DICE coefficient of 0.970. These results demonstrate that the proposed method accurately segments the entire lung regions both free of and in the presence of large tumors.}, annote = {{\#}{\#}CONTRIBUTIONS: I assisted with methods development and data analysis for this paper. I was responsible for the data processing for one of the evaluated algorithms. I made the primary contributions to the interpretation of validation results for this work. I contributed to revising the intellectual content of the medical imaging methods applied. :{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Conference :{\#}{\#}}, author = {Gerard, Sarah E and Johnson, Hans J. and Bayouth, John E and Christensen, Gary E.
and Du, Kaifang and Guo, Junfeng and Reinhardt, Joseph M}, booktitle = {PIA: Pulmonary Image Analysis}, file = {:Users/johnsonhj/Documents/Mendeley Desktop/Gerard et al/PIA Pulmonary Image Analysis/Gerard et al. - 2016 - Alpha Shapes for Lung Segmentation in the Presence of Large Tumors.pdf:pdf}, pages = {1--9}, title = {{Alpha Shapes for Lung Segmentation in the Presence of Large Tumors}}, year = {2016} } @article{avants2015neuroinformatics, annote = {{\#}{\#}CONTRIBUTIONS: As a contributing guest editor for a special issue of "Frontiers in Neuroinformatics" I had substantial oversight of the criteria for selection, review, and ultimate publication of materials in the issue. :{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Editorial :{\#}{\#}}, author = {Avants, Brian B. and Johnson, Hans J. and Tustison, Nicholas J.}, doi = {10.3389/fninf.2015.00005}, file = {:Users/johnsonhj/Documents/Mendeley Desktop/Avants, Johnson, Tustison/Frontiers in neuroinformatics/Avants, Johnson, Tustison - 2015 - Neuroinformatics and the The Insight ToolKit.pdf:pdf}, issn = {16625196}, journal = {Frontiers in Neuroinformatics}, keywords = {C++,ITK,Open source,Registration,Segmentation}, number = {MAR}, pages = {1--3}, pmid = {25859213}, publisher = {Frontiers Media SA}, title = {{Neuroinformatics and the The Insight ToolKit}}, url = {http://www.frontiersin.org/Neuroinformatics/10.3389/fninf.2015.00005/full}, volume = {9}, year = {2015} } @article{oguz2014dtiprep, abstract = {In the last decade, diffusion MRI (dMRI) studies of the human and animal brain have been used to investigate a multitude of pathologies and drug-related effects in neuroscience research. Study after study identifies white matter (WM) degeneration as a crucial biomarker for all these diseases. The tool of choice for studying WM is dMRI. However, dMRI has inherently low signal-to-noise ratio and its acquisition requires a relatively long scan time; in fact, the high loads required occasionally stress scanner hardware past the point of physical failure. As a result, many types of artifacts implicate the quality of diffusion imagery. Using these complex scans containing artifacts without quality control (QC) can result in considerable error and bias in the subsequent analysis, negatively affecting the results of research studies using them. However, dMRI QC remains an under-recognized issue in the dMRI community as there are no user-friendly tools commonly available to comprehensively address the issue of dMRI QC. As a result, current dMRI studies often perform a poor job at dMRI QC.
Thorough QC of dMRI will reduce measurement noise and improve reproducibility, and sensitivity in neuroimaging studies; this will allow researchers to more fully exploit the power of the dMRI technique and will ultimately advance neuroscience. Therefore, in this manuscript, we present our open-source software, DTIPrep, as a unified, user friendly platform for thorough QC of dMRI data. These include artifacts caused by eddy-currents, head motion, bed vibration and pulsation, venetian blind artifacts, as well as slice-wise and gradient-wise intensity inconsistencies. This paper summarizes a basic set of features of DTIPrep described earlier and focuses on newly added capabilities related to directional artifacts and bias analysis. {\textcopyright} 2014 Oguz, Farzinfar, Matsui, Budin, Liu, Gerig, Johnson and Styner.}, annote = {{\#}{\#}CONTRIBUTIONS: As a member of the National Alliance for Medical Imaging Computing (NAMIC) I collaborated on many software engineering projects. I made substantial contributions to the software methods development and to the interpretation of validation results for this work. I assisted with critically reviewing and revising the intellectual content of the medical imaging methods applied and their interpretation with respect to the multi-site nature of the data collection process. :{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Conference :{\#}{\#}}, author = {Oguz, Ipek and Farzinfar, Mahshid and Matsui, Joy and Budin, Francois and Liu, Zhexing and Gerig, Guido and Johnson, Hans J. and Styner, Martin}, doi = {10.3389/fninf.2014.00004}, file = {:Users/johnsonhj/Documents/Mendeley Desktop/Oguz et al/Frontiers in neuroinformatics/Oguz et al. - 2014 - DTIPrep quality control of diffusion-weighted images.pdf:pdf}, isbn = {1662-5196 (Electronic) 1662-5196 (Linking)}, issn = {16625196}, journal = {Frontiers in Neuroinformatics}, keywords = {Diffusion MRI,Diffusion tensor imaging,Open-source,Preprocessing,Quality control,Software}, number = {JAN}, pages = {4}, pmid = {24523693}, publisher = {Frontiers Media SA}, title = {{DTIPrep: Quality control of diffusion-weighted images}}, url = {http://journal.frontiersin.org/article/10.3389/fninf.2014.00004/abstract http://www.pubmedcentral.nih.gov/articlerender.fcgi?artid=3906573{\&}tool=pmcentrez{\&}rendertype=abstract http://journal.frontiersin.org/Journal/10.3389/fninf.2014.00004/full}, volume = {8}, year = {2014} } @techreport{Kim2011, abstract = {This document describes an affine transformation algorithm as an additional feature for landmark based registration in ITK www.itk.org. The algorithm is based on the paper by Sp{\"{a}}th, H [2]. The author derives a set of linear equations from paired landmarks and generates an affine transform from them. The method implemented here gives more freedom in the choice of registration and/or initialization method in ITK.
The submission describes the ITK implementation of the algorithm.}, author = {Kim, Regina E. Y. and Johnson, Hans J. and Williams, Norman K}, booktitle = {Engineering}, file = {:Users/johnsonhj/Documents/Mendeley Desktop/Kim et al/Engineering/Kim et al. - 2011 - Affine Transformation for Landmark Based Registration Initializer in ITK.pdf:pdf}, keywords = {Affine Transformation for Landmark Based Registration,Insight Journal}, mendeley-tags = {Insight Journal}, pages = {1--8}, title = {{Affine Transformation for Landmark Based Registration Initializer in ITK}}, year = {2011} } @article{yaniv2018simpleitk, abstract = {Modern scientific endeavors increasingly require team collaborations to construct and interpret complex computational workflows. This work describes an image-analysis environment that supports the use of computational tools that facilitate reproducible research and support scientists with varying levels of software development skills. The Jupyter notebook web application is the basis of an environment that enables flexible, well-documented, and reproducible workflows via literate programming. Image-analysis software development is made accessible to scientists with varying levels of programming experience via the use of the SimpleITK toolkit, a simplified interface to the Insight Segmentation and Registration Toolkit. Additional features of the development environment include user friendly data sharing using online data repositories and a testing framework that facilitates code maintenance. SimpleITK provides a large number of examples illustrating educational and research-oriented image analysis workflows for free download from GitHub under an Apache 2.0 license: github.com/InsightSoftwareConsortium/SimpleITK-Notebooks.}, author = {Yaniv, Ziv and Lowekamp, Bradley C. and Johnson, Hans J. and Beare, Richard}, doi = {10.1007/s10278-017-0037-8}, file = {:Users/johnsonhj/Documents/Mendeley Desktop/Yaniv et al/Journal of Digital Imaging/Yaniv et al. - 2018 - SimpleITK Image-Analysis Notebooks a Collaborative Environment for Education and Reproducible Research.pdf:pdf}, issn = {1618727X}, journal = {Journal of Digital Imaging}, keywords = {Image analysis,Open-source software,Python,R,Registration,Segmentation}, number = {3}, pages = {290--303}, pmid = {29181613}, publisher = {Springer International Publishing}, title = {{SimpleITK Image-Analysis Notebooks: a Collaborative Environment for Education and Reproducible Research}}, volume = {31}, year = {2018} } @inproceedings{shao2016population, abstract = {This paper examines the shape collapse problem that occurs when registering a pair of images or a population of images of the brain to a reference (target) image coordinate system using diffeomorphic image registration.
Shape collapse occurs when a foreground or background structure in an image with non-zero volume is transformed into a set of zero or near zero volume as measured on a discrete voxel lattice in the target image coordinate system. Shape collapse may occur during image registration when the moving image has a structure that is either missing or does not sufficiently overlap the corresponding structure in the target image[4]. Such a problem is common in image registration algorithms with large degrees of freedom such as many diffeomorphic image registration algorithms. Shape collapse is a concern when mapping functional data. For example, loss of signal may occur when mapping functional data such as fMRI, PET, SPECT using a transformation with a shape collapse if the functional signal occurs at the collapse region. This paper proposes a novel shape collapse measurement algorithm to detect the regions of shape collapse after image registration in pairwise registration. We further compute the shape collapse for a population of pairwise transformations such as occurs when registering many images to a common atlas coordinate system. Experiments are presented using the SyN diffeomorphic image registration algorithm. We demonstrate how changing the input parameters to the SyN registration algorithm can mitigate some of the collapse image registration artifacts.}, annote = {{\#}{\#}CONTRIBUTIONS: I assisted with methods development and data analysis for this paper. I made the primary contributions to the interpretation of validation results for this work. I contributed to revising the intellectual content of the medical imaging methods applied. :{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Journal :{\#}{\#}}, author = {Shao, Wei and Christensen, Gary E. and Johnson, Hans J. and Song, Joo H. and Durumeric, Oguz C. and Johnson, Casey P. and Shaffer, Joseph J. and Magnotta, Vincent A. and Fiedorowicz, Jess G. and Wemmie, John A.}, booktitle = {IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops}, doi = {10.1109/CVPRW.2016.75}, file = {:Users/johnsonhj/Documents/Mendeley Desktop/Shao et al/Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops/Shao et al. - 2016 - Population Shape Collapse in Large Deformation Registration of MR Brain Images.pdf:pdf}, isbn = {9781467388504}, issn = {21607516}, keywords = {Diffeomorphic image registration,Shape collapse}, pages = {549--557}, title = {{Population Shape Collapse in Large Deformation Registration of MR Brain Images}}, year = {2016} } @article{hawley2010introduction, author = {Lu, Wei and Johnson, Hans and Hawley, J and Dowling, J and Malaterre, M and Greer, P B and Salvado, O}, file = {:Users/johnsonhj/Documents/Mendeley Desktop/Lu et al/Insight Journal/Lu et al. - 2010 - Introduction to ITK resample in-place image filter.pdf:pdf}, journal = {Insight Journal}, pages = {3--6}, title = {{Introduction to ITK resample in-place image filter}}, url = {http://ir.uiowa.edu/etd/851/}, year = {2010} } @inproceedings{ghayoor2016tissue, abstract = {This paper describes enhancements to automate classification of brain tissues for multi-site degenerative magnetic resonance imaging (MRI) data analysis. Processing of large collections of MR images is a key research technique to advance our understanding of the human brain. Previous studies have developed a robust multi-modal tool for automated tissue classification of large-scale data based on expectation maximization (EM) method initialized by group-wise prior probability distributions. This work aims to augment the EM-based classification using a non-parametric fuzzy k-Nearest Neighbor (k-NN) classifier that can model the unique anatomical states of each subject in the study of degenerative diseases. The presented method is applicable to multi-center heterogeneous data analysis and is quantitatively validated on a set of 18 synthetic multi-modal MR datasets having six different levels of noise and three degrees of bias-field provided with known ground truth. Dice index and average Hausdorff distance are used to compare the accuracy and robustness of the proposed method to a state-of-the-art classification method implemented based on EM algorithm.
Both evaluation measurements show that presented enhancements produce superior results as compared to the EM only classification.}, annote = {{\#}{\#}CONTRIBUTIONS: I was the primary mentor for all aspects of this project and was secondarily responsible for this paper. I contributed to the software methods development and to the interpretation of validation results for this work. I was the primary author of the manuscript and oversaw revising the intellectual content of the medical imaging methods applied and their interpretation with respect to the multi-site nature of the data collection process. :{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Conference :{\#}{\#}}, author = {Ghayoor, Ali and Paulsen, Jane S. and Kim, Regina E. Y. and Johnson, Hans J.}, booktitle = {Medical Imaging 2016: Image Processing}, doi = {10.1117/12.2216625}, editor = {Styner, Martin A. and Angelini, Elsa D.}, file = {:Users/johnsonhj/Documents/Mendeley Desktop/Ghayoor et al/Medical Imaging 2016 Image Processing/Ghayoor et al. - 2016 - Tissue classification of large-scale multi-site MR data using fuzzy k-nearest neighbor method.pdf:pdf}, isbn = {9781510600195}, issn = {16057422}, keywords = {expectation maximization,fuzzy k-nearest neighborhood method,multi-site studies,neurodegenerative diseases,segmentation,tissue classification}, month = {mar}, organization = {International Society for Optics and Photonics}, pages = {97841V}, title = {{Tissue classification of large-scale multi-site MR data using fuzzy k-nearest neighbor method}}, url = {http://proceedings.spiedigitallibrary.org/proceeding.aspx?doi=10.1117/12.2216625}, volume = {9784}, year = {2016} } @inproceedings{kim2015efficient, abstract = {Advances in medical image applications have led to mounting expectations in regard to their impact on neuroscience studies. In light of this fact, a comprehensive application is needed to move neuroimaging data into clinical research discoveries in a way that maximizes collected data utilization and minimizes the development costs. We introduce BRAINS AutoWorkup, a Nipype based open source MRI analysis application distributed with BRAINSTools suite (http://brainsia.github.io/BRAINSTools/). This work describes the use of efficient and extensible automated brain MRI analysis workflow for large-scale multi-center longitudinal studies. We first explain benefits of our extensible workflow development using Nipype, including fast integration and validation of recently introduced tools with heterogeneous software infrastructures.
Based on this workflow development, we also discuss our recent advancements to the workflow for reliable and accurate analysis of multi-center longitudinal data. In addition to Nipype providing a unified workflow, its support for High Performance Computing (HPC) resources leads to a further increased time efficiency of our workflow. We show our success on a few selected large-scale studies, and discuss future direction of this translation research in medical imaging applications.}, annote = {{\#}{\#}CONTRIBUTIONS: I was the primary mentor for all aspects of this project and was secondarily responsible for this paper. I contributed to the software methods development and to the interpretation of validation results for this work. I was the primary author of the manuscript and oversaw revising the intellectual content of the medical imaging methods applied and their interpretation with respect to the multi-site nature of the data collection process. :{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Conference :{\#}{\#}}, author = {Kim, Regina E.Y. and Nopoulos, Peg and Paulsen, Jane and Johnson, Hans}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, doi = {10.1007/978-3-319-31808-0_7}, file = {:Users/johnsonhj/Documents/Mendeley Desktop/Kim et al/Workshop on Clinical Image-Based Procedures/Kim et al. - 2015 - Efficient and extensible workflow Reliable whole brain segmentation for large-scale, multi-center longitudinal human.pdf:pdf}, isbn = {9783319318073}, issn = {16113349}, keywords = {Brain,HPC/HTC,Large-scale,Longitudinal data,MRI,Pipeline}, organization = {Springer, Cham}, pages = {54--61}, title = {{Efficient and extensible workflow: Reliable whole brain segmentation for large-scale, multi-center longitudinal human MRI analysis using high performance/throughput computing resources}}, url = {http://link.springer.com/10.1007/978-3-319-31808-0{\_}7}, volume = {9401}, year = {2016} } @article{Lowekamp2013, abstract = {SimpleITK is a new interface to the Insight Segmentation and Registration Toolkit (ITK) designed to facilitate rapid prototyping, education and scientific activities via high level programming languages. ITK is a templated C++ library of image processing algorithms and frameworks for biomedical and other applications, and it was designed to be generic, flexible and extensible.
Initially, ITK provided a direct wrapping interface to languages such as Python and Tcl through the WrapITK system. Unlike WrapITK, which exposed ITK's complex templated interface, SimpleITK was designed to provide an easy to use and simplified interface to ITK's algorithms. It includes procedural methods, hides ITK's demand driven pipeline, and provides a template-less layer. Also SimpleITK provides practical conveniences such as binary distribution packages and overloaded operators. Our user-friendly design goals dictated a departure from the direct interface wrapping approach of WrapITK, toward a new facade class structure that only exposes the required functionality, hiding ITK's extensive template use. Internally SimpleITK utilizes a manual description of each filter with code-generation and advanced C++ meta-programming to provide the higher-level interface, bringing the capabilities of ITK to a wider audience. SimpleITK is licensed as open source software library under the Apache License Version 2.0 and more information about downloading it can be found at http://www.simpleitk.org. {\textcopyright} 2013 Lowekamp, Chen, Ib{\'{a}}{\~{n}}ez and Blezek.}, author = {Lowekamp, Bradley C. and Chen, David T. and Ib{\'{a}}{\~{n}}ez, Luis and Blezek, Daniel}, doi = {10.3389/fninf.2013.00045}, file = {:Users/johnsonhj/Documents/Mendeley Desktop/Lowekamp et al/Frontiers in Neuroinformatics/Lowekamp et al. - 2013 - The design of simpleITK.pdf:pdf}, isbn = {2762010004}, issn = {16625196}, journal = {Frontiers in Neuroinformatics}, keywords = {Image processing and analysis,Image processing software,Insight toolkit,Segmentation,Software design,Software development}, number = {DEC}, pages = {45}, pmid = {24416015}, title = {{The design of simpleITK}}, url = {http://www.pubmedcentral.nih.gov/articlerender.fcgi?artid=3874546{\&}tool=pmcentrez{\&}rendertype=abstract https://www.frontiersin.org/article/10.3389/fninf.2013.00045}, volume = {7}, year = {2013} } @article{Kim2014, abstract = {Machine learning (ML)-based segmentation methods are a common technique in the medical image processing field. In spite of numerous research groups that have investigated ML-based segmentation frameworks, there remains unanswered aspects of performance variability for the choice of two key components: ML algorithm and intensity normalization. This investigation reveals that the choice of those elements plays a major part in determining segmentation accuracy and generalizability. The approach we have used in this study aims to evaluate relative benefits of the two elements within a subcortical MRI segmentation framework. Experiments were conducted to contrast eight machine-learning algorithm configurations and 11 normalization strategies for our brain MR segmentation framework. For the intensity normalization, a Stable Atlas-based Mapped Prior (STAMP) was utilized to take better account of contrast along boundaries of structures. Comparing eight machine learning algorithms on down-sampled segmentation MR data, it was obvious that a significant improvement was obtained using ensemble-based ML algorithms (i.e., random forest) or ANN algorithms. Further investigation between these two algorithms also revealed that the random forest results provided exceptionally good agreement with manual delineations by experts. Additional experiments showed that the effect of STAMP-based intensity normalization also improved the robustness of segmentation for multicenter data sets.
The constructed framework obtained good multicenter reliability and was successfully applied on a large multicenter MR data set (n {\textgreater} 3000). Less than 10{\%} of automated segmentations were recommended for minimal expert intervention. These results demonstrate the feasibility of using the ML-based segmentation tools for processing large amount of multicenter MR images. We demonstrated dramatically different result profiles in segmentation accuracy according to the choice of ML algorithm and intensity normalization chosen. {\textcopyright} 2014 Elsevier Inc.}, annote = {{\#}{\#}CONTRIBUTIONS: I was the primary mentor for all aspects of this project and was secondarily responsible for this paper. I contributed to the software methods development and to the interpretation of validation results for this work. I was the primary author of the manuscript and oversaw revising the intellectual content of the medical imaging methods applied and their interpretation with respect to the multi-site nature of the data collection process. :{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Journal :{\#}{\#}}, author = {Kim, Eun Young and Magnotta, Vincent A. and Liu, Dawei and Johnson, Hans J.}, doi = {10.1016/j.mri.2014.04.016}, file = {:Users/johnsonhj/Documents/Mendeley Desktop/Kim et al/Magnetic Resonance Imaging/Kim et al.
- 2014 - Stable Atlas-based Mapped Prior (STAMP) machine-learning segmentation for multicenter large-scale MRI data.pdf:pdf}, isbn = {3193213152}, issn = {18735894}, journal = {Magnetic Resonance Imaging}, keywords = {Machine learning,Multicenter study,Random forest,Segmentation}, month = {may}, number = {7}, pages = {832--844}, pmid = {24818817}, publisher = {Elsevier Inc.}, title = {{Stable Atlas-based Mapped Prior (STAMP) machine-learning segmentation for multicenter large-scale MRI data}}, url = {http://www.ncbi.nlm.nih.gov/pubmed/24818817}, volume = {32}, year = {2014} } @article{hong2018genetic, abstract = {Huntington's disease (HD) is an inherited neurodegenerative disorder that causes progressive breakdown of striatal neurons. Standard white matter integrity measures like fractional anisotropy and mean diffusivity derived from diffusion tensor imaging were analyzed in prodromal-HD subjects; however, they studied either a whole brain or specific subcortical white matter structures with connections to cortical motor areas. In this work, we propose a novel analysis of a longitudinal cohort of 243 prodromal-HD individuals and 88 healthy controls who underwent two or more diffusion MRI scans as part of the PREDICT-HD study. We separately trace specific white matter fiber tracts connecting the striatum (caudate and putamen) with four cortical regions corresponding to the hand, face, trunk, and leg motor areas. A multi-tensor tractography algorithm with an isotropic volume fraction compartment allows estimating diffusion of fast-moving extra-cellular water in regions containing crossing fibers and provides quantification of a microstructural property related to tissue atrophy. The tissue atrophy rate is separately analyzed in eight cortico-striatal pathways as a function of CAG-repeats (genetic load) by statistically regressing out age effect from our cohort. The results demonstrate a statistically significant increase in isotropic volume fraction (atrophy) bilaterally in hand fiber connections to the putamen with increasing CAG-repeats, which connects the genetic abnormality (CAG-repeats) to an imaging-based microstructural marker of tissue integrity in specific white matter pathways in HD. Isotropic volume fraction measures in eight cortico-striatal pathways are also correlated significantly with total motor scores and diagnostic confidence levels, providing evidence of their relevance to HD clinical presentation.}, author = {Hong, Yi and O'Donnell, Lauren J. and Savadjiev, Peter and Zhang, Fan and Wassermann, Demian and Pasternak, Ofer and Johnson, Hans and Paulsen, Jane and Vonsattel, Jean Paul and Makris, Nikos and Westin, Carl F. and Rathi, Yogesh}, doi = {10.1002/hbm.24217}, file = {:Users/johnsonhj/Documents/Mendeley Desktop/Hong et al/Human Brain Mapping/Hong et al. - 2018 - Genetic load determines atrophy in hand cortico-striatal pathways in presymptomatic Huntington's disease.pdf:pdf}, issn = {10970193}, journal = {Human Brain Mapping}, keywords = {CAG-repeats,cortico-striatal pathways,diffusion MRI,isotropic volume fraction,prodromal-HD}, number = {10}, pages = {3871--3883}, title = {{Genetic load determines atrophy in hand cortico-striatal pathways in presymptomatic Huntington's disease}}, volume = {39}, year = {2018} } @book{Ibanez2003, abstract = {Digital watermarking is a technique of hiding specific identification data for copyright authentication. 
This technique is adapted here for interleaving patient information with medical images to reduce storage and transmission overheads. The text data are encrypted before interleaving with images to ensure greater security. The graphical signals are compressed and subsequently interleaved with the image. Differential pulse-code-modulation and adaptive-delta-modulation techniques are employed for data compression, and encryption and results are tabulated for a specific example.}, author = {Ibanez, Luis and Schroeder, Will and Ng, Lydia and Cates, Josh}, booktitle = {IEEE Transactions on Information Technology in Biomedicine}, doi = {10.1109/4233.966107}, isbn = {1930934106}, issn = {1089-7771}, number = {4}, pages = {539}, title = {{The ITK Software Guide: The Insight Segmentation and Registration Toolkit}}, url = {http://www.amazon.com/dp/1930934106}, volume = {5}, year = {2003} } @inproceedings{Ibanez2002, abstract = {This paper describes the design and implementation of the generic framework for image registration contained in the National Library of Medicine NLM/NIH Segmentation and Registration Toolkit (ITK). The problem of image registration has been modeled here as a structure of pluggable components that can be easily interchanged. The rationale behind the framework is presented in this paper both from the image processing and software engineering points of view. ITK is an open source project that provides a platform for developing image processing and analysis applications. State of the art practices of software engineering have been used for the design, implementation and testing of the toolkit The source code can be downloaded free of charge and used in academic and commercial applications.}, author = {Ib{\'{a}}{\~{n}}ez, L. and Ng, L. and Gee, J. and Aylward, S.}, booktitle = {Proceedings - International Symposium on Biomedical Imaging}, doi = {10.1109/ISBI.2002.1029264}, isbn = {078037584X}, issn = {19458452}, pages = {345--348}, title = {{Registration patterns: The generic framework for image registration of the insight toolkit}}, volume = {2002-Janua}, year = {2002} } @article{Newton2011, abstract = {Background: From teaching juniors and peers to educating patients, it is imperative for all doctors to have basic core teaching skills. The Junior Association for the Study of Medical Education (JASME) felt that a short course in the fundamentals of teaching would be well received by students. Context: This article shares the lessons from a one-day teaching course aimed at senior medical students. Qualitative feedback helped decide which aspects of the course were most valued. Intervention: The course was piloted in London. It combined interactive plenary sessions on teaching theory with practical teaching sessions. Each student taught a small group of others a basic clinical skill, and the student teacher then received extensive feedback from their peers and an experienced clinician with a special interest in medical education. There was an opportunity to re-teach part of the skill after having taken the feedback on board. Implications: Students completed questionnaires at the start and end of the day to ascertain their expectations of the course and what they found most useful. Expectations can be grouped into three main areas: students wanted to improve their teaching skills; gain teaching experience; and receive feedback on their teaching. The most valuable part of the course was being able to practise teaching and receive feedback. 
Keywords used to describe the feedback included 'individual', 'valuable', 'constructive', 'instant' and 'in depth'. By continuing to run similar workshops we hope that we can further encourage the teachers of tomorrow. {\textcopyright} Blackwell Publishing Ltd 2011.}, author = {Newton, Ashley and Wright, Lucie}, doi = {10.1111/j.1743-498X.2011.00453.x}, issn = {17434971}, journal = {Clinical Teacher}, number = {4}, pages = {254--257}, title = {{Teaching toolkit for medical students}}, volume = {8}, year = {2011} }
and Ib, Luis and Mccormick, Matthew and Consortium, Software}, booktitle = {The ITK Software GUIDE}, doi = {1�930934-15�7}, isbn = {978-1-930934-28-3}, keywords = {Guide,Registration,Segmentation}, pages = {805}, title = {{The ITK Software Guide Book 2 : Design and Functionality Fourth Edition Updated for ITK version 4.6}}, year = {2014} } @book{Ibanez2003a, abstract = {Digital watermarking is a technique of hiding specific identification data for copyright authentication. This technique is adapted here for interleaving patient information with medical images to reduce storage and transmission overheads. The text data are encrypted before interleaving with images to ensure greater security. The graphical signals are compressed and subsequently interleaved with the image. Differential pulse-code-modulation and adaptive-delta-modulation techniques are employed for data compression, and encryption and results are tabulated for a specific example.}, author = {Ibanez, Luis and Schroeder, Will and Ng, Lydia and Cates, Josh}, booktitle = {IEEE Transactions on Information Technology in Biomedicine}, doi = {10.1109/4233.966107}, isbn = {1930934106}, issn = {1089-7771}, number = {4}, pages = {539}, title = {{The ITK Software Guide: The Insight Segmentation and Registration Toolkit}}, url = {http://www.amazon.com/dp/1930934106}, volume = {5}, year = {2003} } @article{Papademetris2006, abstract = {This book is an edited collection of class handouts that was written for the graduate seminar “Programming for Medical Image Analysis” (ENAS 920a). This class was taught at Yale University, Department of Biomedical Engineering, in the Fall of 2006 and again in the Spring 2009 semester. Some the comments in this draft version of the book reflect this fact. For example, see comments beginning “at Yale”. Furthermore, many of the references that will appear in the final version are still omitted. It is made available at this stage in the hope that it will be useful.}, author = {Papademetris, Xenophon and Joshi, Alark}, journal = {Yale University}, pages = {283}, title = {{An Introduction to Programming for medical image Analysis with the visualization Toolkit}}, url = {http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.122.7477{\&}rep=rep1{\&}type=pdf}, year = {2006} } @book{Yarbs2015, abstract = {BACKGROUND: The analysis of eye movements (EM) by eye-tracking has been carried out for several decades to investigate mood regulation, emotional information processing, and psychomotor disturbances in depressive disorders.$\backslash$nMETHOD: A systematic review of all English language PubMed articles using the terms "saccadic eye movements" OR "eye-tracking" AND "depression" OR "bipolar disorders" was conducted using PRISMA guidelines. The aim of this review was to characterize the specific alterations of EM in unipolar and bipolar depression.$\backslash$nRESULTS: Findings regarding psychomotor disturbance showed an increase in reaction time in prosaccade and antisaccade tasks in both unipolar and bipolar disorders. In both disorders, patients have been reported to have an attraction for negative emotions, especially for negative pictures in unipolar and threatening images in bipolar disorder. However, the pattern could change with aging, elderly unipolar patients disengaging key features of sad and neutral stimuli. 
METHODological limitations generally include small sample sizes with mixed unipolar and bipolar depressed patients.$\backslash$nCONCLUSION: Eye movement analysis can be used to discriminate patients with depressive disorders from controls, as well as patients with bipolar disorder from patients with unipolar depression. General knowledge concerning psychomotor alterations and affective regulation strategies associated with each disorder can also be gained thanks to the analysis. Future directions for research on eye movement and depression are proposed in this review.}, author = {Yarbs, Aared L and Haigh, Basil and Ettinger, Ulrich and Kumari, Veena and Crawford, Trevor J. and Flak, Vanja and Sharma, Tonmoy and Davis, Robert E. and Corr, Philip J. and Brazil, Inti A. and de Bruijn, Ellen R A and Bulten, Berend H. and von Borries, A. Katinka L and van Lankveld, Jacques J D M and Buitelaar, Jan K. and Verkes, Robbert J. and Eisenbarth, Hedwig and Alpers, Georg W. and Singler, Eric and Gillath, Omri and Bahns, Angela J. and Burghart, Hayley A. and Clifton, Charles and Ferreira, Fernanda and Henderson, John M. and Inhoff, Albrecht W. and Liversedge, Simon P. and Reichle, Erik D. and Schotter, Elizabeth R. and Carvalho, Nicolas and Laurent, Eric and Noiret, Nicolas and Chopard, Gilles and Haffen, Emmanuel and Bennabi, Djamila and Vandel, Pierre and Gregory, R L}, booktitle = {Problems of Information Transmission}, doi = {10.1016/j.biopsycho.2004.03.014}, isbn = {0749-596X}, issn = {1931-1516}, keywords = {Antisaccade,Attraction,Automatic processing,Bipolar depression,Chest,E-Z Reader,Emotion,Endophenotype,Eye movement,Eye movement control,Eye-tracking,Friendship,Keith Rayner,Mating,Negative emotionality,Neuroticism,Oculomotor control,Prosaccade (reflexive saccade,Saccade,Schizophrenia,Schizotypal personality traits,Sentences and discourses,Unipolar depression,Visual scenes,Waist-to-hip ratio,Word recognition,behavioral adaptation,dimberg,e,emotion,error positivity,error signaling,error-related negativity,eye-tracking,facial expressions,facial expressions contain information,fast and automatic processing,g,havior,http://www.archive.org/details/eyebrainpsycholo00r,information is optimized,psychopathy,relevant for social be-,scan path,the processing of facial,thunberg,thus,visually-guided saccade)}, number = {1}, pages = {860--865}, pmid = {15312695}, title = {{Third edition : revised and updated World}}, url = {http://doi.apa.org/getdoi.cfm?doi=10.1037/a0022758{\%}0Ahttp://dx.doi.org/10.1016/j.biopsych.2008.08.011}, volume = {86}, year = {2015} } @article{Wang2005, abstract = {We have successfully created a software environment in which ultrasound data can be manipulated by, ITK (the Insight Tool-Kit), in real-time. We were able to access each frame generated within the resident computer of a TerasonTM Ultrasound Machine, convert it into the ITK image format, and demonstrate the concurrent operation of ITK on the same computer by writing the images to an external hard drive. At a rate of 10 frames per second, 512 by 512 pixel grayscale frames were written by ITK methods to the external hard drive through USB 2.0 while the ultrasound scan was occurring without thrashing or delay in system performance. 
This simple exercise demonstrates the potential of ITK in processing ultrasound images in real-time in addition to the more traditional off-line processing.}, author = {Wang, David and Chang, Wilson and Stetten, George}, journal = {Proc MICCAI}, pages = {1--5}, title = {{Real-time ultrasound image analysis for the insight toolkit}}, url = {http://insight-journal.org/dspace/handle/1926/43}, year = {2005} } @book{Johnson2015, abstract = {The Insight Toolkit (ITK) is an open-source software toolkit for performing registration and segmentation. Segmentation is the process of identifying and classifying data found in a digitally sampled representation. Typically the sampled representation is an image acquired from such medical instrumentation as CT or MRI scanners. Registration is the task of aligning or developing correspondences between data. For example, in the medical environment, a CT scan may be aligned with a MRI scan in order to combine the information contained in both. ITK is a cross-platform software. It uses a build environment known as CMake to manage platform-specific project generation and compilation process in a platform-independent way. ITK is implemented in C++. ITK's implementation style employs generic programming, which involves the use of templates to generate, at compile-time, code that can be applied generically to any class or data-type that supports the operations used by the template. The use of C++ templating means that the code is highly efficient and many issues are discovered at compile-time, rather than at run-time during program execution. It also means that many of ITK's algorithms can be applied to arbitrary spatial dimensions and pixel types. An automated wrapping system integrated with ITK generates an interface between C++ and a high-level programming language Python. This enables rapid prototyping and faster exploration of ideas by shortening the edit-compile-execute cycle. In addition to automated wrapping, the SimpleITK project provides a streamlined interface to ITK that is available for C++, Python, Java, CSharp, R, Tcl and Ruby. Developers from around the world can use, debug, maintain, and extend the software because ITK is an open-source project. ITK uses a model of software development known as Extreme Programming. Extreme Programming collapses the usual software development methodology into a simultaneous iterative process of design-implement-test-release. The key features of Extreme Programming are communication and testing. Communication among the members of the ITK community is what helps manage the rapid evolution of the software. Testing is what keeps the software stable. An extensive testing process supported by the system known as CDash measures the quality of ITK code on a daily basis. The ITK Testing Dashboard is updated continuously, reflecting the quality of the code at any moment. The most recent version of this document is available online at http://itk.org/ItkSoftwareGuide.pdf. This book is a guide to developing software with ITK; it is the first of two companion books. This book covers building and installation, general architecture and design, as well as the process of contributing in the ITK community. The second book covers detailed design and functionality for reading and writing images, filtering, registration, segmentation, and performing statistical analysis.}, author = {Johnson, Hans J. and McCormick, Matthew M and Ibanez, Luis}, booktitle = {Kitware, Inc.(January 2015)}, isbn = {1930934270}, keywords = {Guide,Registration,Segmentation}, pages = {248}, title = {{The ITK Software Guide: Introduction and Development Guidelines version 4.6}}, url = {https://itk.org/}, year = {2015} } @article{Ng2003, abstract = {This paper introduces the new concept of narrow-band to image registration. Narrow-banding is a common technique used in the solution of level set approaches to image processing. For our application, the narrow-band describes the shape of an object by using a data structure containing the signed distance values at a small band of neighboring pixels. This compact representation of an object is well suited for performing registration against a standard image as well as against another narrow-band. The novel technique was implemented in the registration framework of the NLM Insight Toolkit (ITK). This implementation illustrates the great advantage of a modular framework structure that allows researchers to concentrate in the interesting aspects of a new algorithm by building on an existing set of predefined components for providing the rest of standard functionalities that are required. {\textcopyright} Springer-Verlag Berlin Heidelberg 2003.}, author = {Ng, Lydia and Ib{\'{a}}{\~{n}}ez, Luis}, doi = {10.1007/978-3-540-39701-4_29}, issn = {03029743}, journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, pages = {271--280}, title = {{Narrow band to image registration in the insight toolkit}}, volume = {2717}, year = {2003} } @inproceedings{Muller2006, abstract = {Visual information retrieval is an emerging domain in the medical field as it has been in computer vision for more than ten years. It has the potential to help better managing the rising amount of visual medical data. One of the most frequent application fields for content-based medical image retrieval (CBIR) is diagnostic aid. By submitting an image showing a certain pathology to a CBIR system, the medical expert can easily find similar cases. A major problem is the background surrounding the object in many medical images. System parameters of the imaging modalities are stored around the images in text as well as patient name or a logo of the institution. With such noisy input data, image retrieval often rather finds images where the object appears in the same area and is surrounded by similar structures.
Whereas in specialised application domains, segmentation can focus the research on a particular area, PACS-like (Picture Archiving and Communication System) databases containing a large variety of images need a more general approach. This article describes an algorithm to extract the important object of the image to reduce the amount of data to be analysed for CBIR and focuses analysis to the important object. Most current solutions index the entire image without making a difference between object and background when using varied PACS-like databases or radiology teaching files. Our requirement is to have a fully automatic algorithm for object extraction. Medical images have the advantage to normally have one particular object more or less in the centre of the image. The database used for evaluating this task is taken from a radiology teaching file called casimage and the retrieval component is an open source retrieval engine called medGIFT. {\textcopyright} Springer-Verlag Berlin Heidelberg 2006.}, author = {M{\"{u}}ller, Henning and Heuberger, Joris and Depeursinge, Adrien and Geissb{\"{u}}hler, Antoine}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, doi = {10.1007/11880592_36}, isbn = {3540457801}, issn = {16113349}, pages = {476--488}, title = {{Automated object extraction for medical image retrieval using the Insight Toolkit (ITK)}}, volume = {4182 LNCS}, year = {2006} } @book{Higham2016, abstract = {Third edition. "MATLAB is an interactive system for numerical computation that is widely used for teaching and research in industry and academia. It provides a modern programming language and problem solving environment, with powerful data structures, customizable graphics, and easy-to-use editing and debugging tools. This third edition of MATLAB Guide completely revises and updates the best-selling second edition and is more than 25 percent longer. The book remains a lively, concise introduction to the most popular and important features of MATLAB and the Symbolic Math Toolbox. Key features are a tutorial in Chapter 1 that gives a hands-on overview of MATLAB, a thorough treatment of MATLAB mathematics, including the linear algebra and numerical analysis functions and the differential equation solvers, and a web page that provides a link to example program files, updates, and links to MATLAB resources. The new edition contains color figures throughout, includes pithy discussions of related topics in new "Asides" boxes that augment the text, has new chapters on the Parallel Computing Toolbox, object-oriented programming, graphs, and large data sets, covers important new MATLAB data types such as categorical arrays, string arrays, tall arrays, tables, and timetables, contains more on MATLAB workflow, including the Live Editor and unit tests, and fully reflects major updates to the MATLAB graphics system." 
Chapter 1: A Brief Tutorial -- Chapter 2: Basics -- Chapter 3: Distinctive Features of MATLAB -- Chapter 4: Arithmetic -- Chapter 5: Matrices -- Chapter 6: Operators and Flow Control -- Chapter 7: Program Files -- Chapter 8: Graphics -- Chapter 9: Linear Algebra -- Chapter 10: More on Functions -- Chapter 11: Numerical Methods: Part I -- Chapter 12: Numerical Methods: Part II -- Chapter 13: Input and Output -- Chapter 14: Troubleshooting -- Chapter 15: Sparse Matrices -- Chapter 16: More on Coding -- Chapter 17: Advanced Graphics -- Chapter 18: Other Data Types and Multidimensional Arrays -- Chapter 19: Object-Oriented Programming -- Chapter 20: The Symbolic Math Toolbox -- Chapter 21: Graphs -- Chapter 22: Large Data Sets -- Chapter 23: Optimizing Codes -- Chapter 24: Tricks and Tips -- Chapter 25: The Parallel Computing Toolbox -- Chapter 26: Case Studies.}, author = {Higham, Desmond J. and Higham, Nicholas J.}, booktitle = {MATLAB Guide, Third Edition}, doi = {10.1137/1.9781611974669}, title = {{MATLAB Guide, Third Edition}}, year = {2016} } @incollection{Hanssen2002, author = {Hanssen, Nils and von Rymon-Lipinski, Bartosz and Jansen, Thomas and Li{\'{e}}vin, Marc and Keeve, Erwin}, booktitle = {CARS 2002 Computer Assisted Radiology and Surgery}, doi = {10.1007/978-3-642-56168-9_74}, pages = {445--449}, title = {{Integrating the Insight Toolkit itk into a medical software framework}}, year = {2002} } @article{Avants2014a, abstract = {Publicly available scientific resources help establish evaluation standards, provide a platform for teaching and improve reproducibility. Version 4 of the Insight ToolKit (ITK4) seeks to establish new standards in publicly available image registration methodology. ITK4 makes several advances in comparison to previous versions of ITK. ITK4 supports both multivariate images and objective functions; it also unifies high-dimensional (deformation field) and low-dimensional (affine) transformations with metrics that are reusable across transform types and with composite transforms that allow arbitrary series of geometric mappings to be chained together seamlessly. Metrics and optimizers take advantage of multi-core resources, when available. Furthermore, ITK4 reduces the parameter optimization burden via principled heuristics that automatically set scaling across disparate parameter types (rotations vs. translations). A related approach also constrains steps sizes for gradient-based optimizers. The result is that tuning for different metrics and/or image pairs is rarely necessary allowing the researcher to more easily focus on design/comparison of registration strategies. In total, the ITK4 contribution is intended as a structure to support reproducible research practices, will provide a more extensive foundation against which to evaluate new work in image registration and also enable application level programmers a broad suite of tools on which to build. Finally, we contextualize this work with a reference registration evaluation study with application to pediatric brain labeling. {\textcopyright} 2014 Avants, Tustison, Stauffer, Song, Wuand Gee.}, author = {Avants, Brian B. and Tustison, Nicholas J. 
and Stauffer, Michael and Song, Gang and Wu, Baohua and Gee, James C.}, doi = {10.3389/fninf.2014.00044}, issn = {16625196}, journal = {Frontiers in Neuroinformatics}, keywords = {Brain,Death,MRI,Open-source,Registration}, number = {APR}, title = {{The Insight ToolKit image registration framework}}, volume = {8}, year = {2014} } @misc{Ibanez2005, abstract = {Everything you need to install, use, and extend the Insight Segmentation and Registration Toolkit ITK. Includes detailed examples, installation procedures, and system overview for ITK version 2.4. (The included examples are taken directly from the ITK source code repository and are designed to demonstrate the essential features of the software.) The book comes with a CD-ROM that contains a complete hyperlinked version of the book plus ITK source code, data, Windows binaries, and extensive class documentation. Also includes CMake binaries for managing the ITK build process on a variety of compiler and operating system configurations.}, author = {Ibanez, L and Schroeder, W and Ng, L and Cates, J}, booktitle = {The ITK Software Guide}, doi = {1-930934-15-7}, isbn = {1930934157}, issn = {10445323}, number = {May}, pages = {804}, pmid = {1000070720}, title = {{The ITK Software Guide}}, url = {http://www.itk.org/ItkSoftwareGuide.pdf}, volume = {Second}, year = {2005} } @article{Shelton2005, abstract = {We present several case studies which examine the role that the Insight Toolkit (ITK) played in three medical image analysis courses and several conference tutorials. These courses represent the first use of ITK in a teaching environment, and we believe that a discussion of the teaching approach in each case and the benefits and challenges of ITK will be useful to future medical image analysis course development. ITK was found to provide significant value in a classroom setting since it provides both working "canned" algorithms, including some recently developed methods that are unavailable elsewhere, as well as a framework for developing new techniques and applications. Several areas of difficulty, particularly in regards to code complexity and advanced object-oriented design techniques, have been identified which may make the learning curve of ITK somewhat more complex than a language such as Matlab™. {\textcopyright} 2005 Elsevier B.V. All rights reserved.}, author = {Shelton, Damion and Stetten, George and Aylward, Stephen and Ib{\'{a}}{\~{n}}ez, Luis and Cois, Aaron and Stewart, Charles}, doi = {10.1016/j.media.2005.04.011}, issn = {13618415}, journal = {Medical Image Analysis}, keywords = {Image analysis,Insight Toolkit,Teaching}, number = {6}, pages = {605--611}, title = {{Teaching medical image analysis with the Insight Toolkit}}, volume = {9}, year = {2005} } @book{Johnson2015a, abstract = {The Insight Toolkit (ITK) is an open-source software toolkit for performing registration and segmentation. Segmentation is the process of identifying and classifying data found in a digitally sampled representation. Typically the sampled representation is an image acquired from such medical instrumentation as CT or MRI scanners. Registration is the task of aligning or developing correspondences between data. For example, in the medical environment, a CT scan may be aligned with a MRI scan in order to combine the information contained in both.}, author = {Johnson, Hans J.
and McCormick, Matthew M and Ibanez, Luis}, booktitle = {Kitware, Inc.(January 2015)}, isbn = {978-1930934276}, title = {{The ITK Software Guide Book 1: Introduction and Development Guidelines Fourth Edition Updated for ITK version 4.7}}, url = {https://itk.org/}, year = {2015} } @inproceedings{Tsai2006, abstract = {This paper presents a correspondence-based toolkit for image registration. Written in C++, the toolkit complements the capabilities of the Insight Toolkit (ITK). Major components include features, feature sets, match generators, error scale estimators, robust transformation estimators, and convergence testers, all combined and controlled by several different registration engines. Correspondence-based algorithms which can be implemented using the toolkit extend from ICP to hybrids of intensity-based and feature-based registration. The toolkit is being used both as an education tool and the foundation for developing new algorithms. {\textcopyright} 2006 IEEE.}, author = {Tsai, Chia Ling and Stewart, Charles V. and Perera, Amitha and Lee, Ying Lin and Yang, Gehua and Sofka, Michal}, booktitle = {Conference Proceedings - IEEE International Conference on Systems, Man and Cybernetics}, doi = {10.1109/ICSMC.2006.384753}, isbn = {1424401003}, issn = {1062922X}, pages = {3972--3977}, title = {{A correspondence-based software toolkit for image registration}}, volume = {5}, year = {2006} } @book{Ibanez2005a, abstract = {The Insight Toolkit (ITK) is an open-source software toolkit for performing registration and segmentation. Segmentation is the process of identifying and classifying data found in a digitally sampled representation. Typically the sampled representation is an image acquired from such medical instrumentation as CT or MRI scanners. Registration is the task of aligning or developing correspondences between data. For example, in the medical environment, a CT scan may be aligned with a MRI scan in order to combine the information contained in both. ITK is implemented in C++. It is cross-platform, using a build environment known as CMake to manage the compilation process in a platform-independent way. In addition, an automated wrapping process (Cable) generates interfaces between C++ and interpreted programming languages such as Tcl, Java, and Python. This enables developers to create software using a variety of programming languages. ITK's C++ implementation style is referred to as generic programming, which is to say that it uses templates so that the same code can be applied generically to any class or type that happens to support the operations used. Such C++ templating means that the code is highly efficient, and that many software problems are discovered at compile-time, rather than at run-time during program execution. Because ITK is an open-source project, developers from around the world can use, debug, maintain, and extend the software. ITK uses a model of software development referred to as Extreme Programming. Extreme Programming collapses the usual software creation methodology into a simultaneous and iterative process of design-implement-test-release. The key features of Extreme Programming are communication and testing. Communication among the members of the ITK community is what helps manage the rapid evolution of the software. Testing is what keeps the software stable. In ITK, an extensive testing process (using a system known as Dart) is in place that measures the quality on a daily basis.
The ITK Testing Dashboard is posted continuously, reflecting the quality of the software at any moment. This book is a guide to using and developing with ITK. The sample code in the directory provides a companion to the material presented here. The most recent version of this document is available online at http://www.itk.org/ItkSoftwareGuide.pdf.}, author = {Ibanez, Luis and Schroeder, Will and Ng, Lydia and Cates, Josh}, booktitle = {Insight Software Consortium}, doi = {10.1016/S0014-5793(02)03066-1}, issn = {00145793}, keywords = {ITK,Software}, title = {{The ITK Software Guide, 2nd Edition}}, year = {2005} } @article{Pierson2011, abstract = {The BRAINS (Brain Research: Analysis of Images, Networks, and Systems) image analysis software has been in use, and in constant development, for over 20 years. The original neuroimage analysis pipeline using BRAINS was designed as a semiautomated procedure to measure volumes of the cerebral lobes and subcortical structures, requiring manual intervention at several stages in the process. Through use of advanced image processing algorithms the need for manual intervention at stages of image realignment, tissue sampling, and mask editing have been eliminated. In addition, inhomogeneity correction, intensity normalization, and mask cleaning routines have been added to improve the accuracy and consistency of the results. The fully automated method, AutoWorkup, is shown in this study to be more reliable (ICC ≥ 0.96, Jaccard index ≥ 0.80, and Dice index ≥ 0.89 for all tissues in all regions) than the average of 18 manual raters. On a set of 1130 good quality scans, the failure rate for correct realignment was 1.1{\%}, and manual editing of the brain mask was required on 4{\%} of the scans. In other tests, AutoWorkup is shown to produce measures that are reliable for data acquired across scanners, scanner vendors, and across sequences. Application of AutoWorkup for the analysis of data from the 32-site, multivendor PREDICT-HD study yield estimates of reliability to be greater than or equal to 0.90 for all tissues and regions. {\textcopyright} 2010 Elsevier Inc.}, annote = {{\#}{\#}CONTRIBUTIONS: I developed custom analysis software to achieve the desired interpretation of results. I had substantial contributions to the software methods development, interpretation of validation results for this work. I assisted with critically reviewing and revising the intellectual content of the medical imaging methods applied, and their interpretation with respect to the multi-site nature of the data collection process. :{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Journal :{\#}{\#}}, author = {Pierson, Ronald K. and Johnson, Hans J. and Harris, Gregory and Keefe, Helen and Paulsen, Jane S. and Andreasen, Nancy C. and Magnotta, Vincent A.}, doi = {10.1016/j.neuroimage.2010.06.047}, file = {:Users/johnsonhj/Documents/Mendeley Desktop/Pierson et al/NeuroImage/Pierson et al.
- 2011 - Fully automated analysis using BRAINS AutoWorkup.pdf:pdf}, isbn = {1095-9572 (Electronic)$\backslash$n1053-8119 (Linking)}, issn = {10538119}, journal = {NeuroImage}, keywords = {Automated image analysis,BRAINS,Morphometry,Pipeline,Segmentation,Volumetric analysis}, month = {jan}, number = {1}, pages = {328--336}, pmid = {20600977}, publisher = {Elsevier Inc.}, title = {{Fully automated analysis using BRAINS: AutoWorkup}}, url = {http://www.sciencedirect.com/science/article/pii/S1053811910009055 papers2://publication/uuid/35F9A8B8-DD4A-4857-903F-16D3F7CCAEF7 http://www.ncbi.nlm.nih.gov/pubmed/20600977}, volume = {54}, year = {2011} } @inproceedings{Gerard2016, abstract = {Lung segmentation is a critical initial step in planning radiation therapy interventions for lung cancer patients. Achieving robust automatic segmentation of lungs with large tumors is challenging due to large variations in lung morphology, tumor location, and tumor shape between subjects. We present an automatic method to segment lungs with large tumors in CT images using an initial intensity based segmentation followed by alpha shape construction and graph search. We evaluated our method by comparing automated segmentations to manual segmentations on twelve subjects. Computed metrics for segmentation quality include average surface distance of 0.727 mm and average DICE coefficient of 0.970. These results demonstrate that the proposed method accurately segments the entire lung regions both free of and in the presence of large tumors.}, annote = {{\#}{\#}CONTRIBUTIONS: I assisted with methods development and data analysis for this paper. I was responsible for the data processing for one of the evaluated algorithms. I had the primary contributions to the interpretation of validation results for this work.  I contributed to revising the intellectual content of the medical imaging methods applied.:{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Conference :{\#}{\#}}, author = {Gerard, Sarah E and Johnson, Hans J. and Bayouth, John E and Christensen, Gary E. and Du, Kaifang and Guo, Junfeng and Reinhardt, Joseph M}, booktitle = {PIA: Pulmonary Image Analysis}, file = {:Users/johnsonhj/Documents/Mendeley Desktop/Gerard et al/PIA Pulmonary Image Analysis/Gerard et al. 
- 2016 - Alpha Shapes for Lung Segmentation in the Presence of Large Tumors.pdf:pdf}, pages = {1--9}, title = {{Alpha Shapes for Lung Segmentation in the Presence of Large Tumors}}, year = {2016} } @techreport{Kim2011, author = {Kim, Regina E Y and Johnson, Hans J and Williams, Norman K}, booktitle = {Engineering}, file = {:Users/johnsonhj/Documents/Mendeley Desktop/Kim et al/Engineering/Kim et al. - 2011 - Affine Transformation for Landmark Based Registration Initializer in ITK.pdf:pdf}, keywords = {Affine Transformation for Landmark Based Registrat,Insight Journal}, mendeley-tags = {Insight Journal}, pages = {1--8}, title = {{Affine Transformation for Landmark Based Registration Initializer in ITK}}, year = {2011} } @article{johnson2015itk, abstract = {The Insight Toolkit (ITK) is an open-source software toolkit for performing registration and segmentation. Segmentation is the process of identifying and classifying data found in a digitally sampled representation. Typically the sampled representation is an image acquired from such medical instrumentation as CT or MRI scanners. Registration is the task of aligning or developing correspondences between data. For example, in the medical environment, a CT scan may be aligned with a MRI scan in order to combine the information contained in both. ITK is a cross-platform software. It uses a build environment known as CMake to manage platform-specific project generation and compilation process in a platform-independent way. ITK is implemented in C++. ITK's implementation style employs generic programming, which involves the use of templates to generate, at compile-time, code that can be applied generically to any class or data-type that supports the operations used by the template. The use of C++ templating means that the code is highly efficient and many issues are discovered at compile-time, rather than at run-time during program execution. It also means that many of ITK's algorithms can be applied to arbitrary spatial dimensions and pixel types. An automated wrapping system integrated with ITK generates an interface between C++ and a high-level programming language Python. This enables rapid prototyping and faster exploration of ideas by shortening the edit-compile-execute cycle. In addition to automated wrapping, the SimpleITK project provides a streamlined interface to ITK that is available for C++, Python, Java, CSharp, R, Tcl and Ruby. Developers from around the world can use, debug, maintain, and extend the software because ITK is an open-source project. ITK uses a model of software development known as Extreme Programming. Extreme Programming collapses the usual software development methodology into a simultaneous iterative process of design-implement-test-release. The key features of Extreme Programming are communication and testing. Communication among the members of the ITK community is what helps manage the rapid evolution of the software. Testing is what keeps the software stable. An extensive testing process supported by the system known as CDash measures the quality of ITK code on a daily basis. The ITK Testing Dashboard is updated continuously, reflecting the quality of the code at any moment. The most recent version of this document is available online at http://itk.org/ItkSoftwareGuide.pdf. This book is a guide to developing software with ITK; it is the first of two companion books. This book covers building and installation, general architecture and design, as well as the process of contributing in the ITK community. The second book covers detailed design and functionality for reading and writing images, filtering, registration, segmentation, and performing statistical analysis.}, author = {Johnson, Hans J. and Mccormick, Matthew M and Ibanez, Luis}, isbn = {978-1-930934-28-3}, keywords = {Guide,Registration,Segmentation}, publisher = {Kitware, Inc.}, title = {{The ITK Software Guide Book 1: Introduction and Development Guidelines-Volume 1}}, year = {2015} } @article{forbes2016open, abstract = {The creation of high-quality medical imaging reference atlas datasets with consistent dense anatomical region labels is a challenging task. Reference atlases have many uses in medical image applications and are essential components of atlas-based segmentation tools commonly used for producing personalized anatomical measurements for individual subjects. The process of manual identification of anatomical regions by experts is regarded as a so-called gold standard; however, it is usually impractical because of the labor-intensive costs. Further, as the number of regions of interest increases, these manually created atlases often contain many small inconsistently labeled or disconnected regions that need to be identified and corrected. This project proposes an efficient process to drastically reduce the time necessary for manual revision in order to improve atlas label quality. We introduce the LabelAtlasEditor tool, a SimpleITK-based open-source label atlas correction tool distributed within the image visualization software 3D Slicer. LabelAtlasEditor incorporates several 3D Slicer widgets into one consistent interface and provides label-specific correction tools, allowing for rapid identification, navigation, and modification of the small, disconnected erroneous labels within an atlas. The technical details for the implementation and performance of LabelAtlasEditor are demonstrated using an application of improving a set of 20 Huntingtons Disease-specific multi-modal brain atlases. Additionally, we present the advantages and limitations of automatic atlas correction.
After the correction of atlas inconsistencies and small, disconnected regions, the number of unidentified voxels for each dataset was reduced on average by 68.48{\%}.}, annote = {{\#}{\#}CONTRIBUTIONS: I was the primary mentor for all aspects of this project. I secondarily responsible for this paper. I had contributions to the software methods development, interpretation of validation results for this work. I was the primary author of the manuscript and oversaw revising the intellectual content of the medical imaging methods applied, and their interpretation with respect to the multi-site nature of the data collection process. :{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Journal :{\#}{\#}}, author = {Forbes, Jessica L and Kim, Regina E Y and Paulsen, Jane S and Johnson, Hans J}, doi = {10.3389/fninf.2016.00029}, issn = {1662-5196}, journal = {Frontiers in neuroinformatics}, keywords = {Huntingtons Disease,ITK,brain MRI,label atlas,multi-atlas,multi-modal,open-source}, month = {aug}, pages = {1--11}, pmid = {27536233}, publisher = {Frontiers Media SA}, title = {{An Open-Source Label Atlas Correction Tool and Preliminary Results on Huntingtons Disease Whole-Brain MRI Atlases.}}, url = {http://journal.frontiersin.org/Article/10.3389/fninf.2016.00029/abstract{\%}5Cnhttp://www.ncbi.nlm.nih.gov/pubmed/27536233{\%}5Cnhttp://www.pubmedcentral.nih.gov/articlerender.fcgi?artid=PMC4971025}, volume = {10}, year = {2016} } @incollection{Avants2012, abstract = {Publicly available scientific resources help establish evaluation standards, provide a platform for teaching and may improve reproducibility. Version 4 of the Insight ToolKit ( ITK 4 ) seeks to establish new standards in publicly available image registration methodology. In this work, we provide an overview and preliminary evaluation of the revised toolkit against registration based on the previous major ITK version (3.20). Furthermore, we propose a nomenclature that may be used to discuss registration frameworks via schematic representations. In total, the ITK 4 contribution is intended as a structure to support reproducible research practices, will provide a more extensive foundation against which to evaluate new work in image registration and also enable application level programmers a broad suite of tools on which to build.}, author = {Avants, Brian B and Tustison, Nicholas J and Song, Gang and Wu, Baohua and Stauffer, Michael and McCormick, Matthew M and Johnson, Hans J and Gee, James C}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, chapter = {A Unified}, doi = {10.1007/978-3-642-31340-0_28}, edition = {5th Intern}, editor = {{Dawant, B.; Christensen, G.E.; Fitzpatrick, J.M.; Rueckert}, D (Eds.}, file = {:Users/johnsonhj/Documents/Mendeley Desktop/Avants et al/Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)/Avants et al. 
- 2012 - A unified image registration framework for ITK.pdf:pdf}, isbn = {9783642313394}, issn = {03029743}, number = {LNCS 7359}, organization = {Springer Berlin Heidelberg}, pages = {266--275}, publisher = {Springer Berlin Heidelberg}, title = {{A unified image registration framework for ITK}}, url = {http://link.springer.com/10.1007/978-3-642-31340-0{\_}28 http://link.springer.com/chapter/10.1007{\%}2F978-3-642-31340-0{\_}28}, volume = {7359 LNCS}, year = {2012} } @inproceedings{shao2016population, annote = {{\#}{\#}CONTRIBUTIONS: I assisted with methods development and data analysis for this paper. I had the primary contributions to the interpretation of validation results for this work. I contributed to revising the intellectual content of the medical imaging methods applied.:{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Journal :{\#}{\#}}, author = {Shao, Wei and Christensen, Gary E. and Johnson, Hans J. and {Hyun Song}, Joo and Durumeric, Oguz C and Johnson, Casey P. and Shaffer, Joseph J. and Magnotta, Vincent A. and Fiedorowicz, Jess G. and Wemmie, John A}, booktitle = {Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops}, doi = {10.1109/CVPRW.2016.75}, file = {:Users/johnsonhj/Documents/Mendeley Desktop/Shao et al/Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops/Shao et al.
- 2016 - Population Shape Collapse in Large Deformation Registration of MR Brain Images.pdf:pdf}, isbn = {9781509014378}, pages = {109--117}, title = {{Population Shape Collapse in Large Deformation Registration of MR Brain Images}}, year = {2016} } @article{oguz2014dtiprep, abstract = {In the last decade, diffusion MRI (dMRI) studies of the human and animal brain have been used to investigate a multitude of pathologies and drug-related effects in neuroscience research. Study after study identifies white matter (WM) degeneration as a crucial biomarker for all these diseases. The tool of choice for studying WM is dMRI. However, dMRI has inherently low signal-to-noise ratio and its acquisition requires a relatively long scan time; in fact, the high loads required occasionally stress scanner hardware past the point of physical failure. As a result, many types of artifacts implicate the quality of diffusion imagery. Using these complex scans containing artifacts without quality control (QC) can result in considerable error and bias in the subsequent analysis, negatively affecting the results of research studies using them. However, dMRI QC remains an under-recognized issue in the dMRI community as there are no user-friendly tools commonly available to comprehensively address the issue of dMRI QC. As a result, current dMRI studies often perform a poor job at dMRI QC. Thorough QC of dMRI will reduce measurement noise and improve reproducibility, and sensitivity in neuroimaging studies; this will allow researchers to more fully exploit the power of the dMRI technique and will ultimately advance neuroscience. Therefore, in this manuscript, we present our open-source software, DTIPrep, as a unified, user friendly platform for thorough QC of dMRI data. These include artifacts caused by eddy-currents, head motion, bed vibration and pulsation, venetian blind artifacts, as well as slice-wise and gradient-wise intensity inconsistencies. This paper summarizes a basic set of features of DTIPrep described earlier and focuses on newly added capabilities related to directional artifacts and bias analysis.}, annote = {From Duplicate 1 (DTIPrep: quality control of diffusion-weighted images - Oguz, Ipek; Farzinfar, Mahshid; Matsui, Joy T.; Budin, Francois; Liu, Zhexing; Gerig, Guido; Johnson, Hans J.; Styner, Martin) From Duplicate 2 (DTIPrep: quality control of diffusion-weighted images - Oguz, Ipek; Farzinfar, Mahshid; Matsui, Joy T.; Budin, Francois; Liu, Zhexing; Gerig, Guido; Johnson, Hans J.; Styner, Martin) {\#}{\#}CONTRIBUTIONS: As a member of the National Alliance for Medical Imaging Computing (NAMIC) I collaborated on many software engineering projets.   I had substantial contributions to the software methods development, interpretation of validation results for this work.  I assisted with critically reviewing and revising the intellectual content of the medical imaging methods applied, and their interpretation with respect to the multi-site nature of the data collection process.  :{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Conference :{\#}{\#}}, author = {Oguz, Ipek and Farzinfar, Mahshid and Matsui, Joy T. and Budin, Francois and Liu, Zhexing and Gerig, Guido and Johnson, Hans J. and Styner, Martin}, doi = {10.3389/fninf.2014.00004}, file = {:Users/johnsonhj/Documents/Mendeley Desktop/Oguz et al/Frontiers in neuroinformatics/Oguz et al. 
- 2014 - DTIPrep quality control of diffusion-weighted images.pdf:pdf}, isbn = {1662-5196 (Electronic){\$}\backslash{\$}r1662-5196 (Linking)}, issn = {1662-5196}, journal = {Frontiers in neuroinformatics}, keywords = {diffusion MRI,diffusion mri,diffusion tensor imaging,open-source,preprocessing,quality c,quality control,software}, number = {January}, pages = {4}, pmid = {24523693}, publisher = {Frontiers Media SA}, title = {{DTIPrep: quality control of diffusion-weighted images}}, url = {http://www.pubmedcentral.nih.gov/articlerender.fcgi?artid=3906573{\&}tool=pmcentrez{\&}rendertype=abstract http://journal.frontiersin.org/Journal/10.3389/fninf.2014.00004/full http://journal.frontiersin.org/article/10.3389/fninf.2014.00004/abstract{\%}0Ahttp://www}, volume = {8}, year = {2014} } @article{yaniv2018simpleitk, author = {Yaniv, Ziv and Lowekamp, Bradley C and Johnson, Hans J and Beare, Richard}, journal = {Journal of digital imaging}, number = {3}, pages = {290--303}, publisher = {Springer International Publishing}, title = {{SimpleITK image-analysis notebooks: a collaborative environment for education and reproducible research}}, volume = {31}, year = {2018} } @article{hawley2010introduction, author = {Hawley, J and Johnson, H and Dowling, J and Malaterre, M and Greer, P B and Salvado, O}, journal = {Insight Journal}, title = {{Introduction to ITK resample in-place image filter}}, year = {2010} } @inproceedings{kim2015efficient, author = {Kim, Regina E Y and Nopoulos, Peg and Paulsen, Jane and Johnson, Hans}, booktitle = {Workshop on Clinical Image-Based Procedures}, file = {:Users/johnsonhj/Documents/Mendeley Desktop/Kim et al/Workshop on Clinical Image-Based Procedures/Kim et al. - 2015 - Efficient and extensible workflow Reliable whole brain segmentation for large-scale, multi-center longitudinal human.pdf:pdf}, organization = {Springer, Cham}, pages = {54--61}, title = {{Efficient and extensible workflow: Reliable whole brain segmentation for large-scale, multi-center longitudinal human MRI analysis using high performance/throughput computing resources}}, year = {2015} } @article{avants2015neuroinformatics, annote = {From Duplicate 1 (Neuroinformatics and the The Insight ToolKit - Avants, Brian B.; Johnson, Hans J.; Tustison, Nicholas J.) {\#}{\#}CONTRIBUTIONS: As a contributing guest editor for a special issue of "Frontiers in Neruoinformatics" I had substantial oversight in the criteria for selection, review and ultimate publication of materials in the issue.  :{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Editorial :{\#}{\#}}, author = {Avants, Brian B. and Johnson, Hans J. 
and Tustison, Nicholas J.}, doi = {10.3389/fninf.2015.00005}, file = {:Users/johnsonhj/Documents/Mendeley Desktop/Avants, Johnson, Tustison/Frontiers in neuroinformatics/Avants, Johnson, Tustison - 2015 - Neuroinformatics and the The Insight ToolKit.pdf:pdf}, issn = {1662-5196}, journal = {Frontiers in neuroinformatics}, keywords = {C++,ITK,c,edited and reviewed by,itk,open source,registration,segmentation}, number = {March}, pages = {1--3}, pmid = {25859213}, publisher = {Frontiers Media SA}, title = {{Neuroinformatics and the The Insight ToolKit}}, url = {http://www.frontiersin.org/Neuroinformatics/10.3389/fninf.2015.00005/full}, volume = {9}, year = {2015} } @inbook{Stevenson2011, address = {New York}, author = {Stevenson, G N and Collins, S L and Impey, L and Noble, J A}, booktitle = {2011 8th Ieee International Symposium on Biomedical Imaging: From Nano to Macro}, isbn = {978-1-4244-4128-0}, pages = {891--894}, publisher = {Ieee}, series = {IEEE International Symposium on Biomedical Imaging}, title = {{SURFACE PARAMETERISATION OF THE UTERO/PLACENTAL INTERFACE USING 3D POWER DOPPLER ULTRASOUND}}, type = {Book Section}, year = {2011} } @inproceedings{Rezaei, author = {Rezaei, A and Nuyts, J}, booktitle = {IEEE Nuclear Science Symposium Conference Record}, doi = {10.1109/NSSMIC.2013.6829031}, title = {{Joint registration of attenuation and activity images in gated TOF-PET}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84904204340{\&}doi=10.1109{\%}2FNSSMIC.2013.6829031{\&}partnerID=40{\&}md5=9a3c352e39a5cde2bb765fec1156cfb1} } @inbook{Suter2018, address = {New York}, author = {Suter, Y and Rummel, C and Wiest, R and Reyes, M}, booktitle = {2018 Ieee 15th International Symposium on Biomedical Imaging}, isbn = {978-1-5386-3636-7}, pages = {1052--1055}, publisher = {Ieee}, series = {IEEE International Symposium on Biomedical Imaging}, title = {{FAST AND UNCERTAINTY-AWARE CEREBRAL CORTEX MORPHOMETRY ESTIMATION USING RANDOM FOREST REGRESSION}}, type = {Book Section}, year = {2018} } @inproceedings{Mandl, author = {Mandl, T and Martinek, J and Mayr, W and Rattay, F and Reichel, M and Moser, E}, booktitle = {21st European Modeling and Simulation Symposium, EMSS 2009}, title = {{Towards a numerical 3D model of functional electrical stimulation of denervated, degenerated human skeletal muscle}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84874185461{\&}partnerID=40{\&}md5=a9627e50e42db43cbfa2f5ca618bdad4} } @inbook{Zhang2014, address = {Cham}, author = {Zhang, J and Sorby, H and Clement, J and Thomas, C D L and Hunter, P and Nielsen, P and Lloyd, D and Taylor, M and Besier, T}, booktitle = {Biomedical Simulation}, editor = {Bello, F and Cotin, S}, isbn = {978-3-319-12057-7; 978-3-319-12056-0}, pages = {182--192}, publisher = {Springer International Publishing Ag}, series = {Lecture Notes in Computer Science}, title = {{The MAP Client: User-Friendly Musculoskeletal Modelling Workflows}}, type = {Book Section}, volume = {8789}, year = {2014} } @book{Ponzio2017, address = {Setubal}, author = {Ponzio, F and Macii, E and Ficarra, E and {Di Cataldo}, S}, doi = {10.5220/0006239201140121}, isbn = {978-989-758-215-8}, pages = {114--121}, publisher = {Scitepress}, series = {Proceedings of the 10th International Joint Conference on Biomedical Engineering Systems and Technologies, Vol 2: Bioimaging}, title = {{A Multi-modal Brain
Image Registration Framework for US-guided Neuronavigation Systems Integrating MR and US for Minimally Invasive Neuroimaging}}, type = {Book}, year = {2017} } @incollection{Tourbier2014a, author = {Tourbier, S and Bresson, X and Hagmann, P and Thiran, J P and Meuli, R and Cuadra, M B}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, doi = {10.1007/978-3-319-10470-6_32}, pages = {252--259}, title = {{Efficient total variation algorithm for fetal brain MRI reconstruction}}, type = {Serial}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84906979454{\&}doi=10.1007{\%}2F978-3-319-10470-6{\_}32{\&}partnerID=40{\&}md5=d90bc6bfffc46271b10c0f02d0ea2f2a}, volume = {8674 LNCS}, year = {2014} } @inproceedings{Medina, author = {Medina, R and Bautista, S and Morocho, V}, booktitle = {2017 IEEE 2nd Ecuador Technical Chapters Meeting, ETCM 2017}, doi = {10.1109/ETCM.2017.8247499}, pages = {1--6}, title = {{Accuracy of connected confidence left ventricle segmentation in 3-D multi-slice computerized tomography images}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85045738426{\&}doi=10.1109{\%}2FETCM.2017.8247499{\&}partnerID=40{\&}md5=31a12ff7f15e6264e843b9c8cc503a7c}, volume = {2017-Janua} } @inproceedings{Suter, author = {Suter, Y and Rummel, C and Wiest, R and Reyes, M}, booktitle = {Proceedings - International Symposium on Biomedical Imaging}, doi = {10.1109/ISBI.2018.8363752}, pages = {1052--1055}, title = {{Fast and uncertainty-aware cerebral cortex morphometry estimation using random forest regression}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85048099086{\&}doi=10.1109{\%}2FISBI.2018.8363752{\&}partnerID=40{\&}md5=e22ed68e255aa6c6af3b32100df7276a}, volume = {2018-April} } @inproceedings{Kugu, author = {Kugu, E}, booktitle = {RAST 2013 - Proceedings of 6th International Conference on Recent Advances in Space Technologies}, doi = {10.1109/RAST.2013.6581204}, pages = {217--223}, title = {{Satellite image denoising using Bilateral Filter with SPEA2 optimized parameters}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84883888988{\&}doi=10.1109{\%}2FRAST.2013.6581204{\&}partnerID=40{\&}md5=eba34fda9e6d5ba7ef3f98fd189e8ed5} } @inproceedings{Ponzio, author = {Ponzio, F and Macii, E and Ficarra, E and {Di Cataldo}, S}, booktitle = {BIOIMAGING 2017 - 4th International Conference on Bioimaging, Proceedings; Part of 10th International Joint Conference on Biomedical Engineering Systems and Technologies, BIOSTEC 2017}, pages = {114--121}, title = {{A multi-modal brain image registration framework for US-guided neuronavigation systems integrating MR and US for minimally invasive neuroimaging}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85049240676{\&}partnerID=40{\&}md5=0189c956d91dec04b477ed5846ecb930}, volume = {2017-Janua} } @inproceedings{Stevenson, author = {Stevenson, G N and Collins, S L and Impey, L and Noble, J A}, booktitle = {Proceedings - International Symposium on Biomedical Imaging}, doi = {10.1109/ISBI.2011.5872547}, pages = {891--894}, title = {{Surface parameterisation of the utero/placental interface using 3D power doppler ultrasound}}, type = {Conference Proceedings}, url =
{https://www.scopus.com/inward/record.uri?eid=2-s2.0-80055033565{\&}doi=10.1109{\%}2FISBI.2011.5872547{\&}partnerID=40{\&}md5=906b8369dc14ae8e1bb4597aa31b1ec7} } @inproceedings{Schiwarth, author = {Schiwarth, M and Weissenb{\"{o}}ck, J and Plank, B and Fr{\"{o}}hler, B and Heinzl, C and Kastner, J}, booktitle = {IOP Conference Series: Materials Science and Engineering}, doi = {10.1088/1757-899X/406/1/012014}, title = {{Visual analysis of void and reinforcement characteristics in X-ray computed tomography dataset series of fiber-reinforced polymers}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-85054221856{\&}doi=10.1088{\%}2F1757-899X{\%}2F406{\%}2F1{\%}2F012014{\&}partnerID=40{\&}md5=e1757458a87eb396d4743ad19f658d5a}, volume = {406} } @inbook{Schiwarth2018, address = {Bristol}, author = {Schiwarth, M and Weissenb{\"{o}}ck, J and Plank, B and Fr{\"{o}}hler, B and Heinzl, C and Kastner, J}, booktitle = {13th International Conference on Textile Composites}, doi = {10.1088/1757-899x/406/1/012014}, publisher = {IOP Publishing Ltd}, series = {IOP Conference Series-Materials Science and Engineering}, title = {{Visual analysis of void and reinforcement characteristics in X-ray computed tomography dataset series of fiber-reinforced polymers}}, type = {Book Section}, volume = {406}, year = {2018} } @book{Pastorelli2014, address = {New York}, author = {Pastorelli, E and Herrmann, H}, isbn = {978-1-4673-9539-7}, pages = {201--204}, publisher = {IEEE}, series = {2014 Proceedings of the 14th Biennial Baltic Electronics Conference}, title = {{Virtual Reality Visualization for Short Fibre Orientation Analysis}}, type = {Book}, year = {2014} } @book{Mandl2009, address = {La Laguna}, author = {Mandl, T and Martinek, J and Mayr, W and Rattay, F and Reichel, M and Moser, E}, isbn = {978-84-692-5415-8}, pages = {209--+}, publisher = {Univ De La Laguna}, series = {EMSS 2009: 21st European Modeling and Simulation Symposium, Vol II}, title = {{Towards a Numerical 3D Model of Functional Electrical Stimulation of Denervated, Degenerated Human Skeletal Muscle}}, type = {Book}, year = {2009} } @inbook{Seidel2013, address = {Berlin}, author = {Seidel, T and Draebing, T and Seemann, G and Sachse, F B}, booktitle = {Functional Imaging and Modeling of the Heart}, editor = {Ourselin, S and Rueckert, D and Smith, N}, isbn = {978-3-642-38899-6; 978-3-642-38898-9}, pages = {300--307}, publisher = {Springer-Verlag Berlin}, series = {Lecture Notes in Computer Science}, title = {{A Semi-automatic Approach for Segmentation of Three-Dimensional Microscopic Image Stacks of Cardiac Tissue}}, type = {Book Section}, volume = {7945}, year = {2013} } @inbook{Rezaei2013, address = {New York}, author = {Rezaei, A and Nuyts, J}, booktitle = {2013 IEEE Nuclear Science Symposium and Medical Imaging Conference}, isbn = {978-1-4799-0534-8}, publisher = {IEEE}, series = {IEEE Nuclear Science Symposium and Medical Imaging Conference}, title = {{Joint registration of attenuation and activity images in gated TOF-PET}}, type = {Book Section}, year = {2013} } @inproceedings{Meesters, author = {Meesters, S and Ossenblok, P and Colon, A and Schijns, O and Florack, L and Boon, P and Wagner, L and Fuster, A}, booktitle = {Proceedings - International Symposium on Biomedical Imaging}, doi = {10.1109/ISBI.2015.7164034}, pages = {976--979}, title = {{Automated identification of intracranial
depth electrodes in computed tomography data}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84944328676{\&}doi=10.1109{\%}2FISBI.2015.7164034{\&}partnerID=40{\&}md5=3989bdde301290c2ae9d67f2142f6235}, volume = {2015-July} } @incollection{Zhang2014a, author = {Zhang, J and Sorby, H and Clement, J and Thomas, C D L and Hunter, P and Nielsen, P and Lloyd, D and Taylor, M and Besier, T}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, pages = {182--192}, title = {{The MAP client: User-friendly musculoskeletal modelling workflows}}, type = {Serial}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84911390478{\&}partnerID=40{\&}md5=d52525096590e132a0f2bdfbee3e3a8e}, volume = {8789}, year = {2014} } @inbook{Tourbier2014, address = {Berlin}, author = {Tourbier, S and Bresson, X and Hagmann, P and Thiran, J P and Meuli, R and Cuadra, M B}, booktitle = {Medical Image Computing and Computer-Assisted Intervention - MICCAI 2014, Pt II}, editor = {Golland, P and Hata, N and Barillot, C and Hornegger, J and Howe, R}, isbn = {978-3-319-10470-6; 978-3-319-10469-0}, pages = {252--259}, publisher = {Springer-Verlag Berlin}, series = {Lecture Notes in Computer Science}, title = {{Efficient Total Variation Algorithm for Fetal Brain MRI Reconstruction}}, type = {Book Section}, volume = {8674}, year = {2014} } @inbook{Meesters2015, address = {New York}, author = {Meesters, S and Ossenblok, P and Colon, A and Schijns, O and Florack, L and Boon, P and Wagner, L and Fuster, A}, booktitle = {2015 IEEE 12th International Symposium on Biomedical Imaging}, isbn = {978-1-4799-2374-8}, pages = {976--979}, publisher = {IEEE}, series = {IEEE International Symposium on Biomedical Imaging}, title = {{Automated Identification of Intracranial Depth Electrodes in Computed Tomography Data}}, type = {Book Section}, year = {2015} } @inproceedings{Pastorelli, author = {Pastorelli, E and Herrmann, H}, booktitle = {Proceedings of the Biennial Baltic Electronics Conference, BEC}, doi = {10.1109/BEC.2014.7320591}, pages = {201--204}, title = {{Virtual Reality visualization for short fibre orientation analysis}}, type = {Conference Proceedings}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84971310302{\&}doi=10.1109{\%}2FBEC.2014.7320591{\&}partnerID=40{\&}md5=3476efbc0430ba44724fcc8006637b70}, volume = {2015-Novem} } @book{Kugu2013, address = {New York}, author = {Kugu, E}, isbn = {978-1-4673-6396-9; 978-1-4673-6395-2}, pages = {217--223}, publisher = {IEEE}, series = {Proceedings of 6th International Conference on Recent Advances in Space Technologies}, title = {{Satellite Image Denoising Using Bilateral Filter with SPEA2 Optimized Parameters}}, type = {Book}, year = {2013} } @incollection{Seidel2013a, author = {Seidel, T and Draebing, T and Seemann, G and Sachse, F B}, booktitle = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)}, doi = {10.1007/978-3-642-38899-6_36}, pages = {300--307}, title = {{A semi-automatic approach for segmentation of three-dimensional microscopic image stacks of cardiac tissue}}, type = {Serial}, url = {https://www.scopus.com/inward/record.uri?eid=2-s2.0-84879835400{\&}doi=10.1007{\%}2F978-3-642-38899-6{\_}36{\&}partnerID=40{\&}md5=f7e591d9366417855cbf78f95bd7e1d2}, volume = {7945
LNCS}, year = {2013} } @book{Medina2017, address = {New York}, author = {Medina, R and Bautista, S and Morocho, V}, isbn = {978-1-5386-3894-1}, publisher = {IEEE}, series = {2017 IEEE Second Ecuador Technical Chapters Meeting}, title = {{Accuracy of Connected Confidence Left Ventricle Segmentation in 3-D Multi-Slice Computerized Tomography Images}}, type = {Book}, year = {2017} } @inproceedings{pierson2009maximize, abstract = {A common procedure performed by many groups in the analysis of neuroimaging data is separating the brain from other tissues. This procedure is often utilized both by volumetric studies as well as functional imaging studies. Regardless of the intent, an accurate, robust method of identifying the brain or cranial vault is imperative. While this is a common requirement, there are relatively few tools to perform this task. Most of these tools require a T1 weighted image and are therefore not able to accurately define a region that includes surface CSF. In this paper, we have developed a novel brain extraction technique termed Maximize Uniformity by Summation Heuristic (MUSH) optimization. The algorithm was designed for extraction of the brain and surface CSF from a multi-modal magnetic resonance (MR) imaging study. The method forms a linear combination of multi-modal MR imaging data to make the signal intensity within the brain as uniform as possible. The resulting image is thresholded and simple morphological operators are utilized to generate the resulting representation of the brain. The resulting method was applied to a sample of 20 MR brain scans and compared to the results generated by 3dSkullStrip, 3dIntracranial, BET, and BET2. The average Jaccard metrics for the twenty subjects were 0.66 (BET), 0.61 (BET2), 0.88 (3dIntracranial), 0.91 (3dSkullStrip), and 0.94 (MUSH).}, annote = {
{\#}{\#}CONTRIBUTIONS: As the director of medical imaging for the Department of Psychiatry I work closely with neuroscience students and junior faculty to mentor them on appropriate medical imaging analysis techniques. I developed custom analysis software to achieve the desired interpretation of results. I had substantial contributions to the software methods development and interpretation of validation results for this work. I assisted with critically reviewing and revising the intellectual content of the medical imaging methods applied, and their interpretation with respect to the multi-site nature of the data collection process. :{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Journal :{\#}{\#}}, author = {Pierson, Ronald and Harris, Gregory and Johnson, Hans J. and Dunn, Steve and Magnotta, Vincent A.}, booktitle = {Medical Imaging 2009: Image Processing}, doi = {10.1117/12.812322}, file = {:Users/johnsonhj/Documents/Mendeley Desktop/Pierson et al/Medical Imaging 2009 Image Processing/Pierson et al. - 2009 - Maximize uniformity summation heuristic (MUSH) a highly accurate simple method for intracranial delineation.pdf:pdf}, isbn = {9780819475107}, issn = {16057422}, keywords = {brain extraction,magnetic resonance imaging,optimization}, organization = {International Society for Optics and Photonics}, pages = {72593N}, publisher = {SPIE}, title = {{Maximize uniformity summation heuristic (MUSH): a highly accurate simple method for intracranial delineation}}, url = {http://link.aip.org/link/PSISDG/v7259/i1/p72593N/s1{\&}Agg=doi}, volume = {7259}, year = {2009} } @article{shaffer2017longitudinal, abstract = {Introduction: Huntington's disease (HD) is a genetic neurodegenerative disorder that primarily affects striatal neurons. Striatal volume loss is present years before clinical diagnosis; however, white matter degradation may also occur prior to diagnosis. Diffusion-weighted imaging (DWI) can measure microstructural changes associated with degeneration that precede macrostructural changes. DWI derived measures enhance understanding of degeneration in prodromal HD (pre-HD). Methods: As part of the PREDICT-HD study, N = 191 pre-HD individuals and 70 healthy controls underwent two or more (baseline and 1–5 year follow-up) DWI, with n = 649 total sessions. Images were processed using cutting-edge DWI analysis methods for large multicenter studies. Diffusion tensor imaging (DTI) metrics were computed in selected tracts connecting the primary motor, primary somato-sensory, and premotor areas of the cortex with the subcortical caudate and putamen. Pre-HD participants were divided into three CAG-Age Product (CAP) score groups reflecting clinical diagnosis probability (low, medium, or high probabilities). Baseline and longitudinal group differences were examined using linear mixed models. Results: Cross-sectional and longitudinal differences in DTI measures were present in all three CAP groups compared with controls. The high CAP group was most affected. Conclusions: This is the largest longitudinal DWI study of pre-HD to date. Findings showed DTI differences, consistent with white matter degeneration, were present up to a decade before predicted HD diagnosis. Our findings indicate a unique role for disrupted connectivity between the premotor area and the putamen, which may be closely tied to the onset of motor symptoms in HD. Hum Brain Mapp 38:1460–1477, 2017.
{\textcopyright} 2017 Wiley Periodicals, Inc.}, annote = {{\#}{\#}CONTRIBUTIONS: I was the primary mentor for all aspects of this project and was secondarily responsible for this paper. I had contributions to the software methods development and interpretation of validation results for this work. I was the primary author of the manuscript and oversaw revising the intellectual content of the medical imaging methods applied, and their interpretation with respect to the multi-site nature of the data collection process. :{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Journal :{\#}{\#}}, author = {Shaffer, Joseph J. and Ghayoor, Ali and Long, Jeffrey D. and Kim, Regina Eun Young and Lourens, Spencer and O'Donnell, Lauren J. and Westin, Carl Fredrik and Rathi, Yogesh and Magnotta, Vincent and Paulsen, Jane S. and Johnson, Hans J.}, doi = {10.1002/hbm.23465}, file = {:Users/johnsonhj/Documents/Mendeley Desktop/Shaffer et al/Human Brain Mapping/Shaffer et al. - 2017 - Longitudinal diffusion changes in prodromal and early HD Evidence of white-matter tract deterioration.pdf:pdf}, issn = {10970193}, journal = {Human Brain Mapping}, keywords = {Huntington disease,computer-assisted,diffusion magnetic resonance imaging,diffusion tractography,disease progression,image processing,multicenter study,prodromal,white matter}, number = {3}, pages = {1460--1477}, pmid = {28045213}, title = {{Longitudinal diffusion changes in prodromal and early HD: Evidence of white-matter tract deterioration}}, url = {http://www.ncbi.nlm.nih.gov/pubmed/28045213 http://doi.wiley.com/10.1002/hbm.23465}, volume = {38}, year = {2017} } @article{powell2008registration, abstract = {The large amount of imaging data collected in several ongoing multi-center studies requires automated methods to delineate brain structures of interest. We have previously reported on using artificial neural networks (ANN) to define subcortical brain structures. Here we present several automated segmentation methods using multidimensional registration.
A direct comparison between template, probability, artificial neural network (ANN) and support vector machine (SVM)-based automated segmentation methods is presented. Three metrics for each segmentation method are reported in the delineation of subcortical and cerebellar brain regions. Results show that the machine learning methods outperform the template and probability-based methods. Utilization of these automated segmentation methods may be as reliable as manual raters and require no rater intervention. {\textcopyright} 2007 Elsevier Inc. All rights reserved.}, annote = {{\#}{\#}CONTRIBUTIONS: As the director of medical imaging for the Department of Psychiatry I work closely with neuroscience students and junior faculty to mentor them on appropriate medical imaging analysis techniques. I developed custom analysis software to achieve the desired interpretation of results. I had substantial contributions to the software methods development and interpretation of validation results for this work. I assisted with critically reviewing and revising the intellectual content of the medical imaging methods applied, and their interpretation with respect to the multi-site nature of the data collection process. :{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Journal :{\#}{\#}}, author = {Powell, Stephanie and Magnotta, Vincent A. and Johnson, Hans and Jammalamadaka, Vamsi K. and Pierson, Ronald and Andreasen, Nancy C.}, doi = {10.1016/j.neuroimage.2007.05.063}, file = {:Users/johnsonhj/Documents/Mendeley Desktop/Powell et al/NeuroImage/Powell et al.
- 2008 - Registration and machine learning-based automated segmentation of subcortical and cerebellar brain structures.pdf:pdf}, issn = {10538119}, journal = {NeuroImage}, keywords = {Artificial neural networks,Brain segmentation,MRI,Registration-based segmentation,Support vector machine}, month = {jan}, number = {1}, pages = {238--247}, pmid = {17904870}, publisher = {Academic Press}, title = {{Registration and machine learning-based automated segmentation of subcortical and cerebellar brain structures}}, url = {http://www.pubmedcentral.nih.gov/articlerender.fcgi?artid=2253948{\&}tool=pmcentrez{\&}rendertype=abstract}, volume = {39}, year = {2008} } @techreport{Johnson2007, abstract = {The University of Iowa's Psychiatric Iowa Neuroimaging Consortium (PINC) has developed a program for mutual information registration of BRAINS2 [2] data using ITK [1] classes, called BRAINSFit. We have written a helper class, itk::MultiModal3DMutualRegistrationHelper, to simplify implementation and testing of different transform representations and optimizers. We have added a transform meeting the ITK standard, itk::ScaleVersor3DTransform. BRAINSFit is based on the registration examples from ITK, but adds new features, including the ability to employ different transform representations and optimization functions. Our goal was to determine best practices for registering 3D rigid multimodal MRI of the human brain. A version of the current program is employed here at PINC daily for automated processing of acquired brain images.},
author = {Johnson, Hans and Harris, Greg and Williams, Kent}, booktitle = {Insight J}, file = {:Users/johnsonhj/Documents/Mendeley Desktop/Johnson, Harris, Williams/Insight J/Johnson, Harris, Williams - 2007 - BRAINSFit mutual information rigid registrations of whole-brain 3D images, using the insight toolkit.pdf:pdf}, keywords = {Mutual Information,Registration}, number = {1}, pages = {1--10}, title = {{BRAINSFit: mutual information rigid registrations of whole-brain 3D images, using the Insight Toolkit}}, url = {http://hdl.handle.net/1926/1291}, volume = {57}, year = {2007} } @inproceedings{Miri2016, abstract = {{\textcopyright} 2016 SPIE. This work reports on a comparative study between five manual and automated methods for intra-subject pair-wise registration of images from different modalities.
The study includes a variety of inter-modal image registrations (MR-CT, PET-CT, PET-MR) utilizing different methods including two manual point-based techniques using rigid and similarity transformations, one automated point-based approach based on the Iterative Closest Point (ICP) algorithm, and two automated intensity-based methods using mutual information (MI) and normalized mutual information (NMI). These techniques were employed for inter-modal registration of brain images of 9 subjects from a publicly available dataset, and the results were evaluated qualitatively via checkerboard images and quantitatively using root mean square error and MI criteria. In addition, for each inter-modal registration, a paired t-test was performed on the quantitative results in order to find any significant difference between the results of the studied registration techniques.}, annote = {{\#}{\#}CONTRIBUTIONS: I assisted with methods development and data analysis for this paper. I had the primary contributions to the interpretation of validation results for this work. I contributed to revising the intellectual content of the medical imaging methods applied. :{\#}{\#} {\#}{\#}JOURNAL{\_}TYPE: Journal :{\#}{\#}}, author = {Miri, Mohammad Saleh and Ghayoor, Ali and Johnson, Hans J. and Sonka, Milan}, booktitle = {Medical Imaging 2016: Biomedical Applications in Molecular, Structural, and Functional Imaging}, doi = {10.1117/12.2214209}, editor = {Gimi, Barjor and Krol, Andrzej}, file = {:Users/johnsonhj/Documents/Mendeley Desktop/Miri et al/Medical Imaging 2016 Biomedical Applications in Molecular, Structural, and Functional Imaging/Miri et al. - 2016 - Comparative study of multimodal intra-subject image registration methods on a publicly available database.pdf:pdf}, isbn = {9781510600232}, issn = {16057422}, keywords = {intensity-based,iterative closest point,multimodal registration,mutual information,point-based}, month = {mar}, pages = {97881Z}, title = {{Comparative study of multimodal intra-subject image registration methods on a publicly available database}}, url = {http://proceedings.spiedigitallibrary.org/proceeding.aspx?doi=10.1117/12.2214209}, volume = {9788}, year = {2016} }