@comment{Repository: DepositOnce -- institutional repository for research data and publications of TU Berlin, https://depositonce.tu-berlin.de}
@phdthesis{11303_13422,
  author   = {M{\"a}rz, Maximilian Arthus},
  title    = {Solving underdetermined inverse problems: From advanced sparsity models to deep learning},
  school   = {Technische Universit{\"a}t Berlin},
  year     = {2021},
  type     = {Doctoral Thesis},
  address  = {Berlin},
  doi      = {10.14279/depositonce-12206},
  url      = {https://doi.org/10.14279/depositonce-12206},
  keywords = {inverse problems, compressed sensing, deep learning, sparsity, optimization, inverse Probleme, tiefes Lernen, Optimierung},
  abstract = {This cumulative dissertation investigates and designs methods for the reconstruction of unknown signals from severely underdetermined linear measurements. Such inverse problems arise in a wide range of applications, reaching from biomedical imaging modalities like computed tomography to seismic inversion in geophysics. Although addressing similar recovery tasks, the thesis is divided into two parts:
The first one is concerned with advancing the theory of model-based recovery methods in light of advanced sparsity notions. The methodology of compressed sensing has demonstrated that an unknown signal can be robustly recovered from few indirect and randomized measurements by exploiting its inherent structure. A popular choice to accomplish this task is to solve a convex optimization problem, based on sparsity-promoting {$\ell_1$}-minimization. However, since real-world signals are usually not sparse themselves, a linear transformation is required in order to obtain a suitable low-complexity representation. Such an assumption leads to a synthesis- and an analysis-based sparsity model. For both notions, we derive novel sampling rates for {$\ell_1$}-regularization under the assumption of sub-Gaussian random measurements. Furthermore, we break a complexity-bottleneck for the important special case of total variation minimization in one spatial dimension. Our findings defy the conventional wisdom, which promotes a uniform description of the sample complexity in terms of sparsity. Indeed, a common contribution of the present thesis is that sparsity alone does typically not characterize the success of signal recovery beyond orthonormal bases. Instead, we obtain more accurate predictions for the required number of measurements by taking other structural and signal-dependent properties into account.
The second part of the thesis explores the potential of deep-learning-based reconstruction methods in numerical simulation studies. Such schemes do not rely on an explicit formulation of a data model as in the first part, but infer structured solutions from the knowledge of available training data. Despite their unprecedented empirical performance, to date, the operating principles of artificial neural networks are poorly understood from a mathematical perspective. The contributions to this area of research are twofold:
First, we analyze the robustness of learned end-to-end methods in an extensive numerical study. This effort is motivated by the fact that neural networks for classification are known to be susceptible to adversarial attacks. Contrary to previous claims in the literature, we demonstrate that this flaw does not necessarily carry over to deep-learning-based solution methods for inverse problems. Indeed, we show that standard architectures are remarkably robust against statistical and adversarial noise.
Secondly, we develop a hybrid reconstruction method for the severely ill-posed inverse problem of limited angle computed tomography. The effect of the particular subsampling structure on the measurements is well described by classical visibility results based on microlocal analysis. The proposed algorithm builds upon this mathematical characterization by recovering the visible part of the data via sparse regularization relying on a directional representation system. Only the information that is inaccessible to such a strategy is inferred by means of a deep neural network.},
}