
pyprUtilities module

This module provides utilities for the Python Phase Retrieval (PyPR) toolbox such as wave propagators, threshold operators, error metrics and others.
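
A minimal usage sketch before the full module source below (illustrative sizes; the dictionary keys are the ones read by the functions documented on this page, and the module is assumed to be importable as pyprUtilities):

import numpy as np
import pyprUtilities as pypr

# simulated Fourier magnitude data and a random starting guess
data = np.abs(np.fft.fftshift(np.fft.fft2(np.random.rand(64, 64))))
input_data = {"problem": {"propagator": "Fourier", "data": data}}
Psi = np.random.rand(64, 64) + 1j*np.random.rand(64, 64)

# one projection of the iterate onto the measured magnitudes
Psi = pypr.magnitudeProjection(input_data, Psi)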

"""
This module provides utilities for the Python Phase Retrieval (PyPR) toolbox
such as wave propagators, threshold operators, error metrics and others.
"""

import numpy as np
import sys
import skimage.util
#from pyprProxOperators import *
# right now magnitude projection occurs twice (here and in pyprProx...)
# to avoid cross imports...not so sure how to solve this elegantly.

def magnitudeProjection(input_data, Psi):
    """
    Calculates the projection onto a magnitude constraint set.

    Input:

        input_data:  structure containing all input data
        Psi:        function to be projected, usually the iterate
                    of some algorithm

    Output:

        outputWave:  the wave function projected onto the magnitude set

    """
    propagatedWave = wavePropagator(input_data, Psi, 1)
    projectedWave = MagProj(input_data["problem"]["data"], propagatedWave)
    outputWave = wavePropagator(input_data, projectedWave, -1)

    return outputWave


def calculateKLDistance(input_data, Psi):
    """
    For a given wave function Psi we calculate the discrete
    Kullback-Leibler distance from Psi to the measurement set M.

    Based on Approx_PM_Fresnel_Poisson.m by D. Russell Luke.
    """

    # Determine all indices where the pointwise squared measurements
    # vanish.
    I = input_data["problem"]["data"]*input_data["problem"]["data"]
    idxVanishingI = np.nonzero(I == 0)
    I[idxVanishingI] = 1

    # Propagate the wave function into the Fresnel/Fourier domain
    # and calculate its pointwise modulus squared values.
    propPsi = wavePropagator(input_data, Psi, 1)
    propPsiIntensity = propPsi*np.conj(propPsi)

    # Normalize the propagated intensity by the squared measurements
    # and set the ratio to 1 wherever the measurements vanish, so that
    # those entries do not contribute to the logarithm below.

    normalizedPropPsiIntensity = propPsiIntensity/I
    normalizedPropPsiIntensity[idxVanishingI] = 1

    # Set all entries of the propagated intensity to zero where
    # the pointwise squared measurements are zero.
    propPsiIntensity[idxVanishingI] = 0

    # Determine all entries where the normalizedPropPsiIntensity is
    # still zero and set them to 1.
    idxNormalizedPropPsiIntensity = normalizedPropPsiIntensity == 0
    normalizedPropPsiIntensity[idxNormalizedPropPsiIntensity] = 1

    # All entries where either the modulus squared of the propagated
    # wave function is equal to the measured magnitude squared or where
    # the propagated wave function vanishes are now equal to one. Hence,
    # the logarithm of those entries is zero and does not contribute
    # to the KLDistance.

    normalizedPropPsiIntensity = np.log(normalizedPropPsiIntensity)
    KLDistance = np.sum(propPsiIntensity*normalizedPropPsiIntensity + I - propPsiIntensity)
    return np.real(KLDistance)



def computeError(input_data, newIterate, lastIterate, shadow):
    """
    Computes the error using a specified norm given in the
    input_data structure.

    Input:

            input_data: structure containing all problem information.
            newIterate: x^(k+1)
            lastIterate: x^(k)
            shadow: P_B x^k

    Note that for simulated data, input_data.Psi contains the true solution
    and we can compare the real error. Otherwise we need to use some
    different errors like norm(newIterate - lastIterate).

    This function is work in progress and future versions should
    take into account the type of object defined in input_data["problemType"]
    for a more accurate and meaningful error.
    """

    # implement metrics based on object type, e.g., for phase objects
    # calculate an error of the phase, not of x itself.
    if input_data["problemType"] == "phase":
        if "data" in input_data:
            return np.linalg.norm(np.angle(newIterate)-np.angle(input_data["Psi"]))
            #return np.linalg.norm(newIterate-lastIterate)

    if "data" in input_data:
        if "metric" not in input_data["algorithm"]:
            error = np.linalg.norm(newIterate-input_data["Psi"])
        if input_data["algorithm"]["metric"] == "shadow":
            error = np.linalg.norm(shadow-input_data["Psi"])
        elif input_data["algorithm"]["metric"] == "norm":
            error = np.linalg.norm(newIterate-input_data["Psi"])
    else:
        if "metric" not in input_data["algorithm"]:
            input_data["algorithm"]["metric"] = "shadow"
        if input_data["algorithm"]["metric"] == "shadow":
            error = np.linalg.norm(shadow-magnitudeProjection(input_data, lastIterate))
        elif input_data["algorithm"]["metric"] == "setdistance":
            error = np.power(np.linalg.norm(magnitudeProjection(input_data, shadow)-shadow),2)/np.power(np.linalg.norm(shadow),2)
        else:
            error = np.linalg.norm(newIterate-lastIterate)
    return error

def computeRelaxationParameter(input_data, k):
    """"
    For a given input_data structure and an iteration number, a relaxation
    parameter is computed.

        Input: input_data structure.
        Output: Relaxation parameter beta_k for the RAAR algorithm.
    """
    if "parameter" in input_data["algorithm"]:
        if input_data["algorithm"]["parameter"] == "dynamic":
            if "beta" in input_data["algorithm"]:
                if "null" in input_data["algorithm"]["beta"]:
                    beta_0 = input_data["algorithm"]["beta"]["null"]
                else:
                    beta_0 = 0.99
                if "max" in input_data["algorithm"]["beta"]:
                    beta_max = input_data["algorithm"]["beta"]["max"]
                else:
                    beta_max = 0.55
                if "switch" in input_data["algorithm"]["beta"]:
                    beta_switch = input_data["algorithm"]["beta"]["switch"]
                else:
                    beta_switch = 20
            else:
                beta_0 = 0.99
                beta_max = 0.55
                beta_switch = 20
            beta = np.exp(np.power(-k/beta_switch,3))*beta_0+(1-np.exp(np.power(-k/beta_switch,3)))*beta_max
        elif input_data["algorithm"]["parameter"] == "constant":
            if "beta" in input_data["algorithm"]:
                if "value" in input_data["algorithm"]["beta"]:
                    beta = input_data["algorithm"]["beta"]["value"]
            else: beta = 0.55
    else:
        beta = 0.55
    return beta


def computeShrinkageParameter(input_data, k):
    """
    For a given input_data structure and an iteration number k, a shrinkage/threshold
    parameter is computed.

        Input: input_data structure.
               Iteration number k.
        Output: Shrinkage parameter gamma_k for the proximity
                operator.
    """
    if "parameter" in input_data["problem"]["constraint"]:
        if input_data["problem"]["constraint"]["parameter"] == "dynamic":
            gamma = input_data["problem"]["constraint"]["gamma0"]/k
        elif input_data["problem"]["constraint"]["parameter"] == "constant":
            gamma = input_data["problem"]["constraint"]["gamma0"]
    else:
        if "gamma0" in input_data["problem"]["constraint"]:
            gamma = input_data["problem"]["constraint"]["gamma0"]
        else:
            sys.exit("No shrinkage parameter defined in input_data['algorithm']['constraint'].")
    return gamma


def createNoisyMeasurement(input_data):
    """
    Adds noise to the data.

    Given an input_data structure and a measurement absPsi, this
    function adds noise to the clean (simulated) data. Depending
    on input_data["noise"], several variants and intensities
    are possible.

    This function is heavily work in progress and up to now
    only supports Poisson noise.
    """
    if input_data["noise"]["type"] == 'poisson':
        absPsi = np.power(np.abs(input_data["problem"]["data"]),2)
        PsiNoisy = np.floor(absPsi * input_data["noise"]["level"])
        PsiNoisy = skimage.util.random_noise(PsiNoisy, 'poisson', clip=False)/input_data["noise"]["level"]
        PsiNoisy = np.abs(np.sqrt(PsiNoisy))
    return PsiNoisy


def createSupportIdx(input_data, R, type):
    """
    Creates the index arrays of all elements which lie (a) inside a
    ball of radius R or (b) inside a centered box with half-side
    length R. R has to be in the interval (0,1). By default,
    a box is chosen.
    """
    ndim = np.asarray(input_data["problem"]["data"].shape)
    xn = np.linspace(-1,1, ndim[1])
    yn = np.linspace(-1,1, ndim[0])
    grid = np.meshgrid(xn, yn)
    if type == 'ball':
        supportIdx = np.nonzero(np.power(grid[0],2) + np.power(grid[1],2) <= np.power(R,2))
    else:
        supportIdx = np.nonzero((np.abs(grid[0]) <= R) & (np.abs(grid[1]) <= R))
    return supportIdx


def MagProj(constr, u):
    """
    The following function is based on MagProj.m written by Russell Luke, see
    <a href="http://num.math.uni-goettingen.de/proxtoolbox/">Proxtoolbox</a>.
    This function implements a numerically stable way to project onto a magnitude constraint.
    """
    EPSILON = 1e-10
    # EPSILON = 1e-15*np.max(u)

    # Fancy way of computing the projections.  NOT EXACT:
    # it's really the gradient of a perturbed squared distance
    # function to the constraint set as described in Luke-Burke-Lyon,
    # SIREV 2002.
    modsq_u = np.conj(u)*u
    denom = np.power(modsq_u+EPSILON, 0.5)
    denom2 = modsq_u+EPSILON
    r_eps = (modsq_u / denom) - constr
    dr_eps = (denom2 + EPSILON) / (denom2 * denom)
    unew = (1-dr_eps * r_eps) * u
    return unew


def wavePropagator(input_data, Psi, type=1):
    """
    Computes the wave propagation for a given wave function Psi
    and propagation type. If no type is given, a forward transform is
    performed; if type=-1, an inverse transform is performed. This file can
    be modified to your taste if you want to use and allow for different
    propagators.
    """
    if input_data["problem"]["propagator"] == 'Fresnel':
        propagatedWave = prop_fresnel(Psi, type*input_data["problem"]["Fx"], type*input_data["problem"]["Fy"])
    elif input_data["problem"]["propagator"] == 'Fourier':
        if type == -1:
            propagatedWave = np.fft.ifft2(np.fft.ifftshift(Psi))
        else:
            propagatedWave = np.fft.fftshift(np.fft.fft2(Psi))
    else:
        sys.exit("No propagation method defined in input_data object")
    return propagatedWave

def prop_fresnel(im, Fx,Fy,f=None):
    """
    prop_fresnel(im, Fx, Fy, f) propagates the wave field im using the
    Fresnel numbers Fx and Fy.

    The propagation is based only on the Fresnel numbers and not on
    specific values.

    The propagation kernel is sampled in real space, which is better for
    propagations to smaller Fresnel numbers (stronger holographic regime).

    Author: Martin Krenkel

    Translated to Python by Stefan Loock.
    """

    N = np.asarray(im.shape)
    Ny = N[0]
    Nx = N[1]
    px = np.abs(1/(Nx*Fx))
    py = np.abs(1/(Ny*Fy))
    if f is None:
        f = max(px,py)
    if np.max(N)*f > 4000:
        print("Warning: Enlarged image would be very large (" + str(Nx*f) + " x " + str(Ny*f) + "), maybe manually enter magnification factor f.")
    if f>1:
        print("Probe-FOV is enlarged by a factor " + str(f) + " for propagation.")

        # enlarge array to these sizes
        pady = np.round((np.ceil(f)*Ny - Ny)/2).astype(int)
        padx = np.round((np.ceil(f)*Nx - Nx)/2).astype(int)
        im_l = np.pad(im, ((pady, pady), (padx, padx)), 'edge')

        # new coordinate system
        N_l = np.asarray(im_l.shape)
        Ny_l = N_l[0]
        Nx_l = N_l[1]
        dqy = 2*np.pi / Ny_l
        dqx = 2*np.pi / Nx_l

        Q = np.meshgrid((np.arange(1,Nx_l+1)-np.floor(Nx_l/2)-1)*dqx, (np.arange(1,Ny_l+1)-np.floor(Ny_l/2)-1)*dqy)
        kappa_x = -1/2*np.fft.ifftshift(np.power(Q[0],2))
        kappa_y = -1/2*np.fft.ifftshift(np.power(Q[1],2))

        # propagation
        Im_l = np.fft.fftshift(np.fft.ifft2(np.fft.fft2(np.fft.ifftshift(im_l))*np.exp(1j*kappa_x/(2*np.pi*Fx)+1j*kappa_y/(2*np.pi*Fy))))
        Im = Im_l[pady:Ny_l-pady,padx:Nx_l-padx]

    else:
        dqx = 2*np.pi/Nx
        dqy = 2*np.pi/Ny
        Q = np.meshgrid((np.arange(1,Nx+1)-np.floor(Nx/2)-1)*dqx, (np.arange(1,Ny+1)-np.floor(Ny/2)-1)*dqy)
        kappa_x = -1/2*np.fft.ifftshift(np.power(Q[0],2))
        kappa_y = -1/2*np.fft.ifftshift(np.power(Q[1],2))

        # propagation
        Im = np.fft.fftshift(np.fft.ifft2(np.fft.fft2(np.fft.ifftshift(im))*np.exp(1j*kappa_x/(2*np.pi*Fx)+1j*kappa_y/(2*np.pi*Fy))))

    return Im

def retrieveProximityName(input_data):
    """
    A dirty little hack to improve simplicity
    of evaluation of proximity operators. Given the specified
    constraint name in the input_data object, the correct name
    for the Python function which contains the corresponding
    proximity operator is returned as a string. This is hard-coded
    here and can be extended to your liking.
    """

    if "name" not in input_data["problem"]["constraint"]:
        sys.exit("No constraint name defined.")
    if (input_data["problem"]["constraint"]["name"] == "shearlet thresholding") or (input_data["problem"]["constraint"]["name"] == "shearlet thresholding and range"):
        proximityOperatorName = "proxShearlet"
    elif input_data["problem"]["constraint"]["name"] == "support":
        proximityOperatorName = "proxSupportConstraint"
    elif input_data["problem"]["constraint"]["name"] == "range":
        proximityOperatorName = "proxRangeConstraint"
    elif input_data["problem"]["constraint"]["name"] == "support and range":
        proximityOperatorName = "proxSupportRangeConstraint"
    else:
        sys.exit("The provided constraint name " + input_data["problem"]["constraint"]["name"]
        + " is not supported. Check retrieveProximityName in pyprUtilities.py for details.")
    return proximityOperatorName


def retrieveShrinkageName(input_data):
    """
    A dirty little hack to improve simplicity
    of evaluation of shrinkage operators. Given the specified
    constraint name in the input_data object, the correct name
    for the Python function which contains the corresponding
    shrinkage operator is returned as a string. This is hard-coded
    here and can be extended to your liking.
    """

    if "info" not in input_data["problem"]["constraint"]:
        sys.exit("No shrinkage mapping defined.")
    if input_data["problem"]["constraint"]["info"] == "soft thresholding":
        shrinkageOperatorName = "softThresholding"
    elif input_data["problem"]["constraint"]["info"] == "smooth hard":
        shrinkageOperatorName = "smoothHardThresholding"
    else:
        sys.exit("The provided shrinkage name " + input_data["problem"]["constraint"]["info"] + " is not supported."
                 + "Check retrieveShrinkageName in pyprUtilities.py for details.")
    return shrinkageOperatorName

def smoothHardThresholding(input_data, Psi, gamma):
    """
    Computes the Smooth-Hard-Thresholding discussed in Section 4.6
    (Generalizations to other threshold functions) of
    'Phase Retrieval with Sparsity Constraints', Stefan Loock,
    PhD thesis, Georg-August-Universität Göttingen.
    """

    if "alpha" in input_data["problem"]["constraint"]:
        alpha = input_data["problem"]["constraint"]["alpha"]
    else:
        alpha = 0.0001

    hThrIdxs = np.abs(Psi) <= gamma
    Psi[hThrIdxs] = 0
    smoothHardPsi = np.sign(Psi) * np.abs(Psi) * np.exp(-alpha/np.power(np.exp(np.abs(Psi)-gamma)-1,2))
    return smoothHardPsi


def softThresholding(input_data, Psi, gamma):
    """
    Applies the soft-thresholding operator with parameter gamma to
    a real-valued matrix/array Psi.
    """
    hThrIdxs = np.abs(Psi) <= gamma
    Psi[hThrIdxs] = 0
    sThrIdxs = np.abs(Psi) > gamma
    Psi[sThrIdxs] = Psi[sThrIdxs] - np.sign(Psi[sThrIdxs])*gamma
    return Psi

Functions

def MagProj(constr, u)

The following function is based on MagProj.m written by Russell Luke, see Proxtoolbox. This function implements a numerically stable way to project onto a magnitude constraint.

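A small check with illustrative values (np and pypr as imported in the sketch at the top of the page): after the projection, the magnitudes of u match the prescribed magnitudes constr up to the EPSILON perturbation.

u = np.random.rand(8, 8) + 1j*np.random.rand(8, 8)
constr = np.ones((8, 8))                          # prescribed magnitudes
u_proj = pypr.MagProj(constr, u)
print(np.max(np.abs(np.abs(u_proj) - constr)))    # close to zero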

def calculateKLDistance(input_data, Psi)

For a given wave function Psi we calculate the discrete Kullback-Leibler distance from Psi to the measurement set M.

Based on Approx_PM_Fresnel_Poisson.m by D. Russell Luke.

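A hedged sketch (np and pypr as imported at the top of the page): up to the special handling of vanishing measurements, the computed quantity is sum(I2*log(I2/I) + I - I2), where I2 is the propagated intensity and I the squared measurements, so for exact Fourier data it is numerically zero.

psi_true = np.random.rand(32, 32)
data = np.abs(np.fft.fftshift(np.fft.fft2(psi_true)))
input_data = {"problem": {"propagator": "Fourier", "data": data}}
print(pypr.calculateKLDistance(input_data, psi_true))   # ~0 for noise-free data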

def computeError(input_data, newIterate, lastIterate, shadow)

Computes the error using a specified norm given in the input_data structure.

Input:

    input_data: structure containing all problem information.
    newIterate: x^(k+1)
    lastIterate: x^(k)
    shadow: P_B x^k

Note that for simulated data, input_data.Psi contains the true solution and we can compare the real error. Otherwise we need to use some different errors like norm(newIterate - lastIterate).

This function is work in progress and future versions should take into account the type of object defined in input_data["problemType"] for a more accurate and meaningful error.

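A hedged sketch of the simulated-data branch (np and pypr as imported at the top of the page; key values other than those read by the function are illustrative): with the true solution stored under "Psi" and the "norm" metric, the error is the distance of the iterate to the truth.

psi_true = np.random.rand(16, 16)
iterate = psi_true + 0.01*np.random.rand(16, 16)
input_data = {"problemType": "object",         # any value other than "phase"
              "data": psi_true,                # a top-level "data" key selects the known-solution branch
              "Psi": psi_true,
              "algorithm": {"metric": "norm"}}
err = pypr.computeError(input_data, iterate, iterate, iterate)   # ||iterate - psi_true||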

def computeRelaxationParameter(input_data, k)

" For a given input_data structure and an iteration number, a relaxation parameter is computed.

Input: input_data structure.
Output: Relaxation parameter beta_k for the RAAR algorithm.
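
A small sketch of the "dynamic" schedule (illustrative settings; np and pypr as imported at the top of the page): beta_k starts near beta_0 and tends to beta_max around iteration beta_switch.

input_data = {"algorithm": {"parameter": "dynamic",
                            "beta": {"null": 0.99, "max": 0.55, "switch": 20}}}
for k in (1, 20, 100):
    print(k, pypr.computeRelaxationParameter(input_data, k))   # ~0.99, ~0.71, 0.55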

def computeShrinkageParameter(input_data, k)

For a given input_data structure and an iteration number k, a shrinkage/threshold parameter is computed.

Input: input_data structure.
       Iteration number k.
Output: Shrinkage parameter gamma_k for the proximity
        operator.
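
A small sketch: with the "dynamic" option the threshold decays like gamma0/k.

input_data = {"problem": {"constraint": {"parameter": "dynamic", "gamma0": 0.1}}}
print([pypr.computeShrinkageParameter(input_data, k) for k in (1, 2, 10)])   # ~[0.1, 0.05, 0.01]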

def createNoisyMeasurement(input_data)

Adds noise to the data.

Given an input_data structure and a measurement absPsi, this function adds noise to the clean (simulated) data. Depending on input_data["noise"], several variants and intensities are possible.

This function is heavily work in progress and up to now only supports Poisson noise.


def createSupportIdx(input_data, R, type)

Creates the index arrays of all elements which lie (a) inside a ball of radius R or (b) inside a centered box with half-side length R. R has to be in the interval (0,1). By default, a box is chosen.

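A sketch of the intended use (illustrative sizes; only the shape of input_data["problem"]["data"] is read; np and pypr as imported at the top of the page): build a boolean support mask from the returned index arrays.

input_data = {"problem": {"data": np.zeros((64, 64))}}
idx = pypr.createSupportIdx(input_data, 0.25, 'ball')
mask = np.zeros((64, 64), dtype=bool)
mask[idx] = True
Psi = np.random.rand(64, 64)
Psi[~mask] = 0        # keep only the part inside the ball of radius 0.25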

def magnitudeProjection(input_data, Psi)

Calculates the projection onto a magnitude constraint set.

Input:

input_data:  structure containing all input data
Psi:        function to be projected, usually the iterate
            of some algorithm

Output:

outputWave:  the wave function projected onto the magnitude set
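
A quick consistency check (Fourier propagator, illustrative sizes; np and pypr as imported at the top of the page): propagating the projected wave again shows that its magnitudes agree with the data up to the EPSILON perturbation used in MagProj.

data = np.abs(np.fft.fftshift(np.fft.fft2(np.random.rand(32, 32))))
input_data = {"problem": {"propagator": "Fourier", "data": data}}
Psi = np.random.rand(32, 32) + 1j*np.random.rand(32, 32)
PM_Psi = pypr.magnitudeProjection(input_data, Psi)
print(np.max(np.abs(np.abs(np.fft.fftshift(np.fft.fft2(PM_Psi))) - data)))   # small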

def prop_fresnel(im, Fx, Fy, f=None)

prop_fresnel(im, Fx, Fy, f) propagates the wave field im using the Fresnel numbers Fx and Fy.

The propagation is based only on the Fresnel numbers and not on specific values.

The propagation kernel is sampled in real space, which is better for propagations to smaller Fresnel numbers (stronger holographic regime).

Author: Martin Krenkel

Translated to Python by Stefan Loock.

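A round-trip sketch with illustrative Fresnel numbers (np and pypr as imported at the top of the page): propagating forward and then with negated Fresnel numbers (which is what wavePropagator does for type=-1) recovers the original field.

im = np.exp(1j*np.random.rand(256, 256))
im_prop = pypr.prop_fresnel(im, 5e-3, 5e-3)
im_back = pypr.prop_fresnel(im_prop, -5e-3, -5e-3)
print(np.max(np.abs(im_back - im)))   # numerically zero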

def retrieveProximityName(input_data)

A dirty little hack to improve simplicity of evaluation of proximity operators. Given the specified constraint name in the input_data object, the correct name for the Python function which contains the corresponding proximity operator is returned as a string. This is hard-coded here and can be extended to your liking.

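A small sketch of the mapping (pypr as imported at the top of the page):

input_data = {"problem": {"constraint": {"name": "support"}}}
print(pypr.retrieveProximityName(input_data))   # "proxSupportConstraint"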

def retrieveShrinkageName(input_data)

A dirty little hack to improve simplicity of evaluation of shrinkage operators. Given the specified constraint name in the input_data object, the correct name for the Python function which contains the corresponding shrinkage operator is returned as a string. This is hard-coded here and can be extended to your liking.


def smoothHardThresholding(input_data, Psi, gamma)

Computes the Smooth-Hard-Thresholding discussed in Section 4.6 (Generalizations to other threshold functions) of 'Phase Retrieval with Sparsity Constraints', Stefan Loock, PhD thesis, Georg-August-Universität Göttingen.

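A small sketch (np and pypr as imported at the top of the page; alpha is read from the constraint settings): entries with |Psi| <= gamma are set to zero, larger entries are damped only slightly.

input_data = {"problem": {"constraint": {"alpha": 0.0001}}}
Psi = np.array([0.05, 0.5, 2.0, -3.0])
print(pypr.smoothHardThresholding(input_data, Psi, 0.1))   # approx. [0, 0.4998, 2.0, -3.0]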

def softThresholding(input_data, Psi, gamma)

Applies the soft-thresholding operator with parameter gamma to a real-valued matrix/array Psi.

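A small sketch (np and pypr as imported at the top of the page; input_data is not used by this operator):

Psi = np.array([-0.3, 0.05, 0.8])
print(pypr.softThresholding({}, Psi, 0.1))   # approx. [-0.2, 0.0, 0.7]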

def wavePropagator(input_data, Psi, type=1)

Computes the wave propagation for a given wave function Psi and propagation type. If no type is given, a forward transform is performed; if type=-1, an inverse transform is performed. This file can be modified to your taste if you want to use and allow for different propagators.

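A round-trip sketch with the Fourier propagator (np and pypr as imported at the top of the page): a forward transform (type=1) followed by an inverse transform (type=-1) returns the original wave function.

input_data = {"problem": {"propagator": "Fourier"}}
Psi = np.random.rand(64, 64)
roundtrip = pypr.wavePropagator(input_data, pypr.wavePropagator(input_data, Psi, 1), -1)
print(np.allclose(roundtrip, Psi))   # True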