| author | epapoutsellis <epapoutsellis@gmail.com> | 2019-05-07 23:03:30 +0100 | 
|---|---|---|
| committer | epapoutsellis <epapoutsellis@gmail.com> | 2019-05-07 23:03:30 +0100 | 
| commit | 0a1af2dd369c13639c0348d89a156d762f9832eb (patch) | |
| tree | 56d116cb8611d5d083a2100844c530f1836ae96a | |
| parent | df028207491f76c03519a22eb5211102b88889da (diff) | |
| download | framework-0a1af2dd369c13639c0348d89a156d762f9832eb.tar.gz framework-0a1af2dd369c13639c0348d89a156d762f9832eb.tar.bz2 framework-0a1af2dd369c13639c0348d89a156d762f9832eb.tar.xz framework-0a1af2dd369c13639c0348d89a156d762f9832eb.zip | |
delete build pycache
69 files changed, 0 insertions, 8813 deletions
diff --git a/Wrappers/Python/build/lib/ccpi/__init__.py b/Wrappers/Python/build/lib/ccpi/__init__.py
deleted file mode 100644
index cf2d93d..0000000
--- a/Wrappers/Python/build/lib/ccpi/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# -*- coding: utf-8 -*-
-#   This work is part of the Core Imaging Library developed by
-#   Visual Analytics and Imaging System Group of the Science Technology
-#   Facilities Council, STFC
-
-#   Copyright 2018 Edoardo Pasca
-
-#   Licensed under the Apache License, Version 2.0 (the "License");
-#   you may not use this file except in compliance with the License.
-#   You may obtain a copy of the License at
-
-#       http://www.apache.org/licenses/LICENSE-2.0
-
-#   Unless required by applicable law or agreed to in writing, software
-#   distributed under the License is distributed on an "AS IS" BASIS,
-#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#   See the License for the specific language governing permissions and
-#   limitations under the License.
\ No newline at end of file
diff --git a/Wrappers/Python/build/lib/ccpi/contrib/__init__.py b/Wrappers/Python/build/lib/ccpi/contrib/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/Wrappers/Python/build/lib/ccpi/contrib/__init__.py
+++ /dev/null
diff --git a/Wrappers/Python/build/lib/ccpi/contrib/optimisation/__init__.py b/Wrappers/Python/build/lib/ccpi/contrib/optimisation/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/Wrappers/Python/build/lib/ccpi/contrib/optimisation/__init__.py
+++ /dev/null
diff --git a/Wrappers/Python/build/lib/ccpi/contrib/optimisation/algorithms/__init__.py b/Wrappers/Python/build/lib/ccpi/contrib/optimisation/algorithms/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/Wrappers/Python/build/lib/ccpi/contrib/optimisation/algorithms/__init__.py
+++ /dev/null
diff --git a/Wrappers/Python/build/lib/ccpi/contrib/optimisation/algorithms/spdhg.py b/Wrappers/Python/build/lib/ccpi/contrib/optimisation/algorithms/spdhg.py
deleted file mode 100644
index 263a7cd..0000000
--- a/Wrappers/Python/build/lib/ccpi/contrib/optimisation/algorithms/spdhg.py
+++ /dev/null
@@ -1,338 +0,0 @@
-#   Copyright 2018 Matthias Ehrhardt, Edoardo Pasca
 -
 -#   Licensed under the Apache License, Version 2.0 (the "License");
 -#   you may not use this file except in compliance with the License.
 -#   You may obtain a copy of the License at
 -
 -#       http://www.apache.org/licenses/LICENSE-2.0
 -
 -#   Unless required by applicable law or agreed to in writing, software
 -#   distributed under the License is distributed on an "AS IS" BASIS,
 -#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 -#   See the License for the specific language governing permissions and
 -#   limitations under the License.
 -
 -from __future__ import absolute_import
 -from __future__ import division
 -from __future__ import print_function
 -from __future__ import unicode_literals
 -
 -import numpy
 -
 -from ccpi.optimisation.funcs import Function
 -from ccpi.framework import ImageData 
 -from ccpi.framework import AcquisitionData
 -
 -
 -class spdhg():
 -    """Computes a saddle point with a stochastic PDHG.
 -
 -    That is, a solution (x*, y*), with y* = (y*_1, ..., y*_n), such that
 -
 -    (x*, y*) in arg min_x max_y sum_i=1^n <y_i, A_i x> - f*[i](y_i) + g(x)
 -
 -    where g : X -> IR_infty and f[i] : Y[i] -> IR_infty are convex, l.s.c. and
 -    proper functionals. For this algorithm, they all may be non-smooth and no
 -    strong convexity is assumed.
 -
 -    Parameters
 -    ----------
 -    f : list of functions
 -        Functionals Y[i] -> IR_infty that all have a convex conjugate with a
 -        proximal operator, i.e.
 -        f[i].convex_conj.prox(sigma[i]) : Y[i] -> Y[i].
 -    g : function
 -        Functional X -> IR_infty that has a proximal operator, i.e.
 -        g.prox(tau) : X -> X.
 -    A : list of functions
 -        Operators A[i] : X -> Y[i] that possess adjoints: A[i].adjoint
 -    x : primal variable, optional
 -        By default equals 0.
 -    y : dual variable, optional
 -        Part of a product space. By default equals 0.
 -    z : variable, optional
 -        Adjoint of dual variable, z = A^* y. By default equals 0 if y = 0.
 -    tau : scalar / vector / matrix, optional
 -        Step size for primal variable. Note that the proximal operator of g
 -        has to be well-defined for this input.
 -    sigma : scalar / vector / matrix, optional
 -        Step size for the dual variable. Note that
 -        the proximal operator related to f (see above) has to be well-defined
 -        for this input.
 -    prob : list of scalars, optional
 -        Probabilities prob[i] that a subset i is selected in each iteration.
 -        If fun_select is not given, then the sum of all probabilities must
 -        equal 1.
 -    A_norms : list of scalars, optional
 -        Norms of the operators in A. Can be used to determine the step sizes
 -        tau and sigma and the probabilities prob.
 -    fun_select : function, optional
 -        Function that selects blocks at every iteration IN -> {1,...,n}. By
 -        default this is serial sampling, fun_select(k) selects an index
 -        i \in {1,...,n} with probability prob[i].
 -
 -    References
 -    ----------
 -    [CERS2018] A. Chambolle, M. J. Ehrhardt, P. Richtarik and C.-B. Schoenlieb,
 -    *Stochastic Primal-Dual Hybrid Gradient Algorithm with Arbitrary Sampling
 -    and Imaging Applications*. SIAM Journal on Optimization, 28(4), 2783-2808
 -    (2018) https://doi.org/10.1137/17M1134834
 -
 -    [E+2017] M. J. Ehrhardt, P. J. Markiewicz, P. Richtarik, J. Schott,
 -    A. Chambolle and C.-B. Schoenlieb, *Faster PET reconstruction with a
 -    stochastic primal-dual hybrid gradient method*. Wavelets and Sparsity XVII,
 -    58 (2017) http://doi.org/10.1117/12.2272946.
 -    
 -    [EMS2018] M. J. Ehrhardt, P. J. Markiewicz and C.-B. Schoenlieb, *Faster 
 -    PET Reconstruction with Non-Smooth Priors by Randomization and 
 -    Preconditioning*. (2018) ArXiv: http://arxiv.org/abs/1808.07150 
 -    """
 -    
 -    def __init__(self, f, g, A, x=None, y=None, z=None, tau=None, sigma=None, 
 -                 prob=None, A_norms=None, fun_select=None):
 -        # fun_select is optional and by default performs serial sampling
 -                
 -        if x is None:
 -            x = A[0].allocate_direct(0)
 -            
 -        if y is None:
 -            if z is not None:
 -                raise ValueError('y and z have to be defaulted together')
 -                       
 -            y = [Ai.allocate_adjoint(0) for Ai in A]
 -            z = 0 * x.copy()
 -            
 -        else:
 -            if z is None:
 -                raise ValueError('y and z have to be defaulted together')
 -                            
 -        if A_norms is not None:
 -            if tau is not None or sigma is not None or prob is not None:
 -                raise ValueError('Either A_norms or (tau, sigma, prob) may '
 -                                 'be given, not both')
 -                
 -            tau = 1 / sum(A_norms)
 -            sigma = [1 / nA for nA in A_norms]
 -            prob = [nA / sum(A_norms) for nA in A_norms]
 -
 -            #uniform prob, needs different sigma and tau
 -            #n = len(A)
 -            #prob = [1./n] * n
 -                        
 -        if fun_select is None:
 -            if prob is None:
 -                raise ValueError('prob was not determined')
 -                
 -            def fun_select(k):
 -                return [int(numpy.random.choice(len(A), 1, p=prob))]
 -
 -        self.iter = 0
 -        self.x = x
 -        
 -        self.y = y
 -        self.z = z
 -        
 -        self.f = f
 -        self.g = g
 -        self.A = A
 -        self.tau = tau
 -        self.sigma = sigma
 -        self.prob = prob
 -        self.fun_select = fun_select
 -
 -        # Initialize variables
 -        self.z_relax = z.copy()
 -        self.tmp = self.x.copy()
 -        
 -    def update(self):
 -        # select block
 -        selected = self.fun_select(self.iter)
 -
 -        # update primal variable
 -        #tmp = (self.x - self.tau * self.z_relax).as_array()
 -        #self.x.fill(self.g.prox(tmp, self.tau))
 -        self.tmp = - self.tau * self.z_relax
 -        self.tmp += self.x
 -        self.x = self.g.prox(self.tmp, self.tau)
 -
 -        # update dual variable and z, z_relax
 -        self.z_relax = self.z.copy()
 -        for i in selected:
 -            # save old yi
 -            y_old = self.y[i].copy()
 -
 -            # y[i]= prox(tmp)
 -            tmp = y_old + self.sigma[i] * self.A[i].direct(self.x)
 -            self.y[i] = self.f[i].convex_conj.prox(tmp, self.sigma[i])
 -
 -            # update adjoint of dual variable
 -            dz = self.A[i].adjoint(self.y[i] - y_old)
 -            self.z += dz
 -
 -            # compute extrapolation
 -            self.z_relax += (1 + 1 / self.prob[i]) * dz
 -
 -        self.iter += 1            
 -
 -
 -## Functions
 -    
 -class KullbackLeibler(Function):
 -    def __init__(self, data, background):
 -        self.data = data
 -        self.background = background
 -        self.__offset = None
 -        
 -    def __call__(self, x):
 -        """Return the KL-diveregnce in the point ``x``.
 -
 -        If any component of ``x`` is non-positive, the value is positive
 -        infinity.
 -
 -        Needs one extra array of memory of the size of the data.
 -        """
 -
 -        # define short variable names
 -        y = self.data
 -        r = self.background
 -
 -        # Compute
 -        #   sum(x + r - y + y * log(y / (x + r)))
 -        # = sum(x - y * log(x + r)) + self.offset
 -        # Assume that
 -        #   x + r > 0
 -
 -        # sum the result up
 -        obj = numpy.sum(x - y * numpy.log(x + r)) + self.offset()
 -
 -        if numpy.isnan(obj):
 -            # In this case, some element was less than or equal to zero
 -            return numpy.inf
 -        else:
 -            return obj
 -    
 -    @property
 -    def convex_conj(self):
 -        """The convex conjugate functional of the KL-functional."""
 -        return KullbackLeiblerConvexConjugate(self.data, self.background)
 -    
 -    def offset(self):
 -        """The offset which is independent of the unknown."""
 -    
 -        if self.__offset is None:
 -            # define short variable names
 -            y = self.data
 -            r = self.background
 -
 -            # numpy.maximum(y, 1) makes the y*log(y) term vanish where y == 0
 -            tmp = r - y + y * numpy.log(numpy.maximum(y, 1))
 -
 -            # sum the result up
 -            self.__offset = numpy.sum(tmp)
 -    
 -        return self.__offset
 -
 -#     def __repr__(self):
 -#         """to be added???"""
 -#         """Return ``repr(self)``."""
 -        # return '{}({!r}, {!r}, {!r})'.format(self.__class__.__name__,
 -          ##                                    self.domain, self.data,
 -           #                                   self.background)
 -
 -    
 -class KullbackLeiblerConvexConjugate(Function):
 -    """The convex conjugate of Kullback-Leibler divergence functional.
 -
 -    Notes
 -    -----
 -    The functional :math:`F^*` with prior :math:`g>0` is given by:
 -
 -    .. math::
 -        F^*(x)
 -        =
 -        \\begin{cases}
 -            \\sum_{i} \left( -g_i \ln(1 - x_i) \\right)
 -            & \\text{if } x_i < 1 \\forall i
 -            \\\\
 -            +\\infty & \\text{else}
 -        \\end{cases}
 -
 -    See Also
 -    --------
 -    KullbackLeibler : convex conjugate functional
 -    """
 -
 -    def __init__(self, data, background):
 -        self.data = data
 -        self.background = background
 -
 -    def __call__(self, x):
 -        y = self.data
 -        r = self.background
 -
 -        tmp = numpy.sum(- x * r - y * numpy.log(1 - x))
 -
 -        if numpy.isnan(tmp):
 -            # In this case, some element was larger than or equal to one
 -            return numpy.inf
 -        else:
 -            return tmp
 -
 -
 -    def prox(self, x, tau, out=None):
 -        # Let y = data, r = background, z = x + tau * r
 -        # Compute 0.5 * (z + 1 - sqrt((z - 1)**2 + 4 * tau * y))
 -        # Currently it needs 3 extra copies of memory.
 -
 -        if out is None:
 -            out = x.copy()
 -
 -        # define short variable names
 -        try: # this should be standard SIRF/CIL mode
 -            y = self.data.as_array()
 -            r = self.background.as_array()
 -            x = x.as_array()
 -            
 -            try:
 -                taua = tau.as_array()
 -            except:
 -                taua = tau
 -    
 -            z = x + taua * r
 -                        
 -            out.fill(0.5 * (z + 1 - numpy.sqrt((z - 1) ** 2 + 4 * taua * y)))
 -            
 -            return out
 -            
 -        except: # e.g. for NumPy
 -            y = self.data
 -            r = self.background
 -
 -            try:
 -                taua = tau.as_array()
 -            except:
 -                taua = tau
 -    
 -            z = x + tau * r
 -                        
 -            out[:] = 0.5 * (z + 1 - numpy.sqrt((z - 1) ** 2 + 4 * taua * y))
 -            
 -            return out 
 -   
 -    @property
 -    def convex_conj(self):
 -        return KullbackLeibler(self.data, self.background)
 -
 -
 -def mult(x, y):
 -    try:
 -        xa = x.as_array()
 -    except:
 -        xa = x
 -        
 -    out = y.clone()
 -    out.fill(xa * y.as_array())
 -
 -    return out
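
For orientation, the deleted spdhg class above is driven by constructing it from lists f and A of equal length plus a single g, optionally passing A_norms so that tau, sigma and prob are derived automatically, and then calling update() in a loop. The sketch below illustrates that interface with small, self-contained NumPy stand-ins: IdentityOperator, LeastSquaresData and ZeroFunction are invented for this example only (they are not part of the library), and the import assumes the non-build copy of the module remains importable as ccpi.contrib.optimisation.algorithms.spdhg.

    # Minimal sketch of driving the deleted spdhg class with toy NumPy objects.
    # Only the interface used by spdhg above is implemented:
    # A[i].direct/adjoint, f[i].convex_conj.prox and g.prox.
    import numpy
    from ccpi.contrib.optimisation.algorithms.spdhg import spdhg

    class IdentityOperator(object):
        def direct(self, x):
            return x
        def adjoint(self, y):
            return y

    class LeastSquaresData(object):
        '''f_i(u) = 0.5 * ||u - b||^2, exposed through its convex conjugate.'''
        def __init__(self, b):
            self.b = b
        @property
        def convex_conj(self):
            return _LeastSquaresConjugate(self.b)

    class _LeastSquaresConjugate(object):
        def __init__(self, b):
            self.b = b
        def prox(self, y, sigma):
            # closed-form prox of sigma * (0.5 * ||y||^2 + <y, b>)
            return (y - sigma * self.b) / (1.0 + sigma)

    class ZeroFunction(object):
        '''g(x) = 0, so its proximal map is the identity.'''
        def prox(self, x, tau):
            return x

    b = numpy.array([1.0, 2.0, 3.0])
    A = [IdentityOperator(), IdentityOperator()]
    f = [LeastSquaresData(b), LeastSquaresData(b)]
    g = ZeroFunction()

    x0 = numpy.zeros_like(b)
    y0 = [numpy.zeros_like(b) for _ in A]
    z0 = numpy.zeros_like(b)

    algorithm = spdhg(f, g, A, x=x0, y=y0, z=z0, A_norms=[1.0, 1.0])
    for _ in range(200):              # iteration count is illustrative
        algorithm.update()

    print(algorithm.x)                # approaches b, the minimiser of sum_i f_i(A_i x)

With A_norms=[1.0, 1.0] the constructor picks tau = 1/2, sigma_i = 1 and prob_i = 1/2, which is the default step-size rule shown in __init__ above.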
 diff --git a/Wrappers/Python/build/lib/ccpi/data/__init__.py b/Wrappers/Python/build/lib/ccpi/data/__init__.py deleted file mode 100644 index af10536..0000000 --- a/Wrappers/Python/build/lib/ccpi/data/__init__.py +++ /dev/null @@ -1,67 +0,0 @@ -# -*- coding: utf-8 -*- -#   This work is part of the Core Imaging Library developed by -#   Visual Analytics and Imaging System Group of the Science Technology -#   Facilities Council, STFC - -#   Copyright 2018 Edoardo Pasca - -#   Licensed under the Apache License, Version 2.0 (the "License"); -#   you may not use this file except in compliance with the License. -#   You may obtain a copy of the License at - -#       http://www.apache.org/licenses/LICENSE-2.0 - -#   Unless required by applicable law or agreed to in writing, software -#   distributed under the License is distributed on an "AS IS" BASIS, -#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#   See the License for the specific language governing permissions and -#   limitations under the License. - - -from ccpi.framework import ImageData -import numpy -from PIL import Image -import os -import os.path  - -data_dir = os.path.abspath(os.path.dirname(__file__)) - -           -def camera(**kwargs): - -    tmp = Image.open(os.path.join(data_dir, 'camera.png')) -     -    size = kwargs.get('size',(512, 512)) -     -    data = numpy.array(tmp.resize(size)) -         -    data = data/data.max() -     -    return ImageData(data)  - - -def boat(**kwargs): - -    tmp = Image.open(os.path.join(data_dir, 'boat.tiff')) -     -    size = kwargs.get('size',(512, 512)) -     -    data = numpy.array(tmp.resize(size)) -         -    data = data/data.max() -     -    return ImageData(data)   - - -def peppers(**kwargs): - -    tmp = Image.open(os.path.join(data_dir, 'peppers.tiff')) -     -    size = kwargs.get('size',(512, 512)) -     -    data = numpy.array(tmp.resize(size)) -         -    data = data/data.max() -     -    return ImageData(data)     -     diff --git a/Wrappers/Python/build/lib/ccpi/framework/BlockDataContainer.py b/Wrappers/Python/build/lib/ccpi/framework/BlockDataContainer.py deleted file mode 100644 index 166014b..0000000 --- a/Wrappers/Python/build/lib/ccpi/framework/BlockDataContainer.py +++ /dev/null @@ -1,484 +0,0 @@ -    # -*- coding: utf-8 -*- -""" -Created on Tue Mar  5 16:04:45 2019 - -@author: ofn77899 -""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -import numpy -from numbers import Number -import functools -from ccpi.framework import DataContainer -#from ccpi.framework import AcquisitionData, ImageData -#from ccpi.optimisation.operators import Operator, LinearOperator -  -class BlockDataContainer(object): -    '''Class to hold DataContainers as column vector -     -    Provides basic algebra between BlockDataContainer's, DataContainer's and -    subclasses and Numbers - -    1) algebra between `BlockDataContainer`s will be element-wise, only if  -       the shape of the 2 `BlockDataContainer`s is the same, otherwise it  -       will fail  -    2) algebra between `BlockDataContainer`s and `list` or `numpy array` will  -       work as long as the number of `rows` and element of the arrays match, -       indipendently on the fact that the `BlockDataContainer` could be nested -    3) algebra between `BlockDataContainer` and one `DataContainer` is possible. 
-       It will require that all the `DataContainers` in the block to be -       compatible with the `DataContainer` we want to algebra with. Should we  -       require that the `DataContainer` is the same type? Like `ImageData` or `AcquisitionData`? -    4) algebra between `BlockDataContainer` and a `Number` is possible and it -       will be done with each element of the `BlockDataContainer` even if nested - -    A = [ [B,C] , D]  -    A * 3 = [ 3 * [B,C] , 3* D] = [ [ 3*B, 3*C]  , 3*D ] -     -    ''' -    ADD = 'add' -    SUBTRACT = 'subtract' -    MULTIPLY = 'multiply' -    DIVIDE = 'divide' -    POWER = 'power' -    __array_priority__ = 1 -    __container_priority__ = 2 -    def __init__(self, *args, **kwargs): -        '''''' -        self.containers = args -        self.index = 0 -        shape = kwargs.get('shape', None) -        if shape is None: -           shape = (len(args),1) -#        shape = (len(args),1) -        self.shape = shape - -        n_elements = functools.reduce(lambda x,y: x*y, shape, 1) -        if len(args) != n_elements: -            raise ValueError( -                    'Dimension and size do not match: expected {} got {}' -                    .format(n_elements, len(args))) - -         -    def __iter__(self): -        '''BlockDataContainer is Iterable''' -        return self -    def next(self): -        '''python2 backwards compatibility''' -        return self.__next__() -    def __next__(self): -        try: -            out = self[self.index] -        except IndexError as ie: -            raise StopIteration() -        self.index+=1 -        return out -     -    def is_compatible(self, other): -        '''basic check if the size of the 2 objects fit''' - -        if isinstance(other, Number): -            return True    -        elif isinstance(other, (list, numpy.ndarray)) : -            for ot in other: -                if not isinstance(ot, (Number,\ -                                 numpy.int, numpy.int8, numpy.int16, numpy.int32, numpy.int64,\ -                                 numpy.float, numpy.float16, numpy.float32, numpy.float64, \ -                                 numpy.complex)): -                    raise ValueError('List/ numpy array can only contain numbers {}'\ -                                     .format(type(ot))) -            return len(self.containers) == len(other) -        elif issubclass(other.__class__, DataContainer): -            ret = True -            for i, el in enumerate(self.containers): -                if isinstance(el, BlockDataContainer): -                    a = el.is_compatible(other) -                else: -                    a = el.shape == other.shape -                ret = ret and a -            return ret -            #return self.get_item(0).shape == other.shape -        return len(self.containers) == len(other.containers) - -    def get_item(self, row): -        if row > self.shape[0]: -            raise ValueError('Requested row {} > max {}'.format(row, self.shape[0])) -        return self.containers[row] - -    def __getitem__(self, row): -        return self.get_item(row) -                 -    def add(self, other, *args, **kwargs): -        '''Algebra: add method of BlockDataContainer with number/DataContainer or BlockDataContainer -         -        :param: other (number, DataContainer or subclasses or BlockDataContainer -        :param: out (optional): provides a placehold for the resul. 
-        ''' -        out = kwargs.get('out', None) -        if out is not None: -            self.binary_operations(BlockDataContainer.ADD, other, *args, **kwargs) -        else: -            return self.binary_operations(BlockDataContainer.ADD, other, *args, **kwargs) -    def subtract(self, other, *args, **kwargs): -        '''Algebra: subtract method of BlockDataContainer with number/DataContainer or BlockDataContainer -         -        :param: other (number, DataContainer or subclasses or BlockDataContainer -        :param: out (optional): provides a placehold for the resul. -        ''' -        out = kwargs.get('out', None) -        if out is not None: -            self.binary_operations(BlockDataContainer.SUBTRACT, other, *args, **kwargs) -        else: -            return self.binary_operations(BlockDataContainer.SUBTRACT, other, *args, **kwargs) -    def multiply(self, other, *args, **kwargs): -        '''Algebra: multiply method of BlockDataContainer with number/DataContainer or BlockDataContainer -         -        :param: other (number, DataContainer or subclasses or BlockDataContainer -        :param: out (optional): provides a placehold for the resul. -        ''' -        out = kwargs.get('out', None) -        if out is not None: -            self.binary_operations(BlockDataContainer.MULTIPLY, other, *args, **kwargs) -        else: -            return self.binary_operations(BlockDataContainer.MULTIPLY, other, *args, **kwargs) -    def divide(self, other, *args, **kwargs): -        '''Algebra: divide method of BlockDataContainer with number/DataContainer or BlockDataContainer -         -        :param: other (number, DataContainer or subclasses or BlockDataContainer -        :param: out (optional): provides a placehold for the resul. -        ''' -        out = kwargs.get('out', None) -        if out is not None: -            self.binary_operations(BlockDataContainer.DIVIDE, other, *args, **kwargs) -        else: -            return self.binary_operations(BlockDataContainer.DIVIDE, other, *args, **kwargs) -     - -    def binary_operations(self, operation, other, *args, **kwargs): -        '''Algebra: generic method of algebric operation with BlockDataContainer with number/DataContainer or BlockDataContainer -         -        Provides commutativity with DataContainer and subclasses, i.e. this  -        class's reverse algebric methods take precedence w.r.t. direct algebric -        methods of DataContainer and subclasses. -         -        This method is not to be used directly -        ''' -        if not self.is_compatible(other): -            raise ValueError('Incompatible for divide') -        out = kwargs.get('out', None) -        if isinstance(other, Number) or issubclass(other.__class__, DataContainer): -            # try to do algebra with one DataContainer. 
Will raise error if not compatible -            kw = kwargs.copy() -            res = [] -            for i,el in enumerate(self.containers): -                if operation == BlockDataContainer.ADD: -                    op = el.add -                elif operation == BlockDataContainer.SUBTRACT: -                    op = el.subtract -                elif operation == BlockDataContainer.MULTIPLY: -                    op = el.multiply -                elif operation == BlockDataContainer.DIVIDE: -                    op = el.divide -                elif operation == BlockDataContainer.POWER: -                    op = el.power -                else: -                    raise ValueError('Unsupported operation', operation) -                if out is not None: -                    kw['out'] = out.get_item(i) -                    op(other, *args, **kw) -                else: -                    res.append(op(other, *args, **kw)) -            if out is not None: -                return -            else: -                return type(self)(*res, shape=self.shape) -        elif isinstance(other, (list, numpy.ndarray, BlockDataContainer)): -            # try to do algebra with one DataContainer. Will raise error if not compatible -            kw = kwargs.copy() -            res = [] -            if isinstance(other, BlockDataContainer): -                the_other = other.containers -            else: -                the_other = other -            for i,zel in enumerate(zip ( self.containers, the_other) ): -                el = zel[0] -                ot = zel[1] -                if operation == BlockDataContainer.ADD: -                    op = el.add -                elif operation == BlockDataContainer.SUBTRACT: -                    op = el.subtract -                elif operation == BlockDataContainer.MULTIPLY: -                    op = el.multiply -                elif operation == BlockDataContainer.DIVIDE: -                    op = el.divide -                elif operation == BlockDataContainer.POWER: -                    op = el.power -                else: -                    raise ValueError('Unsupported operation', operation) -                if out is not None: -                    kw['out'] = out.get_item(i) -                    op(ot, *args, **kw) -                else: -                    res.append(op(ot, *args, **kw)) -            if out is not None: -                return -            else: -                return type(self)(*res, shape=self.shape) -            return type(self)(*[ operation(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) -        else: -            raise ValueError('Incompatible type {}'.format(type(other))) -     - -    def power(self, other, *args, **kwargs): -        if not self.is_compatible(other): -            raise ValueError('Incompatible for power') -        out = kwargs.get('out', None) -        if isinstance(other, Number): -            return type(self)(*[ el.power(other, *args, **kwargs) for el in self.containers], shape=self.shape) -        elif isinstance(other, list) or isinstance(other, numpy.ndarray): -            return type(self)(*[ el.power(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) -        return type(self)(*[ el.power(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], shape=self.shape) -     -    def maximum(self,other, *args, **kwargs): -        if not self.is_compatible(other): -            raise ValueError('Incompatible for maximum') -        
out = kwargs.get('out', None) -        if isinstance(other, Number): -            return type(self)(*[ el.maximum(other, *args, **kwargs) for el in self.containers], shape=self.shape) -        elif isinstance(other, list) or isinstance(other, numpy.ndarray): -            return type(self)(*[ el.maximum(ot, *args, **kwargs) for el,ot in zip(self.containers,other)], shape=self.shape) -        return type(self)(*[ el.maximum(ot, *args, **kwargs) for el,ot in zip(self.containers,other.containers)], shape=self.shape) -     -    ## unary operations     -    def abs(self, *args,  **kwargs): -        return type(self)(*[ el.abs(*args, **kwargs) for el in self.containers], shape=self.shape) -    def sign(self, *args,  **kwargs): -        return type(self)(*[ el.sign(*args, **kwargs) for el in self.containers], shape=self.shape) -    def sqrt(self, *args,  **kwargs): -        return type(self)(*[ el.sqrt(*args, **kwargs) for el in self.containers], shape=self.shape) -    def conjugate(self, out=None): -        return type(self)(*[el.conjugate() for el in self.containers], shape=self.shape) -     -    ## reductions -     -    def sum(self, *args, **kwargs): -        return numpy.sum([ el.sum(*args, **kwargs) for el in self.containers]) -     -    def squared_norm(self): -        y = numpy.asarray([el.squared_norm() for el in self.containers]) -        return y.sum()  -         -     -    def norm(self): -        return numpy.sqrt(self.squared_norm())    -     -    def pnorm(self, p=2): -                         -        if p==1:             -            return sum(self.abs())         -        elif p==2:                     -            return sum([el*el for el in self.containers]).sqrt() -        else: -            return ValueError('Not implemented') -                 -    def copy(self): -        '''alias of clone'''     -        return self.clone() -    def clone(self): -        return type(self)(*[el.copy() for el in self.containers], shape=self.shape) -    def fill(self, other): -        if isinstance (other, BlockDataContainer): -            if not self.is_compatible(other): -                raise ValueError('Incompatible containers') -            for el,ot in zip(self.containers, other.containers): -                el.fill(ot) -        else: -            return ValueError('Cannot fill with object provided {}'.format(type(other))) -     -    def __add__(self, other): -        return self.add( other ) -    # __radd__ -     -    def __sub__(self, other): -        return self.subtract( other ) -    # __rsub__ -     -    def __mul__(self, other): -        return self.multiply(other) -    # __rmul__ -     -    def __div__(self, other): -        return self.divide(other) -    # __rdiv__ -    def __truediv__(self, other): -        return self.divide(other) -     -    def __pow__(self, other): -        return self.power(other) -    # reverse operand -    def __radd__(self, other): -        '''Reverse addition -         -        to make sure that this method is called rather than the __mul__ of a numpy array -        the class constant __array_priority__ must be set > 0 -        https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__ -        ''' -        return self + other -    # __radd__ -     -    def __rsub__(self, other): -        '''Reverse subtraction -         -        to make sure that this method is called rather than the __mul__ of a numpy array -        the class constant __array_priority__ must be set > 0 -        
https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__ -        ''' -        return (-1 * self) + other -    # __rsub__ -     -    def __rmul__(self, other): -        '''Reverse multiplication -         -        to make sure that this method is called rather than the __mul__ of a numpy array -        the class constant __array_priority__ must be set > 0 -        https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__ -        ''' -        return self * other -    # __rmul__ -     -    def __rdiv__(self, other): -        '''Reverse division -         -        to make sure that this method is called rather than the __mul__ of a numpy array -        the class constant __array_priority__ must be set > 0 -        https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__ -        ''' -        return pow(self / other, -1) -    # __rdiv__ -    def __rtruediv__(self, other): -        '''Reverse truedivision -         -        to make sure that this method is called rather than the __mul__ of a numpy array -        the class constant __array_priority__ must be set > 0 -        https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__ -        ''' -        return self.__rdiv__(other) -     -    def __rpow__(self, other): -        '''Reverse power -         -        to make sure that this method is called rather than the __mul__ of a numpy array -        the class constant __array_priority__ must be set > 0 -        https://docs.scipy.org/doc/numpy-1.15.1/reference/arrays.classes.html#numpy.class.__array_priority__ -        ''' -        return other.power(self) -     -    def __iadd__(self, other): -        '''Inline addition''' -        if isinstance (other, BlockDataContainer): -            for el,ot in zip(self.containers, other.containers): -                el += ot -        elif isinstance(other, Number): -            for el in self.containers: -                el += other -        elif isinstance(other, list) or isinstance(other, numpy.ndarray): -            if not self.is_compatible(other): -                raise ValueError('Incompatible for __iadd__') -            for el,ot in zip(self.containers, other): -                el += ot -        return self -    # __iadd__ -     -    def __isub__(self, other): -        '''Inline subtraction''' -        if isinstance (other, BlockDataContainer): -            for el,ot in zip(self.containers, other.containers): -                el -= ot -        elif isinstance(other, Number): -            for el in self.containers: -                el -= other -        elif isinstance(other, list) or isinstance(other, numpy.ndarray): -            if not self.is_compatible(other): -                raise ValueError('Incompatible for __isub__') -            for el,ot in zip(self.containers, other): -                el -= ot -        return self -    # __isub__ -     -    def __imul__(self, other): -        '''Inline multiplication''' -        if isinstance (other, BlockDataContainer): -            for el,ot in zip(self.containers, other.containers): -                el *= ot -        elif isinstance(other, Number): -            for el in self.containers: -                el *= other -        elif isinstance(other, list) or isinstance(other, numpy.ndarray): -            if not self.is_compatible(other): -                raise ValueError('Incompatible for __imul__') -            for el,ot in 
zip(self.containers, other): -                el *= ot -        return self -    # __imul__ -     -    def __idiv__(self, other): -        '''Inline division''' -        if isinstance (other, BlockDataContainer): -            for el,ot in zip(self.containers, other.containers): -                el /= ot -        elif isinstance(other, Number): -            for el in self.containers: -                el /= other -        elif isinstance(other, list) or isinstance(other, numpy.ndarray): -            if not self.is_compatible(other): -                raise ValueError('Incompatible for __idiv__') -            for el,ot in zip(self.containers, other): -                el /= ot -        return self -    # __rdiv__ -    def __itruediv__(self, other): -        '''Inline truedivision''' -        return self.__idiv__(other) -     -    def dot(self, other): -#         -        tmp = [ self.containers[i].dot(other.containers[i]) for i in range(self.shape[0])] -        return sum(tmp) -     -        -     -if __name__ == '__main__': -     -    from ccpi.framework import ImageGeometry, BlockGeometry -    import numpy - -    N, M = 2, 3 -    ig = ImageGeometry(N, M)     -    BG = BlockGeometry(ig, ig) -     -    U = BG.allocate('random_int') -    V = BG.allocate('random_int') -     -     -    print ("test sum BDC " ) -    w = U[0].as_array() + U[1].as_array()     -    w1 = sum(U).as_array()     -    numpy.testing.assert_array_equal(w, w1) -     -    print ("test sum BDC " ) -    z = numpy.sqrt(U[0].as_array()**2 + U[1].as_array()**2) -    z1 = sum(U**2).sqrt().as_array()     -    numpy.testing.assert_array_equal(z, z1)    -     -    z2 = U.pnorm(2) -     -    zzz = U.dot(V) -     -    - - - - diff --git a/Wrappers/Python/build/lib/ccpi/framework/BlockGeometry.py b/Wrappers/Python/build/lib/ccpi/framework/BlockGeometry.py deleted file mode 100644 index ed44d99..0000000 --- a/Wrappers/Python/build/lib/ccpi/framework/BlockGeometry.py +++ /dev/null @@ -1,80 +0,0 @@ -from __future__ import absolute_import
 -from __future__ import division
 -from __future__ import print_function
 -from __future__ import unicode_literals
 -
 -import numpy
 -from numbers import Number
 -import functools
 -from ccpi.framework import BlockDataContainer
 -#from ccpi.optimisation.operators import Operator, LinearOperator
 - 
 -class BlockGeometry(object):
 -    '''Class to hold Geometry as column vector'''
 -    #__array_priority__ = 1
 -    def __init__(self, *args, **kwargs):
 -        ''''''
 -        self.geometries = args
 -        self.index = 0
 -
 -        shape = (len(args),1)
 -        self.shape = shape
 -
 -        n_elements = functools.reduce(lambda x,y: x*y, shape, 1)
 -        if len(args) != n_elements:
 -            raise ValueError(
 -                    'Dimension and size do not match: expected {} got {}'
 -                    .format(n_elements, len(args)))
 -                      
 -            
 -    def get_item(self, index):
 -        '''returns the Geometry in the BlockGeometry located at position index'''
 -        return self.geometries[index]            
 -
 -    def allocate(self, value=0, dimension_labels=None, **kwargs):
 -        
 -        symmetry = kwargs.get('symmetry',False)        
 -        containers = [geom.allocate(value) for geom in self.geometries]
 -        
 -        if symmetry == True:
 -                        
 -            # for 2x2       
 -            # [ ig11, ig12\
 -            #   ig21, ig22]
 -            
 -            # Row-wise Order
 -            
 -            if len(containers)==4:
 -                containers[1]=containers[2]
 -            
 -            # for 3x3  
 -            # [ ig11, ig12, ig13\
 -            #   ig21, ig22, ig23\
 -            #   ig31, ig32, ig33]            
 -                      
 -            elif len(containers)==9:
 -                containers[1]=containers[3]
 -                containers[2]=containers[6]
 -                containers[5]=containers[7]
 -            
 -            # for 4x4  
 -            # [ ig11, ig12, ig13, ig14\
 -            #   ig21, ig22, ig23, ig24\
 -            #   ig31, ig32, ig33, ig34
 -            #   ig41, ig42, ig43, ig44]   
 -            
 -            elif len(containers) == 16:
 -                containers[1]=containers[4]
 -                containers[2]=containers[8]
 -                containers[3]=containers[12]
 -                containers[6]=containers[9]
 -                containers[7]=containers[13]
 -                containers[11]=containers[14]
 -                
 -                
 -                
 -        
 -        return BlockDataContainer(*containers)
 -    
 -    
 -         
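
For a 2x2 block the symmetry branch above aliases the off-diagonal entries (containers[1] = containers[2]). The sketch below shows allocate with and without the symmetry flag, together with the element-wise block algebra provided by BlockDataContainer; the geometry sizes are arbitrary, and ImageGeometry/ImageData are assumed to behave as defined in framework.py further down this commit.

    # Sketch of BlockGeometry.allocate and basic BlockDataContainer algebra.
    from ccpi.framework import ImageGeometry, BlockGeometry

    ig = ImageGeometry(voxel_num_x=3, voxel_num_y=2)
    bg = BlockGeometry(ig, ig, ig, ig)   # four blocks, read row-wise as a 2x2

    u = bg.allocate(0, symmetry=True)
    # off-diagonal blocks share one container: containers[1] = containers[2]
    assert u.get_item(1) is u.get_item(2)

    v = bg.allocate('random_int')
    w = (u + v) * 2                      # element-wise BlockDataContainer algebra
    print(w.shape, w.norm())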
diff --git a/Wrappers/Python/build/lib/ccpi/framework/__init__.py b/Wrappers/Python/build/lib/ccpi/framework/__init__.py
deleted file mode 100644
index 229edb5..0000000
--- a/Wrappers/Python/build/lib/ccpi/framework/__init__.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# -*- coding: utf-8 -*-
 -"""
 -Created on Tue Mar  5 16:00:18 2019
 -
 -@author: ofn77899
 -"""
 -from __future__ import absolute_import
 -from __future__ import division
 -from __future__ import print_function
 -from __future__ import unicode_literals
 -
 -import numpy
 -import sys
 -from datetime import timedelta, datetime
 -import warnings
 -from functools import reduce
 -
 -
 -from .framework import DataContainer
 -from .framework import ImageData, AcquisitionData
 -from .framework import ImageGeometry, AcquisitionGeometry
 -from .framework import find_key, message
 -from .framework import DataProcessor
 -from .framework import AX, PixelByPixelDataProcessor, CastDataContainer
 -from .BlockDataContainer import BlockDataContainer
 -from .BlockGeometry import BlockGeometry
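
The package __init__ above only re-exports names from framework.py (next in this commit). As a brief illustration of the most basic of those names, the sketch below wraps a NumPy array in a DataContainer; it assumes the arithmetic methods defined later in framework.py (add and friends) act element-wise.

    # Sketch of the basic DataContainer interface re-exported by ccpi.framework.
    import numpy
    from ccpi.framework import DataContainer

    arr = numpy.arange(6, dtype=numpy.float32).reshape(2, 3)
    dc = DataContainer(arr, deep_copy=True,
                       dimension_labels=['horizontal_y', 'horizontal_x'])

    print(dc.shape)              # (2, 3)
    print(dc.dimension_labels)   # {0: 'horizontal_y', 1: 'horizontal_x'}

    doubled = dc + dc            # element-wise algebra via DataContainer.add
    print(doubled.as_array())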
 diff --git a/Wrappers/Python/build/lib/ccpi/framework/framework.py b/Wrappers/Python/build/lib/ccpi/framework/framework.py deleted file mode 100644 index dbe7d0a..0000000 --- a/Wrappers/Python/build/lib/ccpi/framework/framework.py +++ /dev/null @@ -1,1437 +0,0 @@ -# -*- coding: utf-8 -*- -#   This work is part of the Core Imaging Library developed by -#   Visual Analytics and Imaging System Group of the Science Technology -#   Facilities Council, STFC - -#   Copyright 2018-2019 Edoardo Pasca - -#   Licensed under the Apache License, Version 2.0 (the "License"); -#   you may not use this file except in compliance with the License. -#   You may obtain a copy of the License at - -#       http://www.apache.org/licenses/LICENSE-2.0 - -#   Unless required by applicable law or agreed to in writing, software -#   distributed under the License is distributed on an "AS IS" BASIS, -#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#   See the License for the specific language governing permissions and -#   limitations under the License. - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -import numpy -import sys -from datetime import timedelta, datetime -import warnings -from functools import reduce -from numbers import Number - - -def find_key(dic, val): -    """return the key of dictionary dic given the value""" -    return [k for k, v in dic.items() if v == val][0] - -def message(cls, msg, *args): -    msg = "{0}: " + msg -    for i in range(len(args)): -        msg += " {%d}" %(i+1) -    args = list(args) -    args.insert(0, cls.__name__ ) -     -    return msg.format(*args ) - - -class ImageGeometry(object): -    RANDOM = 'random' -    RANDOM_INT = 'random_int' -    CHANNEL = 'channel' -    ANGLE = 'angle' -    VERTICAL = 'vertical' -    HORIZONTAL_X = 'horizontal_x' -    HORIZONTAL_Y = 'horizontal_y' -     -    def __init__(self,  -                 voxel_num_x=0,  -                 voxel_num_y=0,  -                 voxel_num_z=0,  -                 voxel_size_x=1,  -                 voxel_size_y=1,  -                 voxel_size_z=1,  -                 center_x=0,  -                 center_y=0,  -                 center_z=0,  -                 channels=1): -         -        self.voxel_num_x = voxel_num_x -        self.voxel_num_y = voxel_num_y -        self.voxel_num_z = voxel_num_z -        self.voxel_size_x = voxel_size_x -        self.voxel_size_y = voxel_size_y -        self.voxel_size_z = voxel_size_z -        self.center_x = center_x -        self.center_y = center_y -        self.center_z = center_z   -        self.channels = channels -         -        # this is some code repetition -        if self.channels > 1:             -            if self.voxel_num_z>1: -                self.length = 4 -                self.shape = (self.channels, self.voxel_num_z, self.voxel_num_y, self.voxel_num_x) -                dim_labels = [ImageGeometry.CHANNEL, ImageGeometry.VERTICAL, -                ImageGeometry.HORIZONTAL_Y, ImageGeometry.HORIZONTAL_X] -            else: -                self.length = 3 -                self.shape = (self.channels, self.voxel_num_y, self.voxel_num_x) -                dim_labels = [ImageGeometry.CHANNEL, ImageGeometry.HORIZONTAL_Y, ImageGeometry.HORIZONTAL_X] -        else: -            if self.voxel_num_z>1: -                self.length = 3 -                self.shape = (self.voxel_num_z, self.voxel_num_y, self.voxel_num_x) -          
      dim_labels = [ImageGeometry.VERTICAL, ImageGeometry.HORIZONTAL_Y, -                 ImageGeometry.HORIZONTAL_X] -            else: -                self.length = 2   -                self.shape = (self.voxel_num_y, self.voxel_num_x) -                dim_labels = [ImageGeometry.HORIZONTAL_Y, ImageGeometry.HORIZONTAL_X] -         -        self.dimension_labels = dim_labels -         -    def get_min_x(self): -        return self.center_x - 0.5*self.voxel_num_x*self.voxel_size_x -         -    def get_max_x(self): -        return self.center_x + 0.5*self.voxel_num_x*self.voxel_size_x -         -    def get_min_y(self): -        return self.center_y - 0.5*self.voxel_num_y*self.voxel_size_y -         -    def get_max_y(self): -        return self.center_y + 0.5*self.voxel_num_y*self.voxel_size_y -         -    def get_min_z(self): -        if not self.voxel_num_z == 0: -            return self.center_z - 0.5*self.voxel_num_z*self.voxel_size_z -        else: -            return 0 -         -    def get_max_z(self): -        if not self.voxel_num_z == 0: -            return self.center_z + 0.5*self.voxel_num_z*self.voxel_size_z -        else: -            return 0 -         -    def clone(self): -        '''returns a copy of ImageGeometry''' -        return ImageGeometry( -                            self.voxel_num_x,  -                            self.voxel_num_y,  -                            self.voxel_num_z,  -                            self.voxel_size_x,  -                            self.voxel_size_y,  -                            self.voxel_size_z,  -                            self.center_x,  -                            self.center_y,  -                            self.center_z,  -                            self.channels) -    def __str__ (self): -        repres = "" -        repres += "Number of channels: {0}\n".format(self.channels) -        repres += "voxel_num : x{0},y{1},z{2}\n".format(self.voxel_num_x, self.voxel_num_y, self.voxel_num_z) -        repres += "voxel_size : x{0},y{1},z{2}\n".format(self.voxel_size_x, self.voxel_size_y, self.voxel_size_z) -        repres += "center : x{0},y{1},z{2}\n".format(self.center_x, self.center_y, self.center_z) -        return repres -    def allocate(self, value=0, dimension_labels=None, **kwargs): -        '''allocates an ImageData according to the size expressed in the instance''' -        out = ImageData(geometry=self) -        if isinstance(value, Number): -            if value != 0: -                out += value -        else: -            if value == ImageGeometry.RANDOM: -                seed = kwargs.get('seed', None) -                if seed is not None: -                    numpy.random.seed(seed)  -                out.fill(numpy.random.random_sample(self.shape)) -            elif value == ImageGeometry.RANDOM_INT: -                seed = kwargs.get('seed', None) -                if seed is not None: -                    numpy.random.seed(seed) -                max_value = kwargs.get('max_value', 100) -                out.fill(numpy.random.randint(max_value,size=self.shape)) -            else: -                raise ValueError('Value {} unknown'.format(value)) -        if dimension_labels is not None: -            if dimension_labels != self.dimension_labels: -                return out.subset(dimensions=dimension_labels) -        return out -    # The following methods return 2 members of the class, therefore I  -    # don't think we need to implement them.  
-    # Additionally using __len__ is confusing as one would think this is  -    # an iterable.  -    #def __len__(self): -    #    '''returns the length of the geometry''' -    #    return self.length -    #def shape(self): -    #    '''Returns the shape of the array of the ImageData it describes''' -    #    return self.shape - -class AcquisitionGeometry(object): -    RANDOM = 'random' -    RANDOM_INT = 'random_int' -    ANGLE_UNIT = 'angle_unit' -    DEGREE = 'degree' -    RADIAN = 'radian' -    CHANNEL = 'channel' -    ANGLE = 'angle' -    VERTICAL = 'vertical' -    HORIZONTAL = 'horizontal' -    def __init__(self,  -                 geom_type,  -                 dimension,  -                 angles,  -                 pixel_num_h=0,  -                 pixel_size_h=1,  -                 pixel_num_v=0,  -                 pixel_size_v=1,  -                 dist_source_center=None,  -                 dist_center_detector=None,  -                 channels=1, -                 **kwargs -                 ): -        """ -        General inputs for standard type projection geometries -        detectorDomain or detectorpixelSize: -            If 2D -                If scalar: Width of detector or single detector pixel -                If 2-vec: Error -            If 3D -                If scalar: Width in both dimensions -                If 2-vec: Vertical then horizontal size -        grid -            If 2D -                If scalar: number of detectors -                If 2-vec: error -            If 3D -                If scalar: Square grid that size -                If 2-vec vertical then horizontal size -        cone or parallel -        2D or 3D -        parallel_parameters: ? -        cone_parameters: -            source_to_center_dist (if parallel: NaN) -            center_to_detector_dist (if parallel: NaN) -        standard or nonstandard (vec) geometry -        angles -        angles_format radians or degrees -        """ -        self.geom_type = geom_type   # 'parallel' or 'cone' -        self.dimension = dimension # 2D or 3D -        self.angles = angles -        num_of_angles = len (angles) -         -        self.dist_source_center = dist_source_center -        self.dist_center_detector = dist_center_detector -         -        self.pixel_num_h = pixel_num_h -        self.pixel_size_h = pixel_size_h -        self.pixel_num_v = pixel_num_v -        self.pixel_size_v = pixel_size_v -         -        self.channels = channels -        self.angle_unit=kwargs.get(AcquisitionGeometry.ANGLE_UNIT,  -                               AcquisitionGeometry.DEGREE) -        if channels > 1: -            if pixel_num_v > 1: -                shape = (channels, num_of_angles , pixel_num_v, pixel_num_h) -                dim_labels = [AcquisitionGeometry.CHANNEL , -                 AcquisitionGeometry.ANGLE , AcquisitionGeometry.VERTICAL , -                 AcquisitionGeometry.HORIZONTAL] -            else: -                shape = (channels , num_of_angles, pixel_num_h) -                dim_labels = [AcquisitionGeometry.CHANNEL , -                 AcquisitionGeometry.ANGLE, AcquisitionGeometry.HORIZONTAL] -        else: -            if pixel_num_v > 1: -                shape = (num_of_angles, pixel_num_v, pixel_num_h) -                dim_labels = [AcquisitionGeometry.ANGLE , AcquisitionGeometry.VERTICAL , -                 AcquisitionGeometry.HORIZONTAL] -            else: -                shape = (num_of_angles, pixel_num_h) -                dim_labels = [AcquisitionGeometry.ANGLE, 
AcquisitionGeometry.HORIZONTAL] -        self.shape = shape - -        self.dimension_labels = dim_labels -         -    def clone(self): -        '''returns a copy of the AcquisitionGeometry''' -        return AcquisitionGeometry(self.geom_type, -                                   self.dimension,  -                                   self.angles,  -                                   self.pixel_num_h,  -                                   self.pixel_size_h,  -                                   self.pixel_num_v,  -                                   self.pixel_size_v,  -                                   self.dist_source_center,  -                                   self.dist_center_detector,  -                                   self.channels) -         -    def __str__ (self): -        repres = "" -        repres += "Number of dimensions: {0}\n".format(self.dimension) -        repres += "angles: {0}\n".format(self.angles) -        repres += "voxel_num : h{0},v{1}\n".format(self.pixel_num_h, self.pixel_num_v) -        repres += "voxel size: h{0},v{1}\n".format(self.pixel_size_h, self.pixel_size_v) -        repres += "geometry type: {0}\n".format(self.geom_type) -        repres += "distance source-detector: {0}\n".format(self.dist_source_center) -        repres += "distance center-detector: {0}\n".format(self.dist_source_center) -        repres += "number of channels: {0}\n".format(self.channels) -        return repres -    def allocate(self, value=0, dimension_labels=None): -        '''allocates an AcquisitionData according to the size expressed in the instance''' -        out = AcquisitionData(geometry=self) -        if isinstance(value, Number): -            if value != 0: -                out += value -        else: -            if value == AcquisitionData.RANDOM: -                seed = kwargs.get('seed', None) -                if seed is not None: -                    numpy.random.seed(seed)  -                out.fill(numpy.random.random_sample(self.shape)) -            elif value == AcquisitionData.RANDOM_INT: -                seed = kwargs.get('seed', None) -                if seed is not None: -                    numpy.random.seed(seed) -                max_value = kwargs.get('max_value', 100) -                out.fill(numpy.random.randint(max_value,size=self.shape)) -            else: -                raise ValueError('Value {} unknown'.format(value)) -        if dimension_labels is not None: -            if dimension_labels != self.dimension_labels: -                return out.subset(dimensions=dimension_labels) -        return out -     -class DataContainer(object): -    '''Generic class to hold data -     -    Data is currently held in a numpy arrays''' -     -    __container_priority__ = 1 -    def __init__ (self, array, deep_copy=True, dimension_labels=None,  -                  **kwargs): -        '''Holds the data''' -         -        self.shape = numpy.shape(array) -        self.number_of_dimensions = len (self.shape) -        self.dimension_labels = {} -        self.geometry = None # Only relevant for AcquisitionData and ImageData -         -        if dimension_labels is not None and \ -           len (dimension_labels) == self.number_of_dimensions: -            for i in range(self.number_of_dimensions): -                self.dimension_labels[i] = dimension_labels[i] -        else: -            for i in range(self.number_of_dimensions): -                self.dimension_labels[i] = 'dimension_{0:02}'.format(i) -         -        if type(array) == numpy.ndarray: -            if 
deep_copy: -                self.array = array.copy() -            else: -                self.array = array     -        else: -            raise TypeError('Array must be NumpyArray, passed {0}'\ -                            .format(type(array))) -         -        # finally copy the geometry -        if 'geometry' in kwargs.keys(): -            self.geometry = kwargs['geometry'] -        else: -            # assume it is parallel beam -            pass -         -    def get_dimension_size(self, dimension_label): -        if dimension_label in self.dimension_labels.values(): -            acq_size = -1 -            for k,v in self.dimension_labels.items(): -                if v == dimension_label: -                    acq_size = self.shape[k] -            return acq_size -        else: -            raise ValueError('Unknown dimension {0}. Should be one of'.format(dimension_label, -                             self.dimension_labels)) -    def get_dimension_axis(self, dimension_label): -        if dimension_label in self.dimension_labels.values(): -            for k,v in self.dimension_labels.items(): -                if v == dimension_label: -                    return k -        else: -            raise ValueError('Unknown dimension {0}. Should be one of'.format(dimension_label, -                             self.dimension_labels.values())) -                         - -    def as_array(self, dimensions=None): -        '''Returns the DataContainer as Numpy Array -         -        Returns the pointer to the array if dimensions is not set. -        If dimensions is set, it first creates a new DataContainer with the subset -        and then it returns the pointer to the array''' -        if dimensions is not None: -            return self.subset(dimensions).as_array() -        return self.array -     -     -    def subset(self, dimensions=None, **kw): -        '''Creates a DataContainer containing a subset of self according to the  -        labels in dimensions''' -        if dimensions is None: -            if kw == {}: -                return self.array.copy() -            else: -                reduced_dims = [v for k,v in self.dimension_labels.items()] -                for dim_l, dim_v in kw.items(): -                    for k,v in self.dimension_labels.items(): -                        if v == dim_l: -                            reduced_dims.pop(k) -                return self.subset(dimensions=reduced_dims, **kw) -        else: -            # check that all the requested dimensions are in the array -            # this is done by checking the dimension_labels -            proceed = True -            unknown_key = '' -            # axis_order contains the order of the axis that the user wants -            # in the output DataContainer -            axis_order = [] -            if type(dimensions) == list: -                for dl in dimensions: -                    if dl not in self.dimension_labels.values(): -                        proceed = False -                        unknown_key = dl -                        break -                    else: -                        axis_order.append(find_key(self.dimension_labels, dl)) -                if not proceed: -                    raise KeyError('Subset error: Unknown key specified {0}'.format(dl)) -                     -                # slice away the unwanted data from the array -                unwanted_dimensions = self.dimension_labels.copy() -                left_dimensions = [] -                for ax in sorted(axis_order): -         
           this_dimension = unwanted_dimensions.pop(ax) -                    left_dimensions.append(this_dimension) -                #print ("unwanted_dimensions {0}".format(unwanted_dimensions)) -                #print ("left_dimensions {0}".format(left_dimensions)) -                #new_shape = [self.shape[ax] for ax in axis_order] -                #print ("new_shape {0}".format(new_shape)) -                command = "self.array[" -                for i in range(self.number_of_dimensions): -                    if self.dimension_labels[i] in unwanted_dimensions.values(): -                        value = 0 -                        for k,v in kw.items(): -                            if k == self.dimension_labels[i]: -                                value = v -                                 -                        command = command + str(value) -                    else: -                        command = command + ":" -                    if i < self.number_of_dimensions -1: -                        command = command + ',' -                command = command + ']' -                 -                cleaned = eval(command) -                # cleaned has collapsed dimensions in the same order of -                # self.array, but we want it in the order stated in the  -                # "dimensions".  -                # create axes order for numpy.transpose -                axes = [] -                for key in dimensions: -                    #print ("key {0}".format( key)) -                    for i in range(len( left_dimensions )): -                        ld = left_dimensions[i] -                        #print ("ld {0}".format( ld)) -                        if ld == key: -                            axes.append(i) -                #print ("axes {0}".format(axes)) -                 -                cleaned = numpy.transpose(cleaned, axes).copy() -                 -                return type(self)(cleaned , True, dimensions) -     -    def fill(self, array, **dimension): -        '''fills the internal numpy array with the one provided''' -        if dimension == {}: -            if issubclass(type(array), DataContainer) or\ -               issubclass(type(array), numpy.ndarray): -                if array.shape != self.shape: -                    raise ValueError('Cannot fill with the provided array.' 
+ \ -                                     'Expecting {0} got {1}'.format( -                                     self.shape,array.shape)) -                if issubclass(type(array), DataContainer): -                    numpy.copyto(self.array, array.array) -                else: -                    #self.array[:] = array -                    numpy.copyto(self.array, array) -        else: -             -            command = 'self.array[' -            i = 0 -            for k,v in self.dimension_labels.items(): -                for dim_label, dim_value in dimension.items():     -                    if dim_label == v: -                        command = command + str(dim_value) -                    else: -                        command = command + ":" -                if i < self.number_of_dimensions -1: -                    command = command + ',' -                i += 1 -            command = command + "] = array[:]"  -            exec(command) -             -         -    def check_dimensions(self, other): -        return self.shape == other.shape -     -    ## algebra  -     -    def __add__(self, other): -        return self.add(other) -    def __mul__(self, other): -        return self.multiply(other) -    def __sub__(self, other): -        return self.subtract(other) -    def __div__(self, other): -        return self.divide(other) -    def __truediv__(self, other): -        return self.divide(other) -    def __pow__(self, other): -        return self.power(other) -     -     -     -    # reverse operand -    def __radd__(self, other): -        return self + other -    # __radd__ -     -    def __rsub__(self, other): -        return (-1 * self) + other -    # __rsub__ -     -    def __rmul__(self, other): -        return self * other -    # __rmul__ -     -    def __rdiv__(self, other): -        print ("call __rdiv__") -        return pow(self / other, -1) -    # __rdiv__ -    def __rtruediv__(self, other): -        return self.__rdiv__(other) -     -    def __rpow__(self, other): -        if isinstance(other, (int, float)) : -            fother = numpy.ones(numpy.shape(self.array)) * other -            return type(self)(fother ** self.array ,  -                           dimension_labels=self.dimension_labels, -                           geometry=self.geometry) -        elif issubclass(type(other), DataContainer): -            if self.check_dimensions(other): -                return type(self)(other.as_array() ** self.array ,  -                           dimension_labels=self.dimension_labels, -                           geometry=self.geometry) -            else: -                raise ValueError('Dimensions do not match') -    # __rpow__ -     -    # in-place arithmetic operators: -    # (+=, -=, *=, /= , //=, -    # must return self -     -    def __iadd__(self, other): -        kw = {'out':self} -        return self.add(other, **kw) -     -    def __imul__(self, other): -        kw = {'out':self} -        return self.multiply(other, **kw) -     -    def __isub__(self, other): -        kw = {'out':self} -        return self.subtract(other, **kw) -     -    def __idiv__(self, other): -        kw = {'out':self} -        return self.divide(other, **kw) -     -    def __itruediv__(self, other): -        kw = {'out':self} -        return self.divide(other, **kw) -     - -     -    def __str__ (self, representation=False): -        repres = "" -        repres += "Number of dimensions: {0}\n".format(self.number_of_dimensions) -        repres += "Shape: {0}\n".format(self.shape) -        
repres += "Axis labels: {0}\n".format(self.dimension_labels) -        if representation: -            repres += "Representation: \n{0}\n".format(self.array) -        return repres -     -    def clone(self): -        '''returns a copy of itself''' -         -        return type(self)(self.array,  -                          dimension_labels=self.dimension_labels, -                          deep_copy=True, -                          geometry=self.geometry ) -     -    def get_data_axes_order(self,new_order=None): -        '''returns the axes label of self as a list -         -        if new_order is None returns the labels of the axes as a sorted-by-key list -        if new_order is a list of length number_of_dimensions, returns a list -        with the indices of the axes in new_order with respect to those in  -        self.dimension_labels: i.e. -          self.dimension_labels = {0:'horizontal',1:'vertical'} -          new_order = ['vertical','horizontal'] -          returns [1,0] -        ''' -        if new_order is None: -             -            axes_order = [i for i in range(len(self.shape))] -            for k,v in self.dimension_labels.items(): -                axes_order[k] = v -            return axes_order -        else: -            if len(new_order) == self.number_of_dimensions: -                axes_order = [i for i in range(self.number_of_dimensions)] -                 -                for i in range(len(self.shape)): -                    found = False -                    for k,v in self.dimension_labels.items(): -                        if new_order[i] == v: -                            axes_order[i] = k -                            found = True -                    if not found: -                        raise ValueError('Axis label {0} not found.'.format(new_order[i])) -                return axes_order -            else: -                raise ValueError('Expecting {0} axes, got {2}'\ -                                 .format(len(self.shape),len(new_order))) -         -                 -    def copy(self): -        '''alias of clone'''     -        return self.clone() -     -    ## binary operations -             -    def pixel_wise_binary(self, pwop, x2, *args,  **kwargs):     -        out = kwargs.get('out', None) -        if out is None: -            if isinstance(x2, (int, float, complex)): -                out = pwop(self.as_array() , x2 , *args, **kwargs ) -            elif isinstance(x2, (numpy.int, numpy.int8, numpy.int16, numpy.int32, numpy.int64,\ -                                 numpy.float, numpy.float16, numpy.float32, numpy.float64, \ -                                 numpy.complex)): -                out = pwop(self.as_array() , x2 , *args, **kwargs ) -            elif issubclass(type(x2) , DataContainer): -                out = pwop(self.as_array() , x2.as_array() , *args, **kwargs ) -            return type(self)(out, -                   deep_copy=False,  -                   dimension_labels=self.dimension_labels, -                   geometry=self.geometry) -             -         -        elif issubclass(type(out), DataContainer) and issubclass(type(x2), DataContainer): -            if self.check_dimensions(out) and self.check_dimensions(x2): -                kwargs['out'] = out.as_array() -                pwop(self.as_array(), x2.as_array(), *args, **kwargs ) -                #return type(self)(out.as_array(), -                #       deep_copy=False,  -                #       dimension_labels=self.dimension_labels, -                #       
geometry=self.geometry) -                return out -            else: -                raise ValueError(message(type(self),"Wrong size for data memory: ", out.shape,self.shape)) -        elif issubclass(type(out), DataContainer) and isinstance(x2, (int,float,complex)): -            if self.check_dimensions(out): -                kwargs['out']=out.as_array() -                pwop(self.as_array(), x2, *args, **kwargs ) -                return out -            else: -                raise ValueError(message(type(self),"Wrong size for data memory: ", out.shape,self.shape)) -        elif issubclass(type(out), numpy.ndarray): -            if self.array.shape == out.shape and self.array.dtype == out.dtype: -                kwargs['out'] = out -                pwop(self.as_array(), x2, *args, **kwargs) -                #return type(self)(out, -                #       deep_copy=False,  -                #       dimension_labels=self.dimension_labels, -                #       geometry=self.geometry) -        else: -            raise ValueError (message(type(self),  "incompatible class:" , pwop.__name__, type(out))) -     -    def add(self, other, *args, **kwargs): -        if hasattr(other, '__container_priority__') and \ -           self.__class__.__container_priority__ < other.__class__.__container_priority__: -            return other.add(self, *args, **kwargs) -        return self.pixel_wise_binary(numpy.add, other, *args, **kwargs) -     -    def subtract(self, other, *args, **kwargs): -        if hasattr(other, '__container_priority__') and \ -           self.__class__.__container_priority__ < other.__class__.__container_priority__: -            return other.subtract(self, *args, **kwargs) -        return self.pixel_wise_binary(numpy.subtract, other, *args, **kwargs) - -    def multiply(self, other, *args, **kwargs): -        if hasattr(other, '__container_priority__') and \ -           self.__class__.__container_priority__ < other.__class__.__container_priority__: -            return other.multiply(self, *args, **kwargs) -        return self.pixel_wise_binary(numpy.multiply, other, *args, **kwargs) -     -    def divide(self, other, *args, **kwargs): -        if hasattr(other, '__container_priority__') and \ -           self.__class__.__container_priority__ < other.__class__.__container_priority__: -            return other.divide(self, *args, **kwargs) -        return self.pixel_wise_binary(numpy.divide, other, *args, **kwargs) -     -    def power(self, other, *args, **kwargs): -        return self.pixel_wise_binary(numpy.power, other, *args, **kwargs) -     -    def maximum(self, x2, *args, **kwargs): -        return self.pixel_wise_binary(numpy.maximum, x2, *args, **kwargs) -     -    def minimum(self,x2, out=None, *args, **kwargs): -        return self.pixel_wise_binary(numpy.minimum, x2=x2, out=out, *args, **kwargs) - -     -    ## unary operations -    def pixel_wise_unary(self, pwop, *args,  **kwargs): -        out = kwargs.get('out', None) -        if out is None: -            out = pwop(self.as_array() , *args, **kwargs ) -            return type(self)(out, -                       deep_copy=False,  -                       dimension_labels=self.dimension_labels, -                       geometry=self.geometry) -        elif issubclass(type(out), DataContainer): -            if self.check_dimensions(out): -                kwargs['out'] = out.as_array() -                pwop(self.as_array(), *args, **kwargs ) -            else: -                raise ValueError(message(type(self),"Wrong 
size for data memory: ", out.shape,self.shape)) -        elif issubclass(type(out), numpy.ndarray): -            if self.array.shape == out.shape and self.array.dtype == out.dtype: -                kwargs['out'] = out -                pwop(self.as_array(), *args, **kwargs) -        else: -            raise ValueError (message(type(self),  "incompatible class:" , pwop.__name__, type(out))) -     -    def abs(self, *args,  **kwargs): -        return self.pixel_wise_unary(numpy.abs, *args,  **kwargs) -     -    def sign(self, *args,  **kwargs): -        return self.pixel_wise_unary(numpy.sign, *args,  **kwargs) -     -    def sqrt(self, *args,  **kwargs): -        return self.pixel_wise_unary(numpy.sqrt, *args,  **kwargs) - -    def conjugate(self, *args,  **kwargs): -        return self.pixel_wise_unary(numpy.conjugate, *args,  **kwargs) -    #def __abs__(self): -    #    operation = FM.OPERATION.ABS -    #    return self.callFieldMath(operation, None, self.mask, self.maskOnValue) -    # __abs__ -     -    ## reductions -    def sum(self, *args, **kwargs): -        return self.as_array().sum(*args, **kwargs) -    def squared_norm(self): -        '''return the squared euclidean norm of the DataContainer viewed as a vector''' -        #shape = self.shape -        #size = reduce(lambda x,y:x*y, shape, 1) -        #y = numpy.reshape(self.as_array(), (size, )) -        return self.dot(self.conjugate()) -        #return self.dot(self) -    def norm(self): -        '''return the euclidean norm of the DataContainer viewed as a vector''' -        return numpy.sqrt(self.squared_norm()) -     -     -    def dot(self, other, *args, **kwargs): -        '''return the inner product of 2 DataContainers viewed as vectors''' -        method = kwargs.get('method', 'reduce') - -        if method not in ['numpy','reduce']: -            raise ValueError('dot: specified method not valid. 
Expecting numpy or reduce got {} '.format( -                    method)) - -        if self.shape == other.shape: -            # return (self*other).sum() -            if method == 'numpy': -                return numpy.dot(self.as_array().ravel(), other.as_array()) -            elif method == 'reduce': -                # see https://github.com/vais-ral/CCPi-Framework/pull/273 -                # notice that Python seems to be smart enough to use -                # the appropriate type to hold the result of the reduction -                sf = reduce(lambda x,y: x + y[0]*y[1], -                            zip(self.as_array().ravel(), -                                other.as_array().ravel()), -                            0) -                return sf -        else: -            raise ValueError('Shapes are not aligned: {} != {}'.format(self.shape, other.shape)) -    - -     -     -     -class ImageData(DataContainer): -    '''DataContainer for holding 2D or 3D DataContainer''' -    __container_priority__ = 1 -     -     -    def __init__(self,  -                 array = None,  -                 deep_copy=False,  -                 dimension_labels=None,  -                 **kwargs): -         -        self.geometry = kwargs.get('geometry', None) -        if array is None: -            if self.geometry is not None: -                shape, dimension_labels = self.get_shape_labels(self.geometry) -                     -                array = numpy.zeros( shape , dtype=numpy.float32)  -                super(ImageData, self).__init__(array, deep_copy, -                                 dimension_labels, **kwargs) -                 -            else: -                raise ValueError('Please pass either a DataContainer, ' +\ -                                 'a numpy array or a geometry') -        else: -            if self.geometry is not None: -                shape, labels = self.get_shape_labels(self.geometry, dimension_labels) -                if array.shape != shape: -                    raise ValueError('Shape mismatch {} {}'.format(shape, array.shape)) -             -            if issubclass(type(array) , DataContainer): -                # if the array is a DataContainer get the info from there -                if not ( array.number_of_dimensions == 2 or \ -                         array.number_of_dimensions == 3 or \ -                         array.number_of_dimensions == 4): -                    raise ValueError('Number of dimensions are not 2 or 3 or 4: {0}'\ -                                     .format(array.number_of_dimensions)) -                 -                #DataContainer.__init__(self, array.as_array(), deep_copy, -                #                 array.dimension_labels, **kwargs) -                super(ImageData, self).__init__(array.as_array(), deep_copy, -                                 array.dimension_labels, **kwargs) -            elif issubclass(type(array) , numpy.ndarray): -                if not ( array.ndim == 2 or array.ndim == 3 or array.ndim == 4 ): -                    raise ValueError( -                            'Number of dimensions are not 2 or 3 or 4 : {0}'\ -                            .format(array.ndim)) -                     -                if dimension_labels is None: -                    if array.ndim == 4: -                        dimension_labels = [ImageGeometry.CHANNEL,  -                                            ImageGeometry.VERTICAL, -                                            ImageGeometry.HORIZONTAL_Y, -                                  
          ImageGeometry.HORIZONTAL_X] -                    elif array.ndim == 3: -                        dimension_labels = [ImageGeometry.VERTICAL, -                                            ImageGeometry.HORIZONTAL_Y, -                                            ImageGeometry.HORIZONTAL_X] -                    else: -                        dimension_labels = [ ImageGeometry.HORIZONTAL_Y, -                                             ImageGeometry.HORIZONTAL_X]    -                 -                #DataContainer.__init__(self, array, deep_copy, dimension_labels, **kwargs) -                super(ImageData, self).__init__(array, deep_copy,  -                     dimension_labels, **kwargs) -        -        # load metadata from kwargs if present -        for key, value in kwargs.items(): -            if (type(value) == list or type(value) == tuple) and \ -                ( len (value) == 3 and len (value) == 2) : -                    if key == 'origin' :     -                        self.origin = value -                    if key == 'spacing' : -                        self.spacing = value -                         -        def subset(self, dimensions=None, **kw): -            # FIXME: this is clearly not rigth -            # it should be something like  -            # out = DataContainer.subset(self, dimensions, **kw) -            # followed by regeneration of the proper geometry.  -            out = super(ImageData, self).subset(dimensions, **kw) -            #out.geometry = self.recalculate_geometry(dimensions , **kw) -            out.geometry = self.geometry -            return out - -    def get_shape_labels(self, geometry, dimension_labels=None): -        channels  = geometry.channels -        horiz_x   = geometry.voxel_num_x -        horiz_y   = geometry.voxel_num_y -        vert      = 1 if geometry.voxel_num_z is None\ -                      else geometry.voxel_num_z # this should be 1 for 2D -        if dimension_labels is None: -            if channels > 1: -                if vert > 1: -                    shape = (channels, vert, horiz_y, horiz_x) -                    dim_labels = [ImageGeometry.CHANNEL,  -                                  ImageGeometry.VERTICAL, -                                  ImageGeometry.HORIZONTAL_Y, -                                  ImageGeometry.HORIZONTAL_X] -                else: -                    shape = (channels , horiz_y, horiz_x) -                    dim_labels = [ImageGeometry.CHANNEL, -                                  ImageGeometry.HORIZONTAL_Y, -                                  ImageGeometry.HORIZONTAL_X] -            else: -                if vert > 1: -                    shape = (vert, horiz_y, horiz_x) -                    dim_labels = [ImageGeometry.VERTICAL, -                                  ImageGeometry.HORIZONTAL_Y, -                                  ImageGeometry.HORIZONTAL_X] -                else: -                    shape = (horiz_y, horiz_x) -                    dim_labels = [ImageGeometry.HORIZONTAL_Y, -                                  ImageGeometry.HORIZONTAL_X] -            dimension_labels = dim_labels -        else: -            shape = [] -            for i in range(len(dimension_labels)): -                dim = dimension_labels[i] -                if dim == ImageGeometry.CHANNEL: -                    shape.append(channels) -                elif dim == ImageGeometry.HORIZONTAL_Y: -                    shape.append(horiz_y) -                elif dim == ImageGeometry.VERTICAL: -                    
shape.append(vert) -                elif dim == ImageGeometry.HORIZONTAL_X: -                    shape.append(horiz_x) -            if len(shape) != len(dimension_labels): -                raise ValueError('Missing {0} axes {1} shape {2}'.format( -                        len(dimension_labels) - len(shape), dimension_labels, shape)) -            shape = tuple(shape) -             -        return (shape, dimension_labels) -                             - -class AcquisitionData(DataContainer): -    '''DataContainer for holding 2D or 3D sinogram''' -    __container_priority__ = 1 -     -     -    def __init__(self,  -                 array = None,  -                 deep_copy=True,  -                 dimension_labels=None,  -                 **kwargs): -        self.geometry = kwargs.get('geometry', None) -        if array is None: -            if 'geometry' in kwargs.keys(): -                geometry      = kwargs['geometry'] -                self.geometry = geometry -                 -                shape, dimension_labels = self.get_shape_labels(geometry, dimension_labels) -                 -                     -                array = numpy.zeros( shape , dtype=numpy.float32)  -                super(AcquisitionData, self).__init__(array, deep_copy, -                                 dimension_labels, **kwargs) -        else: -            if self.geometry is not None: -                shape, labels = self.get_shape_labels(self.geometry, dimension_labels) -                if array.shape != shape: -                    raise ValueError('Shape mismatch {} {}'.format(shape, array.shape)) -                     -            if issubclass(type(array) ,DataContainer): -                # if the array is a DataContainer get the info from there -                if not ( array.number_of_dimensions == 2 or \ -                         array.number_of_dimensions == 3 or \ -                         array.number_of_dimensions == 4): -                    raise ValueError('Number of dimensions are not 2 or 3 or 4: {0}'\ -                                     .format(array.number_of_dimensions)) -                 -                #DataContainer.__init__(self, array.as_array(), deep_copy, -                #                 array.dimension_labels, **kwargs) -                super(AcquisitionData, self).__init__(array.as_array(), deep_copy, -                                 array.dimension_labels, **kwargs) -            elif issubclass(type(array) ,numpy.ndarray): -                if not ( array.ndim == 2 or array.ndim == 3 or array.ndim == 4 ): -                    raise ValueError( -                            'Number of dimensions are not 2 or 3 or 4 : {0}'\ -                            .format(array.ndim)) -                     -                if dimension_labels is None: -                    if array.ndim == 4: -                        dimension_labels = [AcquisitionGeometry.CHANNEL, -                                            AcquisitionGeometry.ANGLE, -                                            AcquisitionGeometry.VERTICAL, -                                            AcquisitionGeometry.HORIZONTAL] -                    elif array.ndim == 3: -                        dimension_labels = [AcquisitionGeometry.ANGLE, -                                            AcquisitionGeometry.VERTICAL, -                                            AcquisitionGeometry.HORIZONTAL] -                    else: -                        dimension_labels = [AcquisitionGeometry.ANGLE, -                                            
AcquisitionGeometry.HORIZONTAL] - -                super(AcquisitionData, self).__init__(array, deep_copy,  -                     dimension_labels, **kwargs) -                 -    def get_shape_labels(self, geometry, dimension_labels=None): -        channels      = geometry.channels -        horiz         = geometry.pixel_num_h -        vert          = geometry.pixel_num_v -        angles        = geometry.angles -        num_of_angles = numpy.shape(angles)[0] -         -        if dimension_labels is None: -            if channels > 1: -                if vert > 1: -                    shape = (channels, num_of_angles , vert, horiz) -                    dim_labels = [AcquisitionGeometry.CHANNEL, -                                  AcquisitionGeometry.ANGLE, -                                  AcquisitionGeometry.VERTICAL, -                                  AcquisitionGeometry.HORIZONTAL] -                else: -                    shape = (channels , num_of_angles, horiz) -                    dim_labels = [AcquisitionGeometry.CHANNEL, -                                  AcquisitionGeometry.ANGLE, -                                  AcquisitionGeometry.HORIZONTAL] -            else: -                if vert > 1: -                    shape = (num_of_angles, vert, horiz) -                    dim_labels = [AcquisitionGeometry.ANGLE, -                                  AcquisitionGeometry.VERTICAL, -                                  AcquisitionGeometry.HORIZONTAL -                                  ] -                else: -                    shape = (num_of_angles, horiz) -                    dim_labels = [AcquisitionGeometry.ANGLE, -                                  AcquisitionGeometry.HORIZONTAL -                                  ] -             -            dimension_labels = dim_labels -        else: -            shape = [] -            for i in range(len(dimension_labels)): -                dim = dimension_labels[i] -                 -                if dim == AcquisitionGeometry.CHANNEL: -                    shape.append(channels) -                elif dim == AcquisitionGeometry.ANGLE: -                    shape.append(num_of_angles) -                elif dim == AcquisitionGeometry.VERTICAL: -                    shape.append(vert) -                elif dim == AcquisitionGeometry.HORIZONTAL: -                    shape.append(horiz) -            if len(shape) != len(dimension_labels): -                raise ValueError('Missing {0} axes.\nExpected{1} got {2}'\ -                    .format( -                        len(dimension_labels) - len(shape), -                        dimension_labels, shape)  -                    ) -            shape = tuple(shape) -        return (shape, dimension_labels) -     -                 -             -class DataProcessor(object): -     -    '''Defines a generic DataContainer processor -     -    accepts DataContainer as inputs and  -    outputs DataContainer -    additional attributes can be defined with __setattr__ -    ''' -     -    def __init__(self, **attributes): -        if not 'store_output' in attributes.keys(): -            attributes['store_output'] = True -            attributes['output'] = False -            attributes['runTime'] = -1 -            attributes['mTime'] = datetime.now() -            attributes['input'] = None -        for key, value in attributes.items(): -            self.__dict__[key] = value -         -     -    def __setattr__(self, name, value): -        if name == 'input': -            self.set_input(value) -        elif name 
in self.__dict__.keys(): -            self.__dict__[name] = value -            self.__dict__['mTime'] = datetime.now() -        else: -            raise KeyError('Attribute {0} not found'.format(name)) -        #pass -     -    def set_input(self, dataset): -        if issubclass(type(dataset), DataContainer): -            if self.check_input(dataset): -                self.__dict__['input'] = dataset -        else: -            raise TypeError("Input type mismatch: got {0} expecting {1}"\ -                            .format(type(dataset), DataContainer)) -     -    def check_input(self, dataset): -        '''Checks parameters of the input DataContainer -         -        Should raise an Error if the DataContainer does not match expectation, e.g. -        if the expected input DataContainer is 3D and the Processor expects 2D. -        ''' -        raise NotImplementedError('Implement basic checks for input DataContainer') -         -    def get_output(self, out=None): -         -        for k,v in self.__dict__.items(): -            if v is None and k != 'output': -                raise ValueError('Key {0} is None'.format(k)) -        shouldRun = False -        if self.runTime == -1: -            shouldRun = True -        elif self.mTime > self.runTime: -            shouldRun = True -             -        # CHECK this -        if self.store_output and shouldRun: -            self.runTime = datetime.now() -            try: -                self.output = self.process(out=out) -                return self.output -            except TypeError as te: -                self.output = self.process() -                return self.output -        self.runTime = datetime.now() -        try: -            return self.process(out=out) -        except TypeError as te: -            return self.process() -             -     -    def set_input_processor(self, processor): -        if issubclass(type(processor), DataProcessor): -            self.__dict__['input'] = processor -        else: -            raise TypeError("Input type mismatch: got {0} expecting {1}"\ -                            .format(type(processor), DataProcessor)) -         -    def get_input(self): -        '''returns the input DataContainer -         -        It is useful in the case the user has provided a DataProcessor as -        input -        ''' -        if issubclass(type(self.input), DataProcessor): -            dsi = self.input.get_output() -        else: -            dsi = self.input -        return dsi -         -    def process(self, out=None): -        raise NotImplementedError('process must be implemented') -         -     -     - -class DataProcessor23D(DataProcessor): -    '''Regularizers DataProcessor -    ''' -             -    def check_input(self, dataset): -        '''Checks number of dimensions input DataContainer -         -        Expected input is 2D or 3D -        ''' -        if dataset.number_of_dimensions == 2 or \ -           dataset.number_of_dimensions == 3: -               return True -        else: -            raise ValueError("Expected input dimensions is 2 or 3, got {0}"\ -                             .format(dataset.number_of_dimensions)) -     -###### Example of DataProcessors - -class AX(DataProcessor): -    '''Example DataProcessor -    The AXPY routines perform a vector multiplication operation defined as - -    y := a*x -    where: - -    a is a scalar - -    x a DataContainer. 
-    ''' -     -    def __init__(self): -        kwargs = {'scalar':None,  -                  'input':None,  -                  } -         -        #DataProcessor.__init__(self, **kwargs) -        super(AX, self).__init__(**kwargs) -     -    def check_input(self, dataset): -        return True -         -    def process(self, out=None): -         -        dsi = self.get_input() -        a = self.scalar -        if out is None: -            y = DataContainer( a * dsi.as_array() , True,  -                        dimension_labels=dsi.dimension_labels ) -            #self.setParameter(output_dataset=y) -            return y -        else: -            out.fill(a * dsi.as_array()) -     - -###### Example of DataProcessors - -class CastDataContainer(DataProcessor): -    '''Example DataProcessor -    Cast a DataContainer array to a different type. - -    y := a*x -    where: - -    a is a scalar - -    x a DataContainer. -    ''' -     -    def __init__(self, dtype=None): -        kwargs = {'dtype':dtype,  -                  'input':None,  -                  } -         -        #DataProcessor.__init__(self, **kwargs) -        super(CastDataContainer, self).__init__(**kwargs) -     -    def check_input(self, dataset): -        return True -         -    def process(self, out=None): -         -        dsi = self.get_input() -        dtype = self.dtype -        if out is None: -            y = numpy.asarray(dsi.as_array(), dtype=dtype) -             -            return type(dsi)(numpy.asarray(dsi.as_array(), dtype=dtype), -                                dimension_labels=dsi.dimension_labels ) -        else: -            out.fill(numpy.asarray(dsi.as_array(), dtype=dtype)) -     -         -         -     -     -class PixelByPixelDataProcessor(DataProcessor): -    '''Example DataProcessor -     -    This processor applies a python function to each pixel of the DataContainer -     -    f is a python function - -    x a DataSet. 
-    ''' -     -    def __init__(self): -        kwargs = {'pyfunc':None,  -                  'input':None,  -                  } -        #DataProcessor.__init__(self, **kwargs) -        super(PixelByPixelDataProcessor, self).__init__(**kwargs) -         -    def check_input(self, dataset): -        return True -     -    def process(self, out=None): -         -        pyfunc = self.pyfunc -        dsi = self.get_input() -         -        eval_func = numpy.frompyfunc(pyfunc,1,1) - -         -        y = DataContainer( eval_func( dsi.as_array() ) , True,  -                    dimension_labels=dsi.dimension_labels ) -        return y -     - -         -         -if __name__ == '__main__': -    shape = (2,3,4,5) -    size = shape[0] -    for i in range(1, len(shape)): -        size = size * shape[i] -    #print("a refcount " , sys.getrefcount(a)) -    a = numpy.asarray([i for i in range( size )]) -    print("a refcount " , sys.getrefcount(a)) -    a = numpy.reshape(a, shape) -    print("a refcount " , sys.getrefcount(a)) -    ds = DataContainer(a, False, ['X', 'Y','Z' ,'W']) -    print("a refcount " , sys.getrefcount(a)) -    print ("ds label {0}".format(ds.dimension_labels)) -    subset = ['W' ,'X'] -    b = ds.subset( subset ) -    print("a refcount " , sys.getrefcount(a)) -    print ("b label {0} shape {1}".format(b.dimension_labels,  -           numpy.shape(b.as_array()))) -    c = ds.subset(['Z','W','X']) -    print("a refcount " , sys.getrefcount(a)) -     -    # Create a ImageData sharing the array with c -    volume0 = ImageData(c.as_array(), False, dimensions = c.dimension_labels) -    volume1 = ImageData(c, False) -     -    print ("volume0 {0} volume1 {1}".format(id(volume0.array), -           id(volume1.array))) -     -    # Create a ImageData copying the array from c -    volume2 = ImageData(c.as_array(), dimensions = c.dimension_labels) -    volume3 = ImageData(c) -     -    print ("volume2 {0} volume3 {1}".format(id(volume2.array), -           id(volume3.array))) -         -    # single number DataSet -    sn = DataContainer(numpy.asarray([1])) -     -    ax = AX() -    ax.scalar = 2 -    ax.set_input(c) -    #ax.apply() -    print ("ax  in {0} out {1}".format(c.as_array().flatten(), -           ax.get_output().as_array().flatten())) -     -    cast = CastDataContainer(dtype=numpy.float32) -    cast.set_input(c) -    out = cast.get_output() -    out *= 0  -    axm = AX() -    axm.scalar = 0.5 -    axm.set_input_processor(cast) -    axm.get_output(out) -    #axm.apply() -    print ("axm in {0} out {1}".format(c.as_array(), axm.get_output().as_array())) -     -    # check out in DataSetProcessor -   #a = numpy.asarray([i for i in range( size )]) -     -         -    # create a PixelByPixelDataProcessor -     -    #define a python function which will take only one input (the pixel value) -    pyfunc = lambda x: -x if x > 20 else x -    clip = PixelByPixelDataProcessor() -    clip.pyfunc = pyfunc  -    clip.set_input(c)     -    #clip.apply() -     -    print ("clip in {0} out {1}".format(c.as_array(), clip.get_output().as_array())) -     -    #dsp = DataProcessor() -    #dsp.set_input(ds) -    #dsp.input = a -    # pipeline - -    chain = AX() -    chain.scalar = 0.5 -    chain.set_input_processor(ax) -    print ("chain in {0} out {1}".format(ax.get_output().as_array(), chain.get_output().as_array())) -     -    # testing arithmetic operations -     -    print (b) -    print ((b+1)) -    print ((1+b)) -     -    print (b) -    print ((b*2)) -     -    print (b) -    print 
((2*b)) -     -    print (b) -    print ((b/2)) -     -    print (b) -    print ((2/b)) -     -    print (b) -    print ((b**2)) -     -    print (b) -    print ((2**b)) -     -    print (type(volume3 + 2)) -     -    s = [i for i in range(3 * 4 * 4)] -    s = numpy.reshape(numpy.asarray(s), (3,4,4)) -    sino = AcquisitionData( s ) -     -    shape = (4,3,2) -    a = [i for i in range(2*3*4)] -    a = numpy.asarray(a) -    a = numpy.reshape(a, shape) -    print (numpy.shape(a)) -    ds = DataContainer(a, True, ['X', 'Y','Z']) -    # this means that I expect the X to be of length 2 , -    # y of length 3 and z of length 4 -    subset = ['Y' ,'Z'] -    b0 = ds.subset( subset ) -    print ("shape b 3,2? {0}".format(numpy.shape(b0.as_array()))) -    # expectation on b is that it is  -    # 3x2 cut at z = 0 -     -    subset = ['X' ,'Y'] -    b1 = ds.subset( subset , Z=1) -    print ("shape b 2,3? {0}".format(numpy.shape(b1.as_array()))) -     -     - -    # create VolumeData from geometry -    vgeometry = ImageGeometry(voxel_num_x=2, voxel_num_y=3, channels=2) -    vol = ImageData(geometry=vgeometry) -     -    sgeometry = AcquisitionGeometry(dimension=2, angles=numpy.linspace(0, 180, num=20),  -                                       geom_type='parallel', pixel_num_v=3, -                                       pixel_num_h=5 , channels=2) -    sino = AcquisitionData(geometry=sgeometry) -    sino2 = sino.clone() -     -    a0 = numpy.asarray([i for i in range(2*3*4)]) -    a1 = numpy.asarray([2*i for i in range(2*3*4)]) -     -             -    ds0 = DataContainer(numpy.reshape(a0,(2,3,4))) -    ds1 = DataContainer(numpy.reshape(a1,(2,3,4))) -     -    numpy.testing.assert_equal(ds0.dot(ds1), a0.dot(a1)) -     -    a2 = numpy.asarray([2*i for i in range(2*3*5)]) -    ds2 = DataContainer(numpy.reshape(a2,(2,3,5))) -     -#    # it should fail if the shape is wrong -#    try: -#        ds2.dot(ds0) -#        self.assertTrue(False) -#    except ValueError as ve: -#        self.assertTrue(True) -     diff --git a/Wrappers/Python/build/lib/ccpi/io/__init__.py b/Wrappers/Python/build/lib/ccpi/io/__init__.py deleted file mode 100644 index 9233d7a..0000000 --- a/Wrappers/Python/build/lib/ccpi/io/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# -*- coding: utf-8 -*-
 -#   This work is part of the Core Imaging Library developed by
 -#   Visual Analytics and Imaging System Group of the Science Technology
 -#   Facilities Council, STFC
 -
 -#   Copyright 2018 Edoardo Pasca
 -
 -#   Licensed under the Apache License, Version 2.0 (the "License");
 -#   you may not use this file except in compliance with the License.
 -#   You may obtain a copy of the License at
 -
 -#       http://www.apache.org/licenses/LICENSE-2.0
 -
 -#   Unless required by applicable law or agreed to in writing, software
 -#   distributed under the License is distributed on an "AS IS" BASIS,
 -#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 -#   See the License for the specific language governing permissions and
 -#   limitations under the License.
\ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/io/reader.py b/Wrappers/Python/build/lib/ccpi/io/reader.py deleted file mode 100644 index 07e3bf9..0000000 --- a/Wrappers/Python/build/lib/ccpi/io/reader.py +++ /dev/null @@ -1,511 +0,0 @@ -# -*- coding: utf-8 -*-
 -#   This work is part of the Core Imaging Library developed by
 -#   Visual Analytics and Imaging System Group of the Science Technology
 -#   Facilities Council, STFC
 -
 -#   Copyright 2018 Jakob Jorgensen, Daniil Kazantsev, Edoardo Pasca and Srikanth Nagella
 -
 -#   Licensed under the Apache License, Version 2.0 (the "License");
 -#   you may not use this file except in compliance with the License.
 -#   You may obtain a copy of the License at
 -
 -#       http://www.apache.org/licenses/LICENSE-2.0
 -
 -#   Unless required by applicable law or agreed to in writing, software
 -#   distributed under the License is distributed on an "AS IS" BASIS,
 -#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 -#   See the License for the specific language governing permissions and
 -#   limitations under the License.
 -
 -'''
 -This is a reader module with classes for loading 3D datasets. 
 -
 -@author: Mr. Srikanth Nagella
 -'''
 -from __future__ import absolute_import
 -from __future__ import division
 -from __future__ import print_function
 -from __future__ import unicode_literals
 -
 -from ccpi.framework import AcquisitionGeometry
 -from ccpi.framework import AcquisitionData
 -import numpy as np
 -import os
 -
 -h5pyAvailable = True
 -try:
 -    from h5py import File as NexusFile
 -except ImportError:
 -    h5pyAvailable = False
 -
 -pilAvailable = True
 -try:    
 -    from PIL import Image
 -except ImportError:
 -    pilAvailable = False
 -     
 -class NexusReader(object):
 -    '''
 -    Reader class for loading Nexus files. 
 -    '''
 -
 -    def __init__(self, nexus_filename=None):
 -        '''
 -        Takes the Nexus file name as input and sets up the default dataset paths; the data themselves are read by the load methods.
 -        '''
 -        self.flat = None
 -        self.dark = None
 -        self.angles = None
 -        self.geometry = None
 -        self.filename = nexus_filename
 -        self.key_path = 'entry1/tomo_entry/instrument/detector/image_key'
 -        self.data_path = 'entry1/tomo_entry/data/data'
 -        self.angle_path = 'entry1/tomo_entry/data/rotation_angle'
 -    
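A minimal usage sketch, assuming a hypothetical Nexus file 'scan.nxs': constructing the reader only records the file name and the default NXtomo dataset paths; the data are read on demand by the load methods.

from ccpi.io.reader import NexusReader

reader = NexusReader('scan.nxs')   # hypothetical file name
print(reader.data_path)            # 'entry1/tomo_entry/data/data'
print(reader.angle_path)           # 'entry1/tomo_entry/data/rotation_angle'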
 -    def get_image_keys(self):
 -        try:
 -            with NexusFile(self.filename,'r') as file:    
 -                return np.array(file[self.key_path])
 -        except KeyError as ke:
 -            raise KeyError("get_image_keys: " , ke.args[0] , self.key_path)
 -            
 -    
 -    def load(self, dimensions=None, image_key_id=0):  
 -        '''
 -        Generic loading function for flat field, dark field and projection data, selected by image_key_id.
 -        '''      
 -        if not h5pyAvailable:
 -            raise Exception("Error: h5py is not installed")
 -        if self.filename is None:
 -            return        
 -        try:
 -            with NexusFile(self.filename,'r') as file:                
 -                image_keys = np.array(file[self.key_path])                
 -                projections = None
 -                if dimensions is None:
 -                    projections = np.array(file[self.data_path])
 -                    result = projections[image_keys==image_key_id]
 -                    return result
 -                else:
 -                    #When dimensions are specified they need to be mapped to image_keys
 -                    index_array = np.where(image_keys==image_key_id)
 -                    projection_indexes = index_array[0][dimensions[0]]
 -                    new_dimensions = list(dimensions)
 -                    new_dimensions[0]= projection_indexes
 -                    new_dimensions = tuple(new_dimensions)
 -                    result = np.array(file[self.data_path][new_dimensions])
 -                    return result
 -        except:
 -            print("Error reading nexus file")
 -            raise
 -        
 -    def load_projection(self, dimensions=None):
 -        '''
 -        Loads the projection data from the nexus file.
 -        returns: numpy array with projection data
 -        '''
 -        try:
 -            if 0 not in self.get_image_keys():
 -                raise ValueError("Projections are not in the data. Data Path " , 
 -                                 self.data_path)
 -        except KeyError as ke:
 -            raise KeyError(ke.args[0] , self.data_path)
 -        return self.load(dimensions, 0)
 -    
 -    def load_flat(self, dimensions=None):
 -        '''
 -        Loads the flat field data from the nexus file.
 -        returns: numpy array with flat field data
 -        '''        
 -        try:
 -            if 1 not in self.get_image_keys():
 -                raise ValueError("Flats are not in the data. Data Path " , 
 -                                 self.data_path)
 -        except KeyError as ke:
 -            raise KeyError(ke.args[0] , self.data_path)
 -        return self.load(dimensions, 1)
 -    
 -    def load_dark(self, dimensions=None):
 -        '''
 -        Loads the Dark field data from the nexus file.
 -        returns: numpy array with dark field data
 -        '''        
 -        try:
 -            if 2 not in self.get_image_keys():
 -                raise ValueError("Darks are not in the data. Data Path " , 
 -                             self.data_path)
 -        except KeyError as ke:
 -            raise KeyError(ke.args[0] , self.data_path)
 -        return self.load(dimensions, 2)
 -    
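Continuing the sketch above, the three loaders differ only in the NXtomo image_key they select (0 = projections, 1 = flat fields, 2 = dark fields) and each returns a plain numpy array:

projections = reader.load_projection()   # frames with image_key == 0
flats = reader.load_flat()               # frames with image_key == 1
darks = reader.load_dark()               # frames with image_key == 2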
 -    def get_projection_angles(self):
 -        '''
 -        This function returns the projection angles
 -        '''
 -        if not h5pyAvailable:
 -            raise Exception("Error: h5py is not installed")
 -        if self.filename is None:
 -            return        
 -        try:
 -            with NexusFile(self.filename,'r') as file:                
 -                angles = np.array(file[self.angle_path],np.float32)
 -                image_keys = np.array(file[self.key_path])                
 -                return angles[image_keys==0]
 -        except:
 -            print("get_projection_angles Error reading nexus file")
 -            raise        
 -
 -    
 -    def get_sinogram_dimensions(self):
 -        '''
 -        Return the dimensions of the dataset reordered for sinograms: (vertical, number of projections, horizontal)
 -        '''
 -        if not h5pyAvailable:
 -            raise Exception("Error: h5py is not installed")
 -        if self.filename is None:
 -            return
 -        try:
 -            with NexusFile(self.filename,'r') as file:                
 -                projections = file[self.data_path]
 -                image_keys = np.array(file[self.key_path])
 -                dims = list(projections.shape)
 -                dims[0] = dims[1]
 -                dims[1] = np.sum(image_keys==0)
 -                return tuple(dims)
 -        except:
 -            print("Error reading nexus file")
 -            raise                
 -        
 -    def get_projection_dimensions(self):
 -        '''
 -        Return the dimensions of the projection data: (number of projections, vertical, horizontal)
 -        '''
 -        if not h5pyAvailable:
 -            raise Exception("Error: h5py is not installed")
 -        if self.filename is None:
 -            return
 -        try:
 -            with NexusFile(self.filename,'r') as file:
 -                try:                
 -                    projections = file[self.data_path]
 -                except KeyError as ke:
 -                    raise KeyError('Error: data path {0} not found\n{1}'\
 -                                   .format(self.data_path, 
 -                                           ke.args[0]))
 -                #image_keys = np.array(file[self.key_path])
 -                image_keys = self.get_image_keys()
 -                dims = list(projections.shape)
 -                dims[0] = np.sum(image_keys==0)
 -                return tuple(dims)
 -        except:
 -            print("Warning: error reading image_keys, falling back to the raw dataset shape at ", self.data_path)
 -            with NexusFile(self.filename,'r') as file:
 -                dims = file[self.data_path].shape
 -                return tuple(dims)
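Continuing the sketch, the two dimension queries describe the same dataset in different axis orders (the printed values are hypothetical):

print(reader.get_projection_dimensions())   # (projections, vertical, horizontal), e.g. (180, 135, 160)
print(reader.get_sinogram_dimensions())     # (vertical, projections, horizontal), e.g. (135, 180, 160)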
 -            
 -              
 -        
 -    def get_acquisition_data(self, dimensions=None):
 -        '''
 -        Loads the acquisition data for the given dimensions and returns an AcquisitionData object.
 -        '''
 -        data = self.load_projection(dimensions)
 -        dims = self.get_projection_dimensions()
 -        geometry = AcquisitionGeometry('parallel', '3D', 
 -                                       self.get_projection_angles(),
 -                                       pixel_num_h          = dims[2],
 -                                       pixel_size_h         = 1 ,
 -                                       pixel_num_v          = dims[1],
 -                                       pixel_size_v         = 1,
 -                                       dist_source_center   = None, 
 -                                       dist_center_detector = None, 
 -                                       channels             = 1)
 -        return AcquisitionData(data, geometry=geometry, 
 -                               dimension_labels=['angle','vertical','horizontal'])   
 -    
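Continuing the sketch, get_acquisition_data wraps the projections in an AcquisitionData with a parallel-beam 3D geometry built from the file metadata and rotation angles:

data = reader.get_acquisition_data()      # axes (angle, vertical, horizontal)
angles = reader.get_projection_angles()   # one rotation angle per projection
print(data.dimension_labels)              # {0: 'angle', 1: 'vertical', 2: 'horizontal'}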
 -    def get_acquisition_data_subset(self, ymin=None, ymax=None):
 -        '''
 -        Loads a vertical subset of the acquisition data between rows ymin and ymax and returns an AcquisitionData object.
 -        '''
 -        if not h5pyAvailable:
 -            raise Exception("Error: h5py is not installed")
 -        if self.filename is None:
 -            return        
 -        try:
 -            
 -                
 -            with NexusFile(self.filename,'r') as file:    
 -                try:
 -                    dims = self.get_projection_dimensions()
 -                except KeyError:
 -                    pass
 -                dims = file[self.data_path].shape
 -                if ymin is None and ymax is None:
 -                    
 -                    try:
 -                        image_keys = self.get_image_keys()
 -                        print ("image_keys", image_keys)
 -                        projections = np.array(file[self.data_path])
 -                        data = projections[image_keys==0]
 -                    except KeyError as ke:
 -                        print (ke)
 -                        data = np.array(file[self.data_path])
 -                    # default to the full vertical range so the geometry selection below works
 -                    ymin = 0
 -                    ymax = dims[1]
 -                else:
 -                    image_keys = self.get_image_keys()
 -                    print ("image_keys", image_keys)
 -                    projections = np.array(file[self.data_path])[image_keys==0]
 -                    if ymin is None:
 -                        ymin = 0
 -                        if ymax > dims[1]:
 -                            raise ValueError('ymax out of range')
 -                        data = projections[:,:ymax,:]
 -                    elif ymax is None:        
 -                        ymax = dims[1]
 -                        if ymin < 0:
 -                            raise ValueError('ymin out of range')
 -                        data = projections[:,ymin:,:]
 -                    else:
 -                        if ymax > dims[1]:
 -                            raise ValueError('ymax out of range')
 -                        if ymin < 0:
 -                            raise ValueError('ymin out of range')
 -                        
 -                        data = projections[: , ymin:ymax , :] 
 -                
 -        except:
 -            print("Error reading nexus file")
 -            raise
 -        
 -        
 -        try:
 -            angles = self.get_projection_angles()
 -        except KeyError as ke:
 -            n = data.shape[0]
 -            angles = np.linspace(0, n, n+1, dtype=np.float32)
 -        
 -        if ymax-ymin > 1:        
 -            
 -            geometry = AcquisitionGeometry('parallel', '3D', 
 -                                       angles,
 -                                       pixel_num_h          = dims[2],
 -                                       pixel_size_h         = 1 ,
 -                                       pixel_num_v          = ymax-ymin,
 -                                       pixel_size_v         = 1,
 -                                       dist_source_center   = None, 
 -                                       dist_center_detector = None, 
 -                                       channels             = 1)
 -            return AcquisitionData(data, False, geometry=geometry, 
 -                               dimension_labels=['angle','vertical','horizontal']) 
 -        elif ymax-ymin == 1:
 -            geometry = AcquisitionGeometry('parallel', '2D', 
 -                                       angles,
 -                                       pixel_num_h          = dims[2],
 -                                       pixel_size_h         = 1 ,
 -                                       dist_source_center   = None, 
 -                                       dist_center_detector = None, 
 -                                       channels             = 1)
 -            return AcquisitionData(data.squeeze(), False, geometry=geometry, 
 -                               dimension_labels=['angle','horizontal']) 
 -    def get_acquisition_data_slice(self, y_slice=0):
 -        return self.get_acquisition_data_subset(ymin=y_slice , ymax=y_slice+1)
 -    def get_acquisition_data_whole(self):
 -        with NexusFile(self.filename,'r') as file:    
 -            try:
 -                dims = self.get_projection_dimensions()
 -            except KeyError:
 -                print ("Warning: could not read projection dimensions, using the raw dataset shape")
 -                dims = file[self.data_path].shape
 -                
 -            ymin = 0 
 -            ymax = dims[1] - 1
 -            
 -            return self.get_acquisition_data_subset(ymin=ymin, ymax=ymax)
 -            
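Continuing the sketch, the subset helpers restrict the vertical (detector row) range; a single row is returned squeezed to a 2D sinogram. The row indices below are hypothetical:

block = reader.get_acquisition_data_subset(ymin=100, ymax=200)   # rows 100..199, 3D
row = reader.get_acquisition_data_slice(y_slice=150)             # one row, 2D (angle, horizontal)
whole = reader.get_acquisition_data_whole()                      # the full vertical range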
 -        
 -    
 -    def list_file_content(self):
 -        try:
 -            with NexusFile(self.filename,'r') as file:                
 -                file.visit(print)
 -        except:
 -            print("Error reading nexus file")
 -            raise  
 -    def get_acquisition_data_batch(self, bmin=None, bmax=None):
 -        if not h5pyAvailable:
 -            raise Exception("Error: h5py is not installed")
 -        if self.filename is None:
 -            return        
 -        try:
 -            
 -                
 -            with NexusFile(self.filename,'r') as file:    
 -                try:
 -                    dims = self.get_projection_dimensions()
 -                except KeyError:
 -                    dims = file[self.data_path].shape
 -                if bmin is None or bmax is None:
 -                    raise ValueError('get_acquisition_data_batch: please specify fastest index batch limits')
 -                    
 -                if bmin >= 0 and bmin < bmax and bmax <= dims[0]:
 -                    data = np.array(file[self.data_path][bmin:bmax])
 -                else:
 -                    raise ValueError('get_acquisition_data_batch: bmin {0}>0 bmax {1}<{2}'.format(bmin, bmax, dims[0]))
 -                    
 -        except:
 -            print("Error reading nexus file")
 -            raise
 -        
 -        
 -        try:
 -            angles = self.get_projection_angles()[bmin:bmax]
 -        except KeyError as ke:
 -            n = data.shape[0]
 -            angles = np.linspace(0, n, n+1, dtype=np.float32)[bmin:bmax]
 -        
 -        if bmax-bmin > 1:        
 -            
 -            geometry = AcquisitionGeometry('parallel', '3D', 
 -                                       angles,
 -                                       pixel_num_h          = dims[2],
 -                                       pixel_size_h         = 1 ,
 -                                       pixel_num_v          = bmax-bmin,
 -                                       pixel_size_v         = 1,
 -                                       dist_source_center   = None, 
 -                                       dist_center_detector = None, 
 -                                       channels             = 1)
 -            return AcquisitionData(data, False, geometry=geometry, 
 -                               dimension_labels=['angle','vertical','horizontal']) 
 -        elif bmax-bmin == 1:
 -            geometry = AcquisitionGeometry('parallel', '2D', 
 -                                       angles,
 -                                       pixel_num_h          = dims[2],
 -                                       pixel_size_h         = 1 ,
 -                                       dist_source_center   = None, 
 -                                       dist_center_detector = None, 
 -                                       channels             = 1)
 -            return AcquisitionData(data.squeeze(), False, geometry=geometry, 
 -                               dimension_labels=['angle','horizontal']) 
 -        
 -    
 -          
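get_acquisition_data_slice and get_acquisition_data_batch restrict different axes: the slice method fixes a single vertical detector row across all angles, while the batch method takes a contiguous range along the fastest (projection) index. A hedged sketch of the distinction, reusing the hypothetical reader instance from the sketch above and placeholder indices:

one_row  = reader.get_acquisition_data_slice(y_slice=5)         # 2D: one detector row, every angle
first_50 = reader.get_acquisition_data_batch(bmin=0, bmax=50)   # 3D: projections 0-49, every row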
 -class XTEKReader(object):
 -    '''
 -    Reader class for loading XTEK files
 -    '''
 -    
 -    def __init__(self, xtek_config_filename=None):
 -        '''
 -        Takes the xtek config filename, loads the dataset and reads the
 -        required geometry parameters
 -        '''       
 -        self.projections = None
 -        self.geometry = {}
 -        self.filename = xtek_config_filename
 -        self.load()
 -        
 -    def load(self):
 -        pixel_num_h = 0
 -        pixel_num_v = 0        
 -        xpixel_size = 0        
 -        ypixel_size = 0
 -        source_x = 0
 -        detector_x = 0
 -        with open(self.filename) as f:
 -            content = f.readlines()                    
 -        content = [x.strip() for x in content]
 -        for line in content:
 -            if line.startswith("SrcToObject"):
 -                source_x = float(line.split('=')[1])
 -            elif line.startswith("SrcToDetector"):
 -                detector_x = float(line.split('=')[1])
 -            elif line.startswith("DetectorPixelsY"):
 -                pixel_num_v = int(line.split('=')[1])
 -                #self.num_of_vertical_pixels = self.calc_v_alighment(self.num_of_vertical_pixels, self.pixels_per_voxel)
 -            elif line.startswith("DetectorPixelsX"):
 -                pixel_num_h = int(line.split('=')[1])
 -            elif line.startswith("DetectorPixelSizeX"):
 -                xpixel_size = float(line.split('=')[1])
 -            elif line.startswith("DetectorPixelSizeY"):
 -                ypixel_size = float(line.split('=')[1])   
 -            elif line.startswith("Projections"):
 -                self.num_projections = int(line.split('=')[1])
 -            elif line.startswith("InitialAngle"):
 -                self.initial_angle = float(line.split('=')[1])
 -            elif line.startswith("Name"):
 -                self.experiment_name = line.split('=')[1]
 -            elif line.startswith("Scattering"):
 -                self.scattering = float(line.split('=')[1])
 -            elif line.startswith("WhiteLevel"):
 -                self.white_level = float(line.split('=')[1])                
 -            elif line.startswith("MaskRadius"):
 -                self.mask_radius = float(line.split('=')[1])
 -                
 -        #Read Angles
 -        angles = self.read_angles()    
 -        self.geometry = AcquisitionGeometry('cone', '3D', angles, pixel_num_h, xpixel_size, pixel_num_v, ypixel_size, -1 * source_x, 
 -                 detector_x - source_x, 
 -                 )
 -        
 -    def read_angles(self):
 -        """
 -        Read the angles from the .ang or _ctdata.txt file and return them
 -        as a numpy array.
 -        """ 
 -        input_path = os.path.dirname(self.filename)
 -        angles_ctdata_file = os.path.join(input_path, '_ctdata.txt')
 -        angles_named_file = os.path.join(input_path, self.experiment_name+'.ang')
 -        angles = np.zeros(self.num_projections,dtype='f')
 -        #look for _ctdata.txt
 -        if os.path.exists(angles_ctdata_file):
 -            #read txt file with angles
 -            with open(angles_ctdata_file) as f:
 -                content = f.readlines()
 -            #skip first three lines
 -            #read the middle value of 3 values in each line as angles in degrees
 -            index = 0
 -            for line in content[3:]:
 -                angles[index] = float(line.split(' ')[1])
 -                index += 1
 -            angles = np.deg2rad(angles + self.initial_angle)
 -        elif os.path.exists(angles_named_file):
 -            #read the angles file which is text with first line as header
 -            with open(angles_named_file) as f:
 -                content = f.readlines()
 -            #skip first line
 -            index = 0
 -            for line in content[1:]:
 -                angles[index] = float(line.split(':')[1])
 -                index+=1
 -            angles = np.flipud(angles+self.initial_angle) #angles are in the reverse order
 -        else:
 -            raise RuntimeError("Can't find angles file")
 -        return angles  
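read_angles supports two on-disk layouts: a _ctdata.txt file whose useful lines carry three space-separated values with the angle in degrees in the middle, and an <experiment_name>.ang file with one header line followed by colon-separated index:angle pairs. A small stand-alone parsing sketch of the .ang layout, with made-up contents (not taken from any real scan):

import numpy as np

ang_lines = ['ProjectionAngles', '0:0.0', '1:0.5', '2:1.0']   # hypothetical file contents
angles = np.array([float(line.split(':')[1]) for line in ang_lines[1:]], dtype='float32')
angles = np.flipud(angles + 0.0)   # 0.0 stands in for initial_angle; the reader flips because the file stores angles in reverse order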
 -    
 -    def load_projection(self, dimensions=None):
 -        '''
 -        This method reads the projection images from the directory and returns a numpy array
 -        '''  
 -        if not pilAvailable:
 -            raise ImportError('Image library pillow is not installed')
 -        if dimensions is not None:
 -            raise NotImplementedError('Extracting a subset of the data is not implemented')
 -        input_path = os.path.dirname(self.filename)
 -        pixels = np.zeros((self.num_projections, self.geometry.pixel_num_h, self.geometry.pixel_num_v), dtype='float32')
 -        for i in range(1, self.num_projections+1):
 -            im = Image.open(os.path.join(input_path,self.experiment_name+"_%04d"%i+".tif"))
 -            pixels[i-1,:,:] = np.fliplr(np.transpose(np.array(im))) ##Not sure this is the correct way to populate the image
 -            
 -        #normalising the data
 -        #TODO: Move this to a processor
 -        pixels = pixels - (self.white_level*self.scattering)/100.0
 -        pixels[pixels < 0.0] = 0.000001 # clamp negative values to a small positive number, since the log of zero or a negative number is not defined
 -        return pixels
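The normalisation above subtracts a constant offset, white_level * scattering / 100, and clamps any negative pixel to a small positive value so that a later log transform stays finite. A worked one-liner with made-up numbers:

import numpy as np

pixels = np.array([10.0, 0.5, 3.0], dtype='float32')
offset = 200.0 * 1.0 / 100.0          # hypothetical white_level=200, scattering=1
pixels = pixels - offset              # -> [8.0, -1.5, 1.0]
pixels[pixels < 0.0] = 0.000001       # keeps -np.log(pixels) finite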
 -    
 -    def get_acquisition_data(self, dimensions=None):
 -        '''
 -        This method loads the acquisition data for the given dimensions and returns an AcquisitionData object
 -        '''
 -        data = self.load_projection(dimensions)
 -        return AcquisitionData(data, geometry=self.geometry)
 -    
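End to end, the reader parses the .xtekct configuration, builds a cone-beam AcquisitionGeometry, reads the TIFF projections and wraps them in an AcquisitionData. A hedged usage sketch (the path is a placeholder; pillow must be installed and the angles file must sit next to the config):

xtek = XTEKReader('/path/to/scan.xtekct')   # hypothetical file
ad = xtek.get_acquisition_data()
print(xtek.geometry.pixel_num_h, xtek.geometry.pixel_num_v, ad.shape)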
 diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/__init__.py b/Wrappers/Python/build/lib/ccpi/optimisation/__init__.py deleted file mode 100644 index cf2d93d..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# -*- coding: utf-8 -*- -#   This work is part of the Core Imaging Library developed by -#   Visual Analytics and Imaging System Group of the Science Technology -#   Facilities Council, STFC - -#   Copyright 2018 Edoardo Pasca - -#   Licensed under the Apache License, Version 2.0 (the "License"); -#   you may not use this file except in compliance with the License. -#   You may obtain a copy of the License at - -#       http://www.apache.org/licenses/LICENSE-2.0 - -#   Unless required by applicable law or agreed to in writing, software -#   distributed under the License is distributed on an "AS IS" BASIS, -#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#   See the License for the specific language governing permissions and -#   limitations under the License.
\ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/Algorithm.py b/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/Algorithm.py deleted file mode 100644 index a14378c..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/Algorithm.py +++ /dev/null @@ -1,161 +0,0 @@ -# -*- coding: utf-8 -*- -#   This work is part of the Core Imaging Library developed by -#   Visual Analytics and Imaging System Group of the Science Technology -#   Facilities Council, STFC - -#   Copyright 2019 Edoardo Pasca - -#   Licensed under the Apache License, Version 2.0 (the "License"); -#   you may not use this file except in compliance with the License. -#   You may obtain a copy of the License at - -#       http://www.apache.org/licenses/LICENSE-2.0 - -#   Unless required by applicable law or agreed to in writing, software -#   distributed under the License is distributed on an "AS IS" BASIS, -#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#   See the License for the specific language governing permissions and -#   limitations under the License. -import time -from numbers import Integral - -class Algorithm(object): -    '''Base class for iterative algorithms - -      provides the minimal infrastructure. -      Algorithms are iterables so can be easily run in a for loop. They will -      stop as soon as the stop cryterion is met. -      The user is required to implement the set_up, __init__, update and -      and update_objective methods -       -      A courtesy method run is available to run n iterations. The method accepts -      a callback function that receives the current iteration number and the actual objective -      value and can be used to trigger print to screens and other user interactions. The run -      method will stop when the stopping cryterion is met.  -   ''' - -    def __init__(self, **kwargs): -        '''Constructor -         -        Set the minimal number of parameters: -            iteration: current iteration number -            max_iteration: maximum number of iterations -            memopt: whether to use memory optimisation () -            timing: list to hold the times it took to run each iteration -            update_objectice_interval: the interval every which we would save the current -                                       objective. 1 means every iteration, 2 every 2 iteration -                                       and so forth. This is by default 1 and should be increased -                                       when evaluating the objective is computationally expensive. 
-        ''' -        self.iteration = 0 -        self.__max_iteration = kwargs.get('max_iteration', 0) -        self.__loss = [] -        self.memopt = False -        self.timing = [] -        self.update_objective_interval = kwargs.get('update_objective_interval', 1) -    def set_up(self, *args, **kwargs): -        '''Set up the algorithm''' -        raise NotImplementedError() -    def update(self): -        '''A single iteration of the algorithm''' -        raise NotImplementedError() -     -    def should_stop(self): -        '''default stopping cryterion: number of iterations -         -        The user can change this in concrete implementatition of iterative algorithms.''' -        return self.max_iteration_stop_cryterion() -     -    def max_iteration_stop_cryterion(self): -        '''default stop cryterion for iterative algorithm: max_iteration reached''' -        return self.iteration >= self.max_iteration -    def __iter__(self): -        '''Algorithm is an iterable''' -        return self -    def next(self): -        '''Algorithm is an iterable -         -        python2 backwards compatibility''' -        return self.__next__() -    def __next__(self): -        '''Algorithm is an iterable -         -        calling this method triggers update and update_objective -        ''' -        if self.should_stop(): -            raise StopIteration() -        else: -            time0 = time.time() -            self.update() -            self.timing.append( time.time() - time0 ) -            if self.iteration % self.update_objective_interval == 0: -                self.update_objective() -            self.iteration += 1 -         -    def get_output(self): -        '''Returns the solution found''' -        return self.x -     -    def get_last_loss(self): -        '''Returns the last stored value of the loss function -         -        if update_objective_interval is 1 it is the value of the objective at the current -        iteration. If update_objective_interval > 1 it is the last stored value.  
-        ''' -        return self.__loss[-1] -    def get_last_objective(self): -        '''alias to get_last_loss''' -        return self.get_last_loss() -    def update_objective(self): -        '''calculates the objective with the current solution''' -        raise NotImplementedError() -    @property -    def loss(self): -        '''returns the list of the values of the objective during the iteration -         -        The length of this list may be shorter than the number of iterations run when  -        the update_objective_interval > 1 -        ''' -        return self.__loss -    @property -    def objective(self): -        '''alias of loss''' -        return self.loss -    @property -    def max_iteration(self): -        '''gets the maximum number of iterations''' -        return self.__max_iteration -    @max_iteration.setter -    def max_iteration(self, value): -        '''sets the maximum number of iterations''' -        assert isinstance(value, int) -        self.__max_iteration = value -    @property -    def update_objective_interval(self): -        return self.__update_objective_interval -    @update_objective_interval.setter -    def update_objective_interval(self, value): -        if isinstance(value, Integral): -            if value >= 1: -                self.__update_objective_interval = value -            else: -                raise ValueError('Update objective interval must be an integer >= 1') -        else: -            raise ValueError('Update objective interval must be an integer >= 1') -    def run(self, iterations, verbose=True, callback=None): -        '''run n iterations and update the user with the callback if specified''' -        if self.should_stop(): -            print ("Stop cryterion has been reached.") -        i = 0 -         -        for _ in self: -            if (self.iteration -1) % self.update_objective_interval == 0:                 -                if verbose: -                    print ("Iteration {}/{}, = {}".format(self.iteration-1,  -                       self.max_iteration, self.get_last_objective()) ) -                if callback is not None: -                    callback(self.iteration -1, self.get_last_objective(), self.x) -            i += 1 -            if i == iterations: -                break -     diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/CGLS.py b/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/CGLS.py deleted file mode 100644 index 4d4843c..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/CGLS.py +++ /dev/null @@ -1,87 +0,0 @@ -# -*- coding: utf-8 -*- -#   This work is part of the Core Imaging Library developed by -#   Visual Analytics and Imaging System Group of the Science Technology -#   Facilities Council, STFC - -#   Copyright 2018 Edoardo Pasca - -#   Licensed under the Apache License, Version 2.0 (the "License"); -#   you may not use this file except in compliance with the License. -#   You may obtain a copy of the License at - -#       http://www.apache.org/licenses/LICENSE-2.0 - -#   Unless required by applicable law or agreed to in writing, software -#   distributed under the License is distributed on an "AS IS" BASIS, -#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#   See the License for the specific language governing permissions and -#   limitations under the License. 
-""" -Created on Thu Feb 21 11:11:23 2019 - -@author: ofn77899 -""" - -from ccpi.optimisation.algorithms import Algorithm - -class CGLS(Algorithm): - -    '''Conjugate Gradient Least Squares algorithm - -    Parameters: -      x_init: initial guess -      operator: operator for forward/backward projections -      data: data to operate on -    ''' -    def __init__(self, **kwargs): -        super(CGLS, self).__init__() -        self.x        = kwargs.get('x_init', None) -        self.operator = kwargs.get('operator', None) -        self.data     = kwargs.get('data', None) -        if self.x is not None and self.operator is not None and \ -           self.data is not None: -            print ("Calling from creator") -            self.set_up(x_init  =kwargs['x_init'], -                               operator=kwargs['operator'], -                               data    =kwargs['data']) - -    def set_up(self, x_init, operator , data ): - -        self.r = data.copy() -        self.x = x_init.copy() - -        self.operator = operator -        self.d = operator.adjoint(self.r) - -         -        self.normr2 = self.d.squared_norm() -        #if isinstance(self.normr2, Iterable): -        #    self.normr2 = sum(self.normr2) -        #self.normr2 = numpy.sqrt(self.normr2) -        #print ("set_up" , self.normr2) - -    def update(self): - -        Ad = self.operator.direct(self.d) -        #norm = (Ad*Ad).sum() -        #if isinstance(norm, Iterable): -        #    norm = sum(norm) -        norm = Ad.squared_norm() -         -        alpha = self.normr2/norm -        self.x += (self.d * alpha) -        self.r -= (Ad * alpha) -        s  = self.operator.adjoint(self.r) - -        normr2_new = s.squared_norm() -        #if isinstance(normr2_new, Iterable): -        #    normr2_new = sum(normr2_new) -        #normr2_new = numpy.sqrt(normr2_new) -        #print (normr2_new) -         -        beta = normr2_new/self.normr2 -        self.normr2 = normr2_new -        self.d = s + beta*self.d - -    def update_objective(self): -        self.loss.append(self.r.squared_norm())
\ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/FBPD.py b/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/FBPD.py deleted file mode 100644 index aa07359..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/FBPD.py +++ /dev/null @@ -1,86 +0,0 @@ -# -*- coding: utf-8 -*- -#   This work is part of the Core Imaging Library developed by -#   Visual Analytics and Imaging System Group of the Science Technology -#   Facilities Council, STFC - -#   Copyright 2019 Edoardo Pasca - -#   Licensed under the Apache License, Version 2.0 (the "License"); -#   you may not use this file except in compliance with the License. -#   You may obtain a copy of the License at - -#       http://www.apache.org/licenses/LICENSE-2.0 - -#   Unless required by applicable law or agreed to in writing, software -#   distributed under the License is distributed on an "AS IS" BASIS, -#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#   See the License for the specific language governing permissions and -#   limitations under the License. -""" -Created on Thu Feb 21 11:09:03 2019 - -@author: ofn77899 -""" - -from ccpi.optimisation.algorithms import Algorithm -from ccpi.optimisation.functions import ZeroFunction -         -class FBPD(Algorithm): -    '''FBPD Algorithm -     -    Parameters: -      x_init: initial guess -      f: constraint -      g: data fidelity -      h: regularizer -      opt: additional algorithm  -    ''' -    constraint = None -    data_fidelity = None -    regulariser = None -    def __init__(self, **kwargs): -        pass -    def set_up(self, x_init, operator=None, constraint=None, data_fidelity=None,\ -         regulariser=None, opt=None): - -        # default inputs -        if constraint    is None:  -            self.constraint    = ZeroFun() -        else: -            self.constraint = constraint -        if data_fidelity is None: -            data_fidelity = ZeroFun() -        else: -            self.data_fidelity = data_fidelity -        if regulariser   is None: -            self.regulariser   = ZeroFun() -        else: -            self.regulariser = regulariser -         -        # algorithmic parameters -         -         -        # step-sizes -        self.tau   = 2 / (self.data_fidelity.L + 2) -        self.sigma = (1/self.tau - self.data_fidelity.L/2) / self.regulariser.L -         -        self.inv_sigma = 1/self.sigma -     -        # initialization -        self.x = x_init -        self.y = operator.direct(self.x) -         -     -    def update(self): -     -        # primal forward-backward step -        x_old = self.x -        self.x = self.x - self.tau * ( self.data_fidelity.grad(self.x) + self.operator.adjoint(self.y) ) -        self.x = self.constraint.prox(self.x, self.tau); -     -        # dual forward-backward step -        self.y = self.y + self.sigma * self.operator.direct(2*self.x - x_old); -        self.y = self.y - self.sigma * self.regulariser.prox(self.inv_sigma*self.y, self.inv_sigma);    - -        # time and criterion -        self.loss = self.constraint(self.x) + self.data_fidelity(self.x) + self.regulariser(self.operator.direct(self.x)) diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/FISTA.py b/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/FISTA.py deleted file mode 100644 index ee51049..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/FISTA.py +++ /dev/null @@ -1,121 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created 
on Thu Feb 21 11:07:30 2019 - -@author: ofn77899 -""" - -from ccpi.optimisation.algorithms import Algorithm -from ccpi.optimisation.functions import ZeroFunction -import numpy - -class FISTA(Algorithm): -    '''Fast Iterative Shrinkage-Thresholding Algorithm -     -    Beck, A. and Teboulle, M., 2009. A fast iterative shrinkage-thresholding  -    algorithm for linear inverse problems.  -    SIAM journal on imaging sciences,2(1), pp.183-202. -     -    Parameters: -      x_init: initial guess -      f: data fidelity -      g: regularizer -      h: -      opt: additional algorithm  -    ''' - -    def __init__(self, **kwargs): -        '''initialisation can be done at creation time if all  -        proper variables are passed or later with set_up''' -        super(FISTA, self).__init__() -        self.f = None -        self.g = None -        self.invL = None -        self.t_old = 1 -        args = ['x_init', 'f', 'g', 'opt'] -        for k,v in kwargs.items(): -            if k in args: -                args.pop(args.index(k)) -        if len(args) == 0: -            return self.set_up(kwargs['x_init'], -                               f=kwargs['f'], -                               g=kwargs['g'], -                               opt=kwargs['opt']) -     -    def set_up(self, x_init, f=None, g=None, opt=None): -         -        # default inputs -        if f   is None:  -            self.f = ZeroFunction() -        else: -            self.f = f -        if g   is None: -            g = ZeroFunction() -            self.g = g -        else: -            self.g = g -         -        # algorithmic parameters -        if opt is None:  -            opt = {'tol': 1e-4, 'memopt':False} -         -        self.tol = opt['tol'] if 'tol' in opt.keys() else 1e-4 -        memopt = opt['memopt'] if 'memopt' in opt.keys() else False -        self.memopt = memopt -             -        # initialization -        if memopt: -            self.y = x_init.copy() -            self.x_old = x_init.copy() -            self.x = x_init.copy() -            self.u = x_init.copy()             -        else: -            self.x_old = x_init.copy() -            self.y = x_init.copy() -         -        #timing = numpy.zeros(max_iter) -        #criter = numpy.zeros(max_iter) -         -     -        self.invL = 1/f.L -         -        self.t_old = 1 -             -    def update(self): -    # algorithm loop -    #for it in range(0, max_iter): -     -        if self.memopt: -            # u = y - invL*f.grad(y) -            # store the result in x_old -            self.f.gradient(self.y, out=self.u) -            self.u.__imul__( -self.invL ) -            self.u.__iadd__( self.y ) -            # x = g.prox(u,invL) -            self.g.proximal(self.u, self.invL, out=self.x) -             -            self.t = 0.5*(1 + numpy.sqrt(1 + 4*(self.t_old**2))) -             -            # y = x + (t_old-1)/t*(x-x_old) -            self.x.subtract(self.x_old, out=self.y) -            self.y.__imul__ ((self.t_old-1)/self.t) -            self.y.__iadd__( self.x ) -             -            self.x_old.fill(self.x) -            self.t_old = self.t -             -             -        else: -            u = self.y - self.invL*self.f.gradient(self.y) -             -            self.x = self.g.proximal(u,self.invL) -             -            self.t = 0.5*(1 + numpy.sqrt(1 + 4*(self.t_old**2))) -             -            self.y = self.x + (self.t_old-1)/self.t*(self.x-self.x_old) -             -            self.x_old = self.x.copy() -          
  self.t_old = self.t -         -    def update_objective(self): -        self.loss.append( self.f(self.x) + self.g(self.x) )
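FISTA alternates a gradient step on the smooth term f with the proximal operator of g, and adds the Nesterov momentum t = (1 + sqrt(1 + 4*t_old**2))/2, y = x + (t_old - 1)/t * (x - x_old), exactly as in the update above. A compact NumPy sketch for the l1-regularised least-squares case, where the proximal map is soft-thresholding (an illustration, not the Function API used in this file):

import numpy as np

A = np.array([[1.0, 0.2], [0.2, 1.0], [0.5, 0.5]])
b = np.array([1.0, 0.0, 0.5])
lam = 0.1
L = np.linalg.norm(A, 2) ** 2                              # Lipschitz constant of grad 0.5*||Ax - b||^2
x_old = np.zeros(2); y = x_old.copy(); t_old = 1.0
for _ in range(50):
    u = y - (1.0 / L) * (A.T @ (A @ y - b))                # gradient step on f
    x = np.sign(u) * np.maximum(np.abs(u) - lam / L, 0.0)  # prox of (lam/L)*||.||_1
    t = 0.5 * (1 + np.sqrt(1 + 4 * t_old ** 2))            # Nesterov momentum
    y = x + ((t_old - 1) / t) * (x - x_old)
    x_old, t_old = x, t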
\ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/GradientDescent.py b/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/GradientDescent.py deleted file mode 100644 index 14763c5..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/GradientDescent.py +++ /dev/null @@ -1,76 +0,0 @@ -# -*- coding: utf-8 -*- -#   This work is part of the Core Imaging Library developed by -#   Visual Analytics and Imaging System Group of the Science Technology -#   Facilities Council, STFC - -#   Copyright 2019 Edoardo Pasca - -#   Licensed under the Apache License, Version 2.0 (the "License"); -#   you may not use this file except in compliance with the License. -#   You may obtain a copy of the License at - -#       http://www.apache.org/licenses/LICENSE-2.0 - -#   Unless required by applicable law or agreed to in writing, software -#   distributed under the License is distributed on an "AS IS" BASIS, -#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#   See the License for the specific language governing permissions and -#   limitations under the License. -""" -Created on Thu Feb 21 11:05:09 2019 - -@author: ofn77899 -""" -from ccpi.optimisation.algorithms import Algorithm - -class GradientDescent(Algorithm): -    '''Implementation of Gradient Descent algorithm -    ''' - -    def __init__(self, **kwargs): -        '''initialisation can be done at creation time if all  -        proper variables are passed or later with set_up''' -        super(GradientDescent, self).__init__() -        self.x = None -        self.rate = 0 -        self.objective_function = None -        self.regulariser = None -        args = ['x_init', 'objective_function', 'rate'] -        for k,v in kwargs.items(): -            if k in args: -                args.pop(args.index(k)) -        if len(args) == 0: -            return self.set_up(x_init=kwargs['x_init'], -                               objective_function=kwargs['objective_function'], -                               rate=kwargs['rate']) -     -    def should_stop(self): -        '''stopping cryterion, currently only based on number of iterations''' -        return self.iteration >= self.max_iteration -     -    def set_up(self, x_init, objective_function, rate): -        '''initialisation of the algorithm''' -        self.x = x_init.copy() -        self.objective_function = objective_function -        self.rate = rate -        self.loss.append(objective_function(x_init)) -        self.iteration = 0 -        try: -            self.memopt = self.objective_function.memopt -        except AttributeError as ae: -            self.memopt = False -        if self.memopt: -            self.x_update = x_init.copy() - -    def update(self): -        '''Single iteration''' -        if self.memopt: -            self.objective_function.gradient(self.x, out=self.x_update) -            self.x_update *= -self.rate -            self.x += self.x_update -        else: -            self.x += -self.rate * self.objective_function.gradient(self.x) - -    def update_objective(self): -        self.loss.append(self.objective_function(self.x)) -         diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/PDHG.py b/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/PDHG.py deleted file mode 100644 index 39b092b..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/PDHG.py +++ /dev/null @@ -1,178 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Mon Feb 
 4 16:18:06 2019 - -@author: evangelos -""" -from ccpi.optimisation.algorithms import Algorithm -from ccpi.framework import ImageData, DataContainer -import numpy as np -import numpy -import time -from ccpi.optimisation.operators import BlockOperator -from ccpi.framework import BlockDataContainer -from ccpi.optimisation.functions import FunctionOperatorComposition - -class PDHG(Algorithm): -    '''Primal Dual Hybrid Gradient''' - -    def __init__(self, **kwargs): -        super(PDHG, self).__init__(max_iteration=kwargs.get('max_iteration',0)) -        self.f        = kwargs.get('f', None) -        self.operator = kwargs.get('operator', None) -        self.g        = kwargs.get('g', None) -        self.tau      = kwargs.get('tau', None) -        self.sigma    = kwargs.get('sigma', None) -         -        if self.f is not None and self.operator is not None and \ -           self.g is not None: -            print ("Calling from creator") -            self.set_up(self.f, -                        self.g, -                        self.operator, -                        self.tau,  -                        self.sigma) - -    def set_up(self, f, g, operator, tau = None, sigma = None, opt = None, **kwargs): -        # algorithmic parameters -        self.operator = operator -        self.f = f -        self.g = g -        self.tau = tau -        self.sigma = sigma -        self.opt = opt -        if sigma is None and tau is None: -            raise ValueError('Need sigma*tau||K||^2<1')  - -        self.x_old = self.operator.domain_geometry().allocate() -        self.x_tmp = self.x_old.copy() -        self.x = self.x_old.copy() -     -        self.y_old = self.operator.range_geometry().allocate() -        self.y_tmp = self.y_old.copy() -        self.y = self.y_old.copy() - -        self.xbar = self.x_old.copy() - -        # relaxation parameter -        self.theta = 1 - -    def update(self): -         -        # Gradient descent, Dual problem solution -        self.operator.direct(self.xbar, out=self.y_tmp) -        self.y_tmp *= self.sigma -        self.y_tmp += self.y_old - -        #self.y = self.f.proximal_conjugate(self.y_old, self.sigma) -        self.f.proximal_conjugate(self.y_tmp, self.sigma, out=self.y) -         -        # Gradient ascent, Primal problem solution -        self.operator.adjoint(self.y, out=self.x_tmp) -        self.x_tmp *= -1*self.tau -        self.x_tmp += self.x_old - -             -        self.g.proximal(self.x_tmp, self.tau, out=self.x) - -        #Update -        self.x.subtract(self.x_old, out=self.xbar) -        self.xbar *= self.theta -        self.xbar += self.x - -        self.x_old.fill(self.x) -        self.y_old.fill(self.y) - -    def update_objective(self): - -        p1 = self.f(self.operator.direct(self.x)) + self.g(self.x) -        d1 = -(self.f.convex_conjugate(self.y) + self.g.convex_conjugate(-1*self.operator.adjoint(self.y))) - -        self.loss.append([p1,d1,p1-d1]) - - - -def PDHG_old(f, g, operator, tau = None, sigma = None, opt = None, **kwargs): -         -    # algorithmic parameters -    if opt is None:  -        opt = {'tol': 1e-6, 'niter': 500, 'show_iter': 100, \ -               'memopt': False}  -         -    if sigma is None and tau is None: -        raise ValueError('Need sigma*tau||K||^2<1')  -                 -    niter = opt['niter'] if 'niter' in opt.keys() else 1000 -    tol = opt['tol'] if 'tol' in opt.keys() else 1e-4 -    memopt = opt['memopt'] if 'memopt' in opt.keys() else False   -    show_iter = opt['show_iter'] if 
'show_iter' in opt.keys() else False  -    stop_crit = opt['stop_crit'] if 'stop_crit' in opt.keys() else False  - -    x_old = operator.domain_geometry().allocate() -    y_old = operator.range_geometry().allocate()        -             -    xbar = x_old.copy() -    x_tmp = x_old.copy() -    x = x_old.copy() -     -    y_tmp = y_old.copy() -    y = y_tmp.copy() - -         -    # relaxation parameter -    theta = 1 -     -    t = time.time() -     -    primal = [] -    dual = [] -    pdgap = [] -     -     -    for i in range(niter): -         - -         -        if memopt: -            operator.direct(xbar, out = y_tmp)              -            y_tmp *= sigma -            y_tmp += y_old  -        else: -            y_tmp = y_old +  sigma * operator.direct(xbar)                         -         -        f.proximal_conjugate(y_tmp, sigma, out=y) -         -        if memopt: -            operator.adjoint(y, out = x_tmp)    -            x_tmp *= -1*tau -            x_tmp += x_old -        else: -            x_tmp = x_old - tau*operator.adjoint(y) -             -        g.proximal(x_tmp, tau, out=x) -              -        x.subtract(x_old, out=xbar) -        xbar *= theta -        xbar += x -                                               -        x_old.fill(x) -        y_old.fill(y) -                     -        if i%10==0: -             -            p1 = f(operator.direct(x)) + g(x) -            d1 = - ( f.convex_conjugate(y) + g.convex_conjugate(-1*operator.adjoint(y)) )             -            primal.append(p1) -            dual.append(d1) -            pdgap.append(p1-d1)  -            print(p1, d1, p1-d1) -             -         -                          -    t_end = time.time()         -         -    return x, t_end - t, primal, dual, pdgap - - - diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/SIRT.py b/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/SIRT.py deleted file mode 100644 index 30584d4..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/SIRT.py +++ /dev/null @@ -1,74 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -#   This work is part of the Core Imaging Library developed by -#   Visual Analytics and Imaging System Group of the Science Technology -#   Facilities Council, STFC - -#   Copyright 2018 Edoardo Pasca - -#   Licensed under the Apache License, Version 2.0 (the "License"); -#   you may not use this file except in compliance with the License. -#   You may obtain a copy of the License at - -#       http://www.apache.org/licenses/LICENSE-2.0 - -#   Unless required by applicable law or agreed to in writing, software -#   distributed under the License is distributed on an "AS IS" BASIS, -#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#   See the License for the specific language governing permissions and -#   limitations under the License. - -from ccpi.optimisation.algorithms import Algorithm - -class SIRT(Algorithm): - -    '''Simultaneous Iterative Reconstruction Technique - -    Parameters: -      x_init: initial guess -      operator: operator for forward/backward projections -      data: data to operate on -      constraint: Function with prox-method, for example IndicatorBox to  -                  enforce box constraints, default is None). 
-    ''' -    def __init__(self, **kwargs): -        super(SIRT, self).__init__() -        self.x          = kwargs.get('x_init', None) -        self.operator   = kwargs.get('operator', None) -        self.data       = kwargs.get('data', None) -        self.constraint = kwargs.get('constraint', None) -        if self.x is not None and self.operator is not None and \ -           self.data is not None: -            print ("Calling from creator") -            self.set_up(x_init=kwargs['x_init'], -                        operator=kwargs['operator'], -                        data=kwargs['data'], -                        constraint=kwargs['constraint']) - -    def set_up(self, x_init, operator , data, constraint=None ): -         -        self.x = x_init.copy() -        self.operator = operator -        self.data = data -        self.constraint = constraint -         -        self.r = data.copy() -         -        self.relax_par = 1.0 -         -        # Set up scaling matrices D and M. -        self.M = 1/self.operator.direct(self.operator.domain_geometry().allocate(value=1.0))         -        self.D = 1/self.operator.adjoint(self.operator.range_geometry().allocate(value=1.0)) - - -    def update(self): -         -        self.r = self.data - self.operator.direct(self.x) -         -        self.x += self.relax_par * (self.D*self.operator.adjoint(self.M*self.r)) -         -        if self.constraint != None: -            self.x = self.constraint.prox(self.x,None) - -    def update_objective(self): -        self.loss.append(self.r.squared_norm())
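The SIRT step above is x += relax_par * D * A^T(M * (b - A x)), where M and D hold the reciprocals of the row and column sums of the system matrix, optionally followed by a projection onto the constraint set. A dense NumPy sketch of the same iteration (placeholder data, not the framework's operator interface):

import numpy as np

A = np.array([[1.0, 1.0], [1.0, 2.0], [2.0, 1.0]])
b = np.array([3.0, 5.0, 4.0])                      # consistent with x = [1, 2]
M = 1.0 / (A @ np.ones(A.shape[1]))                # reciprocal row sums
D = 1.0 / (A.T @ np.ones(A.shape[0]))              # reciprocal column sums
x = np.zeros(2)
relax_par = 1.0
for _ in range(100):
    r = b - A @ x
    x = x + relax_par * (D * (A.T @ (M * r)))
    x = np.maximum(x, 0.0)                         # stand-in for constraint.prox, here a non-negativity box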
\ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/__init__.py b/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/__init__.py deleted file mode 100644 index 2dbacfc..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/algorithms/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -#   This work is part of the Core Imaging Library developed by -#   Visual Analytics and Imaging System Group of the Science Technology -#   Facilities Council, STFC - -#   Copyright 2019 Edoardo Pasca - -#   Licensed under the Apache License, Version 2.0 (the "License"); -#   you may not use this file except in compliance with the License. -#   You may obtain a copy of the License at - -#       http://www.apache.org/licenses/LICENSE-2.0 - -#   Unless required by applicable law or agreed to in writing, software -#   distributed under the License is distributed on an "AS IS" BASIS, -#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#   See the License for the specific language governing permissions and -#   limitations under the License. -""" -Created on Thu Feb 21 11:03:13 2019 - -@author: ofn77899 -""" - -from .Algorithm import Algorithm -from .CGLS import CGLS -from .SIRT import SIRT -from .GradientDescent import GradientDescent -from .FISTA import FISTA -from .FBPD import FBPD -from .PDHG import PDHG -from .PDHG import PDHG_old - diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/algs.py b/Wrappers/Python/build/lib/ccpi/optimisation/algs.py deleted file mode 100644 index f5ba85e..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/algs.py +++ /dev/null @@ -1,307 +0,0 @@ -# -*- coding: utf-8 -*- -#   This work is part of the Core Imaging Library developed by -#   Visual Analytics and Imaging System Group of the Science Technology -#   Facilities Council, STFC - -#   Copyright 2018 Jakob Jorgensen, Daniil Kazantsev and Edoardo Pasca - -#   Licensed under the Apache License, Version 2.0 (the "License"); -#   you may not use this file except in compliance with the License. -#   You may obtain a copy of the License at - -#       http://www.apache.org/licenses/LICENSE-2.0 - -#   Unless required by applicable law or agreed to in writing, software -#   distributed under the License is distributed on an "AS IS" BASIS, -#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#   See the License for the specific language governing permissions and -#   limitations under the License. - -import numpy -import time - - - - -def FISTA(x_init, f=None, g=None, opt=None): -    '''Fast Iterative Shrinkage-Thresholding Algorithm -     -    Beck, A. and Teboulle, M., 2009. A fast iterative shrinkage-thresholding  -    algorithm for linear inverse problems.  -    SIAM journal on imaging sciences,2(1), pp.183-202. 
-     -    Parameters: -      x_init: initial guess -      f: data fidelity -      g: regularizer -      h: -      opt: additional algorithm  -    ''' -    # default inputs -    if f   is None: f = ZeroFun() -    if g   is None: g = ZeroFun() -     -    # algorithmic parameters -    if opt is None:  -        opt = {'tol': 1e-4, 'iter': 1000, 'memopt':False} -     -    max_iter = opt['iter'] if 'iter' in opt.keys() else 1000 -    tol = opt['tol'] if 'tol' in opt.keys() else 1e-4 -    memopt = opt['memopt'] if 'memopt' in opt.keys() else False -         -         -    # initialization -    if memopt: -        y = x_init.clone() -        x_old = x_init.clone() -        x = x_init.clone() -        u = x_init.clone() -    else: -        x_old = x_init -        y = x_init; -     -    timing = numpy.zeros(max_iter) -    criter = numpy.zeros(max_iter) -     -    invL = 1/f.L -     -    t_old = 1 -     -#    c = f(x_init) + g(x_init) - -    # algorithm loop -    for it in range(0, max_iter): -     -        time0 = time.time() -        if memopt: -            # u = y - invL*f.grad(y) -            # store the result in x_old -            f.gradient(y, out=u) -            u.__imul__( -invL ) -            u.__iadd__( y ) -            # x = g.prox(u,invL) -            g.proximal(u, invL, out=x) -             -            t = 0.5*(1 + numpy.sqrt(1 + 4*(t_old**2))) -             -            # y = x + (t_old-1)/t*(x-x_old) -            x.subtract(x_old, out=y) -            y.__imul__ ((t_old-1)/t) -            y.__iadd__( x ) -             -            x_old.fill(x) -            t_old = t -             -             -        else: -            u = y - invL*f.gradient(y) -             -            x = g.proximal(u,invL) -             -            t = 0.5*(1 + numpy.sqrt(1 + 4*(t_old**2))) -             -            y = x + (t_old-1)/t*(x-x_old) -             -            x_old = x.copy() -            t_old = t -         -        # time and criterion -#        timing[it] = time.time() - time0 -#        criter[it] = f(x) + g(x); -         -        # stopping rule -        #if np.linalg.norm(x - x_old) < tol * np.linalg.norm(x_old) and it > 10: -        #   break -     -        #print(it, 'out of', 10, 'iterations', end='\r'); - -    #criter = criter[0:it+1]; -#    timing = numpy.cumsum(timing[0:it+1]); -     -    return x #, it, timing, criter - -def FBPD(x_init, operator=None, constraint=None, data_fidelity=None,\ -         regulariser=None, opt=None): -    '''FBPD Algorithm -     -    Parameters: -      x_init: initial guess -      f: constraint -      g: data fidelity -      h: regularizer -      opt: additional algorithm  -    ''' -    # default inputs -    if constraint    is None: constraint    = ZeroFun() -    if data_fidelity is None: data_fidelity = ZeroFun() -    if regulariser   is None: regulariser   = ZeroFun() -     -    # algorithmic parameters -    if opt is None:  -        opt = {'tol': 1e-4, 'iter': 1000} -    else: -        try: -            max_iter = opt['iter'] -        except KeyError as ke: -            opt[ke] = 1000 -        try: -            opt['tol'] = 1000 -        except KeyError as ke: -            opt[ke] = 1e-4 -    tol = opt['tol'] -    max_iter = opt['iter'] -    memopt = opt['memopts'] if 'memopts' in opt.keys() else False -     -    # step-sizes -    tau   = 2 / (data_fidelity.L + 2) -    sigma = (1/tau - data_fidelity.L/2) / regulariser.L -    inv_sigma = 1/sigma - -    # initialization -    x = x_init -    y = operator.direct(x); -     -    timing = 
numpy.zeros(max_iter) -    criter = numpy.zeros(max_iter) - -     -     -         -    # algorithm loop -    for it in range(0, max_iter): -     -        t = time.time() -     -        # primal forward-backward step -        x_old = x; -        x = x - tau * ( data_fidelity.grad(x) + operator.adjoint(y) ); -        x = constraint.prox(x, tau); -     -        # dual forward-backward step -        y = y + sigma * operator.direct(2*x - x_old); -        y = y - sigma * regulariser.prox(inv_sigma*y, inv_sigma);    - -        # time and criterion -        timing[it] = time.time() - t -        criter[it] = constraint(x) + data_fidelity(x) + regulariser(operator.direct(x)) -            -        # stopping rule -        #if np.linalg.norm(x - x_old) < tol * np.linalg.norm(x_old) and it > 10: -        #   break - -    criter = criter[0:it+1] -    timing = numpy.cumsum(timing[0:it+1]) -     -    return x, it, timing, criter - -def CGLS(x_init, operator , data , opt=None): -    '''Conjugate Gradient Least Squares algorithm -     -    Parameters: -      x_init: initial guess -      operator: operator for forward/backward projections -      data: data to operate on -      opt: additional algorithm  -    ''' -     -    if opt is None:  -        opt = {'tol': 1e-4, 'iter': 1000} -    else: -        try: -            max_iter = opt['iter'] -        except KeyError as ke: -            opt[ke] = 1000 -        try: -            opt['tol'] = 1000 -        except KeyError as ke: -            opt[ke] = 1e-4 -    tol = opt['tol'] -    max_iter = opt['iter'] -     -    r = data.copy() -    x = x_init.copy() -     -    d = operator.adjoint(r) -     -    normr2 = (d**2).sum() -     -    timing = numpy.zeros(max_iter) -    criter = numpy.zeros(max_iter) - -    # algorithm loop -    for it in range(0, max_iter): -     -        t = time.time() -         -        Ad = operator.direct(d) -        alpha = normr2/( (Ad**2).sum() ) -        x  = x + alpha*d -        r  = r - alpha*Ad -        s  = operator.adjoint(r) -         -        normr2_new = (s**2).sum() -        beta = normr2_new/normr2 -        normr2 = normr2_new -        d = s + beta*d -         -        # time and criterion -        timing[it] = time.time() - t -        criter[it] = (r**2).sum() -     -    return x, it, timing, criter - -def SIRT(x_init, operator , data , opt=None, constraint=None): -    '''Simultaneous Iterative Reconstruction Technique -     -    Parameters: -      x_init: initial guess -      operator: operator for forward/backward projections -      data: data to operate on -      opt: additional algorithm  -      constraint: func of Indicator type specifying convex constraint. -    ''' -     -    if opt is None:  -        opt = {'tol': 1e-4, 'iter': 1000} -    else: -        try: -            max_iter = opt['iter'] -        except KeyError as ke: -            opt[ke] = 1000 -        try: -            opt['tol'] = 1000 -        except KeyError as ke: -            opt[ke] = 1e-4 -    tol = opt['tol'] -    max_iter = opt['iter'] -     -    x = x_init.clone() -     -    timing = numpy.zeros(max_iter) -    criter = numpy.zeros(max_iter) -     -    # Relaxation parameter must be strictly between 0 and 2. For now fix at 1.0 -    relax_par = 1.0 -     -    # Set up scaling matrices D and M. 
-    M = 1/operator.direct(operator.domain_geometry().allocate(value=1.0)) -    D = 1/operator.adjoint(operator.range_geometry().allocate(value=1.0)) -     -    # algorithm loop -    for it in range(0, max_iter): -        t = time.time() -        r = data - operator.direct(x) -         -        x = x + relax_par * (D*operator.adjoint(M*r)) - -        if constraint != None: -            x = constraint.prox(x,None) -         -        timing[it] = time.time() - t -        if it > 0: -            criter[it-1] = (r**2).sum() -     -    r = data - operator.direct(x) -    criter[it] = (r**2).sum() -    return x, it, timing,  criter -     diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/funcs.py b/Wrappers/Python/build/lib/ccpi/optimisation/funcs.py deleted file mode 100644 index b2b9791..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/funcs.py +++ /dev/null @@ -1,265 +0,0 @@ -# -*- coding: utf-8 -*- -#   This work is part of the Core Imaging Library developed by -#   Visual Analytics and Imaging System Group of the Science Technology -#   Facilities Council, STFC - -#   Copyright 2018 Jakob Jorgensen, Daniil Kazantsev and Edoardo Pasca - -#   Licensed under the Apache License, Version 2.0 (the "License"); -#   you may not use this file except in compliance with the License. -#   You may obtain a copy of the License at - -#       http://www.apache.org/licenses/LICENSE-2.0 - -#   Unless required by applicable law or agreed to in writing, software -#   distributed under the License is distributed on an "AS IS" BASIS, -#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#   See the License for the specific language governing permissions and -#   limitations under the License. - -import numpy -from ccpi.framework import DataContainer -import warnings -from ccpi.optimisation.functions import Function -def isSizeCorrect(data1 ,data2): -    if issubclass(type(data1), DataContainer) and \ -       issubclass(type(data2), DataContainer): -        # check dimensionality -        if data1.check_dimensions(data2): -            return True -    elif issubclass(type(data1) , numpy.ndarray) and \ -         issubclass(type(data2) , numpy.ndarray): -        return data1.shape == data2.shape -    else: -        raise ValueError("{0}: getting two incompatible types: {1} {2}"\ -                         .format('Function', type(data1), type(data2))) -    return False -class Norm2(Function): -     -    def __init__(self,  -                 gamma=1.0,  -                 direction=None): -        super(Norm2, self).__init__() -        self.gamma     = gamma; -        self.direction = direction;  -     -    def __call__(self, x, out=None): -         -        if out is None: -            xx = numpy.sqrt(numpy.sum(numpy.square(x.as_array()), self.direction, -                                  keepdims=True)) -        else: -            if isSizeCorrect(out, x): -                # check dimensionality -                if issubclass(type(out), DataContainer): -                    arr = out.as_array() -                    numpy.square(x.as_array(), out=arr) -                    xx = numpy.sqrt(numpy.sum(arr, self.direction, keepdims=True)) -                         -                elif issubclass(type(out) , numpy.ndarray): -                    numpy.square(x.as_array(), out=out) -                    xx = numpy.sqrt(numpy.sum(out, self.direction, keepdims=True)) -            else: -                raise ValueError ('Wrong size: x{0} out{1}'.format(x.shape,out.shape) ) -         -      
  p  = numpy.sum(self.gamma*xx)         -         -        return p -     -    def prox(self, x, tau): - -        xx = numpy.sqrt(numpy.sum( numpy.square(x.as_array()), self.direction,  -                                  keepdims=True )) -        xx = numpy.maximum(0, 1 - tau*self.gamma / xx) -        p  = x.as_array() * xx -         -        return type(x)(p,geometry=x.geometry) -    def proximal(self, x, tau, out=None): -        if out is None: -            return self.prox(x,tau) -        else: -            if isSizeCorrect(out, x): -                # check dimensionality -                if issubclass(type(out), DataContainer): -                    numpy.square(x.as_array(), out = out.as_array()) -                    xx = numpy.sqrt(numpy.sum( out.as_array() , self.direction,  -                                  keepdims=True )) -                    xx = numpy.maximum(0, 1 - tau*self.gamma / xx) -                    x.multiply(xx, out= out.as_array()) -                     -                         -                elif issubclass(type(out) , numpy.ndarray): -                    numpy.square(x.as_array(), out=out) -                    xx = numpy.sqrt(numpy.sum(out, self.direction, keepdims=True)) -                     -                    xx = numpy.maximum(0, 1 - tau*self.gamma / xx) -                    x.multiply(xx, out= out) -            else: -                raise ValueError ('Wrong size: x{0} out{1}'.format(x.shape,out.shape) ) -         - -         - -# Define a class for squared 2-norm -class Norm2sq(Function): -    ''' -    f(x) = c*||A*x-b||_2^2 -     -    which has  -     -    grad[f](x) = 2*c*A^T*(A*x-b) -     -    and Lipschitz constant -     -    L = 2*c*||A||_2^2 = 2*s1(A)^2 -     -    where s1(A) is the largest singular value of A. -     -    ''' -     -    def __init__(self,A,b,c=1.0,memopt=False): -        super(Norm2sq, self).__init__() -     -        self.A = A  # Should be an operator, default identity -        self.b = b  # Default zero DataSet? -        self.c = c  # Default 1. 
-        if memopt: -            try: -                self.range_tmp = A.range_geometry().allocate() -                self.domain_tmp = A.domain_geometry().allocate() -                self.memopt = True -            except NameError as ne: -                warnings.warn(str(ne)) -                self.memopt = False -            except NotImplementedError as nie: -                print (nie) -                warnings.warn(str(nie)) -                self.memopt = False -        else: -            self.memopt = False -         -        # Compute the Lipschitz parameter from the operator if possible -        # Leave it initialised to None otherwise -        try: -            self.L = 2.0*self.c*(self.A.norm()**2) -        except AttributeError as ae: -            pass -        except NotImplementedError as noe: -            pass -         -    #def grad(self,x): -    #    return self.gradient(x, out=None) - -    def __call__(self,x): -        #return self.c* np.sum(np.square((self.A.direct(x) - self.b).ravel())) -        #if out is None: -        #    return self.c*( ( (self.A.direct(x)-self.b)**2).sum() ) -        #else: -        y = self.A.direct(x) -        y.__isub__(self.b) -        #y.__imul__(y) -        #return y.sum() * self.c -        try: -            return y.squared_norm() * self.c -        except AttributeError as ae: -            # added for compatibility with SIRF  -            return (y.norm()**2) * self.c -     -    def gradient(self, x, out = None): -        if self.memopt: -            #return 2.0*self.c*self.A.adjoint( self.A.direct(x) - self.b ) -             -            self.A.direct(x, out=self.range_tmp) -            self.range_tmp -= self.b  -            self.A.adjoint(self.range_tmp, out=out) -            #self.direct_placehold.multiply(2.0*self.c, out=out) -            out *= (self.c * 2.0) -        else: -            return (2.0*self.c)*self.A.adjoint( self.A.direct(x) - self.b ) - - - -# Box constraints indicator function. Calling returns 0 if argument is within  -# the box. The prox operator is projection onto the box. Only implements one  -# scalar lower and one upper as constraint on all elements. Should generalise  -# to vectors to allow different constraints one elements. 
-class IndicatorBox(Function): -     -    def __init__(self,lower=-numpy.inf,upper=numpy.inf): -        # Do nothing -        super(IndicatorBox, self).__init__() -        self.lower = lower -        self.upper = upper -         -     -    def __call__(self,x): -         -        if (numpy.all(x.array>=self.lower) and  -            numpy.all(x.array <= self.upper) ): -            val = 0 -        else: -            val = numpy.inf -        return val -     -    def prox(self,x,tau=None): -        return  (x.maximum(self.lower)).minimum(self.upper) -     -    def proximal(self, x, tau, out=None): -        if out is None: -            return self.prox(x, tau) -        else: -            if not x.shape == out.shape: -                raise ValueError('Norm1 Incompatible size:', -                                 x.shape, out.shape) -            #(x.abs() - tau*self.gamma).maximum(0) * x.sign() -            x.abs(out = out) -            out.__isub__(tau*self.gamma) -            out.maximum(0, out=out) -            if self.sign_x is None or not x.shape == self.sign_x.shape: -                self.sign_x = x.sign() -            else: -                x.sign(out=self.sign_x) -                 -            out.__imul__( self.sign_x ) - -# A more interesting example, least squares plus 1-norm minimization. -# Define class to represent 1-norm including prox function -class Norm1(Function): -     -    def __init__(self,gamma): -        super(Norm1, self).__init__() -        self.gamma = gamma -        self.L = 1 -        self.sign_x = None -     -    def __call__(self,x,out=None): -        if out is None: -            return self.gamma*(x.abs().sum()) -        else: -            if not x.shape == out.shape: -                raise ValueError('Norm1 Incompatible size:', -                                 x.shape, out.shape) -            x.abs(out=out) -            return out.sum() * self.gamma -     -    def prox(self,x,tau): -        return (x.abs() - tau*self.gamma).maximum(0) * x.sign() -     -    def proximal(self, x, tau, out=None): -        if out is None: -            return self.prox(x, tau) -        else: -            if isSizeCorrect(x,out): -                # check dimensionality -                if issubclass(type(out), DataContainer): -                    v = (x.abs() - tau*self.gamma).maximum(0) -                    x.sign(out=out) -                    out *= v -                    #out.fill(self.prox(x,tau))     -                elif issubclass(type(out) , numpy.ndarray): -                    v = (x.abs() - tau*self.gamma).maximum(0) -                    out[:] = x.sign() -                    out *= v -                    #out[:] = self.prox(x,tau) -            else: -                raise ValueError ('Wrong size: x{0} out{1}'.format(x.shape,out.shape) ) diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/BlockFunction.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/BlockFunction.py deleted file mode 100644 index 0836324..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/BlockFunction.py +++ /dev/null @@ -1,221 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Fri Mar  8 10:01:31 2019 - -@author: evangelos -""" - -from ccpi.optimisation.functions import Function -from ccpi.framework import BlockDataContainer -from numbers import Number - -class BlockFunction(Function): -     -    '''BlockFunction acts as a separable sum function, i.e., -     -      f = [f_1,...,f_n] -       -      f([x_1,...,x_n]) = f_1(x_1) +  .... 
+ f_n(x_n) - -    ''' -    def __init__(self, *functions): -         -        self.functions = functions       -        self.length = len(self.functions) -         -        super(BlockFunction, self).__init__() -         -    def __call__(self, x): -         -        '''Evaluates the BlockFunction at a BlockDataContainer x -         -        :param: x (BlockDataContainer): must have as many rows as self.length - -        returns sum(f_i(x_i)) -        ''' -         -        if self.length != x.shape[0]: -            raise ValueError('BlockFunction and BlockDataContainer have incompatible size') -        t = 0                 -        for i in range(x.shape[0]): -            t += self.functions[i](x.get_item(i)) -        return t -     -    def convex_conjugate(self, x): -         -        ''' Evaluate convex conjugate of BlockFunction at x -         -        returns sum(f_i^{*}(x_i)) -         -        '''        -        t = 0                 -        for i in range(x.shape[0]): -            t += self.functions[i].convex_conjugate(x.get_item(i)) -        return t   -     -     -    def proximal_conjugate(self, x, tau, out = None): -         -        ''' Evaluate Proximal Operator of tau * f(\cdot) at x -         -        prox_{tau*f}(x) = sum_{i} prox_{tau*f_{i}}(x_{i})  -         -         -        ''' - -        if out is not None: -            if isinstance(tau, Number): -                for i in range(self.length): -                    self.functions[i].proximal_conjugate(x.get_item(i), tau, out=out.get_item(i)) -            else: -                for i in range(self.length): -                    self.functions[i].proximal_conjugate(x.get_item(i), tau.get_item(i),out=out.get_item(i)) -             -        else: -                 -            out = [None]*self.length -            if isinstance(tau, Number): -                for i in range(self.length): -                    out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau) -            else: -                for i in range(self.length): -                    out[i] = self.functions[i].proximal_conjugate(x.get_item(i), tau.get_item(i)) -             -            return BlockDataContainer(*out)  - -     -    def proximal(self, x, tau, out = None): -         -        ''' Evaluate Proximal Operator of tau * f^{*}(\cdot) at x -         -        prox_{tau*f^{*}}(x) = sum_{i} prox_{tau*f^{*}_{i}}(x_{i})  -         -         -        ''' -         -        if out is None: -                 -            out = [None]*self.length -            if isinstance(tau, Number): -                for i in range(self.length): -                    out[i] = self.functions[i].proximal(x.get_item(i), tau) -            else: -                for i in range(self.length): -                    out[i] = self.functions[i].proximal(x.get_item(i), tau.get_item(i))                         -             -            return BlockDataContainer(*out)   -         -        else: -            if isinstance(tau, Number): -                for i in range(self.length): -                    self.functions[i].proximal(x.get_item(i), tau, out[i]) -            else: -                for i in range(self.length): -                    self.functions[i].proximal(x.get_item(i), tau.get_item(i), out[i]) -             -             -     -    def gradient(self,x, out=None): -         -        ''' Evaluate gradient of f at x: f'(x)  -         -        returns: BlockDataContainer [f_{1}'(x_{1}), ... 
, f_{n}'(x_{n})] -                 -        ''' -         -        out = [None]*self.length -        for i in range(self.length): -            out[i] = self.functions[i].gradient(x.get_item(i)) -             -        return  BlockDataContainer(*out)            -                             -     -     -if __name__ == '__main__': -     -    M, N, K = 2,3,5 -     -    from ccpi.optimisation.functions import L2NormSquared, MixedL21Norm -    from ccpi.framework import ImageGeometry, BlockGeometry -    from ccpi.optimisation.operators import Gradient, Identity, BlockOperator -    import numpy -    import numpy as np -     -     -    ig = ImageGeometry(M, N) -    BG = BlockGeometry(ig, ig) -     -    u = ig.allocate('random_int') -    B = BlockOperator( Gradient(ig), Identity(ig) ) -     -    U = B.direct(u) -    b = ig.allocate('random_int') -     -    f1 =  10 * MixedL21Norm() -    f2 =  0.5 * L2NormSquared(b=b)     -     -    f = BlockFunction(f1, f2) -    tau = 0.3 -     -    print( " without out " ) -    res_no_out = f.proximal_conjugate( U, tau) -    res_out = B.range_geometry().allocate() -    f.proximal_conjugate( U, tau, out = res_out) -     -    numpy.testing.assert_array_almost_equal(res_no_out[0][0].as_array(), \ -                                            res_out[0][0].as_array(), decimal=4)  -     -    numpy.testing.assert_array_almost_equal(res_no_out[0][1].as_array(), \ -                                            res_out[0][1].as_array(), decimal=4)  - -    numpy.testing.assert_array_almost_equal(res_no_out[1].as_array(), \ -                                            res_out[1].as_array(), decimal=4)      -     -                     -     -    ########################################################################## -     -     -     -     -     -     -     -#    zzz = B.range_geometry().allocate('random_int') -#    www = B.range_geometry().allocate() -#    www.fill(zzz) -     -#    res[0].fill(z) -     -     -     -     -#    f.proximal_conjugate(z, sigma, out = res) -     -#    print(z1[0][0].as_array())     -#    print(res[0][0].as_array()) -     -     -     -     -#    U = BG.allocate('random_int') -#    RES = BG.allocate() -#    f = BlockFunction(f1, f2) -#     -#    z = f.proximal_conjugate(U, 0.2) -#    f.proximal_conjugate(U, 0.2, out = RES) -#     -#    print(z[0].as_array()) -#    print(RES[0].as_array()) -#     -#    print(z[1].as_array()) -#    print(RES[1].as_array())     -     -     -     -     -     - - -    
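A minimal sketch of the separable-sum behaviour documented in BlockFunction above, assuming the ccpi package built from this repository is importable; every class name used below also appears in the deleted test block.

from ccpi.framework import ImageGeometry
from ccpi.optimisation.functions import BlockFunction, L2NormSquared, MixedL21Norm
from ccpi.optimisation.operators import BlockOperator, Gradient, Identity

ig = ImageGeometry(2, 3)
u = ig.allocate('random_int')
b = ig.allocate('random_int')

# Build a 2-row BlockDataContainer by applying a (2,1) BlockOperator
B = BlockOperator(Gradient(ig), Identity(ig))
U = B.direct(u)

f1 = 10 * MixedL21Norm()
f2 = 0.5 * L2NormSquared(b=b)
f = BlockFunction(f1, f2)

# The call is the sum of the component evaluations, row by row
assert abs(f(U) - (f1(U.get_item(0)) + f2(U.get_item(1)))) < 1e-6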
\ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/Function.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/Function.py deleted file mode 100644 index ba33666..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/Function.py +++ /dev/null @@ -1,69 +0,0 @@ -# -*- coding: utf-8 -*- -#   This work is part of the Core Imaging Library developed by -#   Visual Analytics and Imaging System Group of the Science Technology -#   Facilities Council, STFC - -#   Copyright 2018-2019 Jakob Jorgensen, Daniil Kazantsev and Edoardo Pasca - -#   Licensed under the Apache License, Version 2.0 (the "License"); -#   you may not use this file except in compliance with the License. -#   You may obtain a copy of the License at - -#       http://www.apache.org/licenses/LICENSE-2.0 - -#   Unless required by applicable law or agreed to in writing, software -#   distributed under the License is distributed on an "AS IS" BASIS, -#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#   See the License for the specific language governing permissions and -#   limitations under the License. - -import warnings -from ccpi.optimisation.functions.ScaledFunction import ScaledFunction - -class Function(object): -    '''Abstract class representing a function -     -    Members: -    L is the Lipschitz constant of the gradient of the Function  -    ''' -    def __init__(self): -        self.L = None - -    def __call__(self,x, out=None): -        '''Evaluates the function at x ''' -        raise NotImplementedError - -    def gradient(self, x, out=None): -        '''Returns the gradient of the function at x, if the function is differentiable''' -        raise NotImplementedError - -    def proximal(self, x, tau, out=None): -        '''This returns the proximal operator for the function at x, tau''' -        raise NotImplementedError - -    def convex_conjugate(self, x, out=None): -        '''This evaluates the convex conjugate of the function at x''' -        raise NotImplementedError - -    def proximal_conjugate(self, x, tau, out = None): -        '''This returns the proximal operator for the convex conjugate of the function at x, tau''' -        raise NotImplementedError - -    def grad(self, x): -        '''Alias of gradient(x,None)''' -        warnings.warn('''This method will disappear in following  -        versions of the CIL. Use gradient instead''', DeprecationWarning) -        return self.gradient(x, out=None) - -    def prox(self, x, tau): -        '''Alias of proximal(x, tau, None)''' -        warnings.warn('''This method will disappear in following  -        versions of the CIL. 
Use proximal instead''', DeprecationWarning) -        return self.proximal(x, tau, out=None) - -    def __rmul__(self, scalar): -        '''Defines the multiplication by a scalar on the left - -        returns a ScaledFunction''' -        return ScaledFunction(self, scalar) - diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/FunctionOperatorComposition.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/FunctionOperatorComposition.py deleted file mode 100644 index 8895f3d..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/FunctionOperatorComposition.py +++ /dev/null @@ -1,92 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Fri Mar  8 09:55:36 2019 - -@author: evangelos -""" - -from ccpi.optimisation.functions import Function -from ccpi.optimisation.functions import ScaledFunction - - -class FunctionOperatorComposition(Function): -     -    ''' Function composition with Operator, i.e., f(Ax) -     -        A: operator -        f: function -     -    ''' -     -    def __init__(self, function, operator): -         -        super(FunctionOperatorComposition, self).__init__() -         -        self.function = function      -        self.operator = operator -        self.L = function.L * operator.norm()**2  -         -         -    def __call__(self, x): -         -        ''' Evaluate FunctionOperatorComposition at x -         -        returns f(Ax) -         -        ''' -     -        return self.function(self.operator.direct(x))   -     -    def gradient(self, x, out=None): -#         -        ''' Gradient takes into account the Operator''' -        if out is None: -            return self.operator.adjoint(self.function.gradient(self.operator.direct(x))) -        else:  -            tmp = self.operator.range_geometry().allocate() -            self.operator.direct(x, out=tmp) -            self.function.gradient(tmp, out=tmp) -            self.operator.adjoint(tmp, out=out) - -     -     - -    #TODO do not know if we need it -    #def call_adjoint(self, x): -    # -    #    return self.function(self.operator.adjoint(x))   - - -    #def convex_conjugate(self, x): -    #     -    #        ''' convex_conjugate does not take into account the Operator''' -    #    return self.function.convex_conjugate(x) - -     - - -                 -if __name__ == '__main__':    - -    from ccpi.framework import ImageGeometry -    from ccpi.optimisation.operators import Gradient -    from ccpi.optimisation.functions import L2NormSquared -     -    M, N, K = 2,3 -    ig = ImageGeometry(voxel_num_x=M, voxel_num_y = N) -     -    G = Gradient(ig) -    alpha = 0.5 -     -    f = L2NormSquared()     -    f_comp = FunctionOperatorComposition(G, alpha * f) -    x = ig.allocate('random_int') -    print(f_comp.gradient(x).shape -           -          ) -     - -              -         -                       
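A minimal sketch of the chain rule implemented by the gradient method above, grad(f o A)(x) = A^T f'(A x). It assumes the ccpi package built from this repository is importable and uses the argument order of this file's __init__ (function first, then operator).

import numpy
from ccpi.framework import ImageGeometry
from ccpi.optimisation.operators import Gradient
from ccpi.optimisation.functions import L2NormSquared, FunctionOperatorComposition

ig = ImageGeometry(voxel_num_x=2, voxel_num_y=3)
x = ig.allocate('random_int')

A = Gradient(ig)
f = L2NormSquared()
g = FunctionOperatorComposition(f, A)   # g(x) = f(A x)

# gradient of the composition versus the chain rule A^T f'(A x) written out by hand
lhs = g.gradient(x)
rhs = A.adjoint(f.gradient(A.direct(x)))
numpy.testing.assert_array_almost_equal(lhs.as_array(), rhs.as_array(), decimal=4)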
\ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/FunctionOperatorComposition_old.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/FunctionOperatorComposition_old.py deleted file mode 100644 index 70511bb..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/FunctionOperatorComposition_old.py +++ /dev/null @@ -1,85 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Fri Mar  8 09:55:36 2019 - -@author: evangelos -""" - -from ccpi.optimisation.functions import Function -from ccpi.optimisation.functions import ScaledFunction - - -class FunctionOperatorComposition(Function): -     -    ''' Function composition with Operator, i.e., f(Ax) -     -        A: operator -        f: function -     -    ''' -     -    def __init__(self, operator, function): -         -        super(FunctionOperatorComposition, self).__init__() -        self.function = function      -        self.operator = operator -        alpha = 1 -         -        if isinstance (function, ScaledFunction): -            alpha = function.scalar -        self.L = 2 * alpha * operator.norm()**2 -         -         -    def __call__(self, x): -         -        ''' Evaluate FunctionOperatorComposition at x -         -        returns f(Ax) -         -        ''' -     -        return self.function(self.operator.direct(x))    - -    #TODO do not know if we need it -    def call_adjoint(self, x): -     -        return self.function(self.operator.adjoint(x))   - - -    def convex_conjugate(self, x): -         -        ''' convex_conjugate does not take into account the Operator''' -        return self.function.convex_conjugate(x) - -    def proximal(self, x, tau, out=None): -         -        '''proximal does not take into account the Operator'''                 -        if out is None: -            return self.function.proximal(x, tau) -        else: -            self.function.proximal(x, tau, out=out) -             - -    def proximal_conjugate(self, x, tau, out=None):     - -        ''' proximal conjugate does not take into account the Operator''' -        if out is None: -            return self.function.proximal_conjugate(x, tau) -        else: -            self.function.proximal_conjugate(x, tau, out=out)  - -    def gradient(self, x, out=None): -         -        ''' Gradient takes into account the Operator''' -        if out is None: -            return self.operator.adjoint( -                self.function.gradient(self.operator.direct(x)) -                ) -        else: -            self.operator.adjoint( -                self.function.gradient(self.operator.direct(x),  -                out=out) -            ) -         -                       
\ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/IndicatorBox.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/IndicatorBox.py deleted file mode 100644 index df8dc89..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/IndicatorBox.py +++ /dev/null @@ -1,65 +0,0 @@ -# -*- coding: utf-8 -*- -#   This work is part of the Core Imaging Library developed by -#   Visual Analytics and Imaging System Group of the Science Technology -#   Facilities Council, STFC - -#   Copyright 2018-2019 Jakob Jorgensen, Daniil Kazantsev and Edoardo Pasca - -#   Licensed under the Apache License, Version 2.0 (the "License"); -#   you may not use this file except in compliance with the License. -#   You may obtain a copy of the License at - -#       http://www.apache.org/licenses/LICENSE-2.0 - -#   Unless required by applicable law or agreed to in writing, software -#   distributed under the License is distributed on an "AS IS" BASIS, -#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#   See the License for the specific language governing permissions and -#   limitations under the License. -from ccpi.optimisation.functions import Function -import numpy - -class IndicatorBox(Function): -    '''Box constraints indicator function.  -     -    Calling returns 0 if argument is within the box. The prox operator is projection onto the box.  -    Only implements one scalar lower and one upper as constraint on all elements. Should generalise -    to vectors to allow different constraints one elements. -''' -     -    def __init__(self,lower=-numpy.inf,upper=numpy.inf): -        # Do nothing -        super(IndicatorBox, self).__init__() -        self.lower = lower -        self.upper = upper -         -     -    def __call__(self,x): -         -        if (numpy.all(x.array>=self.lower) and  -            numpy.all(x.array <= self.upper) ): -            val = 0 -        else: -            val = numpy.inf -        return val -     -    def prox(self,x,tau=None): -        return  (x.maximum(self.lower)).minimum(self.upper) -     -    def proximal(self, x, tau, out=None): -        if out is None: -            return self.prox(x, tau) -        else: -            if not x.shape == out.shape: -                raise ValueError('Norm1 Incompatible size:', -                                 x.shape, out.shape) -            #(x.abs() - tau*self.gamma).maximum(0) * x.sign() -            x.abs(out = out) -            out.__isub__(tau*self.gamma) -            out.maximum(0, out=out) -            if self.sign_x is None or not x.shape == self.sign_x.shape: -                self.sign_x = x.sign() -            else: -                x.sign(out=self.sign_x) -                 -            out.__imul__( self.sign_x ) diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/KullbackLeibler.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/KullbackLeibler.py deleted file mode 100644 index cf1a244..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/KullbackLeibler.py +++ /dev/null @@ -1,136 +0,0 @@ -# -*- coding: utf-8 -*- -#   This work is part of the Core Imaging Library developed by -#   Visual Analytics and Imaging System Group of the Science Technology -#   Facilities Council, STFC - -#   Copyright 2018-2019 Evangelos Papoutsellis and Edoardo Pasca - -#   Licensed under the Apache License, Version 2.0 (the "License"); -#   you may not use this file except in compliance with the License. 
-#   You may obtain a copy of the License at - -#       http://www.apache.org/licenses/LICENSE-2.0 - -#   Unless required by applicable law or agreed to in writing, software -#   distributed under the License is distributed on an "AS IS" BASIS, -#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#   See the License for the specific language governing permissions and -#   limitations under the License. - -import numpy -from ccpi.optimisation.functions import Function -from ccpi.optimisation.functions.ScaledFunction import ScaledFunction  -from ccpi.framework import ImageData, ImageGeometry -import functools - -class KullbackLeibler(Function): -     -    ''' Assume that data > 0 -                 -    ''' -     -    def __init__(self,data, **kwargs): -         -        super(KullbackLeibler, self).__init__() -         -        self.b = data         -        self.bnoise = kwargs.get('bnoise', 0) - -                                                 -    def __call__(self, x): -                 -        res_tmp = numpy.zeros(x.shape) -         -        tmp = x + self.bnoise  -        ind = x.as_array()>0 -         -        res_tmp[ind] = x.as_array()[ind] - self.b.as_array()[ind] * numpy.log(tmp.as_array()[ind]) -         -        return res_tmp.sum() - -    def log(self, datacontainer): -        '''calculates the in-place log of the datacontainer''' -        if not functools.reduce(lambda x,y: x and y>0, -                                datacontainer.as_array().ravel(), True): -            raise ValueError('KullbackLeibler. Cannot calculate log of negative number') -        datacontainer.fill( numpy.log(datacontainer.as_array()) ) - -         -    def gradient(self, x, out=None): -         -        #TODO Division check -        if out is None: -            return 1 - self.b/(x + self.bnoise) -        else: - -            x.add(self.bnoise, out=out) -            self.b.divide(out, out=out) -            out.subtract(1, out=out) -            out *= -1 -             -    def convex_conjugate(self, x): -         -        tmp = self.b/(1-x) -        ind = tmp.as_array()>0 -         -        return (self.b.as_array()[ind] * (numpy.log(tmp.as_array()[ind])-1)).sum() - -     -    def proximal(self, x, tau, out=None): -         -        if out is None:         -            return 0.5 *( (x - self.bnoise - tau) + ( (x + self.bnoise - tau)**2 + 4*tau*self.b   ) .sqrt() ) -        else: -             -            tmp =  0.5 *( (x - self.bnoise - tau) +  -                        ( (x + self.bnoise - tau)**2 + 4*tau*self.b   ) .sqrt() -                        ) -            x.add(self.bnoise, out=out) -            out -= tau -            out *= out -            tmp = self.b * (4 * tau) -            out.add(tmp, out=out) -            out.sqrt(out=out) -             -            x.subtract(self.bnoise, out=tmp) -            tmp -= tau -             -            out += tmp -             -            out *= 0.5 -                             -    def proximal_conjugate(self, x, tau, out=None): - -                 -        if out is None: -            z = x + tau * self.bnoise -            return 0.5*((z + 1) - ((z-1)**2 + 4 * tau * self.b).sqrt()) -        else: - -            z_m = x + tau * self.bnoise -1 -            self.b.multiply(4*tau, out=out) -            z_m.multiply(z_m, out=z_m) -            out += z_m - -            out.sqrt(out=out) -                         -            out *= -1 -            out += tmp2 -            out *= 0.5 - -         -     -    def __rmul__(self, 
scalar): -         -        ''' Multiplication of L2NormSquared with a scalar -         -        Returns: ScaledFunction -                         -        ''' -         -        return ScaledFunction(self, scalar)      -         -         - -     -         diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/L1Norm.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/L1Norm.py deleted file mode 100644 index 4e53f2c..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/L1Norm.py +++ /dev/null @@ -1,234 +0,0 @@ -# -*- coding: utf-8 -*- -#   This work is part of the Core Imaging Library developed by -#   Visual Analytics and Imaging System Group of the Science Technology -#   Facilities Council, STFC - -#   Copyright 2018-2019 Evangelos Papoutsellis and Edoardo Pasca - -#   Licensed under the Apache License, Version 2.0 (the "License"); -#   you may not use this file except in compliance with the License. -#   You may obtain a copy of the License at - -#       http://www.apache.org/licenses/LICENSE-2.0 - -#   Unless required by applicable law or agreed to in writing, software -#   distributed under the License is distributed on an "AS IS" BASIS, -#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#   See the License for the specific language governing permissions and -#   limitations under the License. - - -from ccpi.optimisation.functions import Function -from ccpi.optimisation.functions.ScaledFunction import ScaledFunction         -from ccpi.optimisation.operators import ShrinkageOperator  -  - -class L1Norm(Function): -     -    '''  -     -    Class: L1Norm -         -    Cases:  a) f(x) = ||x||_{1} -     -            b) f(x) = ||x - b||_{1} -                              -    '''       -     -    def __init__(self, **kwargs): -         -        super(L1Norm, self).__init__() -        self.b = kwargs.get('b',None)  -         -    def __call__(self, x): -         -        ''' Evaluate L1Norm at x: f(x) ''' -         -        y = x -        if self.b is not None:  -            y = x - self.b -        return y.abs().sum()   -     -    def gradient(self,x): -        #TODO implement subgradient??? -        return ValueError('Not Differentiable')    -     -    def convex_conjugate(self,x): -        #TODO implement Indicator infty??? 
- -        y = 0         -        if self.b is not None: -            y =  0 + (self.b * x).sum() -        return y   -     -    def proximal(self, x, tau, out=None): -         -        # TODO implement shrinkage operator, we will need it later e.g SplitBregman -         -        if out is None: -            if self.b is not None: -                return self.b + ShrinkageOperator.__call__(self, x - self.b, tau) -            else: -                return ShrinkageOperator.__call__(self, x, tau)              -        else: -            if self.b is not None: -                out.fill(self.b + ShrinkageOperator.__call__(self, x - self.b, tau)) -            else: -                out.fill(ShrinkageOperator.__call__(self, x, tau)) -                                     -    def proximal_conjugate(self, x, tau, out=None): -         -        if out is None: -            if self.b is not None: -                return (x - tau*self.b).divide((x - tau*self.b).abs().maximum(1.0)) -            else: -                return x.divide(x.abs().maximum(1.0)) -        else: -            if self.b is not None: -                out.fill((x - tau*self.b).divide((x - tau*self.b).abs().maximum(1.0))) -            else: -                out.fill(x.divide(x.abs().maximum(1.0)) )                 -             -    def __rmul__(self, scalar): -        return ScaledFunction(self, scalar) - - -#import numpy as np -##from ccpi.optimisation.funcs import Function -#from ccpi.optimisation.functions import Function -#from ccpi.framework import DataContainer, ImageData  -# -# -#############################   L1NORM FUNCTIONS   ############################# -#class SimpleL1Norm(Function): -#     -#    def __init__(self, alpha=1): -#         -#        super(SimpleL1Norm, self).__init__()          -#        self.alpha = alpha -#         -#    def __call__(self, x): -#        return self.alpha * x.abs().sum() -#     -#    def gradient(self,x): -#        return ValueError('Not Differentiable') -#             -#    def convex_conjugate(self,x): -#        return 0 -#     -#    def proximal(self, x, tau): -#        ''' Soft Threshold''' -#        return x.sign() * (x.abs() - tau * self.alpha).maximum(0) -#         -#    def proximal_conjugate(self, x, tau): -#        return x.divide((x.abs()/self.alpha).maximum(1.0)) -     -#class L1Norm(SimpleL1Norm): -#     -#    def __init__(self, alpha=1, **kwargs): -#         -#        super(L1Norm, self).__init__()          -#        self.alpha = alpha  -#        self.b = kwargs.get('b',None) -#         -#    def __call__(self, x): -#         -#        if self.b is None: -#            return SimpleL1Norm.__call__(self, x) -#        else: -#            return SimpleL1Norm.__call__(self, x - self.b) -#             -#    def gradient(self, x): -#        return ValueError('Not Differentiable') -#             -#    def convex_conjugate(self,x): -#        if self.b is None: -#            return SimpleL1Norm.convex_conjugate(self, x) -#        else: -#            return SimpleL1Norm.convex_conjugate(self, x) + (self.b * x).sum() -#     -#    def proximal(self, x, tau): -#         -#        if self.b is None: -#            return SimpleL1Norm.proximal(self, x, tau) -#        else: -#            return self.b + SimpleL1Norm.proximal(self, x - self.b , tau) -#         -#    def proximal_conjugate(self, x, tau): -#         -#        if self.b is None: -#            return SimpleL1Norm.proximal_conjugate(self, x, tau) -#        else: -#            return SimpleL1Norm.proximal_conjugate(self, x - 
tau*self.b, tau) -#         - -###############################################################################                 -              -                 - - -if __name__ == '__main__':    -     -    from ccpi.framework import ImageGeometry -    import numpy -    N, M = 40,40 -    ig = ImageGeometry(N, M) -    scalar = 10 -    b = ig.allocate('random_int') -    u = ig.allocate('random_int')  -     -    f = L1Norm() -    f_scaled = scalar * L1Norm() -     -    f_b = L1Norm(b=b) -    f_scaled_b = scalar * L1Norm(b=b) -     -    # call -     -    a1 = f(u) -    a2 = f_scaled(u) -    numpy.testing.assert_equal(scalar * a1, a2) -     -    a3 = f_b(u) -    a4 = f_scaled_b(u) -    numpy.testing.assert_equal(scalar * a3, a4)  -     -    # proximal -    tau = 0.4 -    b1 = f.proximal(u, tau*scalar) -    b2 = f_scaled.proximal(u, tau) -         -    numpy.testing.assert_array_almost_equal(b1.as_array(), b2.as_array(), decimal=4) -     -    b3 = f_b.proximal(u, tau*scalar) -    b4 = f_scaled_b.proximal(u, tau) -     -    z1 = b + (u-b).sign() * ((u-b).abs() - tau * scalar).maximum(0) -         -    numpy.testing.assert_array_almost_equal(b3.as_array(), b4.as_array(), decimal=4)     -#         -#    #proximal conjugate -#     -    c1 = f_scaled.proximal_conjugate(u, tau) -    c2 = u.divide((u.abs()/scalar).maximum(1.0)) -     -    numpy.testing.assert_array_almost_equal(c1.as_array(), c2.as_array(), decimal=4)  -     -    c3 = f_scaled_b.proximal_conjugate(u, tau) -    c4 = (u - tau*b).divide( ((u-tau*b).abs()/scalar).maximum(1.0) ) -     -    numpy.testing.assert_array_almost_equal(c3.as_array(), c4.as_array(), decimal=4)      -     -     -     - - - -     -             -         - -         -         -         -      
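The proximal map used by L1Norm above is plain soft thresholding, prox_{tau*||.||_1}(x) = sign(x) * max(|x| - tau, 0). A minimal NumPy-only sketch, independent of the framework classes:

import numpy as np

def soft_threshold(x, tau):
    # proximal operator of tau * ||x||_1, applied elementwise
    return np.sign(x) * np.maximum(np.abs(x) - tau, 0.0)

x = np.array([-3.0, -0.2, 0.0, 0.5, 2.0])
print(soft_threshold(x, 1.0))   # expected: [-2., -0., 0., 0., 1.]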
\ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/L2NormSquared.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/L2NormSquared.py deleted file mode 100644 index b77d472..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/L2NormSquared.py +++ /dev/null @@ -1,286 +0,0 @@ -# -*- coding: utf-8 -*- -#   This work is part of the Core Imaging Library developed by -#   Visual Analytics and Imaging System Group of the Science Technology -#   Facilities Council, STFC - -#   Copyright 2018-2019 Evangelos Papoutsellis and Edoardo Pasca - -#   Licensed under the Apache License, Version 2.0 (the "License"); -#   you may not use this file except in compliance with the License. -#   You may obtain a copy of the License at - -#       http://www.apache.org/licenses/LICENSE-2.0 - -#   Unless required by applicable law or agreed to in writing, software -#   distributed under the License is distributed on an "AS IS" BASIS, -#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#   See the License for the specific language governing permissions and -#   limitations under the License. - -from ccpi.optimisation.functions import Function -from ccpi.optimisation.functions.ScaledFunction import ScaledFunction -from ccpi.optimisation.functions import FunctionOperatorComposition - -class L2NormSquared(Function): -     -    ''' -             -    Cases:  a) f(x) = \|x\|^{2}_{2} -     -            b) f(x) = ||x - b||^{2}_{2} -                              -    '''     -     -    def __init__(self, **kwargs): -                                 -        super(L2NormSquared, self).__init__() -        self.b = kwargs.get('b',None)   -        self.L = 2 - -    def __call__(self, x): -         -        ''' Evaluate L2NormSquared at x: f(x) ''' -             -        y = x -        if self.b is not None:  -            y = x - self.b -        try: -            return y.squared_norm() -        except AttributeError as ae: -            # added for compatibility with SIRF  -            return (y.norm()**2) -                 -    def gradient(self, x, out=None):         -         -        ''' Evaluate gradient of L2NormSquared at x: f'(x) ''' -                 -        if out is not None: -             -            out.fill(x) -            if self.b is not None: -                out -= self.b -            out *= 2 -             -        else: -             -            y = x -            if self.b is not None: -                y = x - self.b -            return 2*y -         -                                                        -    def convex_conjugate(self, x): -         -        ''' Evaluate convex conjugate of L2NormSquared at x: f^{*}(x)''' -         -        tmp = 0 -         -        if self.b is not None: -            tmp = (x * self.b).sum() -             -        return (1./4.) 
* x.squared_norm() + tmp - - -    def proximal(self, x, tau, out = None): - -        ''' Evaluate Proximal Operator of tau * f(\cdot) at x: -         -            prox_{tau*f(\cdot)}(x) = \argmin_{z} \frac{1}{2}|| z - x ||^{2}_{2} + tau * f(z) -         -        '''         -         -        if out is None: -             -            if self.b is None: -                return x/(1+2*tau) -            else: -                tmp = x.subtract(self.b) -                tmp /= (1+2*tau) -                tmp += self.b -                return tmp - -        else: -            if self.b is not None: -                x.subtract(self.b, out=out) -                out /= (1+2*tau) -                out += self.b -            else: -                x.divide((1+2*tau), out=out) - -     -    def proximal_conjugate(self, x, tau, out=None): -         -        ''' Evaluate Proximal Operator of tau * f^{*}(\cdot) at x (i.e., the convex conjugate of f) : -         -            prox_{tau*f(\cdot)}(x) = \argmin_{z} \frac{1}{2}|| z - x ||^{2}_{2} + tau * f^{*}(z) -         -        '''          -         -        if out is None: -            if self.b is not None: -                return (x - tau*self.b)/(1 + tau/2)  -            else: -                return x/(1 + tau/2) -        else: -            if self.b is not None: -                x.subtract(tau*self.b, out=out) -                out.divide(1+tau/2, out=out) -            else: -                x.divide(1 + tau/2, out=out) -                                         -    def __rmul__(self, scalar): -         -        ''' Multiplication of L2NormSquared with a scalar -         -        Returns: ScaledFunction -                         -        ''' -         -        return ScaledFunction(self, scalar)   - - -    def composition(self, operator): -         -        return FunctionOperatorComposition(operator) -       - - -if __name__ == '__main__': -     -    from ccpi.framework import ImageGeometry -    import numpy -    # TESTS for L2 and scalar * L2 -     -    M, N, K = 2,3,5 -    ig = ImageGeometry(voxel_num_x=M, voxel_num_y = N, voxel_num_z = K) -    u = ig.allocate('random_int') -    b = ig.allocate('random_int')  -     -    # check grad/call no data -    f = L2NormSquared() -    a1 = f.gradient(u) -    a2 = 2 * u -    numpy.testing.assert_array_almost_equal(a1.as_array(), a2.as_array(), decimal=4) -    numpy.testing.assert_equal(f(u), u.squared_norm()) - -    # check grad/call with data -    f1 = L2NormSquared(b=b) -    b1 = f1.gradient(u) -    b2 = 2 * (u-b) -         -    numpy.testing.assert_array_almost_equal(b1.as_array(), b2.as_array(), decimal=4) -    numpy.testing.assert_equal(f1(u), (u-b).squared_norm()) -     -    #check convex conjuagate no data -    c1 = f.convex_conjugate(u) -    c2 = 1/4 * u.squared_norm() -    numpy.testing.assert_equal(c1, c2) -     -    #check convex conjuagate with data -    d1 = f1.convex_conjugate(u) -    d2 = (1/4) * u.squared_norm() + (u*b).sum() -    numpy.testing.assert_equal(d1, d2)   -     -    # check proximal no data -    tau = 5 -    e1 = f.proximal(u, tau) -    e2 = u/(1+2*tau) -    numpy.testing.assert_array_almost_equal(e1.as_array(), e2.as_array(), decimal=4) -     -    # check proximal with data -    tau = 5 -    h1 = f1.proximal(u, tau) -    h2 = (u-b)/(1+2*tau) + b -    numpy.testing.assert_array_almost_equal(h1.as_array(), h2.as_array(), decimal=4)     -     -    # check proximal conjugate no data -    tau = 0.2 -    k1 = f.proximal_conjugate(u, tau) -    k2 = u/(1 + tau/2 ) -    
numpy.testing.assert_array_almost_equal(k1.as_array(), k2.as_array(), decimal=4)  -     -    # check proximal conjugate with data -    l1 = f1.proximal_conjugate(u, tau) -    l2 = (u - tau * b)/(1 + tau/2 ) -    numpy.testing.assert_array_almost_equal(l1.as_array(), l2.as_array(), decimal=4)      -     -         -    # check scaled function properties -     -    # scalar  -    scalar = 100 -    f_scaled_no_data = scalar * L2NormSquared() -    f_scaled_data = scalar * L2NormSquared(b=b) -     -    # call -    numpy.testing.assert_equal(f_scaled_no_data(u), scalar*f(u)) -    numpy.testing.assert_equal(f_scaled_data(u), scalar*f1(u)) -     -    # grad -    numpy.testing.assert_array_almost_equal(f_scaled_no_data.gradient(u).as_array(), scalar*f.gradient(u).as_array(), decimal=4) -    numpy.testing.assert_array_almost_equal(f_scaled_data.gradient(u).as_array(), scalar*f1.gradient(u).as_array(), decimal=4) -     -    # conj -    numpy.testing.assert_almost_equal(f_scaled_no_data.convex_conjugate(u), \ -                               f.convex_conjugate(u/scalar) * scalar, decimal=4) -     -    numpy.testing.assert_almost_equal(f_scaled_data.convex_conjugate(u), \ -                               scalar * f1.convex_conjugate(u/scalar), decimal=4) -     -    # proximal -    numpy.testing.assert_array_almost_equal(f_scaled_no_data.proximal(u, tau).as_array(), \ -                                            f.proximal(u, tau*scalar).as_array()) -     -     -    numpy.testing.assert_array_almost_equal(f_scaled_data.proximal(u, tau).as_array(), \ -                                            f1.proximal(u, tau*scalar).as_array()) -                                -     -    # proximal conjugate -    numpy.testing.assert_array_almost_equal(f_scaled_no_data.proximal_conjugate(u, tau).as_array(), \ -                                            (u/(1 + tau/(2*scalar) )).as_array(), decimal=4) -     -    numpy.testing.assert_array_almost_equal(f_scaled_data.proximal_conjugate(u, tau).as_array(), \ -                                            ((u - tau * b)/(1 + tau/(2*scalar) )).as_array(), decimal=4)    -     -     -     -    print( " ####### check without out ######### " ) -           -           -    u_out_no_out = ig.allocate('random_int')          -    res_no_out = f_scaled_data.proximal_conjugate(u_out_no_out, 0.5)           -    print(res_no_out.as_array()) -     -    print( " ####### check with out ######### " )  -           -    res_out = ig.allocate()         -    f_scaled_data.proximal_conjugate(u_out_no_out, 0.5, out = res_out) -     -    print(res_out.as_array())    - -    numpy.testing.assert_array_almost_equal(res_no_out.as_array(), \ -                                            res_out.as_array(), decimal=4)   -     -     -     -    ig1 = ImageGeometry(2,3) -     -    tau = 0.1 -     -    u = ig1.allocate('random_int') -    b = ig1.allocate('random_int') -     -    scalar = 0.5 -    f_scaled = scalar * L2NormSquared(b=b) -    f_noscaled = L2NormSquared(b=b) -     -     -    res1 = f_scaled.proximal(u, tau) -    res2 = f_noscaled.proximal(u, tau*scalar) -     -#    res2 = (u + tau*b)/(1+tau) -     -    numpy.testing.assert_array_almost_equal(res1.as_array(), \ -                                            res2.as_array(), decimal=4) -                                             diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/MixedL21Norm.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/MixedL21Norm.py deleted file mode 100644 index e8f6da4..0000000 --- 
a/Wrappers/Python/build/lib/ccpi/optimisation/functions/MixedL21Norm.py +++ /dev/null @@ -1,159 +0,0 @@ -# -*- coding: utf-8 -*- -#   This work is part of the Core Imaging Library developed by -#   Visual Analytics and Imaging System Group of the Science Technology -#   Facilities Council, STFC - -#   Copyright 2018-2019 Evangelos Papoutsellis and Edoardo Pasca - -#   Licensed under the Apache License, Version 2.0 (the "License"); -#   you may not use this file except in compliance with the License. -#   You may obtain a copy of the License at - -#       http://www.apache.org/licenses/LICENSE-2.0 - -#   Unless required by applicable law or agreed to in writing, software -#   distributed under the License is distributed on an "AS IS" BASIS, -#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#   See the License for the specific language governing permissions and -#   limitations under the License. - -from ccpi.optimisation.functions import Function, ScaledFunction -from ccpi.framework import BlockDataContainer - -import functools - -class MixedL21Norm(Function): -     -     -    ''' -        f(x) = ||x||_{2,1} = \sum |x|_{2}                    -    '''       -     -    def __init__(self, **kwargs): - -        super(MixedL21Norm, self).__init__()                       -        self.SymTensor = kwargs.get('SymTensor',False) -         -    def __call__(self, x): -         -        ''' Evaluates L2,1Norm at point x -             -            :param: x is a BlockDataContainer -                                 -        ''' -        if not isinstance(x, BlockDataContainer): -            raise ValueError('__call__ expected BlockDataContainer, got {}'.format(type(x)))  -                                          -        tmp = [ el**2 for el in x.containers ] -        res = sum(tmp).sqrt().sum() - -        return res -                             -    def gradient(self, x, out=None): -        return ValueError('Not Differentiable') -                             -    def convex_conjugate(self,x): -         -        ''' This is the Indicator function of ||\cdot||_{2, \infty} -            which is either 0 if ||x||_{2, \infty} or \infty         -        ''' -         -        return 0.0 -         -        #tmp = [ el**2 for el in x.containers ] -        #print(sum(tmp).sqrt().as_array().max()) -        #return sum(tmp).sqrt().as_array().max() -     -    def proximal(self, x, tau, out=None): -         -        ''' -            For this we need to define a MixedL2,2 norm acting on BDC, -            different form L2NormSquared which acts on DC -         -        ''' -        pass -     -    def proximal_conjugate(self, x, tau, out=None):  - - -        if out is None:                                         -            tmp = [ el*el for el in x.containers] -            res = sum(tmp).sqrt().maximum(1.0)  -            frac = [el/res for el in x.containers] -            return  BlockDataContainer(*frac)    -         -             -        #TODO this is slow, why??? -#                return x.divide(x.pnorm().maximum(1.0)) -        else: -                             -            res1 = functools.reduce(lambda a,b: a + b*b, x.containers, x.get_item(0) * 0 ) -            res = res1.sqrt().maximum(1.0) -            x.divide(res, out=out) -             -#                x.divide(sum([el*el for el in x.containers]).sqrt().maximum(1.0), out=out) -            #TODO this is slow, why ??? 
-#                 x.divide(x.pnorm().maximum(1.0), out=out) -                               - -    def __rmul__(self, scalar): -         -        ''' Multiplication of L2NormSquared with a scalar -         -        Returns: ScaledFunction -              -        '''          -        return ScaledFunction(self, scalar)  - - -# -if __name__ == '__main__': -     -    M, N, K = 2,3,5 -    from ccpi.framework import BlockGeometry -    import numpy -     -    ig = ImageGeometry(M, N) -     -    BG = BlockGeometry(ig, ig) -     -    U = BG.allocate('random_int') -     -    # Define no scale and scaled -    f_no_scaled = MixedL21Norm()  -    f_scaled = 0.5 * MixedL21Norm()   -     -    # call -     -    a1 = f_no_scaled(U) -    a2 = f_scaled(U)     -    print(a1, 2*a2) -         -     -    print( " ####### check without out ######### " ) -           -           -    u_out_no_out = BG.allocate('random_int')          -    res_no_out = f_scaled.proximal_conjugate(u_out_no_out, 0.5)           -    print(res_no_out[0].as_array()) -     -    print( " ####### check with out ######### " )  -#           -    res_out = BG.allocate()         -    f_scaled.proximal_conjugate(u_out_no_out, 0.5, out = res_out) -#     -    print(res_out[0].as_array())    -# -    numpy.testing.assert_array_almost_equal(res_no_out[0].as_array(), \ -                                            res_out[0].as_array(), decimal=4) - -    numpy.testing.assert_array_almost_equal(res_no_out[1].as_array(), \ -                                            res_out[1].as_array(), decimal=4)      -#     -     -     -     - -     - -     diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/Norm2Sq.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/Norm2Sq.py deleted file mode 100644 index b553d7c..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/Norm2Sq.py +++ /dev/null @@ -1,98 +0,0 @@ -# -*- coding: utf-8 -*- -#   This work is part of the Core Imaging Library developed by -#   Visual Analytics and Imaging System Group of the Science Technology -#   Facilities Council, STFC - -#   Copyright 2018-2019 Jakob Jorgensen, Daniil Kazantsev and Edoardo Pasca - -#   Licensed under the Apache License, Version 2.0 (the "License"); -#   you may not use this file except in compliance with the License. -#   You may obtain a copy of the License at - -#       http://www.apache.org/licenses/LICENSE-2.0 - -#   Unless required by applicable law or agreed to in writing, software -#   distributed under the License is distributed on an "AS IS" BASIS, -#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#   See the License for the specific language governing permissions and -#   limitations under the License. -from ccpi.optimisation.functions import Function -import numpy -import warnings - -# Define a class for squared 2-norm -class Norm2sq(Function): -    ''' -    f(x) = c*||A*x-b||_2^2 -     -    which has  -     -    grad[f](x) = 2*c*A^T*(A*x-b) -     -    and Lipschitz constant -     -    L = 2*c*||A||_2^2 = 2*s1(A)^2 -     -    where s1(A) is the largest singular value of A. -     -    ''' -     -    def __init__(self,A,b,c=1.0,memopt=False): -        super(Norm2sq, self).__init__() -     -        self.A = A  # Should be an operator, default identity -        self.b = b  # Default zero DataSet? -        self.c = c  # Default 1. 
-        if memopt: -            try: -                self.range_tmp = A.range_geometry().allocate() -                self.domain_tmp = A.domain_geometry().allocate() -                self.memopt = True -            except NameError as ne: -                warnings.warn(str(ne)) -                self.memopt = False -            except NotImplementedError as nie: -                print (nie) -                warnings.warn(str(nie)) -                self.memopt = False -        else: -            self.memopt = False -         -        # Compute the Lipschitz parameter from the operator if possible -        # Leave it initialised to None otherwise -        try: -            self.L = 2.0*self.c*(self.A.norm()**2) -        except AttributeError as ae: -            pass -        except NotImplementedError as noe: -            pass -         -    #def grad(self,x): -    #    return self.gradient(x, out=None) - -    def __call__(self,x): -        #return self.c* np.sum(np.square((self.A.direct(x) - self.b).ravel())) -        #if out is None: -        #    return self.c*( ( (self.A.direct(x)-self.b)**2).sum() ) -        #else: -        y = self.A.direct(x) -        y.__isub__(self.b) -        #y.__imul__(y) -        #return y.sum() * self.c -        try: -            return y.squared_norm() * self.c -        except AttributeError as ae: -            # added for compatibility with SIRF  -            return (y.norm()**2) * self.c -     -    def gradient(self, x, out = None): -        if self.memopt: -            #return 2.0*self.c*self.A.adjoint( self.A.direct(x) - self.b ) -             -            self.A.direct(x, out=self.range_tmp) -            self.range_tmp -= self.b  -            self.A.adjoint(self.range_tmp, out=out) -            #self.direct_placehold.multiply(2.0*self.c, out=out) -            out *= (self.c * 2.0) -        else: -            return (2.0*self.c)*self.A.adjoint( self.A.direct(x) - self.b ) diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/ScaledFunction.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/ScaledFunction.py deleted file mode 100644 index 1db223b..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/ScaledFunction.py +++ /dev/null @@ -1,150 +0,0 @@ -# -*- coding: utf-8 -*-
 -#   This work is part of the Core Imaging Library developed by
 -#   Visual Analytics and Imaging System Group of the Science Technology
 -#   Facilities Council, STFC
 -
 -#   Copyright 2018-2019 Evangelos Papoutsellis and Edoardo Pasca
 -
 -#   Licensed under the Apache License, Version 2.0 (the "License");
 -#   you may not use this file except in compliance with the License.
 -#   You may obtain a copy of the License at
 -
 -#       http://www.apache.org/licenses/LICENSE-2.0
 -
 -#   Unless required by applicable law or agreed to in writing, software
 -#   distributed under the License is distributed on an "AS IS" BASIS,
 -#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 -#   See the License for the specific language governing permissions and
 -#   limitations under the License.
 -from numbers import Number
 -import numpy
 -import warnings
 -
 -class ScaledFunction(object):
 -    
 -    '''ScaledFunction
 -
 -    A class to represent the multiplication of a Function by a scalar.
 -    It holds a function and a scalar. Calling it returns the scalar multiple of
 -    the result of the wrapped function's __call__, convex_conjugate and gradient.
 -    For everything else it behaves like the function it holds.
 -
 -    Args:
 -       function (Function): a Function or BlockFunction
 -       scalar (Number): a scalar multiplier
 -    Example:
 -       The scaled function behaves like the following:
 -       sf = scalar * f  -->  sf(x) == scalar * f(x)
 -    '''
 -    def __init__(self, function, scalar):
 -        super(ScaledFunction, self).__init__()
 -
 -        if not isinstance (scalar, Number):
 -            raise TypeError('expected scalar: got {}'.format(type(scalar)))
 -        self.scalar = scalar
 -        self.function = function
 -        
 -        if self.function.L is not None:        
 -            self.L = self.scalar * self.function.L         
 -
 -    def __call__(self,x, out=None):
 -        '''Evaluates the function at x '''
 -        return self.scalar * self.function(x)
 -
 -    def convex_conjugate(self, x):
 -        '''returns the convex_conjugate of the scaled function '''
 -        return self.scalar * self.function.convex_conjugate(x/self.scalar)
 -    
 -    def gradient(self, x, out=None):
 -        '''Returns the gradient of the function at x, if the function is differentiable'''
 -        if out is None:            
 -            return self.scalar * self.function.gradient(x)    
 -        else:
 -            self.function.gradient(x, out=out)
 -            out *= self.scalar
 -
 -    def proximal(self, x, tau, out=None):
 -        '''This returns the proximal operator for the function at x, tau
 -        '''
 -        if out is None:
 -            return self.function.proximal(x, tau*self.scalar)     
 -        else:
 -            self.function.proximal(x, tau*self.scalar, out = out)
 -
 -    def proximal_conjugate(self, x, tau, out = None):
 -        '''This returns the proximal operator of the convex conjugate of the function at x, tau
 -        '''
 -        if out is None:
 -            return self.scalar * self.function.proximal_conjugate(x/self.scalar, tau/self.scalar)
 -        else:
 -            self.function.proximal_conjugate(x/self.scalar, tau/self.scalar, out=out)
 -            out *= self.scalar
 -
 -    def grad(self, x):
 -        '''Alias of gradient(x,None)'''
 -        warnings.warn('''This method will disappear in following 
 -        versions of the CIL. Use gradient instead''', DeprecationWarning)
 -        return self.gradient(x, out=None)
 -
 -    def prox(self, x, tau):
 -        '''Alias of proximal(x, tau, None)'''
 -        warnings.warn('''This method will disappear in following 
 -        versions of the CIL. Use proximal instead''', DeprecationWarning)
 -        return self.proximal(x, tau, out=None)
 -
 -
 -            
 -if __name__ == '__main__':        
 -
 -    from ccpi.optimisation.functions import L2NormSquared, MixedL21Norm
 -    from ccpi.framework import ImageGeometry, BlockGeometry
 -    
 -    M, N, K = 2,3,5
 -    ig = ImageGeometry(voxel_num_x=M, voxel_num_y = N, voxel_num_z = K)
 -    
 -    u = ig.allocate('random_int')
 -    b = ig.allocate('random_int') 
 -    
 -    BG = BlockGeometry(ig, ig)
 -    U = BG.allocate('random_int')
 -        
 -    f2 = 0.5 * L2NormSquared(b=b)
 -    f1 = 30 * MixedL21Norm()
 -    tau = 0.355
 -    
 -    res_no_out1 =  f1.proximal_conjugate(U, tau)
 -    res_no_out2 =  f2.proximal_conjugate(u, tau)    
 -    
 -    
 -#    print( " ######## with out ######## ")
 -    res_out1 = BG.allocate()
 -    res_out2 = ig.allocate()
 -    
 -    f1.proximal_conjugate(U, tau, out = res_out1)
 -    f2.proximal_conjugate(u, tau, out = res_out2)
 -
 -
 -    numpy.testing.assert_array_almost_equal(res_no_out1[0].as_array(), \
 -                                            res_out1[0].as_array(), decimal=4) 
 -    
 -    numpy.testing.assert_array_almost_equal(res_no_out2.as_array(), \
 -                                            res_out2.as_array(), decimal=4)     
 -      
 -          
 -          
 -    
 -    
 -    
 -    
 -    
 -    
 -    
 -    
 -    
 -    
 -    
 -    
 -
 -    
 -
 -
 -    
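The convex_conjugate and proximal methods of ScaledFunction above rely on the identities (alpha*f)^*(x) = alpha * f^*(x/alpha) and prox_{tau*(alpha*f)}(x) = prox_{(tau*alpha)*f}(x), for alpha > 0. A minimal NumPy-only check for f(x) = ||x||_2^2, whose conjugate and proximal map are known in closed form:

import numpy as np

alpha, tau = 3.0, 0.7
x = np.array([1.0, -2.0, 4.0])

# f(x) = ||x||_2^2 : f*(y) = ||y||^2 / 4  and  prox_{t f}(x) = x / (1 + 2 t)
f_conj = lambda v: np.dot(v, v) / 4.0
f_prox = lambda v, t: v / (1.0 + 2.0 * t)

# (alpha*f)^*(x) = alpha * f^*(x / alpha); worked out by hand this is ||x||^2 / (4 alpha)
assert np.isclose(alpha * f_conj(x / alpha), np.dot(x, x) / (4.0 * alpha))

# prox_{tau*(alpha*f)}(x) = prox_{(tau*alpha)*f}(x); by hand this is x / (1 + 2 tau alpha)
assert np.allclose(f_prox(x, tau * alpha), x / (1.0 + 2.0 * tau * alpha))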
 diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/ZeroFunction.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/ZeroFunction.py deleted file mode 100644 index a019815..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/ZeroFunction.py +++ /dev/null @@ -1,55 +0,0 @@ -# -*- coding: utf-8 -*- -#   This work is part of the Core Imaging Library developed by -#   Visual Analytics and Imaging System Group of the Science Technology -#   Facilities Council, STFC - -#   Copyright 2018-2019 Evangelos Papoutsellis and Edoardo Pasca - -#   Licensed under the Apache License, Version 2.0 (the "License"); -#   you may not use this file except in compliance with the License. -#   You may obtain a copy of the License at - -#       http://www.apache.org/licenses/LICENSE-2.0 - -#   Unless required by applicable law or agreed to in writing, software -#   distributed under the License is distributed on an "AS IS" BASIS, -#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#   See the License for the specific language governing permissions and -#   limitations under the License. - -from ccpi.optimisation.functions import Function -from ccpi.framework import BlockDataContainer  - -class ZeroFunction(Function): -     -    ''' ZeroFunction: f(x) = 0 -     -     -    ''' -     -    def __init__(self): -        super(ZeroFunction, self).__init__() -               -    def __call__(self,x): -        return 0 -     -    def convex_conjugate(self, x): -         -        ''' This is the support function sup <x, x^{*}>  which in fact is the  -        indicator function for the set = {0} -        So 0 if x=0, or inf if x neq 0                 -        ''' -        return x.maximum(0).sum() -         -     -    def proximal(self, x, tau, out=None): -        if out is None: -            return x.copy() -        else: -            out.fill(x) -         -    def proximal_conjugate(self, x, tau, out = None): -        if out is None: -            return 0 -        else: -            return 0 diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/functions/__init__.py b/Wrappers/Python/build/lib/ccpi/optimisation/functions/__init__.py deleted file mode 100644 index a82ee3e..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/functions/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# -*- coding: utf-8 -*- - -from .Function import Function -from .ZeroFunction import ZeroFunction -from .L1Norm import L1Norm -from .L2NormSquared import L2NormSquared -from .ScaledFunction import ScaledFunction -from .BlockFunction import BlockFunction -from .FunctionOperatorComposition import FunctionOperatorComposition -from .MixedL21Norm import MixedL21Norm -from .IndicatorBox import IndicatorBox -from .KullbackLeibler import KullbackLeibler -from .Norm2Sq import Norm2sq diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/BlockOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/BlockOperator.py deleted file mode 100644 index c8bd546..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/BlockOperator.py +++ /dev/null @@ -1,417 +0,0 @@ -# -*- coding: utf-8 -*- -""" -Created on Thu Feb 14 12:36:40 2019 - -@author: ofn77899 -""" -#from ccpi.optimisation.ops import Operator -import numpy -from numbers import Number -import functools -from ccpi.framework import AcquisitionData, ImageData, BlockDataContainer, DataContainer -from ccpi.optimisation.operators import Operator, LinearOperator -from 
ccpi.optimisation.operators.BlockScaledOperator import BlockScaledOperator -from ccpi.framework import BlockGeometry -        -class BlockOperator(Operator): -    '''A Block matrix containing Operators - -    The Block Framework is a generic strategy to treat variational problems in the -    following form: - -    .. math:: -     -      min Regulariser + Fidelity - -     -    BlockOperators have a generic shape M x N, and when applied on an  -    Nx1 BlockDataContainer, will yield and Mx1 BlockDataContainer. -    Notice: BlockDatacontainer are only allowed to have the shape of N x 1, with -    N rows and 1 column. -     -    User may specify the shape of the block, by default is a row vector - -    Operators in a Block are required to have the same domain column-wise and the -    same range row-wise. -    ''' -    __array_priority__ = 1 -    def __init__(self, *args, **kwargs): -        ''' -        Class creator - -        Note: -            Do not include the `self` parameter in the ``Args`` section. - -        Args: -            :param: vararg (Operator): Operators in the block. -            :param: shape (:obj:`tuple`, optional): If shape is passed the Operators in  -                  vararg are considered input in a row-by-row fashion.  -                  Shape and number of Operators must match. -                   -        Example: -            BlockOperator(op0,op1) results in a row block -            BlockOperator(op0,op1,shape=(1,2)) results in a column block -        ''' -        self.operators = args -        shape = kwargs.get('shape', None) -        if shape is None: -            shape = (len(args),1) -        self.shape = shape -        n_elements = functools.reduce(lambda x,y: x*y, shape, 1) -        if len(args) != n_elements: -            raise ValueError( -                    'Dimension and size do not match: expected {} got {}' -                    .format(n_elements,len(args))) -        # test if operators are compatible -        if not self.column_wise_compatible(): -            raise ValueError('Operators in each column must have the same domain') -        if not self.row_wise_compatible(): -            raise ValueError('Operators in each row must have the same range') -     -    def column_wise_compatible(self): -        '''Operators in a Block should have the same domain per column''' -        rows, cols = self.shape -        compatible = True -        for col in range(cols): -            column_compatible = True -            for row in range(1,rows): -                dg0 = self.get_item(row-1,col).domain_geometry() -                dg1 = self.get_item(row,col).domain_geometry() -                column_compatible = dg0.__dict__ == dg1.__dict__ and column_compatible -            compatible = compatible and column_compatible -        return compatible -     -    def row_wise_compatible(self): -        '''Operators in a Block should have the same range per row''' -        rows, cols = self.shape -        compatible = True -        for row in range(rows): -            row_compatible = True -            for col in range(1,cols): -                dg0 = self.get_item(row,col-1).range_geometry() -                dg1 = self.get_item(row,col).range_geometry() -                row_compatible = dg0.__dict__ == dg1.__dict__ and row_compatible -            compatible = compatible and row_compatible -        return compatible - -    def get_item(self, row, col): -        '''returns the Operator at specified row and col''' -        if row > self.shape[0]: -            raise 
ValueError('Requested row {} > max {}'.format(row, self.shape[0])) -        if col > self.shape[1]: -            raise ValueError('Requested col {} > max {}'.format(col, self.shape[1])) -         -        index = row*self.shape[1]+col -        return self.operators[index] -     -    def norm(self): -        norm = [op.norm()**2 for op in self.operators] -        return numpy.sqrt(sum(norm))     -     -    def direct(self, x, out=None): -        '''Direct operation for the BlockOperator - -        BlockOperator work on BlockDataContainer, but they will work on DataContainers -        and inherited classes by simple wrapping the input in a BlockDataContainer of shape (1,1) -        ''' -         -        if not isinstance (x, BlockDataContainer): -            x_b = BlockDataContainer(x) -        else: -            x_b = x -        shape = self.get_output_shape(x_b.shape) -        res = [] -         -        if out is None: -         -            for row in range(self.shape[0]): -                for col in range(self.shape[1]): -                    if col == 0: -                        prod = self.get_item(row,col).direct(x_b.get_item(col)) -                    else: -                        prod += self.get_item(row,col).direct(x_b.get_item(col)) -                res.append(prod) -            return BlockDataContainer(*res, shape=shape) -                 -        else: -             -            tmp = self.range_geometry().allocate() -            for row in range(self.shape[0]): -                for col in range(self.shape[1]): -                    if col == 0:        -                        self.get_item(row,col).direct( -                                                      x_b.get_item(col), -                                                      out=out.get_item(row))                         -                    else: -                        a = out.get_item(row) -                        self.get_item(row,col).direct( -                                                      x_b.get_item(col),  -                                                      out=tmp.get_item(row)) -                        a += tmp.get_item(row) -                 -    def adjoint(self, x, out=None): -        '''Adjoint operation for the BlockOperator - -        BlockOperator may contain both LinearOperator and Operator -        This method exists in BlockOperator as it is not known what type of -        Operator it will contain. 
- -        BlockOperator work on BlockDataContainer, but they will work on DataContainers -        and inherited classes by simple wrapping the input in a BlockDataContainer of shape (1,1) - -        Raises: ValueError if the contained Operators are not linear -        ''' -        if not self.is_linear(): -            raise ValueError('Not all operators in Block are linear.') -        if not isinstance (x, BlockDataContainer): -            x_b = BlockDataContainer(x) -        else: -            x_b = x -        shape = self.get_output_shape(x_b.shape, adjoint=True) -        if out is None: -            res = [] -            for col in range(self.shape[1]): -                for row in range(self.shape[0]): -                    if row == 0: -                        prod = self.get_item(row, col).adjoint(x_b.get_item(row)) -                    else: -                        prod += self.get_item(row, col).adjoint(x_b.get_item(row)) -                res.append(prod) -            if self.shape[1]==1: -                return ImageData(*res) -            else: -                return BlockDataContainer(*res, shape=shape) -        else: -            #tmp = self.domain_geometry().allocate() - -            for col in range(self.shape[1]): -                for row in range(self.shape[0]): -                    if row == 0: -                        if issubclass(out.__class__, DataContainer): -                            self.get_item(row, col).adjoint( -                                                x_b.get_item(row), -                                                out=out) -                        else: -                            op = self.get_item(row,col) -                            self.get_item(row, col).adjoint( -                                                x_b.get_item(row), -                                                out=out.get_item(col)) -                    else: -                        if issubclass(out.__class__, DataContainer): -                            out += self.get_item(row,col).adjoint( -                                                        x_b.get_item(row)) -                        else: -                            a = out.get_item(col) -                            a += self.get_item(row,col).adjoint( -                                                        x_b.get_item(row), -                                                        ) -    def is_linear(self): -        '''returns whether all the elements of the BlockOperator are linear''' -        return functools.reduce(lambda x, y: x and y.is_linear(), self.operators, True) - -    def get_output_shape(self, xshape, adjoint=False): -        '''returns the shape of the output BlockDataContainer -         -        A(N,M) direct u(M,1) -> N,1 -        A(N,M)^T adjoint u(N,1) -> M,1 -        ''' -        rows , cols = self.shape -        xrows, xcols = xshape -        if xcols != 1: -            raise ValueError('BlockDataContainer cannot have more than 1 column') -        if adjoint: -            if rows != xrows: -                raise ValueError('Incompatible shapes {} {}'.format(self.shape, xshape)) -            return (cols,xcols) -        if cols != xrows: -            raise ValueError('Incompatible shapes {} {}'.format((rows,cols), xshape)) -        return (rows,xcols) -         -    def __rmul__(self, scalar): -        '''Defines the left multiplication with a scalar - -        Args: scalar (number or iterable containing numbers): - -        Returns: a block operator with Scaled Operators inside''' -       
 if isinstance (scalar, list) or isinstance(scalar, tuple) or \ -                isinstance(scalar, numpy.ndarray): -            if len(scalar) != len(self.operators): -                raise ValueError('dimensions of scalars and operators do not match') -            scalars = scalar -        else: -            scalars = [scalar for _ in self.operators] -        # create a list of ScaledOperator-s -        ops = [ v * op for v,op in zip(scalars, self.operators)] -        #return BlockScaledOperator(self, scalars ,shape=self.shape) -        return type(self)(*ops, shape=self.shape) -    @property -    def T(self): -        '''Return the transposed of self -         -        input in a row-by-row''' -        newshape = (self.shape[1], self.shape[0]) -        oplist = [] -        for col in range(newshape[1]): -            for row in range(newshape[0]): -                oplist.append(self.get_item(col,row)) -        return type(self)(*oplist, shape=newshape) - -    def domain_geometry(self): -        '''returns the domain of the BlockOperator - -        If the shape of the BlockOperator is (N,1) the domain is a ImageGeometry or AcquisitionGeometry. -        Otherwise it is a BlockGeometry. -        ''' -        if self.shape[1] == 1: -            # column BlockOperator -            return self.get_item(0,0).domain_geometry() -        else: -            # get the geometries column wise -            # we need only the geometries from the first row -            # since it is compatible from __init__ -            tmp = [] -            for i in range(self.shape[1]): -                tmp.append(self.get_item(0,i).domain_geometry()) -            return BlockGeometry(*tmp)                 -                                     -            #shape = (self.shape[0], 1) -            #return BlockGeometry(*[el.domain_geometry() for el in self.operators], -            #        shape=self.shape) - -    def range_geometry(self): -        '''returns the range of the BlockOperator''' -         -        tmp = [] -        for i in range(self.shape[0]): -            tmp.append(self.get_item(i,0).range_geometry()) -        return BlockGeometry(*tmp)             -         -         -        #shape = (self.shape[1], 1) -        #return BlockGeometry(*[el.range_geometry() for el in self.operators], -        #            shape=shape) -         -    def sum_abs_row(self): -         -        res = [] -        for row in range(self.shape[0]): -            for col in range(self.shape[1]):                             -                if col == 0: -                    prod = self.get_item(row,col).sum_abs_row() -                else: -                    prod += self.get_item(row,col).sum_abs_row() -            res.append(prod) -             -        if self.shape[1]==1: -            tmp = sum(res) -            return ImageData(tmp) -        else: -             -            return BlockDataContainer(*res) -         -    def sum_abs_col(self): - -        res = [] -        for row in range(self.shape[0]): -            for col in range(self.shape[1]): -                if col == 0: -                    prod = self.get_item(row, col).sum_abs_col() -                else: -                    prod += self.get_item(row, col).sum_abs_col() -            res.append(prod) - -        return BlockDataContainer(*res)         -         -         -         -if __name__ == '__main__': -     -    from ccpi.framework import ImageGeometry -    from ccpi.optimisation.operators import Gradient, Identity, \ -                            
SparseFiniteDiff, SymmetrizedGradient, ZeroOperator - -         -    M, N = 4, 3 -    ig = ImageGeometry(M, N) -    arr = ig.allocate('random_int')   -     -    G = Gradient(ig) -    Id = Identity(ig) -     -    B = BlockOperator(G, Id) -     -    print(B.sum_abs_row()) -#     -    Gx = SparseFiniteDiff(ig, direction=1, bnd_cond='Neumann') -    Gy = SparseFiniteDiff(ig, direction=0, bnd_cond='Neumann') -     -    d1 = abs(Gx.matrix()).toarray().sum(axis=0) -    d2 = abs(Gy.matrix()).toarray().sum(axis=0) -    d3 = abs(Id.matrix()).toarray().sum(axis=0) -        -     -    d_res = numpy.reshape(d1 + d2 + d3, ig.shape, 'F') -     -    print(d_res) -#     -    z1 = abs(Gx.matrix()).toarray().sum(axis=1) -    z2 = abs(Gy.matrix()).toarray().sum(axis=1) -    z3 = abs(Id.matrix()).toarray().sum(axis=1) -# -    z_res = BlockDataContainer(BlockDataContainer(ImageData(numpy.reshape(z2, ig.shape, 'F')),\ -                                                  ImageData(numpy.reshape(z1, ig.shape, 'F'))),\ -                                                  ImageData(numpy.reshape(z3, ig.shape, 'F'))) -# -    ttt = B.sum_abs_col() -#     -    #TODO this is not working -#    numpy.testing.assert_array_almost_equal(z_res[0][0].as_array(), ttt[0][0].as_array(), decimal=4)     -#    numpy.testing.assert_array_almost_equal(z_res[0][1].as_array(), ttt[0][1].as_array(), decimal=4)     -#    numpy.testing.assert_array_almost_equal(z_res[1].as_array(), ttt[1].as_array(), decimal=4)     - - -    u = ig.allocate('random_int') -     -    z1 = B.direct(u) -    res = B.range_geometry().allocate() -     -    B.direct(u, out = res) -     -     -     -    ########################################################################### -    # Block Operator for TGV reconstruction -     -    M, N = 2,3 -    ig = ImageGeometry(M, N) -    ag = ig -     -    op11 = Gradient(ig) -    op12 = Identity(op11.range_geometry()) -     -    op22 = SymmetrizedGradient(op11.domain_geometry()) -     -    op21 = ZeroOperator(ig, op22.range_geometry()) -     -     -    op31 = Identity(ig, ag) -    op32 = ZeroOperator(op22.domain_geometry(), ag) -     -    operator = BlockOperator(op11, -1*op12, op21, op22, op31, op32, shape=(3,2) )     -     -    z1 = operator.domain_geometry() -    z2 = operator.range_geometry() -     -    print(z1.shape) -    print(z2.shape) -     -     -     -     -     -     -     -     -     -     -    
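For illustration only (not part of the deleted file): the BlockOperator docstring above describes an M x N block of operators applied to an N x 1 BlockDataContainer. A minimal usage sketch along the lines of the __main__ test above, assuming the ccpi package from this tree is importable:

    from ccpi.framework import ImageGeometry
    from ccpi.optimisation.operators import Gradient, Identity, BlockOperator

    M, N = 4, 3
    ig = ImageGeometry(M, N)
    u = ig.allocate('random_int')

    # default shape is (number of operators, 1); K maps the image space into
    # a BlockGeometry holding the ranges of Gradient and Identity
    K = BlockOperator(Gradient(ig), Identity(ig))
    y = K.direct(u)                 # BlockDataContainer with one row per operator
    print(K.shape, y.shape)         # (2, 1) and (2, 1)

    # the out= form writes into a pre-allocated BlockDataContainer instead
    res = K.range_geometry().allocate()
    K.direct(u, out=res)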
\ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/BlockScaledOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/BlockScaledOperator.py deleted file mode 100644 index aeb6c53..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/BlockScaledOperator.py +++ /dev/null @@ -1,67 +0,0 @@ -from numbers import Number -import numpy -from ccpi.optimisation.operators import ScaledOperator -import functools - -class BlockScaledOperator(ScaledOperator): -    '''ScaledOperator - -    A class to represent the scalar multiplication of an Operator with a scalar. -    It holds an operator and a scalar. Basically it returns the multiplication -    of the result of direct and adjoint of the operator with the scalar. -    For the rest it behaves like the operator it holds. - -    Args: -       operator (Operator): a Operator or LinearOperator -       scalar (Number): a scalar multiplier -    Example: -       The scaled operator behaves like the following: -       sop = ScaledOperator(operator, scalar) -       sop.direct(x) = scalar * operator.direct(x) -       sop.adjoint(x) = scalar * operator.adjoint(x) -       sop.norm() = operator.norm() -       sop.range_geometry() = operator.range_geometry() -       sop.domain_geometry() = operator.domain_geometry() -    ''' -    def __init__(self, operator, scalar, shape=None): -        if shape is None: -            shape = operator.shape -         -        if isinstance(scalar, (list, tuple, numpy.ndarray)): -            size = functools.reduce(lambda x,y:x*y, shape, 1) -            if len(scalar) != size: -                raise ValueError('Scalar and operators size do not match: {}!={}' -                .format(len(scalar), len(operator))) -            self.scalar = scalar[:] -            print ("BlockScaledOperator ", self.scalar) -        elif isinstance (scalar, Number): -            self.scalar = scalar -        else: -            raise TypeError('expected scalar to be a number of an iterable: got {}'.format(type(scalar))) -        self.operator = operator -        self.shape = shape -    def direct(self, x, out=None): -        print ("BlockScaledOperator self.scalar", self.scalar) -        #print ("self.scalar", self.scalar[0]* x.get_item(0).as_array()) -        return self.scalar * (self.operator.direct(x, out=out)) -    def adjoint(self, x, out=None): -        if self.operator.is_linear(): -            return self.scalar * self.operator.adjoint(x, out=out) -        else: -            raise TypeError('Operator is not linear') -    def norm(self): -        return numpy.abs(self.scalar) * self.operator.norm() -    def range_geometry(self): -        return self.operator.range_geometry() -    def domain_geometry(self): -        return self.operator.domain_geometry() -    @property -    def T(self): -        '''Return the transposed of self''' -        #print ("transpose before" , self.shape) -        #shape = (self.shape[1], self.shape[0]) -        ##self.shape = shape -        ##self.operator.shape = shape -        #print ("transpose" , shape) -        #return self -        return type(self)(self.operator.T, self.scalar)
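For illustration only (not part of the deleted file): the docstring above states the scaled-operator contract that BlockOperator.__rmul__ relies on, namely that direct, adjoint and norm of the wrapped operator are simply multiplied by the scalar. A small stand-alone sketch of that contract, using a hypothetical dense operator rather than the ccpi classes:

    import numpy as np

    class DenseOp:
        '''hypothetical dense-matrix operator, only to exercise the contract'''
        def __init__(self, A):
            self.A = np.asarray(A, dtype=float)
        def direct(self, x):
            return self.A @ x
        def adjoint(self, y):
            return self.A.T @ y
        def norm(self):
            return np.linalg.norm(self.A, 2)

    class Scaled:
        '''mirrors the documented behaviour: scalar times direct/adjoint/norm'''
        def __init__(self, op, c):
            self.op, self.c = op, c
        def direct(self, x):
            return self.c * self.op.direct(x)
        def adjoint(self, y):
            return self.c * self.op.adjoint(y)
        def norm(self):
            return abs(self.c) * self.op.norm()

    A = DenseOp([[1.0, 2.0], [3.0, 4.0]])
    sA = Scaled(A, -2.0)
    x = np.array([1.0, 1.0])
    print(sA.direct(x), -2.0 * A.direct(x))   # same vector
    print(sA.norm(), 2.0 * A.norm())          # same number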
\ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/FiniteDifferenceOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/FiniteDifferenceOperator.py deleted file mode 100644 index f459334..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/FiniteDifferenceOperator.py +++ /dev/null @@ -1,372 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Fri Mar  1 22:51:17 2019 - -@author: evangelos -""" - -from ccpi.optimisation.operators import LinearOperator -from ccpi.framework import ImageData, BlockDataContainer -import numpy as np - -class FiniteDiff(LinearOperator): -     -    # Works for Neum/Symmetric &  periodic boundary conditions -    # TODO add central differences??? -    # TODO not very well optimised, too many conditions -    # TODO add discretisation step, should get that from imageGeometry -     -    # Grad_order = ['channels', 'direction_z', 'direction_y', 'direction_x'] -    # Grad_order = ['channels', 'direction_y', 'direction_x'] -    # Grad_order = ['direction_z', 'direction_y', 'direction_x'] -    # Grad_order = ['channels', 'direction_z', 'direction_y', 'direction_x'] -     -    def __init__(self, gm_domain, gm_range=None, direction=0, bnd_cond = 'Neumann'): -        '''''' -        super(FiniteDiff, self).__init__()  -        '''FIXME: domain and range should be geometries''' -        self.gm_domain = gm_domain -        self.gm_range = gm_range -         -        self.direction = direction -        self.bnd_cond = bnd_cond -         -        # Domain Geometry = Range Geometry if not stated -        if self.gm_range is None: -            self.gm_range = self.gm_domain -        # check direction and "length" of geometry -        if self.direction + 1 > len(self.gm_domain.shape): -            raise ValueError('Gradient directions more than geometry domain')       -         -        #self.voxel_size = kwargs.get('voxel_size',1) -        # this wrongly assumes a homogeneous voxel size -#        self.voxel_size = self.gm_domain.voxel_size_x - - -    def direct(self, x, out=None): -         -        x_asarr = x.as_array() -        x_sz = len(x.shape) -         -        if out is None:         -            out = np.zeros_like(x_asarr) -        else: -            out = out.as_array() -            out[:]=0 - -                   - -        ######################## Direct for 2D  ############################### -        if x_sz == 2: -             -            if self.direction == 1: -                 -                np.subtract( x_asarr[:,1:], x_asarr[:,0:-1], out = out[:,0:-1] ) -                 -                if self.bnd_cond == 'Neumann': -                    pass -                elif self.bnd_cond == 'Periodic': -                    np.subtract( x_asarr[:,0], x_asarr[:,-1], out = out[:,-1] ) -                else:  -                    raise ValueError('No valid boundary conditions') -                 -            if self.direction == 0: -                 -                np.subtract( x_asarr[1:], x_asarr[0:-1], out = out[0:-1,:] ) - -                if self.bnd_cond == 'Neumann': -                    pass                                         -                elif self.bnd_cond == 'Periodic': -                    np.subtract( x_asarr[0,:], x_asarr[-1,:], out = out[-1,:] )  -                else:     -                    raise ValueError('No valid boundary conditions')  -                     -        ######################## Direct for 3D  ###############################           
              -        elif x_sz == 3: -                     -            if self.direction == 0:   -                 -                np.subtract( x_asarr[1:,:,:], x_asarr[0:-1,:,:], out = out[0:-1,:,:] ) -                 -                if self.bnd_cond == 'Neumann': -                    pass -                elif self.bnd_cond == 'Periodic': -                    np.subtract( x_asarr[0,:,:], x_asarr[-1,:,:], out = out[-1,:,:] )  -                else:     -                    raise ValueError('No valid boundary conditions')                       -                                                              -            if self.direction == 1: -                 -                np.subtract( x_asarr[:,1:,:], x_asarr[:,0:-1,:], out = out[:,0:-1,:] )  -                 -                if self.bnd_cond == 'Neumann': -                    pass -                elif self.bnd_cond == 'Periodic': -                    np.subtract( x_asarr[:,0,:], x_asarr[:,-1,:], out = out[:,-1,:] ) -                else:     -                    raise ValueError('No valid boundary conditions')                       -                                 -              -            if self.direction == 2: -                 -                np.subtract( x_asarr[:,:,1:], x_asarr[:,:,0:-1], out = out[:,:,0:-1] )  -                 -                if self.bnd_cond == 'Neumann': -                    pass -                elif self.bnd_cond == 'Periodic': -                    np.subtract( x_asarr[:,:,0], x_asarr[:,:,-1], out = out[:,:,-1] ) -                else:     -                    raise ValueError('No valid boundary conditions')   -                     -        ######################## Direct for 4D  ############################### -        elif x_sz == 4: -                     -            if self.direction == 0:                             -                np.subtract( x_asarr[1:,:,:,:], x_asarr[0:-1,:,:,:], out = out[0:-1,:,:,:] ) -                 -                if self.bnd_cond == 'Neumann': -                    pass -                elif self.bnd_cond == 'Periodic': -                    np.subtract( x_asarr[0,:,:,:], x_asarr[-1,:,:,:], out = out[-1,:,:,:] ) -                else:     -                    raise ValueError('No valid boundary conditions')  -                                                 -            if self.direction == 1: -                np.subtract( x_asarr[:,1:,:,:], x_asarr[:,0:-1,:,:], out = out[:,0:-1,:,:] )  -                 -                if self.bnd_cond == 'Neumann': -                    pass -                elif self.bnd_cond == 'Periodic': -                    np.subtract( x_asarr[:,0,:,:], x_asarr[:,-1,:,:], out = out[:,-1,:,:] ) -                else:     -                    raise ValueError('No valid boundary conditions')                  -                 -            if self.direction == 2: -                np.subtract( x_asarr[:,:,1:,:], x_asarr[:,:,0:-1,:], out = out[:,:,0:-1,:] )  -                 -                if self.bnd_cond == 'Neumann': -                    pass -                elif self.bnd_cond == 'Periodic': -                    np.subtract( x_asarr[:,:,0,:], x_asarr[:,:,-1,:], out = out[:,:,-1,:] ) -                else:     -                    raise ValueError('No valid boundary conditions')                    -                 -            if self.direction == 3: -                np.subtract( x_asarr[:,:,:,1:], x_asarr[:,:,:,0:-1], out = out[:,:,:,0:-1] )                  - -                if self.bnd_cond == 'Neumann': -                    pass - 
               elif self.bnd_cond == 'Periodic': -                    np.subtract( x_asarr[:,:,:,0], x_asarr[:,:,:,-1], out = out[:,:,:,-1] ) -                else:     -                    raise ValueError('No valid boundary conditions')                    -                                 -        else: -            raise NotImplementedError                 -          -#        res = out #/self.voxel_size  -        return type(x)(out) - -                     -    def adjoint(self, x, out=None): -         -        x_asarr = x.as_array() -        x_sz = len(x.shape) -         -        if out is None:         -            out = np.zeros_like(x_asarr) -        else: -            out = out.as_array()         -            out[:]=0 - -         -        ######################## Adjoint for 2D  ############################### -        if x_sz == 2:         -         -            if self.direction == 1: -                 -                np.subtract( x_asarr[:,1:], x_asarr[:,0:-1], out = out[:,1:] ) -                 -                if self.bnd_cond == 'Neumann': -                    np.subtract( x_asarr[:,0], 0, out = out[:,0] ) -                    np.subtract( -x_asarr[:,-2], 0, out = out[:,-1] ) -                     -                elif self.bnd_cond == 'Periodic': -                    np.subtract( x_asarr[:,0], x_asarr[:,-1], out = out[:,0] )                                         -                     -                else:    -                    raise ValueError('No valid boundary conditions')  -                                     -            if self.direction == 0: -                 -                np.subtract( x_asarr[1:,:], x_asarr[0:-1,:], out = out[1:,:] ) -                 -                if self.bnd_cond == 'Neumann': -                    np.subtract( x_asarr[0,:], 0, out = out[0,:] ) -                    np.subtract( -x_asarr[-2,:], 0, out = out[-1,:] )  -                     -                elif self.bnd_cond == 'Periodic':   -                    np.subtract( x_asarr[0,:], x_asarr[-1,:], out = out[0,:] )  -                     -                else:    -                    raise ValueError('No valid boundary conditions')      -         -        ######################## Adjoint for 3D  ###############################         -        elif x_sz == 3:                 -                 -            if self.direction == 0:           -                   -                np.subtract( x_asarr[1:,:,:], x_asarr[0:-1,:,:], out = out[1:,:,:] ) -                 -                if self.bnd_cond == 'Neumann': -                    np.subtract( x_asarr[0,:,:], 0, out = out[0,:,:] ) -                    np.subtract( -x_asarr[-2,:,:], 0, out = out[-1,:,:] ) -                elif self.bnd_cond == 'Periodic':                      -                    np.subtract( x_asarr[0,:,:], x_asarr[-1,:,:], out = out[0,:,:] ) -                else:    -                    raise ValueError('No valid boundary conditions')                      -                                     -            if self.direction == 1: -                np.subtract( x_asarr[:,1:,:], x_asarr[:,0:-1,:], out = out[:,1:,:] ) -                 -                if self.bnd_cond == 'Neumann': -                    np.subtract( x_asarr[:,0,:], 0, out = out[:,0,:] ) -                    np.subtract( -x_asarr[:,-2,:], 0, out = out[:,-1,:] ) -                elif self.bnd_cond == 'Periodic':                      -                    np.subtract( x_asarr[:,0,:], x_asarr[:,-1,:], out = out[:,0,:] ) -                else:    -              
      raise ValueError('No valid boundary conditions')                                  -                 -            if self.direction == 2: -                np.subtract( x_asarr[:,:,1:], x_asarr[:,:,0:-1], out = out[:,:,1:] ) -                 -                if self.bnd_cond == 'Neumann': -                    np.subtract( x_asarr[:,:,0], 0, out = out[:,:,0] )  -                    np.subtract( -x_asarr[:,:,-2], 0, out = out[:,:,-1] )  -                elif self.bnd_cond == 'Periodic':                      -                    np.subtract( x_asarr[:,:,0], x_asarr[:,:,-1], out = out[:,:,0] ) -                else:    -                    raise ValueError('No valid boundary conditions')                                  -         -        ######################## Adjoint for 4D  ###############################         -        elif x_sz == 4:                 -                 -            if self.direction == 0:                             -                np.subtract( x_asarr[1:,:,:,:], x_asarr[0:-1,:,:,:], out = out[1:,:,:,:] ) -                 -                if self.bnd_cond == 'Neumann': -                    np.subtract( x_asarr[0,:,:,:], 0, out = out[0,:,:,:] ) -                    np.subtract( -x_asarr[-2,:,:,:], 0, out = out[-1,:,:,:] ) -                     -                elif self.bnd_cond == 'Periodic': -                    np.subtract( x_asarr[0,:,:,:], x_asarr[-1,:,:,:], out = out[0,:,:,:] ) -                else:     -                    raise ValueError('No valid boundary conditions')  -                                 -            if self.direction == 1: -                np.subtract( x_asarr[:,1:,:,:], x_asarr[:,0:-1,:,:], out = out[:,1:,:,:] ) -                 -                if self.bnd_cond == 'Neumann': -                   np.subtract( x_asarr[:,0,:,:], 0, out = out[:,0,:,:] ) -                   np.subtract( -x_asarr[:,-2,:,:], 0, out = out[:,-1,:,:] ) -                     -                elif self.bnd_cond == 'Periodic': -                    np.subtract( x_asarr[:,0,:,:], x_asarr[:,-1,:,:], out = out[:,0,:,:] ) -                else:     -                    raise ValueError('No valid boundary conditions')  -                     -                 -            if self.direction == 2: -                np.subtract( x_asarr[:,:,1:,:], x_asarr[:,:,0:-1,:], out = out[:,:,1:,:] ) -                 -                if self.bnd_cond == 'Neumann': -                    np.subtract( x_asarr[:,:,0,:], 0, out = out[:,:,0,:] )  -                    np.subtract( -x_asarr[:,:,-2,:], 0, out = out[:,:,-1,:] )  -                     -                elif self.bnd_cond == 'Periodic': -                    np.subtract( x_asarr[:,:,0,:], x_asarr[:,:,-1,:], out = out[:,:,0,:] ) -                else:     -                    raise ValueError('No valid boundary conditions')                  -                 -            if self.direction == 3: -                np.subtract( x_asarr[:,:,:,1:], x_asarr[:,:,:,0:-1], out = out[:,:,:,1:] ) -                 -                if self.bnd_cond == 'Neumann': -                    np.subtract( x_asarr[:,:,:,0], 0, out = out[:,:,:,0] )  -                    np.subtract( -x_asarr[:,:,:,-2], 0, out = out[:,:,:,-1] )    -                     -                elif self.bnd_cond == 'Periodic': -                    np.subtract( x_asarr[:,:,:,0], x_asarr[:,:,:,-1], out = out[:,:,:,0] ) -                else:     -                    raise ValueError('No valid boundary conditions')                   -                               -        else: -        
    raise NotImplementedError -             -        out *= -1 #/self.voxel_size -        return type(x)(out) -             -    def range_geometry(self): -        '''Returns the range geometry''' -        return self.gm_range -     -    def domain_geometry(self): -        '''Returns the domain geometry''' -        return self.gm_domain -        -    def norm(self): -        x0 = self.gm_domain.allocate('random_int') -        self.s1, sall, svec = LinearOperator.PowerMethod(self, 25, x0) -        return self.s1 -     -     -if __name__ == '__main__': -     -    from ccpi.framework import ImageGeometry -    import numpy -     -    N, M = 2, 3 - -    ig = ImageGeometry(N, M) - - -    FD = FiniteDiff(ig, direction = 1, bnd_cond = 'Neumann') -    u = FD.domain_geometry().allocate('random_int') -         -    res = FD.domain_geometry().allocate() -    res1 = FD.range_geometry().allocate() -    FD.direct(u, out=res) - -    z = FD.direct(u)     -#    print(z.as_array(), res.as_array()) - -    for i in range(10): -#         -        z1 = FD.direct(u)  -        FD.direct(u, out=res) -         -        u = ig.allocate('random_int') -        res = u -        z1  = u -        numpy.testing.assert_array_almost_equal(z1.as_array(), \ -                                                res.as_array(), decimal=4) -         -#        print(z1.as_array(), res.as_array()) -        z2 = FD.adjoint(z1)  -        FD.adjoint(z1, out=res1)   -        numpy.testing.assert_array_almost_equal(z2.as_array(), \ -                                                res1.as_array(), decimal=4)         -         -         -         - -         -         -         -     -#    w = G.range_geometry().allocate('random_int') -     - -     -    
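For illustration only (not part of the deleted file): a pure-numpy sketch of the forward-difference stencil that FiniteDiff.direct applies along a single axis, with the 'Neumann' and 'Periodic' boundary branches shown above:

    import numpy as np

    def forward_diff_1d(x, bnd_cond='Neumann'):
        out = np.zeros_like(x)
        out[:-1] = x[1:] - x[:-1]          # interior forward differences
        if bnd_cond == 'Periodic':
            out[-1] = x[0] - x[-1]         # wrap-around difference for the last entry
        elif bnd_cond != 'Neumann':        # Neumann leaves the last entry at 0
            raise ValueError('No valid boundary conditions')
        return out

    x = np.array([1.0, 4.0, 9.0, 16.0])
    print(forward_diff_1d(x, 'Neumann'))   # [3. 5. 7. 0.]
    print(forward_diff_1d(x, 'Periodic'))  # [  3.   5.   7. -15.]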
\ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/FiniteDifferenceOperator_old.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/FiniteDifferenceOperator_old.py deleted file mode 100644 index 387fb4b..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/FiniteDifferenceOperator_old.py +++ /dev/null @@ -1,374 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Fri Mar  1 22:51:17 2019 - -@author: evangelos -""" - -from ccpi.optimisation.operators import LinearOperator -from ccpi.optimisation.ops import PowerMethodNonsquare -from ccpi.framework import ImageData, BlockDataContainer -import numpy as np - -class FiniteDiff(LinearOperator): -     -    # Works for Neum/Symmetric &  periodic boundary conditions -    # TODO add central differences??? -    # TODO not very well optimised, too many conditions -    # TODO add discretisation step, should get that from imageGeometry -     -    # Grad_order = ['channels', 'direction_z', 'direction_y', 'direction_x'] -    # Grad_order = ['channels', 'direction_y', 'direction_x'] -    # Grad_order = ['direction_z', 'direction_y', 'direction_x'] -    # Grad_order = ['channels', 'direction_z', 'direction_y', 'direction_x'] -     -    def __init__(self, gm_domain, gm_range=None, direction=0, bnd_cond = 'Neumann'): -        '''''' -        super(FiniteDiff, self).__init__()  -        '''FIXME: domain and range should be geometries''' -        self.gm_domain = gm_domain -        self.gm_range = gm_range -         -        self.direction = direction -        self.bnd_cond = bnd_cond -         -        # Domain Geometry = Range Geometry if not stated -        if self.gm_range is None: -            self.gm_range = self.gm_domain -        # check direction and "length" of geometry -        if self.direction + 1 > len(self.gm_domain.shape): -            raise ValueError('Gradient directions more than geometry domain')       -         -        #self.voxel_size = kwargs.get('voxel_size',1) -        # this wrongly assumes a homogeneous voxel size -        self.voxel_size = self.gm_domain.voxel_size_x - - -    def direct(self, x, out=None): -         -        x_asarr = x.as_array() -        x_sz = len(x.shape) -         -        if out is None:         -            out = np.zeros_like(x_asarr) -            fd_arr = out -        else: -            fd_arr = out.as_array()    -#            fd_arr[:]=0 -         -#        if out is None:         -#            out = self.gm_domain.allocate().as_array() -#         -#        fd_arr = out.as_array() -#        fd_arr = self.gm_domain.allocate().as_array() -           -        ######################## Direct for 2D  ############################### -        if x_sz == 2: -             -            if self.direction == 1: -                 -                np.subtract( x_asarr[:,1:], x_asarr[:,0:-1], out = fd_arr[:,0:-1] ) -                 -                if self.bnd_cond == 'Neumann': -                    pass                                         -                elif self.bnd_cond == 'Periodic': -                    np.subtract( x_asarr[:,0], x_asarr[:,-1], out = fd_arr[:,-1] ) -                else:  -                    raise ValueError('No valid boundary conditions') -                 -            if self.direction == 0: -                 -                np.subtract( x_asarr[1:], x_asarr[0:-1], out = fd_arr[0:-1,:] ) - -                if self.bnd_cond == 'Neumann': -                    pass                                         - 
               elif self.bnd_cond == 'Periodic': -                    np.subtract( x_asarr[0,:], x_asarr[-1,:], out = fd_arr[-1,:] )  -                else:     -                    raise ValueError('No valid boundary conditions')  -                     -        ######################## Direct for 3D  ###############################                         -        elif x_sz == 3: -                     -            if self.direction == 0:   -                 -                np.subtract( x_asarr[1:,:,:], x_asarr[0:-1,:,:], out = fd_arr[0:-1,:,:] ) -                 -                if self.bnd_cond == 'Neumann': -                    pass -                elif self.bnd_cond == 'Periodic': -                    np.subtract( x_asarr[0,:,:], x_asarr[-1,:,:], out = fd_arr[-1,:,:] )  -                else:     -                    raise ValueError('No valid boundary conditions')                       -                                                              -            if self.direction == 1: -                 -                np.subtract( x_asarr[:,1:,:], x_asarr[:,0:-1,:], out = fd_arr[:,0:-1,:] )  -                 -                if self.bnd_cond == 'Neumann': -                    pass -                elif self.bnd_cond == 'Periodic': -                    np.subtract( x_asarr[:,0,:], x_asarr[:,-1,:], out = fd_arr[:,-1,:] ) -                else:     -                    raise ValueError('No valid boundary conditions')                       -                                 -              -            if self.direction == 2: -                 -                np.subtract( x_asarr[:,:,1:], x_asarr[:,:,0:-1], out = fd_arr[:,:,0:-1] )  -                 -                if self.bnd_cond == 'Neumann': -                    pass -                elif self.bnd_cond == 'Periodic': -                    np.subtract( x_asarr[:,:,0], x_asarr[:,:,-1], out = fd_arr[:,:,-1] ) -                else:     -                    raise ValueError('No valid boundary conditions')   -                     -        ######################## Direct for 4D  ############################### -        elif x_sz == 4: -                     -            if self.direction == 0:                             -                np.subtract( x_asarr[1:,:,:,:], x_asarr[0:-1,:,:,:], out = fd_arr[0:-1,:,:,:] ) -                 -                if self.bnd_cond == 'Neumann': -                    pass -                elif self.bnd_cond == 'Periodic': -                    np.subtract( x_asarr[0,:,:,:], x_asarr[-1,:,:,:], out = fd_arr[-1,:,:,:] ) -                else:     -                    raise ValueError('No valid boundary conditions')  -                                                 -            if self.direction == 1: -                np.subtract( x_asarr[:,1:,:,:], x_asarr[:,0:-1,:,:], out = fd_arr[:,0:-1,:,:] )  -                 -                if self.bnd_cond == 'Neumann': -                    pass -                elif self.bnd_cond == 'Periodic': -                    np.subtract( x_asarr[:,0,:,:], x_asarr[:,-1,:,:], out = fd_arr[:,-1,:,:] ) -                else:     -                    raise ValueError('No valid boundary conditions')                  -                 -            if self.direction == 2: -                np.subtract( x_asarr[:,:,1:,:], x_asarr[:,:,0:-1,:], out = fd_arr[:,:,0:-1,:] )  -                 -                if self.bnd_cond == 'Neumann': -                    pass -                elif self.bnd_cond == 'Periodic': -                    np.subtract( x_asarr[:,:,0,:], x_asarr[:,:,-1,:], out 
= fd_arr[:,:,-1,:] ) -                else:     -                    raise ValueError('No valid boundary conditions')                    -                 -            if self.direction == 3: -                np.subtract( x_asarr[:,:,:,1:], x_asarr[:,:,:,0:-1], out = fd_arr[:,:,:,0:-1] )                  - -                if self.bnd_cond == 'Neumann': -                    pass -                elif self.bnd_cond == 'Periodic': -                    np.subtract( x_asarr[:,:,:,0], x_asarr[:,:,:,-1], out = fd_arr[:,:,:,-1] ) -                else:     -                    raise ValueError('No valid boundary conditions')                    -                                 -        else: -            raise NotImplementedError                 -          -#        res = out #/self.voxel_size  -        return type(x)(out) - -                     -    def adjoint(self, x, out=None): -         -        x_asarr = x.as_array() -        #x_asarr = x -        x_sz = len(x.shape) -         -        if out is None:         -            out = np.zeros_like(x_asarr) -            fd_arr = out -        else: -            fd_arr = out.as_array()           -         -#        if out is None:         -#            out = self.gm_domain.allocate().as_array() -#            fd_arr = out -#        else: -#            fd_arr = out.as_array() -##        fd_arr = self.gm_domain.allocate().as_array() -         -        ######################## Adjoint for 2D  ############################### -        if x_sz == 2:         -         -            if self.direction == 1: -                 -                np.subtract( x_asarr[:,1:], x_asarr[:,0:-1], out = fd_arr[:,1:] ) -                 -                if self.bnd_cond == 'Neumann': -                    np.subtract( x_asarr[:,0], 0, out = fd_arr[:,0] ) -                    np.subtract( -x_asarr[:,-2], 0, out = fd_arr[:,-1] ) -                     -                elif self.bnd_cond == 'Periodic': -                    np.subtract( x_asarr[:,0], x_asarr[:,-1], out = fd_arr[:,0] )                                         -                     -                else:    -                    raise ValueError('No valid boundary conditions')  -                                     -            if self.direction == 0: -                 -                np.subtract( x_asarr[1:,:], x_asarr[0:-1,:], out = fd_arr[1:,:] ) -                 -                if self.bnd_cond == 'Neumann': -                    np.subtract( x_asarr[0,:], 0, out = fd_arr[0,:] ) -                    np.subtract( -x_asarr[-2,:], 0, out = fd_arr[-1,:] )  -                     -                elif self.bnd_cond == 'Periodic':   -                    np.subtract( x_asarr[0,:], x_asarr[-1,:], out = fd_arr[0,:] )  -                     -                else:    -                    raise ValueError('No valid boundary conditions')      -         -        ######################## Adjoint for 3D  ###############################         -        elif x_sz == 3:                 -                 -            if self.direction == 0:           -                   -                np.subtract( x_asarr[1:,:,:], x_asarr[0:-1,:,:], out = fd_arr[1:,:,:] ) -                 -                if self.bnd_cond == 'Neumann': -                    np.subtract( x_asarr[0,:,:], 0, out = fd_arr[0,:,:] ) -                    np.subtract( -x_asarr[-2,:,:], 0, out = fd_arr[-1,:,:] ) -                elif self.bnd_cond == 'Periodic':                      -                    np.subtract( x_asarr[0,:,:], x_asarr[-1,:,:], out = fd_arr[0,:,:] 
) -                else:    -                    raise ValueError('No valid boundary conditions')                      -                                     -            if self.direction == 1: -                np.subtract( x_asarr[:,1:,:], x_asarr[:,0:-1,:], out = fd_arr[:,1:,:] ) -                 -                if self.bnd_cond == 'Neumann': -                    np.subtract( x_asarr[:,0,:], 0, out = fd_arr[:,0,:] ) -                    np.subtract( -x_asarr[:,-2,:], 0, out = fd_arr[:,-1,:] ) -                elif self.bnd_cond == 'Periodic':                      -                    np.subtract( x_asarr[:,0,:], x_asarr[:,-1,:], out = fd_arr[:,0,:] ) -                else:    -                    raise ValueError('No valid boundary conditions')                                  -                 -            if self.direction == 2: -                np.subtract( x_asarr[:,:,1:], x_asarr[:,:,0:-1], out = fd_arr[:,:,1:] ) -                 -                if self.bnd_cond == 'Neumann': -                    np.subtract( x_asarr[:,:,0], 0, out = fd_arr[:,:,0] )  -                    np.subtract( -x_asarr[:,:,-2], 0, out = fd_arr[:,:,-1] )  -                elif self.bnd_cond == 'Periodic':                      -                    np.subtract( x_asarr[:,:,0], x_asarr[:,:,-1], out = fd_arr[:,:,0] ) -                else:    -                    raise ValueError('No valid boundary conditions')                                  -         -        ######################## Adjoint for 4D  ###############################         -        elif x_sz == 4:                 -                 -            if self.direction == 0:                             -                np.subtract( x_asarr[1:,:,:,:], x_asarr[0:-1,:,:,:], out = fd_arr[1:,:,:,:] ) -                 -                if self.bnd_cond == 'Neumann': -                    np.subtract( x_asarr[0,:,:,:], 0, out = fd_arr[0,:,:,:] ) -                    np.subtract( -x_asarr[-2,:,:,:], 0, out = fd_arr[-1,:,:,:] ) -                     -                elif self.bnd_cond == 'Periodic': -                    np.subtract( x_asarr[0,:,:,:], x_asarr[-1,:,:,:], out = fd_arr[0,:,:,:] ) -                else:     -                    raise ValueError('No valid boundary conditions')  -                                 -            if self.direction == 1: -                np.subtract( x_asarr[:,1:,:,:], x_asarr[:,0:-1,:,:], out = fd_arr[:,1:,:,:] ) -                 -                if self.bnd_cond == 'Neumann': -                   np.subtract( x_asarr[:,0,:,:], 0, out = fd_arr[:,0,:,:] ) -                   np.subtract( -x_asarr[:,-2,:,:], 0, out = fd_arr[:,-1,:,:] ) -                     -                elif self.bnd_cond == 'Periodic': -                    np.subtract( x_asarr[:,0,:,:], x_asarr[:,-1,:,:], out = fd_arr[:,0,:,:] ) -                else:     -                    raise ValueError('No valid boundary conditions')  -                     -                 -            if self.direction == 2: -                np.subtract( x_asarr[:,:,1:,:], x_asarr[:,:,0:-1,:], out = fd_arr[:,:,1:,:] ) -                 -                if self.bnd_cond == 'Neumann': -                    np.subtract( x_asarr[:,:,0,:], 0, out = fd_arr[:,:,0,:] )  -                    np.subtract( -x_asarr[:,:,-2,:], 0, out = fd_arr[:,:,-1,:] )  -                     -                elif self.bnd_cond == 'Periodic': -                    np.subtract( x_asarr[:,:,0,:], x_asarr[:,:,-1,:], out = fd_arr[:,:,0,:] ) -                else:     -                    raise ValueError('No 
valid boundary conditions')                  -                 -            if self.direction == 3: -                np.subtract( x_asarr[:,:,:,1:], x_asarr[:,:,:,0:-1], out = fd_arr[:,:,:,1:] ) -                 -                if self.bnd_cond == 'Neumann': -                    np.subtract( x_asarr[:,:,:,0], 0, out = fd_arr[:,:,:,0] )  -                    np.subtract( -x_asarr[:,:,:,-2], 0, out = fd_arr[:,:,:,-1] )    -                     -                elif self.bnd_cond == 'Periodic': -                    np.subtract( x_asarr[:,:,:,0], x_asarr[:,:,:,-1], out = fd_arr[:,:,:,0] ) -                else:     -                    raise ValueError('No valid boundary conditions')                   -                               -        else: -            raise NotImplementedError -             -        out *= -1 #/self.voxel_size -        return type(x)(out) -             -    def range_geometry(self): -        '''Returns the range geometry''' -        return self.gm_range -     -    def domain_geometry(self): -        '''Returns the domain geometry''' -        return self.gm_domain -        -    def norm(self): -        x0 = self.gm_domain.allocate() -        x0.fill( np.random.random_sample(x0.shape) ) -        self.s1, sall, svec = PowerMethodNonsquare(self, 25, x0) -        return self.s1 -     -     -if __name__ == '__main__': -     -    from ccpi.framework import ImageGeometry -    import numpy -     -    N, M = 2, 3 - -    ig = ImageGeometry(N, M) - - -    FD = FiniteDiff(ig, direction = 0, bnd_cond = 'Neumann') -    u = FD.domain_geometry().allocate('random_int') -     -     -    res = FD.domain_geometry().allocate() -    FD.direct(u, out=res) - -    z = FD.direct(u)     -    print(z.as_array(), res.as_array()) - -    for i in range(10): -         -        z1 = FD.direct(u)  -        FD.direct(u, out=res) -        numpy.testing.assert_array_almost_equal(z1.as_array(), \ -                                                res.as_array(), decimal=4) -         - -         -         -         -     -#    w = G.range_geometry().allocate('random_int') -     - -     -    
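Both FiniteDiff implementations above negate a backward difference in adjoint() (the final out *= -1). For illustration only (not part of the deleted file), a pure-numpy check that this really is the adjoint of the Neumann forward difference, i.e. that <D u, v> = <u, D^T v>:

    import numpy as np

    def forward_diff(x):        # Neumann forward difference, as in direct()
        out = np.zeros_like(x)
        out[:-1] = x[1:] - x[:-1]
        return out

    def adjoint_diff(y):        # negative backward difference, as in adjoint()
        out = np.zeros_like(y)
        out[1:] = y[1:] - y[:-1]
        out[0] = y[0]
        out[-1] = -y[-2]
        return -out

    rng = np.random.default_rng(0)
    u, v = rng.standard_normal(7), rng.standard_normal(7)
    # the two inner products should agree up to rounding error
    print(np.dot(forward_diff(u), v), np.dot(u, adjoint_diff(v)))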
\ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/GradientOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/GradientOperator.py deleted file mode 100644 index 6ffaf70..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/GradientOperator.py +++ /dev/null @@ -1,242 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Fri Mar  1 22:50:04 2019 - -@author: evangelos -""" - -from ccpi.optimisation.operators import Operator, LinearOperator, ScaledOperator -from ccpi.framework import ImageData, ImageGeometry, BlockGeometry, BlockDataContainer -import numpy  -from ccpi.optimisation.operators import FiniteDiff, SparseFiniteDiff - -#%% - -class Gradient(LinearOperator): -     -    def __init__(self, gm_domain, bnd_cond = 'Neumann', **kwargs): -         -        super(Gradient, self).__init__()  -                 -        self.gm_domain = gm_domain # Domain of Grad Operator -         -        self.correlation = kwargs.get('correlation','Space') -         -        if self.correlation=='Space': -            if self.gm_domain.channels>1: -                self.gm_range = BlockGeometry(*[self.gm_domain for _ in range(self.gm_domain.length-1)] )  -                self.ind = numpy.arange(1,self.gm_domain.length) -            else:     -                self.gm_range = BlockGeometry(*[self.gm_domain for _ in range(self.gm_domain.length) ] ) -                self.ind = numpy.arange(self.gm_domain.length) -        elif self.correlation=='SpaceChannels': -            if self.gm_domain.channels>1: -                self.gm_range = BlockGeometry(*[self.gm_domain for _ in range(self.gm_domain.length)]) -                self.ind = range(self.gm_domain.length) -            else: -                raise ValueError('No channels to correlate') -          -        self.bnd_cond = bnd_cond  -         -        # Call FiniteDiff operator -         -        self.FD = FiniteDiff(self.gm_domain, direction = 0, bnd_cond = self.bnd_cond) -                                                          -         -    def direct(self, x, out=None): -         -                 -        if out is not None: -             -            for i in range(self.gm_range.shape[0]): -                self.FD.direction = self.ind[i] -                self.FD.direct(x, out = out[i]) -        else: -            tmp = self.gm_range.allocate()         -            for i in range(tmp.shape[0]): -                self.FD.direction=self.ind[i] -                tmp.get_item(i).fill(self.FD.direct(x)) -            return tmp     -         -    def adjoint(self, x, out=None): -         -        if out is not None: - -            tmp = self.gm_domain.allocate()             -            for i in range(x.shape[0]): -                self.FD.direction=self.ind[i]  -                self.FD.adjoint(x.get_item(i), out = tmp) -                if i == 0: -                    out.fill(tmp) -                else: -                    out += tmp -        else:             -            tmp = self.gm_domain.allocate() -            for i in range(x.shape[0]): -                self.FD.direction=self.ind[i] - -                tmp += self.FD.adjoint(x.get_item(i)) -            return tmp     -             -     -    def domain_geometry(self): -        return self.gm_domain -     -    def range_geometry(self): -        return self.gm_range -                                    -    def norm(self): - -        x0 = self.gm_domain.allocate('random') -        self.s1, sall, svec = 
LinearOperator.PowerMethod(self, 10, x0) -        return self.s1 -     -    def __rmul__(self, scalar): -        return ScaledOperator(self, scalar)  -     -    ########################################################################### -    ###############  For preconditioning ###################################### -    ########################################################################### -    def matrix(self): -         -        tmp = self.gm_range.allocate() -         -        mat = [] -        for i in range(tmp.shape[0]): -             -            spMat = SparseFiniteDiff(self.gm_domain, direction=self.ind[i], bnd_cond=self.bnd_cond) -            mat.append(spMat.matrix()) -     -        return BlockDataContainer(*mat)     - - -    def sum_abs_col(self): -         -        tmp = self.gm_range.allocate() -        res = self.gm_domain.allocate() -        for i in range(tmp.shape[0]): -            spMat = SparseFiniteDiff(self.gm_domain, direction=self.ind[i], bnd_cond=self.bnd_cond) -            res += spMat.sum_abs_row() -        return res -     -    def sum_abs_row(self): -         -        tmp = self.gm_range.allocate() -        res = [] -        for i in range(tmp.shape[0]): -            spMat = SparseFiniteDiff(self.gm_domain, direction=self.ind[i], bnd_cond=self.bnd_cond) -            res.append(spMat.sum_abs_col()) -        return BlockDataContainer(*res) -    -        -if __name__ == '__main__': -     -     -    from ccpi.optimisation.operators import Identity, BlockOperator -     -     -    M, N = 2, 3 -    ig = ImageGeometry(M, N) -    arr = ig.allocate('random_int' ) -     -    # check direct of Gradient and sparse matrix -    G = Gradient(ig) -    G_sp = G.matrix() -     -    res1 = G.direct(arr) -    res1y = numpy.reshape(G_sp[0].toarray().dot(arr.as_array().flatten('F')), ig.shape, 'F') -     -    print(res1[0].as_array()) -    print(res1y) -     -    res1x = numpy.reshape(G_sp[1].toarray().dot(arr.as_array().flatten('F')), ig.shape, 'F') -     -    print(res1[1].as_array()) -    print(res1x)     -     -    #check sum abs row -    conc_spmat = numpy.abs(numpy.concatenate( (G_sp[0].toarray(), G_sp[1].toarray() ))) -    print(numpy.reshape(conc_spmat.sum(axis=0), ig.shape, 'F'))     -    print(G.sum_abs_row().as_array()) -     -    print(numpy.reshape(conc_spmat.sum(axis=1), ((2,) + ig.shape), 'F')) -     -    print(G.sum_abs_col()[0].as_array()) -    print(G.sum_abs_col()[1].as_array())    -     -    # Check Blockoperator sum abs col and row -     -    op1 = Gradient(ig) -    op2 = Identity(ig) -     -    B = BlockOperator( op1, op2) -     -    Brow = B.sum_abs_row() -    Bcol = B.sum_abs_col() -     -    concB = numpy.concatenate( (numpy.abs(numpy.concatenate( (G_sp[0].toarray(), G_sp[1].toarray() ))), op2.matrix().toarray())) -     -    print(numpy.reshape(concB.sum(axis=0), ig.shape, 'F')) -    print(Brow.as_array()) -     -    print(numpy.reshape(concB.sum(axis=1)[0:12], ((2,) + ig.shape), 'F')) -    print(Bcol[1].as_array())     -     -         -#    print(numpy.concatene(G_sp[0].toarray()+ )) -#    print(G_sp[1].toarray()) -#     -#    d1 = G.sum_abs_row() -#    print(d1.as_array()) -#     -#    d2 = G_neum.sum_abs_col() -##    print(d2)     -#     -#     -#    ########################################################### -    a = BlockDataContainer( BlockDataContainer(arr, arr), arr) -    b = BlockDataContainer( BlockDataContainer(arr+5, arr+3), arr+2) -    c = a/b -     -    print(c[0][0].as_array(), (arr/(arr+5)).as_array()) -    print(c[0][1].as_array(), 
(arr/(arr+3)).as_array()) -    print(c[1].as_array(), (arr/(arr+2)).as_array()) -     -     -    a1 = BlockDataContainer( arr, BlockDataContainer(arr, arr)) -#     -#    c1 = arr + a -#    c2 = arr + a -#    c2 = a1 + arr -     -    from ccpi.framework import ImageGeometry -#    from ccpi.optimisation.operators import Gradient -# -    N, M = 2, 3 -#     -    ig = ImageGeometry(N, M) -# -    G = Gradient(ig) -# -    u = G.domain_geometry().allocate('random_int') -    w = G.range_geometry().allocate('random_int') -     -     -    print( "################   without out #############") -           -    print( (G.direct(u)*w).sum(),  (u*G.adjoint(w)).sum() )  -         - -    print( "################   with out #############") -     -    res = G.range_geometry().allocate() -    res1 = G.domain_geometry().allocate() -    G.direct(u, out = res)           -    G.adjoint(w, out = res1) -           -    print( (res*w).sum(),  (u*res1).sum() )      -     -     -     - diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/IdentityOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/IdentityOperator.py deleted file mode 100644 index a853b8d..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/IdentityOperator.py +++ /dev/null @@ -1,79 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Wed Mar  6 19:30:51 2019 - -@author: evangelos -""" - -from ccpi.optimisation.operators import LinearOperator -import scipy.sparse as sp -import numpy as np -from ccpi.framework import ImageData - - -class Identity(LinearOperator): -     -    def __init__(self, gm_domain, gm_range=None): - -        self.gm_domain = gm_domain -        self.gm_range = gm_range   -        if self.gm_range is None: -            self.gm_range = self.gm_domain -         -        super(Identity, self).__init__() -         -    def direct(self,x,out=None): -        if out is None: -            return x.copy() -        else: -            out.fill(x) -     -    def adjoint(self,x, out=None): -        if out is None: -            return x.copy() -        else: -            out.fill(x) -         -    def norm(self): -        return 1.0 -         -    def domain_geometry(self):        -        return self.gm_domain -         -    def range_geometry(self): -        return self.gm_range -     -    def matrix(self): -         -        return sp.eye(np.prod(self.gm_domain.shape)) -     -    def sum_abs_row(self): -         -        return self.gm_range.allocate(1)#ImageData(np.array(np.reshape(abs(self.matrix()).sum(axis=0), self.gm_domain.shape, 'F'))) -  -    def sum_abs_col(self): -         -        return self.gm_domain.allocate(1)#ImageData(np.array(np.reshape(abs(self.matrix()).sum(axis=1), self.gm_domain.shape, 'F'))) -             -     -if __name__ == '__main__': -     -    from ccpi.framework import ImageGeometry - -    M, N = 2, 3 -    ig = ImageGeometry(M, N) -    arr = ig.allocate('random_int') -     -    Id = Identity(ig) -    d = Id.matrix() -    print(d.toarray()) -     -    d1 = Id.sum_abs_col() -    print(d1.as_array()) -     -     - -             -     -    
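For illustration only (not part of the deleted file): the Gradient operator above stacks one FiniteDiff per direction into a BlockGeometry range, and its adjoint sums the per-direction adjoints (a negative divergence). The adjoint check from the deleted __main__ block can be reproduced as follows, assuming the ccpi package from this tree is importable:

    from ccpi.framework import ImageGeometry
    from ccpi.optimisation.operators import Gradient

    ig = ImageGeometry(2, 3)
    G = Gradient(ig)                          # range: one finite-difference image per direction
    u = G.domain_geometry().allocate('random_int')
    w = G.range_geometry().allocate('random_int')

    # inner-product (adjoint) test: <G u, w> and <u, G^T w> should agree
    print((G.direct(u) * w).sum(), (u * G.adjoint(w)).sum())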
\ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/LinearOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/LinearOperator.py deleted file mode 100644 index f304cc6..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/LinearOperator.py +++ /dev/null @@ -1,67 +0,0 @@ -# -*- coding: utf-8 -*-
 -"""
 -Created on Tue Mar  5 15:57:52 2019
 -
 -@author: ofn77899
 -"""
 -
 -from ccpi.optimisation.operators import Operator
 -from ccpi.framework import ImageGeometry
 -import numpy
 -
 -
 -class LinearOperator(Operator):
 -    '''A linear operator that maps between spaces X and Y; its adjoint maps from Y back to X'''
 -    def __init__(self):
 -        super(LinearOperator, self).__init__()
 -    def is_linear(self):
 -        '''Returns whether the operator is linear'''
 -        return True
 -    def adjoint(self,x, out=None):
 -        '''Returns the adjoint operation
 -        
 -        only available to linear operators'''
 -        raise NotImplementedError
 -    
 -    @staticmethod
 -    def PowerMethod(operator, iterations, x_init=None):
 -        '''Power method: iteratively estimates the operator norm (largest singular value), used as a Lipschitz constant'''
 -        
 -        # Initialise random
 -        if x_init is None:
 -            x0 = operator.domain_geometry().allocate(ImageGeometry.RANDOM_INT)
 -        else:
 -            x0 = x_init.copy()
 -            
 -        x1 = operator.domain_geometry().allocate()
 -        y_tmp = operator.range_geometry().allocate()
 -        s = numpy.zeros(iterations)
 -        # Loop
 -        for it in numpy.arange(iterations):
 -            operator.direct(x0,out=y_tmp)
 -            operator.adjoint(y_tmp,out=x1)
 -            x1norm = x1.norm()
 -            s[it] = x1.dot(x0) / x0.squared_norm()
 -            x1.multiply((1.0/x1norm), out=x0)
 -        return numpy.sqrt(s[-1]), numpy.sqrt(s), x0
 -
 -    @staticmethod
 -    def PowerMethodNonsquare(op,numiters , x_init=None):
 -        '''Power method: iteratively estimates the operator norm (largest singular value), used as a Lipschitz constant'''
 -        
 -        if x_init is None:
 -            x0 = op.allocate_direct()
 -            x0.fill(numpy.random.randn(*x0.shape))
 -        else:
 -            x0 = x_init.copy()
 -        
 -        s = numpy.zeros(numiters)
 -        # Loop
 -        for it in numpy.arange(numiters):
 -            x1 = op.adjoint(op.direct(x0))
 -            x1norm = x1.norm()
 -            s[it] = (x1*x0).sum() / (x0.squared_norm())
 -            x0 = (1.0/x1norm)*x1
 -        return numpy.sqrt(s[-1]), numpy.sqrt(s), x0
 -
 -
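PowerMethod above repeatedly applies direct followed by adjoint and renormalises, so s[it] = <A^T A x, x> / <x, x> converges to the square of the largest singular value; the returned sqrt(s[-1]) is the operator norm used as a Lipschitz-constant estimate. A minimal NumPy sketch of the same iteration for a dense matrix (power_method is an illustrative name, not the ccpi API):

import numpy as np

def power_method(A, iterations=100, seed=0):
    # normal-equations power iteration: estimates the largest singular value of A
    rng = np.random.default_rng(seed)
    x = rng.standard_normal(A.shape[1])
    sigma_sq = 0.0
    for _ in range(iterations):
        y = A @ x                      # direct
        z = A.T @ y                    # adjoint
        sigma_sq = (z @ x) / (x @ x)   # Rayleigh quotient of A^T A
        x = z / np.linalg.norm(z)
    return np.sqrt(sigma_sq)

A = np.random.default_rng(1).standard_normal((20, 10))
print(power_method(A), np.linalg.svd(A, compute_uv=False)[0])  # the two values should agree closely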
 diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/LinearOperatorMatrix.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/LinearOperatorMatrix.py deleted file mode 100644 index 62e22e0..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/LinearOperatorMatrix.py +++ /dev/null @@ -1,51 +0,0 @@ -import numpy -from scipy.sparse.linalg import svds -from ccpi.framework import DataContainer -from ccpi.framework import AcquisitionData -from ccpi.framework import ImageData -from ccpi.framework import ImageGeometry -from ccpi.framework import AcquisitionGeometry -from numbers import Number -from ccpi.optimisation.operators import LinearOperator -class LinearOperatorMatrix(LinearOperator): -    def __init__(self,A): -        self.A = A -        self.s1 = None   # Largest singular value, initially unknown -        super(LinearOperatorMatrix, self).__init__() -         -    def direct(self,x, out=None): -        if out is None: -            return type(x)(numpy.dot(self.A,x.as_array())) -        else: -            numpy.dot(self.A, x.as_array(), out=out.as_array()) -             -     -    def adjoint(self,x, out=None): -        if out is None: -            return type(x)(numpy.dot(self.A.transpose(),x.as_array())) -        else: -            numpy.dot(self.A.transpose(),x.as_array(), out=out.as_array()) -             -     -    def size(self): -        return self.A.shape -     -    def get_max_sing_val(self): -        # If unknown, compute and store. If known, simply return it. -        if self.s1 is None: -            self.s1 = svds(self.A,1,return_singular_vectors=False)[0] -            return self.s1 -        else: -            return self.s1 -    def allocate_direct(self): -        '''allocates the memory to hold the result of adjoint''' -        #numpy.dot(self.A.transpose(),x.as_array()) -        M_A, N_A = self.A.shape -        out = numpy.zeros((N_A,1)) -        return DataContainer(out) -    def allocate_adjoint(self): -        '''allocate the memory to hold the result of direct''' -        #numpy.dot(self.A.transpose(),x.as_array()) -        M_A, N_A = self.A.shape -        out = numpy.zeros((M_A,1)) -        return DataContainer(out) diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/Operator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/Operator.py deleted file mode 100644 index 2d2089b..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/Operator.py +++ /dev/null @@ -1,30 +0,0 @@ -# -*- coding: utf-8 -*-
 -"""
 -Created on Tue Mar  5 15:55:56 2019
 -
 -@author: ofn77899
 -"""
 -from ccpi.optimisation.operators.ScaledOperator import ScaledOperator
 -
 -class Operator(object):
 -    '''Operator that maps from a space X -> Y'''
 -    def is_linear(self):
 -        '''Returns whether the operator is linear'''
 -        return False
 -    def direct(self,x, out=None):
 -        '''Returns the application of the Operator on x'''
 -        raise NotImplementedError
 -    def norm(self):
 -        '''Returns the norm of the Operator'''
 -        raise NotImplementedError
 -    def range_geometry(self):
 -        '''Returns the range of the Operator: Y space'''
 -        raise NotImplementedError
 -    def domain_geometry(self):
 -        '''Returns the domain of the Operator: X space'''
 -        raise NotImplementedError
 -    def __rmul__(self, scalar):
 -        '''Defines the multiplication by a scalar on the left
 -
 -        returns a ScaledOperator'''
 -        return ScaledOperator(self, scalar)
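Operator.__rmul__ above is what makes an expression such as 2 * K return a ScaledOperator: the scalar is deferred, the wrapper multiplies the results of direct/adjoint by it and scales the norm by its absolute value. A stripped-down sketch of that wrapping pattern with a NumPy-backed matrix operator (Scaled and MatOp are illustrative stand-ins, not the ccpi classes):

import numpy as np

class MatOp:
    # toy linear operator backed by a dense matrix
    def __init__(self, A): self.A = A
    def direct(self, x): return self.A @ x
    def adjoint(self, y): return self.A.T @ y
    def norm(self): return np.linalg.norm(self.A, 2)

class Scaled:
    # defer the scalar: apply the wrapped operator, then multiply the result
    def __init__(self, op, scalar): self.op, self.scalar = op, scalar
    def direct(self, x): return self.scalar * self.op.direct(x)
    def adjoint(self, y): return self.scalar * self.op.adjoint(y)
    def norm(self): return abs(self.scalar) * self.op.norm()

K = MatOp(np.arange(6.0).reshape(2, 3))
sK = Scaled(K, -2.0)
x = np.ones(3)
assert np.allclose(sK.direct(x), -2.0 * K.direct(x))
assert np.isclose(sK.norm(), 2.0 * K.norm())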
 diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/ScaledOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/ScaledOperator.py deleted file mode 100644 index ba0049e..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/ScaledOperator.py +++ /dev/null @@ -1,51 +0,0 @@ -from numbers import Number -import numpy - -class ScaledOperator(object): -    '''ScaledOperator -    A class to represent the scalar multiplication of an Operator with a scalar. -    It holds an operator and a scalar. Basically it returns the multiplication -    of the result of direct and adjoint of the operator with the scalar. -    For the rest it behaves like the operator it holds. -    Args: -       operator (Operator): a Operator or LinearOperator -       scalar (Number): a scalar multiplier -    Example: -       The scaled operator behaves like the following: -       sop = ScaledOperator(operator, scalar) -       sop.direct(x) = scalar * operator.direct(x) -       sop.adjoint(x) = scalar * operator.adjoint(x) -       sop.norm() = operator.norm() -       sop.range_geometry() = operator.range_geometry() -       sop.domain_geometry() = operator.domain_geometry() -    ''' -    def __init__(self, operator, scalar): -        super(ScaledOperator, self).__init__() -        if not isinstance (scalar, Number): -            raise TypeError('expected scalar: got {}'.format(type(scalar))) -        self.scalar = scalar -        self.operator = operator -    def direct(self, x, out=None): -        if out is None: -            return self.scalar * self.operator.direct(x, out=out) -        else: -            self.operator.direct(x, out=out) -            out *= self.scalar -    def adjoint(self, x, out=None): -        if self.operator.is_linear(): -            if out is None: -                return self.scalar * self.operator.adjoint(x, out=out) -            else: -                self.operator.adjoint(x, out=out) -                out *= self.scalar -        else: -            raise TypeError('Operator is not linear') -    def norm(self): -        return numpy.abs(self.scalar) * self.operator.norm() -    def range_geometry(self): -        return self.operator.range_geometry() -    def domain_geometry(self): -        return self.operator.domain_geometry() -    def is_linear(self): -        return self.operator.is_linear() - diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/ShrinkageOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/ShrinkageOperator.py deleted file mode 100644 index f47c655..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/ShrinkageOperator.py +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Wed Mar  6 19:30:51 2019 - -@author: evangelos -""" - -from ccpi.framework import DataContainer - -class ShrinkageOperator(): -     -    def __init__(self): -        pass - -    def __call__(self, x, tau, out=None): -         -        return x.sign() * (x.abs() - tau).maximum(0)  -   
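ShrinkageOperator above is elementwise soft thresholding, the proximal operator of tau * ||x||_1: shrink(x, tau) = sign(x) * max(|x| - tau, 0). A plain NumPy equivalent with a quick numerical check (soft_threshold is an illustrative name):

import numpy as np

def soft_threshold(x, tau):
    # proximal operator of tau * ||x||_1, applied elementwise
    return np.sign(x) * np.maximum(np.abs(x) - tau, 0.0)

x = np.array([-2.0, -0.3, 0.0, 0.4, 1.5])
print(soft_threshold(x, 0.5))   # expected: [-1.5  0.   0.   0.   1. ]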
\ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/SparseFiniteDiff.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/SparseFiniteDiff.py deleted file mode 100644 index c5c2ec9..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/SparseFiniteDiff.py +++ /dev/null @@ -1,144 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Tue Apr  2 14:06:15 2019 - -@author: vaggelis -""" - -import scipy.sparse as sp -import numpy as np -from ccpi.framework import ImageData - -class SparseFiniteDiff(): -     -    def __init__(self, gm_domain, gm_range=None, direction=0, bnd_cond = 'Neumann'): -         -        super(SparseFiniteDiff, self).__init__()  -        self.gm_domain = gm_domain -        self.gm_range = gm_range -        self.direction = direction -        self.bnd_cond = bnd_cond -         -        if self.gm_range is None: -            self.gm_range = self.gm_domain -             -        self.get_dims = [i for i in gm_domain.shape]   -         -        if self.direction + 1 > len(self.gm_domain.shape): -            raise ValueError('Gradient directions more than geometry domain')          -             -    def matrix(self):     -         -            i = self.direction  -             -            mat = sp.spdiags(np.vstack([-np.ones((1,self.get_dims[i])),np.ones((1,self.get_dims[i]))]), [0,1], self.get_dims[i], self.get_dims[i], format = 'lil') - -            if self.bnd_cond == 'Neumann': -                mat[-1,:] = 0 -            elif self.bnd_cond == 'Periodic': -                mat[-1,0] = 1     -                 -            tmpGrad = mat if i == 0 else sp.eye(self.get_dims[0]) -             -            for j in range(1, self.gm_domain.length): - -                tmpGrad = sp.kron(mat, tmpGrad ) if j == i else sp.kron(sp.eye(self.get_dims[j]), tmpGrad )  -                 -            return tmpGrad     -                          -    def T(self):         -        return self.matrix().T -      -    def direct(self, x): -         -        x_asarr = x.as_array() -        res = np.reshape( self.matrix() * x_asarr.flatten('F'), self.gm_domain.shape, 'F') -        return type(x)(res) -     -    def adjoint(self, x): -         -        x_asarr = x.as_array() -        res = np.reshape( self.matrix().T * x_asarr.flatten('F'), self.gm_domain.shape, 'F') -        return type(x)(res)  -     -    def sum_abs_row(self): -         -        res = np.array(np.reshape(abs(self.matrix()).sum(axis=0), self.gm_domain.shape, 'F')) -        #res[res==0]=0 -        return ImageData(res) -     -    def sum_abs_col(self): -         -        res = np.array(np.reshape(abs(self.matrix()).sum(axis=1), self.gm_domain.shape, 'F') ) -        #res[res==0]=0 -        return ImageData(res) -         -if __name__ == '__main__': -     -    from ccpi.framework import ImageGeometry -    from ccpi.optimisation.operators import FiniteDiff -     -    # 2D -    M, N= 2, 3 -    ig = ImageGeometry(M, N) -    arr = ig.allocate('random_int') -     -    for i in [0,1]: -     -        # Neumann -        sFD_neum = SparseFiniteDiff(ig, direction=i, bnd_cond='Neumann') -        G_neum = FiniteDiff(ig, direction=i, bnd_cond='Neumann') -         -        # Periodic -        sFD_per = SparseFiniteDiff(ig, direction=i, bnd_cond='Periodic') -        G_per = FiniteDiff(ig, direction=i, bnd_cond='Periodic')     -     -        u_neum_direct = G_neum.direct(arr) -        u_neum_sp_direct = sFD_neum.direct(arr) -        
np.testing.assert_array_almost_equal(u_neum_direct.as_array(), u_neum_sp_direct.as_array(), decimal=4) -         -        u_neum_adjoint = G_neum.adjoint(arr) -        u_neum_sp_adjoint = sFD_neum.adjoint(arr) -        np.testing.assert_array_almost_equal(u_neum_adjoint.as_array(), u_neum_sp_adjoint.as_array(), decimal=4)     -         -        u_per_direct = G_neum.direct(arr) -        u_per_sp_direct = sFD_neum.direct(arr)   -        np.testing.assert_array_almost_equal(u_per_direct.as_array(), u_per_sp_direct.as_array(), decimal=4) -         -        u_per_adjoint = G_per.adjoint(arr) -        u_per_sp_adjoint = sFD_per.adjoint(arr) -        np.testing.assert_array_almost_equal(u_per_adjoint.as_array(), u_per_sp_adjoint.as_array(), decimal=4)       -     -    # 3D -    M, N, K = 2, 3, 4 -    ig3D = ImageGeometry(M, N, K) -    arr3D = ig3D.allocate('random_int') -     -    for i in [0,1,2]: -     -        # Neumann -        sFD_neum3D = SparseFiniteDiff(ig3D, direction=i, bnd_cond='Neumann') -        G_neum3D = FiniteDiff(ig3D, direction=i, bnd_cond='Neumann') -         -        # Periodic -        sFD_per3D = SparseFiniteDiff(ig3D, direction=i, bnd_cond='Periodic') -        G_per3D = FiniteDiff(ig3D, direction=i, bnd_cond='Periodic')     -     -        u_neum_direct3D = G_neum3D.direct(arr3D) -        u_neum_sp_direct3D = sFD_neum3D.direct(arr3D) -        np.testing.assert_array_almost_equal(u_neum_direct3D.as_array(), u_neum_sp_direct3D.as_array(), decimal=4) -         -        u_neum_adjoint3D = G_neum3D.adjoint(arr3D) -        u_neum_sp_adjoint3D = sFD_neum3D.adjoint(arr3D) -        np.testing.assert_array_almost_equal(u_neum_adjoint3D.as_array(), u_neum_sp_adjoint3D.as_array(), decimal=4)     -         -        u_per_direct3D = G_neum3D.direct(arr3D) -        u_per_sp_direct3D = sFD_neum3D.direct(arr3D)   -        np.testing.assert_array_almost_equal(u_per_direct3D.as_array(), u_per_sp_direct3D.as_array(), decimal=4) -         -        u_per_adjoint3D = G_per3D.adjoint(arr3D) -        u_per_sp_adjoint3D = sFD_per3D.adjoint(arr3D) -        np.testing.assert_array_almost_equal(u_per_adjoint3D.as_array(), u_per_sp_adjoint3D.as_array(), decimal=4)       -     -    
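SparseFiniteDiff.matrix() above assembles a 1D forward-difference matrix (last row zeroed for Neumann, wrapped around for Periodic) and lifts it to the full image with Kronecker products against identities, so that it acts on an image flattened in Fortran order; the deleted tests compare it against the FiniteDiff operator. A compact SciPy sketch of the same construction for a 2D geometry (diff_1d and diff_2d are illustrative helpers, not the ccpi API):

import numpy as np
import scipy.sparse as sp

def diff_1d(n, bnd='Neumann'):
    # rows are (-1, +1); the closing row is zero (Neumann) or wraps around (Periodic)
    D = sp.diags([-np.ones(n), np.ones(n - 1)], [0, 1], format='lil')
    if bnd == 'Neumann':
        D[-1, :] = 0
    elif bnd == 'Periodic':
        D[-1, 0] = 1
    return D.tocsr()

def diff_2d(shape, direction, bnd='Neumann'):
    # differences along the given axis of an image flattened with order='F'
    n0, n1 = shape
    if direction == 0:
        return sp.kron(sp.eye(n1), diff_1d(n0, bnd))
    return sp.kron(diff_1d(n1, bnd), sp.eye(n0))

u = np.arange(6.0).reshape(2, 3)
Dy = diff_2d(u.shape, 0)
ref = np.zeros_like(u); ref[:-1, :] = u[1:, :] - u[:-1, :]
np.testing.assert_allclose((Dy @ u.flatten('F')).reshape(u.shape, order='F'), ref)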
\ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/SymmetrizedGradientOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/SymmetrizedGradientOperator.py deleted file mode 100644 index 243809b..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/SymmetrizedGradientOperator.py +++ /dev/null @@ -1,244 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Fri Mar  1 22:53:55 2019 - -@author: evangelos -""" - -from ccpi.optimisation.operators import Gradient, Operator, LinearOperator, ScaledOperator -from ccpi.framework import ImageData, ImageGeometry, BlockGeometry, BlockDataContainer -import numpy  -from ccpi.optimisation.operators import FiniteDiff, SparseFiniteDiff - - -class SymmetrizedGradient(Gradient): -     -    ''' Symmetrized Gradient, denoted by E: V --> W -        where V is the Range of the Gradient Operator -        and W is the Range of the Symmetrized Gradient.                         -    ''' -     -     -    def __init__(self, gm_domain, bnd_cond = 'Neumann', **kwargs): -         -        super(SymmetrizedGradient, self).__init__(gm_domain, bnd_cond, **kwargs)  -         -        ''' -         Domain of SymGrad is the Range of Gradient -        ''' -         -        self.gm_domain = self.gm_range  -        self.bnd_cond = bnd_cond -         -        self.channels = self.gm_range.get_item(0).channels -         -        tmp_gm = len(self.gm_domain.geometries)*self.gm_domain.geometries -         -        self.gm_range = BlockGeometry(*tmp_gm) -         -        self.FD = FiniteDiff(self.gm_domain, direction = 0, bnd_cond = self.bnd_cond) -         -        if self.gm_domain.shape[0]==2: -            self.order_ind = [0,2,1,3] -        else: -            self.order_ind = [0,3,6,1,4,7,2,5,8]             -                 -         -    def direct(self, x, out=None): -         -        if out is None: -             -            tmp = [] -            for i in range(self.gm_domain.shape[0]): -                for j in range(x.shape[0]): -                    self.FD.direction = i -                    tmp.append(self.FD.adjoint(x.get_item(j))) -                     -            tmp1 = [tmp[i] for i in self.order_ind]         -                     -            res = [0.5 * sum(x) for x in zip(tmp, tmp1)]    -                     -            return BlockDataContainer(*res)  -     -        else: -             -            ind = 0 -            for i in range(self.gm_domain.shape[0]): -                for j in range(x.shape[0]): -                    self.FD.direction = i -                    self.FD.adjoint(x.get_item(j), out=out[ind]) -                    ind+=1                     -            out1 = BlockDataContainer(*[out[i] for i in self.order_ind])           -            out.fill( 0.5 * (out + out1) ) -             -                                                -    def adjoint(self, x, out=None): -         -        if out is None: -             -            tmp = [None]*self.gm_domain.shape[0] -            i = 0 -             -            for k in range(self.gm_domain.shape[0]): -                tmp1 = 0 -                for j in range(self.gm_domain.shape[0]): -                    self.FD.direction = j -                    tmp1 += self.FD.direct(x[i])                     -                    i+=1 -                tmp[k] = tmp1   -            return BlockDataContainer(*tmp) -             - -        else: -             -            tmp = self.gm_domain.allocate()  -            i = 0 -   
         for k in range(self.gm_domain.shape[0]): -                tmp1 = 0 -                for j in range(self.gm_domain.shape[0]): -                    self.FD.direction = j -                    self.FD.direct(x[i], out=tmp[j]) -                    i+=1 -                    tmp1+=tmp[j] -                out[k].fill(tmp1) -#            tmp = self.adjoint(x) -#            out.fill(tmp) -                     -                      -    def domain_geometry(self): -        return self.gm_domain -     -    def range_geometry(self): -        return self.gm_range -                                    -    def norm(self): - -#        TODO need dot method for BlockDataContainer -#        return numpy.sqrt(4*self.gm_domain.shape[0]) -     -#        x0 = self.gm_domain.allocate('random') -        self.s1, sall, svec = LinearOperator.PowerMethod(self, 50)         -        return self.s1 -     - - -if __name__ == '__main__':    -     -    ###########################################################################   -    ## Symmetrized Gradient Tests -    from ccpi.framework import DataContainer -    from ccpi.optimisation.operators import Gradient, BlockOperator, FiniteDiff -    import numpy as np -     -    N, M = 2, 3 -    K = 2 -    C = 2 -     -    ig1 = ImageGeometry(N, M) -    ig2 = ImageGeometry(N, M, channels=C) -     -    E1 = SymmetrizedGradient(ig1, correlation = 'Space', bnd_cond='Neumann') -     -    try: -        E1 = SymmetrizedGradient(ig1, correlation = 'SpaceChannels', bnd_cond='Neumann') -    except: -        print("No Channels to correlate") -         -    E2 = SymmetrizedGradient(ig2, correlation = 'SpaceChannels', bnd_cond='Neumann')   - -    print(E1.domain_geometry().shape, E1.range_geometry().shape) -    print(E2.domain_geometry().shape, E2.range_geometry().shape)    -     -    #check Linear operator property -     -    u1 = E1.domain_geometry().allocate('random_int') -    u2 = E2.domain_geometry().allocate('random_int') -     -    # Need to allocate random_int at the Range of SymGradient -     -    #a1 = ig1.allocate('random_int') -    #a2 = ig1.allocate('random_int') -    #a3 = ig1.allocate('random_int') -     -    #a4 = ig1.allocate('random_int') -    #a5 = ig1.allocate('random_int')     -    #a6 = ig1.allocate('random_int') -     -    # TODO allocate has to create this symmetry by default!!!!! 
-    #w1 = BlockDataContainer(*[a1, a2, \ -    #                           a2, a3])  -    w1 = E1.range_geometry().allocate('random_int',symmetry=True) -     -    LHS = (E1.direct(u1) * w1).sum() -    RHS = (u1 * E1.adjoint(w1)).sum() -     -    numpy.testing.assert_equal(LHS, RHS)      -     -    u2 = E2.gm_domain.allocate('random_int') -     -    #aa1 = ig2.allocate('random_int') -    #aa2 = ig2.allocate('random_int') -    #aa3 = ig2.allocate('random_int') -    #aa4 = ig2.allocate('random_int') -    #aa5 = ig2.allocate('random_int') -    #aa6 = ig2.allocate('random_int')   -     -    #w2 = BlockDataContainer(*[aa1, aa2, aa3, \ -    #                          aa2, aa4, aa5, \ -    #                          aa3, aa5, aa6])      -    w2 = E2.range_geometry().allocate('random_int',symmetry=True) -  -     -    LHS1 = (E2.direct(u2) * w2).sum() -    RHS1 = (u2 * E2.adjoint(w2)).sum() -     -    numpy.testing.assert_equal(LHS1, RHS1)       -     -    out = E1.range_geometry().allocate() -    E1.direct(u1, out=out) -    a1 = E1.direct(u1) -    numpy.testing.assert_array_equal(a1[0].as_array(), out[0].as_array())  -    numpy.testing.assert_array_equal(a1[1].as_array(), out[1].as_array())  -    numpy.testing.assert_array_equal(a1[2].as_array(), out[2].as_array())  -    numpy.testing.assert_array_equal(a1[3].as_array(), out[3].as_array())  -     -     -    out1 = E1.domain_geometry().allocate() -    E1.adjoint(w1, out=out1) -    b1 = E1.adjoint(w1)     -     -    LHS_out = (out * w1).sum() -    RHS_out = (u1 * out1).sum() -    print(LHS_out, RHS_out) -     -     -    out2 = E2.range_geometry().allocate() -    E2.direct(u2, out=out2) -    a2 = E2.direct(u2) -     -    out21 = E2.domain_geometry().allocate() -    E2.adjoint(w2, out=out21) -    b2 = E2.adjoint(w2)     -     -    LHS_out = (out2 * w2).sum() -    RHS_out = (u2 * out21).sum() -    print(LHS_out, RHS_out)     -     -     -    out = E1.range_geometry().allocate() -    E1.direct(u1, out=out) -    E1.adjoint(out, out=out1) -     -    print(E1.norm()) -    print(E2.norm()) -     -     -     -     -     - -#     -#     -#     -#   
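The LHS/RHS comparisons above are instances of the standard dot test for a linear operator: draw random u in the domain and w in the range (here w must respect the symmetry of the SymmetrizedGradient range) and compare <E u, w> with <u, E^T w>. A reusable sketch of that check for any object exposing direct/adjoint (dot_test and MatOp are illustrative, not part of the framework):

import numpy as np

def dot_test(op, domain_shape, range_shape, rtol=1e-10, seed=0):
    # adjoint consistency: <A u, w> should equal <u, A^T w> for random u, w
    rng = np.random.default_rng(seed)
    u = rng.standard_normal(domain_shape)
    w = rng.standard_normal(range_shape)
    lhs = np.vdot(op.direct(u), w)
    rhs = np.vdot(u, op.adjoint(w))
    assert np.isclose(lhs, rhs, rtol=rtol), (lhs, rhs)
    return lhs, rhs

class MatOp:
    def __init__(self, A): self.A = A
    def direct(self, x): return self.A @ x
    def adjoint(self, y): return self.A.T @ y

dot_test(MatOp(np.random.default_rng(1).standard_normal((4, 3))), (3,), (4,))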
\ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/ZeroOperator.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/ZeroOperator.py deleted file mode 100644 index 8168f0b..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/ZeroOperator.py +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -Created on Wed Mar  6 19:25:53 2019 - -@author: evangelos -""" - -import numpy as np -from ccpi.framework import ImageData -from ccpi.optimisation.operators import LinearOperator - -class ZeroOperator(LinearOperator): -     -    def __init__(self, gm_domain, gm_range=None): -         -        super(ZeroOperator, self).__init__()              - -        self.gm_domain = gm_domain -        self.gm_range = gm_range   -        if self.gm_range is None: -            self.gm_range = self.gm_domain -                    -         -    def direct(self,x,out=None): -        if out is None: -            return self.gm_range.allocate() -        else: -            out.fill(self.gm_range.allocate()) -     -    def adjoint(self,x, out=None): -        if out is None: -            return self.gm_domain.allocate() -        else: -            out.fill(self.gm_domain.allocate()) -         -    def norm(self): -        return 0 -     -    def domain_geometry(self):        -        return self.gm_domain   -         -    def range_geometry(self): -        return self.gm_range
\ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/optimisation/operators/__init__.py b/Wrappers/Python/build/lib/ccpi/optimisation/operators/__init__.py deleted file mode 100644 index 23222d4..0000000 --- a/Wrappers/Python/build/lib/ccpi/optimisation/operators/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -# -*- coding: utf-8 -*-
 -"""
 -Created on Tue Mar  5 15:56:27 2019
 -
 -@author: ofn77899
 -"""
 -
 -from .Operator import Operator
 -from .LinearOperator import LinearOperator
 -from .ScaledOperator import ScaledOperator
 -from .BlockOperator import BlockOperator
 -from .BlockScaledOperator import BlockScaledOperator
 -
 -from .SparseFiniteDiff import SparseFiniteDiff
 -from .ShrinkageOperator import ShrinkageOperator
 -
 -from .FiniteDifferenceOperator import FiniteDiff
 -from .GradientOperator import Gradient
 -from .SymmetrizedGradientOperator import SymmetrizedGradient
 -from .IdentityOperator import Identity
 -from .ZeroOperator import ZeroOperator
 -from .LinearOperatorMatrix import LinearOperatorMatrix
 -
 diff --git a/Wrappers/Python/build/lib/ccpi/processors/CenterOfRotationFinder.py b/Wrappers/Python/build/lib/ccpi/processors/CenterOfRotationFinder.py deleted file mode 100644 index 936dc05..0000000 --- a/Wrappers/Python/build/lib/ccpi/processors/CenterOfRotationFinder.py +++ /dev/null @@ -1,408 +0,0 @@ -# -*- coding: utf-8 -*- -#   This work is part of the Core Imaging Library developed by -#   Visual Analytics and Imaging System Group of the Science Technology -#   Facilities Council, STFC - -#   Copyright 2018 Edoardo Pasca - -#   Licensed under the Apache License, Version 2.0 (the "License"); -#   you may not use this file except in compliance with the License. -#   You may obtain a copy of the License at - -#       http://www.apache.org/licenses/LICENSE-2.0 - -#   Unless required by applicable law or agreed to in writing, software -#   distributed under the License is distributed on an "AS IS" BASIS, -#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#   See the License for the specific language governing permissions and -#   limitations under the License - -from ccpi.framework import DataProcessor, DataContainer, AcquisitionData,\ - AcquisitionGeometry, ImageGeometry, ImageData -import numpy -from scipy import ndimage - -class CenterOfRotationFinder(DataProcessor): -    '''Processor to find the center of rotation in a parallel beam experiment -     -    This processor read in a AcquisitionDataSet and finds the center of rotation  -    based on Nghia Vo's method. https://doi.org/10.1364/OE.22.019078 -     -    Input: AcquisitionDataSet -     -    Output: float. center of rotation in pixel coordinate -    ''' -     -    def __init__(self): -        kwargs = { -                   -                  } -         -        #DataProcessor.__init__(self, **kwargs) -        super(CenterOfRotationFinder, self).__init__(**kwargs) -     -    def check_input(self, dataset): -        if dataset.number_of_dimensions == 3: -            if dataset.geometry.geom_type == 'parallel': -                return True -            else: -                raise ValueError('{0} is suitable only for parallel beam geometry'\ -                                 .format(self.__class__.__name__)) -        else: -            raise ValueError("Expected input dimensions is 3, got {0}"\ -                             .format(dataset.number_of_dimensions)) -         -     -    # ######################################################################### -    # Copyright (c) 2015, UChicago Argonne, LLC. All rights reserved.         # -    #                                                                         # -    # Copyright 2015. UChicago Argonne, LLC. This software was produced       # -    # under U.S. Government contract DE-AC02-06CH11357 for Argonne National   # -    # Laboratory (ANL), which is operated by UChicago Argonne, LLC for the    # -    # U.S. Department of Energy. The U.S. Government has rights to use,       # -    # reproduce, and distribute this software.  NEITHER THE GOVERNMENT NOR    # -    # UChicago Argonne, LLC MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR        # -    # ASSUMES ANY LIABILITY FOR THE USE OF THIS SOFTWARE.  If software is     # -    # modified to produce derivative works, such modified software should     # -    # be clearly marked, so as not to confuse it with the version available   # -    # from ANL.                                                               
# -    #                                                                         # -    # Additionally, redistribution and use in source and binary forms, with   # -    # or without modification, are permitted provided that the following      # -    # conditions are met:                                                     # -    #                                                                         # -    #     * Redistributions of source code must retain the above copyright    # -    #       notice, this list of conditions and the following disclaimer.     # -    #                                                                         # -    #     * Redistributions in binary form must reproduce the above copyright # -    #       notice, this list of conditions and the following disclaimer in   # -    #       the documentation and/or other materials provided with the        # -    #       distribution.                                                     # -    #                                                                         # -    #     * Neither the name of UChicago Argonne, LLC, Argonne National       # -    #       Laboratory, ANL, the U.S. Government, nor the names of its        # -    #       contributors may be used to endorse or promote products derived   # -    #       from this software without specific prior written permission.     # -    #                                                                         # -    # THIS SOFTWARE IS PROVIDED BY UChicago Argonne, LLC AND CONTRIBUTORS     # -    # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT       # -    # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS       # -    # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL UChicago     # -    # Argonne, LLC OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,        # -    # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,    # -    # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;        # -    # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER        # -    # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT      # -    # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN       # -    # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE         # -    # POSSIBILITY OF SUCH DAMAGE.                                             # -    # ######################################################################### -     -    @staticmethod -    def as_ndarray(arr, dtype=None, copy=False): -        if not isinstance(arr, numpy.ndarray): -            arr = numpy.array(arr, dtype=dtype, copy=copy) -        return arr -     -    @staticmethod -    def as_dtype(arr, dtype, copy=False): -        if not arr.dtype == dtype: -            arr = numpy.array(arr, dtype=dtype, copy=copy) -        return arr -     -    @staticmethod -    def as_float32(arr): -        arr = CenterOfRotationFinder.as_ndarray(arr, numpy.float32) -        return CenterOfRotationFinder.as_dtype(arr, numpy.float32) -     -     -     -     -    @staticmethod -    def find_center_vo(tomo, ind=None, smin=-40, smax=40, srad=10, step=0.5, -                       ratio=2., drop=20): -        """ -        Find rotation axis location using Nghia Vo's method. :cite:`Vo:14`. -     -        Parameters -        ---------- -        tomo : ndarray -            3D tomographic data. -        ind : int, optional -            Index of the slice to be used for reconstruction. 
-        smin, smax : int, optional -            Reference to the horizontal center of the sinogram. -        srad : float, optional -            Fine search radius. -        step : float, optional -            Step of fine searching. -        ratio : float, optional -            The ratio between the FOV of the camera and the size of object. -            It's used to generate the mask. -        drop : int, optional -            Drop lines around vertical center of the mask. -     -        Returns -        ------- -        float -            Rotation axis location. -             -        Notes -        ----- -        The function may not yield a correct estimate, if: -         -        - the sample size is bigger than the field of view of the camera.  -          In this case the ``ratio`` argument need to be set larger -          than the default of 2.0. -         -        - there is distortion in the imaging hardware. If there's  -          no correction applied, the center of the projection image may  -          yield a better estimate. -         -        - the sample contrast is weak. Paganin's filter need to be applied  -          to overcome this.  -        -        - the sample was changed during the scan.  -        """ -        tomo = CenterOfRotationFinder.as_float32(tomo) -     -        if ind is None: -            ind = tomo.shape[1] // 2 -        _tomo = tomo[:, ind, :] -     -         -     -        # Reduce noise by smooth filters. Use different filters for coarse and fine search  -        _tomo_cs = ndimage.filters.gaussian_filter(_tomo, (3, 1)) -        _tomo_fs = ndimage.filters.median_filter(_tomo, (2, 2)) -     -        # Coarse and fine searches for finding the rotation center. -        if _tomo.shape[0] * _tomo.shape[1] > 4e6:  # If data is large (>2kx2k) -            #_tomo_coarse = downsample(numpy.expand_dims(_tomo_cs,1), level=2)[:, 0, :] -            #init_cen = _search_coarse(_tomo_coarse, smin, smax, ratio, drop) -            #fine_cen = _search_fine(_tomo_fs, srad, step, init_cen*4, ratio, drop) -            init_cen = CenterOfRotationFinder._search_coarse(_tomo_cs, smin,  -                                                             smax, ratio, drop) -            fine_cen = CenterOfRotationFinder._search_fine(_tomo_fs, srad,  -                                                           step, init_cen,  -                                                           ratio, drop) -        else: -            init_cen = CenterOfRotationFinder._search_coarse(_tomo_cs,  -                                                             smin, smax,  -                                                             ratio, drop) -            fine_cen = CenterOfRotationFinder._search_fine(_tomo_fs, srad,  -                                                           step, init_cen,  -                                                           ratio, drop) -     -        #logger.debug('Rotation center search finished: %i', fine_cen) -        return fine_cen - - -    @staticmethod -    def _search_coarse(sino, smin, smax, ratio, drop): -        """ -        Coarse search for finding the rotation center. 
-        """ -        (Nrow, Ncol) = sino.shape -        centerfliplr = (Ncol - 1.0) / 2.0 -     -        # Copy the sinogram and flip left right, the purpose is to -        # make a full [0;2Pi] sinogram -        _copy_sino = numpy.fliplr(sino[1:]) -     -        # This image is used for compensating the shift of sinogram 2 -        temp_img = numpy.zeros((Nrow - 1, Ncol), dtype='float32') -        temp_img[:] = sino[-1] -     -        # Start coarse search in which the shift step is 1 -        listshift = numpy.arange(smin, smax + 1) -        listmetric = numpy.zeros(len(listshift), dtype='float32') -        mask = CenterOfRotationFinder._create_mask(2 * Nrow - 1, Ncol,  -                                                   0.5 * ratio * Ncol, drop) -        for i in listshift: -            _sino = numpy.roll(_copy_sino, i, axis=1) -            if i >= 0: -                _sino[:, 0:i] = temp_img[:, 0:i] -            else: -                _sino[:, i:] = temp_img[:, i:] -            listmetric[i - smin] = numpy.sum(numpy.abs(numpy.fft.fftshift( -                #pyfftw.interfaces.numpy_fft.fft2( -                #    numpy.vstack((sino, _sino))) -                numpy.fft.fft2(numpy.vstack((sino, _sino))) -                )) * mask) -        minpos = numpy.argmin(listmetric) -        return centerfliplr + listshift[minpos] / 2.0 -     -    @staticmethod -    def _search_fine(sino, srad, step, init_cen, ratio, drop): -        """ -        Fine search for finding the rotation center. -        """ -        Nrow, Ncol = sino.shape -        centerfliplr = (Ncol + 1.0) / 2.0 - 1.0 -        # Use to shift the sinogram 2 to the raw CoR. -        shiftsino = numpy.int16(2 * (init_cen - centerfliplr)) -        _copy_sino = numpy.roll(numpy.fliplr(sino[1:]), shiftsino, axis=1) -        if init_cen <= centerfliplr: -            lefttake = numpy.int16(numpy.ceil(srad + 1)) -            righttake = numpy.int16(numpy.floor(2 * init_cen - srad - 1)) -        else: -            lefttake = numpy.int16(numpy.ceil( -                init_cen - (Ncol - 1 - init_cen) + srad + 1)) -            righttake = numpy.int16(numpy.floor(Ncol - 1 - srad - 1)) -        Ncol1 = righttake - lefttake + 1 -        mask = CenterOfRotationFinder._create_mask(2 * Nrow - 1, Ncol1,  -                                                   0.5 * ratio * Ncol, drop) -        numshift = numpy.int16((2 * srad) / step) + 1 -        listshift = numpy.linspace(-srad, srad, num=numshift) -        listmetric = numpy.zeros(len(listshift), dtype='float32') -        factor1 = numpy.mean(sino[-1, lefttake:righttake]) -        num1 = 0 -        for i in listshift: -            _sino = ndimage.interpolation.shift( -                _copy_sino, (0, i), prefilter=False) -            factor2 = numpy.mean(_sino[0,lefttake:righttake]) -            _sino = _sino * factor1 / factor2 -            sinojoin = numpy.vstack((sino, _sino)) -            listmetric[num1] = numpy.sum(numpy.abs(numpy.fft.fftshift( -                #pyfftw.interfaces.numpy_fft.fft2( -                #    sinojoin[:, lefttake:righttake + 1]) -                numpy.fft.fft2(sinojoin[:, lefttake:righttake + 1]) -                )) * mask) -            num1 = num1 + 1 -        minpos = numpy.argmin(listmetric) -        return init_cen + listshift[minpos] / 2.0 -     -    @staticmethod -    def _create_mask(nrow, ncol, radius, drop): -        du = 1.0 / ncol -        dv = (nrow - 1.0) / (nrow * 2.0 * numpy.pi) -        centerrow = numpy.ceil(nrow / 2) - 1 -        centercol = numpy.ceil(ncol 
/ 2) - 1 -        # added by Edoardo Pasca -        centerrow = int(centerrow) -        centercol = int(centercol) -        mask = numpy.zeros((nrow, ncol), dtype='float32') -        for i in range(nrow): -            num1 = numpy.round(((i - centerrow) * dv / radius) / du) -            (p1, p2) = numpy.int16(numpy.clip(numpy.sort( -                (-num1 + centercol, num1 + centercol)), 0, ncol - 1)) -            mask[i, p1:p2 + 1] = numpy.ones(p2 - p1 + 1, dtype='float32') -        if drop < centerrow: -            mask[centerrow - drop:centerrow + drop + 1, -                 :] = numpy.zeros((2 * drop + 1, ncol), dtype='float32') -        mask[:,centercol-1:centercol+2] = numpy.zeros((nrow, 3), dtype='float32') -        return mask -     -    def process(self, out=None): -         -        projections = self.get_input() -         -        cor = CenterOfRotationFinder.find_center_vo(projections.as_array()) -         -        return cor - -             -class AcquisitionDataPadder(DataProcessor): -    '''Normalization based on flat and dark -     -    This processor read in a AcquisitionData and normalises it based on  -    the instrument reading with and without incident photons or neutrons. -     -    Input: AcquisitionData -    Parameter: 2D projection with flat field (or stack) -               2D projection with dark field (or stack) -    Output: AcquisitionDataSetn -    ''' -     -    def __init__(self,  -                 center_of_rotation   = None, -                 acquisition_geometry = None, -                 pad_value            = 1e-5): -        kwargs = { -                  'acquisition_geometry' : acquisition_geometry,  -                  'center_of_rotation'   : center_of_rotation, -                  'pad_value'            : pad_value -                  } -         -        super(AcquisitionDataPadder, self).__init__(**kwargs) -         -    def check_input(self, dataset): -        if self.acquisition_geometry is None: -            self.acquisition_geometry = dataset.geometry -        if dataset.number_of_dimensions == 3: -               return True -        else: -            raise ValueError("Expected input dimensions is 2 or 3, got {0}"\ -                             .format(dataset.number_of_dimensions)) - -    def process(self, out=None): -        projections = self.get_input() -        w = projections.get_dimension_size('horizontal') -        delta = w - 2 * self.center_of_rotation -                -        padded_width = int ( -                numpy.ceil(abs(delta)) + w -                ) -        delta_pix = padded_width - w -         -        voxel_per_pixel = 1 -        geom = pbalg.pb_setup_geometry_from_acquisition(projections.as_array(), -                                            self.acquisition_geometry.angles, -                                            self.center_of_rotation, -                                            voxel_per_pixel ) -         -        padded_geometry = self.acquisition_geometry.clone() -         -        padded_geometry.pixel_num_h = geom['n_h'] -        padded_geometry.pixel_num_v = geom['n_v'] -         -        delta_pix_h = padded_geometry.pixel_num_h - self.acquisition_geometry.pixel_num_h -        delta_pix_v = padded_geometry.pixel_num_v - self.acquisition_geometry.pixel_num_v -         -        if delta_pix_h == 0: -            delta_pix_h = delta_pix -            padded_geometry.pixel_num_h = padded_width -        #initialize a new AcquisitionData with values close to 0 -        out = 
AcquisitionData(geometry=padded_geometry) -        out = out + self.pad_value -         -         -        #pad in the horizontal-vertical plane -> slice on angles -        if delta > 0: -            #pad left of middle -            command = "out.array[" -            for i in range(out.number_of_dimensions): -                if out.dimension_labels[i] == 'horizontal': -                    value = '{0}:{1}'.format(delta_pix_h, delta_pix_h+w) -                    command = command + str(value) -                else: -                    if out.dimension_labels[i] == 'vertical' : -                        value = '{0}:'.format(delta_pix_v) -                        command = command + str(value) -                    else: -                        command = command + ":" -                if i < out.number_of_dimensions -1: -                    command = command + ',' -            command = command + '] = projections.array' -            #print (command)     -        else: -            #pad right of middle -            command = "out.array[" -            for i in range(out.number_of_dimensions): -                if out.dimension_labels[i] == 'horizontal': -                    value = '{0}:{1}'.format(0, w) -                    command = command + str(value) -                else: -                    if out.dimension_labels[i] == 'vertical' : -                        value = '{0}:'.format(delta_pix_v) -                        command = command + str(value) -                    else: -                        command = command + ":" -                if i < out.number_of_dimensions -1: -                    command = command + ',' -            command = command + '] = projections.array' -            #print (command)     -            #cleaned = eval(command) -        exec(command) -        return out
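find_center_vo above scores candidate centres by stacking the sinogram with a shifted, left-right-flipped copy of itself and measuring how much energy leaks through a double-wedge Fourier mask (Vo's method). As a much simpler point of comparison only (not Vo's method and not part of this module), the centre of a 0-180 degree parallel-beam scan can be roughly estimated by cross-correlating the first projection with the mirrored last projection; a sketch on a plain NumPy sinogram with a made-up synthetic check:

import numpy as np

def center_by_correlation(sino):
    # rough CoR estimate: correlate the 0-deg projection with the mirrored 180-deg projection
    p0 = sino[0]
    p180_flipped = sino[-1][::-1]
    corr = np.correlate(p0, p180_flipped, mode='full')
    shift = np.argmax(corr) - (len(p0) - 1)        # lag giving the best overlap
    return (len(p0) - 1) / 2.0 + shift / 2.0       # centre in detector pixel coordinates

# synthetic sinogram of a single off-centre point over 0..180 degrees
angles = np.linspace(0, np.pi, 181)
ncol, true_center, r = 64, 36.0, 10.0
sino = np.zeros((len(angles), ncol))
cols = np.round(true_center + r * np.cos(angles)).astype(int)
sino[np.arange(len(angles)), cols] = 1.0
print(center_by_correlation(sino), true_center)    # both values should be 36.0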
\ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/processors/Normalizer.py b/Wrappers/Python/build/lib/ccpi/processors/Normalizer.py deleted file mode 100644 index da65e5c..0000000 --- a/Wrappers/Python/build/lib/ccpi/processors/Normalizer.py +++ /dev/null @@ -1,124 +0,0 @@ -# -*- coding: utf-8 -*- -#   This work is part of the Core Imaging Library developed by -#   Visual Analytics and Imaging System Group of the Science Technology -#   Facilities Council, STFC - -#   Copyright 2018 Edoardo Pasca - -#   Licensed under the Apache License, Version 2.0 (the "License"); -#   you may not use this file except in compliance with the License. -#   You may obtain a copy of the License at - -#       http://www.apache.org/licenses/LICENSE-2.0 - -#   Unless required by applicable law or agreed to in writing, software -#   distributed under the License is distributed on an "AS IS" BASIS, -#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#   See the License for the specific language governing permissions and -#   limitations under the License - -from ccpi.framework import DataProcessor, DataContainer, AcquisitionData,\ - AcquisitionGeometry, ImageGeometry, ImageData -import numpy - -class Normalizer(DataProcessor): -    '''Normalization based on flat and dark -     -    This processor read in a AcquisitionData and normalises it based on  -    the instrument reading with and without incident photons or neutrons. -     -    Input: AcquisitionData -    Parameter: 2D projection with flat field (or stack) -               2D projection with dark field (or stack) -    Output: AcquisitionDataSetn -    ''' -     -    def __init__(self, flat_field = None, dark_field = None, tolerance = 1e-5): -        kwargs = { -                  'flat_field'  : flat_field,  -                  'dark_field'  : dark_field, -                  # very small number. 
Used when there is a division by zero -                  'tolerance'   : tolerance -                  } -         -        #DataProcessor.__init__(self, **kwargs) -        super(Normalizer, self).__init__(**kwargs) -        if not flat_field is None: -            self.set_flat_field(flat_field) -        if not dark_field is None: -            self.set_dark_field(dark_field) -     -    def check_input(self, dataset): -        if dataset.number_of_dimensions == 3 or\ -           dataset.number_of_dimensions == 2: -               return True -        else: -            raise ValueError("Expected input dimensions is 2 or 3, got {0}"\ -                             .format(dataset.number_of_dimensions)) -     -    def set_dark_field(self, df): -        if type(df) is numpy.ndarray: -            if len(numpy.shape(df)) == 3: -                raise ValueError('Dark Field should be 2D') -            elif len(numpy.shape(df)) == 2: -                self.dark_field = df -        elif issubclass(type(df), DataContainer): -            self.dark_field = self.set_dark_field(df.as_array()) -     -    def set_flat_field(self, df): -        if type(df) is numpy.ndarray: -            if len(numpy.shape(df)) == 3: -                raise ValueError('Flat Field should be 2D') -            elif len(numpy.shape(df)) == 2: -                self.flat_field = df -        elif issubclass(type(df), DataContainer): -            self.flat_field = self.set_flat_field(df.as_array()) -     -    @staticmethod -    def normalize_projection(projection, flat, dark, tolerance): -        a = (projection - dark) -        b = (flat-dark) -        with numpy.errstate(divide='ignore', invalid='ignore'): -            c = numpy.true_divide( a, b ) -            c[ ~ numpy.isfinite( c )] = tolerance # set to not zero if 0/0  -        return c -     -    @staticmethod -    def estimate_normalised_error(projection, flat, dark, delta_flat, delta_dark): -        '''returns the estimated relative error of the normalised projection -         -        n = (projection - dark) / (flat - dark) -        Dn/n = (flat-dark + projection-dark)/((flat-dark)*(projection-dark))*(Df/f + Dd/d) -        '''  -        a = (projection - dark) -        b = (flat-dark) -        df = delta_flat / flat -        dd = delta_dark / dark -        rel_norm_error = (b + a) / (b * a) * (df + dd) -        return rel_norm_error -         -    def process(self, out=None): -         -        projections = self.get_input() -        dark = self.dark_field -        flat = self.flat_field -         -        if projections.number_of_dimensions == 3: -            if not (projections.shape[1:] == dark.shape and \ -               projections.shape[1:] == flat.shape): -                raise ValueError('Flats/Dark and projections size do not match.') -                 -                    -            a = numpy.asarray( -                    [ Normalizer.normalize_projection( -                            projection, flat, dark, self.tolerance) \ -                     for projection in projections.as_array() ] -                    ) -        elif projections.number_of_dimensions == 2: -            a = Normalizer.normalize_projection(projections.as_array(),  -                                                flat, dark, self.tolerance) -        y = type(projections)( a , True,  -                    dimension_labels=projections.dimension_labels, -                    geometry=projections.geometry) -        return y -    
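Normalizer above applies the flat/dark-field correction (projection - dark) / (flat - dark) and replaces non-finite pixels produced by division by zero with the small tolerance value; estimate_normalised_error propagates the relative errors of flat and dark into the normalised projection. A minimal NumPy sketch of the core correction step (normalise is an illustrative name, and the sample arrays are made up):

import numpy as np

def normalise(projection, flat, dark, tolerance=1e-5):
    # flat/dark-field correction, guarding against division by zero
    with np.errstate(divide='ignore', invalid='ignore'):
        c = np.true_divide(projection - dark, flat - dark)
    c[~np.isfinite(c)] = tolerance   # 0/0 and x/0 pixels are set to a small constant
    return c

proj = np.array([[10.0, 20.0], [30.0, 5.0]])
flat = np.array([[100.0, 100.0], [100.0, 5.0]])   # last pixel: flat == dark -> guarded
dark = np.full((2, 2), 5.0)
print(normalise(proj, flat, dark))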
\ No newline at end of file diff --git a/Wrappers/Python/build/lib/ccpi/processors/__init__.py b/Wrappers/Python/build/lib/ccpi/processors/__init__.py deleted file mode 100644 index f8d050e..0000000 --- a/Wrappers/Python/build/lib/ccpi/processors/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# -*- coding: utf-8 -*-
 -"""
 -Created on Tue Apr 30 13:51:09 2019
 -
 -@author: ofn77899
 -"""
 -
 -from .CenterOfRotationFinder import CenterOfRotationFinder
 -from .Normalizer import Normalizer
 diff --git a/Wrappers/Python/ccpi/optimisation/__pycache__/__init__.cpython-36.pyc b/Wrappers/Python/ccpi/optimisation/__pycache__/__init__.cpython-36.pycBinary files differ deleted file mode 100644 index c7dd009..0000000 --- a/Wrappers/Python/ccpi/optimisation/__pycache__/__init__.cpython-36.pyc +++ /dev/null diff --git a/Wrappers/Python/ccpi/optimisation/operators/__pycache__/BlockOperator.cpython-36.pyc b/Wrappers/Python/ccpi/optimisation/operators/__pycache__/BlockOperator.cpython-36.pycBinary files differ deleted file mode 100644 index b4da27c..0000000 --- a/Wrappers/Python/ccpi/optimisation/operators/__pycache__/BlockOperator.cpython-36.pyc +++ /dev/null diff --git a/Wrappers/Python/ccpi/optimisation/operators/__pycache__/BlockScaledOperator.cpython-36.pyc b/Wrappers/Python/ccpi/optimisation/operators/__pycache__/BlockScaledOperator.cpython-36.pycBinary files differ deleted file mode 100644 index 32b7f28..0000000 --- a/Wrappers/Python/ccpi/optimisation/operators/__pycache__/BlockScaledOperator.cpython-36.pyc +++ /dev/null diff --git a/Wrappers/Python/ccpi/optimisation/operators/__pycache__/FiniteDifferenceOperator.cpython-36.pyc b/Wrappers/Python/ccpi/optimisation/operators/__pycache__/FiniteDifferenceOperator.cpython-36.pycBinary files differ deleted file mode 100644 index 8484ac0..0000000 --- a/Wrappers/Python/ccpi/optimisation/operators/__pycache__/FiniteDifferenceOperator.cpython-36.pyc +++ /dev/null diff --git a/Wrappers/Python/ccpi/optimisation/operators/__pycache__/GradientOperator.cpython-36.pyc b/Wrappers/Python/ccpi/optimisation/operators/__pycache__/GradientOperator.cpython-36.pycBinary files differ deleted file mode 100644 index 727b35a..0000000 --- a/Wrappers/Python/ccpi/optimisation/operators/__pycache__/GradientOperator.cpython-36.pyc +++ /dev/null diff --git a/Wrappers/Python/ccpi/optimisation/operators/__pycache__/IdentityOperator.cpython-36.pyc b/Wrappers/Python/ccpi/optimisation/operators/__pycache__/IdentityOperator.cpython-36.pycBinary files differ deleted file mode 100644 index 95297d5..0000000 --- a/Wrappers/Python/ccpi/optimisation/operators/__pycache__/IdentityOperator.cpython-36.pyc +++ /dev/null diff --git a/Wrappers/Python/ccpi/optimisation/operators/__pycache__/LinearOperator.cpython-36.pyc b/Wrappers/Python/ccpi/optimisation/operators/__pycache__/LinearOperator.cpython-36.pycBinary files differ deleted file mode 100644 index 83bc87b..0000000 --- a/Wrappers/Python/ccpi/optimisation/operators/__pycache__/LinearOperator.cpython-36.pyc +++ /dev/null diff --git a/Wrappers/Python/ccpi/optimisation/operators/__pycache__/LinearOperatorMatrix.cpython-36.pyc b/Wrappers/Python/ccpi/optimisation/operators/__pycache__/LinearOperatorMatrix.cpython-36.pycBinary files differ deleted file mode 100644 index 16c7bdc..0000000 --- a/Wrappers/Python/ccpi/optimisation/operators/__pycache__/LinearOperatorMatrix.cpython-36.pyc +++ /dev/null diff --git a/Wrappers/Python/ccpi/optimisation/operators/__pycache__/Operator.cpython-36.pyc b/Wrappers/Python/ccpi/optimisation/operators/__pycache__/Operator.cpython-36.pycBinary files differ deleted file mode 100644 index 3d1ae4e..0000000 --- a/Wrappers/Python/ccpi/optimisation/operators/__pycache__/Operator.cpython-36.pyc +++ /dev/null diff --git a/Wrappers/Python/ccpi/optimisation/operators/__pycache__/ScaledOperator.cpython-36.pyc b/Wrappers/Python/ccpi/optimisation/operators/__pycache__/ScaledOperator.cpython-36.pycBinary files differ deleted file mode 100644 index 980891e..0000000 --- 
a/Wrappers/Python/ccpi/optimisation/operators/__pycache__/ScaledOperator.cpython-36.pyc +++ /dev/null diff --git a/Wrappers/Python/ccpi/optimisation/operators/__pycache__/ShrinkageOperator.cpython-36.pyc b/Wrappers/Python/ccpi/optimisation/operators/__pycache__/ShrinkageOperator.cpython-36.pycBinary files differ deleted file mode 100644 index 3087afc..0000000 --- a/Wrappers/Python/ccpi/optimisation/operators/__pycache__/ShrinkageOperator.cpython-36.pyc +++ /dev/null diff --git a/Wrappers/Python/ccpi/optimisation/operators/__pycache__/SparseFiniteDiff.cpython-36.pyc b/Wrappers/Python/ccpi/optimisation/operators/__pycache__/SparseFiniteDiff.cpython-36.pycBinary files differ deleted file mode 100644 index 93f4f2d..0000000 --- a/Wrappers/Python/ccpi/optimisation/operators/__pycache__/SparseFiniteDiff.cpython-36.pyc +++ /dev/null diff --git a/Wrappers/Python/ccpi/optimisation/operators/__pycache__/SymmetrizedGradientOperator.cpython-36.pyc b/Wrappers/Python/ccpi/optimisation/operators/__pycache__/SymmetrizedGradientOperator.cpython-36.pycBinary files differ deleted file mode 100644 index ff62cae..0000000 --- a/Wrappers/Python/ccpi/optimisation/operators/__pycache__/SymmetrizedGradientOperator.cpython-36.pyc +++ /dev/null diff --git a/Wrappers/Python/ccpi/optimisation/operators/__pycache__/ZeroOperator.cpython-36.pyc b/Wrappers/Python/ccpi/optimisation/operators/__pycache__/ZeroOperator.cpython-36.pycBinary files differ deleted file mode 100644 index 21a2a3a..0000000 --- a/Wrappers/Python/ccpi/optimisation/operators/__pycache__/ZeroOperator.cpython-36.pyc +++ /dev/null diff --git a/Wrappers/Python/ccpi/optimisation/operators/__pycache__/__init__.cpython-36.pyc b/Wrappers/Python/ccpi/optimisation/operators/__pycache__/__init__.cpython-36.pycBinary files differ deleted file mode 100644 index c5db258..0000000 --- a/Wrappers/Python/ccpi/optimisation/operators/__pycache__/__init__.cpython-36.pyc +++ /dev/null | 
